Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mips_5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS updates from Thomas Bogendoerfer:

- removed get_fs/set_fs

- removed broken/unmaintained MIPS KVM trap and emulate support

- added support for Loongson-2K1000

- fixes and cleanups

* tag 'mips_5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux: (107 commits)
MIPS: BCM63XX: Use BUG_ON instead of condition followed by BUG.
MIPS: select ARCH_KEEP_MEMBLOCK unconditionally
mips: Do not include hi and lo in clobber list for R6
MIPS:DTS:Correct the license for Loongson-2K
MIPS:DTS:Fix label name and interrupt number of ohci for Loongson-2K
MIPS: Avoid handcoded DIVU in `__div64_32' altogether
lib/math/test_div64: Correct the spelling of "dividend"
lib/math/test_div64: Fix error message formatting
mips/bootinfo:correct some comments of fw_arg
  MIPS: Avoid DIVU in `__div64_32' if result would be zero
MIPS: Reinstate platform `__div64_32' handler
div64: Correct inline documentation for `do_div'
lib/math: Add a `do_div' test module
MIPS: Makefile: Replace -pg with CC_FLAGS_FTRACE
MIPS: pci-legacy: revert "use generic pci_enable_resources"
MIPS: Loongson64: Add kexec/kdump support
MIPS: pci-legacy: use generic pci_enable_resources
MIPS: pci-legacy: remove busn_resource field
MIPS: pci-legacy: remove redundant info messages
MIPS: pci-legacy: stop using of_pci_range_to_resource
...

+2691 -6031
-1
Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml
··· 47 47 48 48 spi-max-frequency = <3125000>; 49 49 spi-3wire; 50 - spi-cs-high; 51 50 52 51 reset-gpios = <&gpe 2 GPIO_ACTIVE_LOW>; 53 52
+32 -4
Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml
··· 10 10 - Jiaxun Yang <jiaxun.yang@flygoat.com> 11 11 12 12 description: | 13 - This interrupt controller is found in the Loongson-3 family of chips as the primary 14 - package interrupt controller which can route local I/O interrupt to interrupt lines 15 - of cores. 13 + This interrupt controller is found in the Loongson-3 family of chips and 14 + Loongson-2K1000 chip, as the primary package interrupt controller which 15 + can route local I/O interrupt to interrupt lines of cores. 16 16 17 17 allOf: 18 18 - $ref: /schemas/interrupt-controller.yaml# ··· 22 22 oneOf: 23 23 - const: loongson,liointc-1.0 24 24 - const: loongson,liointc-1.0a 25 + - const: loongson,liointc-2.0 25 26 26 27 reg: 27 - maxItems: 1 28 + minItems: 1 29 + maxItems: 3 30 + 31 + reg-names: 32 + items: 33 + - const: main 34 + - const: isr0 35 + - const: isr1 28 36 29 37 interrupt-controller: true 30 38 ··· 76 68 77 69 78 70 unevaluatedProperties: false 71 + 72 + if: 73 + properties: 74 + compatible: 75 + contains: 76 + enum: 77 + - loongson,liointc-2.0 78 + 79 + then: 80 + properties: 81 + reg: 82 + minItems: 3 83 + 84 + required: 85 + - reg-names 86 + 87 + else: 88 + properties: 89 + reg: 90 + maxItems: 1 79 91 80 92 examples: 81 93 - |
+10 -21
arch/mips/Kconfig
··· 4 4 default y 5 5 select ARCH_32BIT_OFF_T if !64BIT 6 6 select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT 7 + select ARCH_HAS_DEBUG_VIRTUAL if !64BIT 7 8 select ARCH_HAS_FORTIFY_SOURCE 8 9 select ARCH_HAS_KCOV 10 + select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA 9 11 select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI) 10 12 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 11 13 select ARCH_HAS_UBSAN_SANITIZE_ALL 12 14 select ARCH_HAS_GCOV_PROFILE_ALL 13 - select ARCH_KEEP_MEMBLOCK if DEBUG_KERNEL 15 + select ARCH_KEEP_MEMBLOCK 14 16 select ARCH_SUPPORTS_UPROBES 15 17 select ARCH_USE_BUILTIN_BSWAP 16 18 select ARCH_USE_CMPXCHG_LOCKREF if 64BIT ··· 28 26 select GENERIC_ATOMIC64 if !64BIT 29 27 select GENERIC_CMOS_UPDATE 30 28 select GENERIC_CPU_AUTOPROBE 29 + select GENERIC_FIND_FIRST_BIT 31 30 select GENERIC_GETTIMEOFDAY 32 31 select GENERIC_IOMAP 33 32 select GENERIC_IRQ_PROBE ··· 94 91 select PERF_USE_VMALLOC 95 92 select PCI_MSI_ARCH_FALLBACKS if PCI_MSI 96 93 select RTC_LIB 97 - select SET_FS 98 94 select SYSCTL_EXCEPTION_TRACE 99 95 select VIRT_TO_BUS 100 96 select ARCH_HAS_ELFCORE_COMPAT ··· 714 712 select ARC_CMDLINE_ONLY 715 713 select BOOT_ELF64 716 714 select DEFAULT_SGI_PARTITION 715 + select FORCE_PCI 717 716 select SYS_HAS_EARLY_PRINTK 718 717 select HAVE_PCI 719 718 select IRQ_MIPS_CPU ··· 777 774 select BOOT_ELF64 778 775 select CEVT_R4K 779 776 select CSRC_R4K 777 + select FORCE_PCI 780 778 select SYNC_R4K if SMP 781 779 select ZONE_DMA32 782 780 select HAVE_PCI ··· 1002 998 select NR_CPUS_DEFAULT_64 1003 999 select MIPS_NR_CPU_NR_MAP_1024 1004 1000 select BUILTIN_DTB 1001 + select MTD 1005 1002 select MTD_COMPLEX_MAPPINGS 1006 1003 select SWIOTLB 1007 1004 select SYS_SUPPORTS_RELOCATABLE ··· 2123 2118 config CPU_MIPS64 2124 2119 bool 2125 2120 default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R5 || \ 2126 - CPU_MIPS64_R6 2121 + CPU_MIPS64_R6 || CPU_LOONGSON64 || CPU_CAVIUM_OCTEON 2127 2122 2128 2123 # 2129 2124 # 
These indicate the revision of the architecture ··· 2190 2185 depends on !(32BIT && (ARCH_PHYS_ADDR_T_64BIT || EVA)) 2191 2186 config MIPS_PGD_C0_CONTEXT 2192 2187 bool 2193 - default y if 64BIT && (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP 2188 + depends on 64BIT 2189 + default y if (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP 2194 2190 2195 2191 # 2196 2192 # Set to y for ptrace access to watch registers. ··· 2224 2218 Select this option if you want to build a 64-bit kernel. 2225 2219 2226 2220 endchoice 2227 - 2228 - config KVM_GUEST 2229 - bool "KVM Guest Kernel" 2230 - depends on CPU_MIPS32_R2 2231 - depends on !64BIT && BROKEN_ON_SMP 2232 - help 2233 - Select this option if building a guest kernel for KVM (Trap & Emulate) 2234 - mode. 2235 - 2236 - config KVM_GUEST_TIMER_FREQ 2237 - int "Count/Compare Timer Frequency (MHz)" 2238 - depends on KVM_GUEST 2239 - default 100 2240 - help 2241 - Set this to non-zero if building a guest kernel for KVM to skip RTC 2242 - emulation when determining guest CPU Frequency. Instead, the guest's 2243 - timer frequency is specified directly. 2244 2221 2245 2222 config MIPS_VA_BITS_48 2246 2223 bool "48 bits virtual memory"
+1
arch/mips/Kconfig.debug
··· 77 77 config SB1XXX_CORELIS 78 78 bool "Corelis Debugger" 79 79 depends on SIBYTE_SB1xxx_SOC 80 + select DEBUG_KERNEL if !COMPILE_TEST 80 81 select DEBUG_INFO if !COMPILE_TEST 81 82 help 82 83 Select compile flags that produce code that can be processed by the
+1 -2
arch/mips/alchemy/common/clock.c
··· 111 111 /* access locks to SYS_FREQCTRL0/1 and SYS_CLKSRC registers */ 112 112 static spinlock_t alchemy_clk_fg0_lock; 113 113 static spinlock_t alchemy_clk_fg1_lock; 114 - static spinlock_t alchemy_clk_csrc_lock; 114 + static DEFINE_SPINLOCK(alchemy_clk_csrc_lock); 115 115 116 116 /* CPU Core clock *****************************************************/ 117 117 ··· 996 996 if (!a) 997 997 return -ENOMEM; 998 998 999 - spin_lock_init(&alchemy_clk_csrc_lock); 1000 999 ret = 0; 1001 1000 1002 1001 for (i = 0; i < 6; i++) {
+1 -1
arch/mips/bcm63xx/clk.c
··· 76 76 }; 77 77 78 78 /* 79 - * Ethernet MAC clocks: only revelant on 6358, silently enable misc 79 + * Ethernet MAC clocks: only relevant on 6358, silently enable misc 80 80 * clocks 81 81 */ 82 82 static void enetx_set(struct clk *clk, int enable)
+3 -6
arch/mips/bcm63xx/gpio.c
··· 43 43 u32 *v; 44 44 unsigned long flags; 45 45 46 - if (gpio >= chip->ngpio) 47 - BUG(); 46 + BUG_ON(gpio >= chip->ngpio); 48 47 49 48 if (gpio < 32) { 50 49 reg = gpio_out_low_reg; ··· 69 70 u32 reg; 70 71 u32 mask; 71 72 72 - if (gpio >= chip->ngpio) 73 - BUG(); 73 + BUG_ON(gpio >= chip->ngpio); 74 74 75 75 if (gpio < 32) { 76 76 reg = gpio_out_low_reg; ··· 90 92 u32 tmp; 91 93 unsigned long flags; 92 94 93 - if (gpio >= chip->ngpio) 94 - BUG(); 95 + BUG_ON(gpio >= chip->ngpio); 95 96 96 97 if (gpio < 32) { 97 98 reg = GPIO_CTL_LO_REG;
+1 -1
arch/mips/bmips/dma.c
··· 10 10 11 11 #include <linux/device.h> 12 12 #include <linux/dma-direction.h> 13 - #include <linux/dma-mapping.h> 13 + #include <linux/dma-direct.h> 14 14 #include <linux/init.h> 15 15 #include <linux/io.h> 16 16 #include <linux/of.h>
+1 -1
arch/mips/boot/compressed/Makefile
··· 18 18 BOOT_HEAP_SIZE := 0x400000 19 19 20 20 # Disable Function Tracer 21 - KBUILD_CFLAGS := $(filter-out -pg, $(KBUILD_CFLAGS)) 21 + KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE), $(KBUILD_CFLAGS)) 22 22 23 23 KBUILD_CFLAGS := $(filter-out -fstack-protector, $(KBUILD_CFLAGS)) 24 24
+1 -1
arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /dts-v1/; 3 3 4 - /include/ "bcm3368.dtsi" 4 + #include "bcm3368.dtsi" 5 5 6 6 / { 7 7 compatible = "netgear,cvg834g", "brcm,bcm3368";
+4 -1
arch/mips/boot/dts/brcm/bcm3368.dtsi
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include "dt-bindings/clock/bcm3368-clock.h" 4 + 2 5 / { 3 6 #address-cells = <1>; 4 7 #size-cells = <1>; ··· 62 59 63 60 periph_cntl: syscon@fff8c008 { 64 61 compatible = "syscon"; 65 - reg = <0xfff8c000 0x4>; 62 + reg = <0xfff8c008 0x4>; 66 63 native-endian; 67 64 }; 68 65
+1 -1
arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /dts-v1/; 3 3 4 - /include/ "bcm63268.dtsi" 4 + #include "bcm63268.dtsi" 5 5 6 6 / { 7 7 compatible = "comtrend,vr-3032u", "brcm,bcm63268";
+125 -18
arch/mips/boot/dts/brcm/bcm63268.dtsi
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include "dt-bindings/clock/bcm63268-clock.h" 4 + #include "dt-bindings/reset/bcm63268-reset.h" 5 + #include "dt-bindings/soc/bcm63268-pm.h" 6 + 2 7 / { 3 8 #address-cells = <1>; 4 9 #size-cells = <1>; ··· 29 24 }; 30 25 31 26 clocks { 32 - periph_clk: periph-clk { 27 + periph_osc: periph-osc { 33 28 compatible = "fixed-clock"; 34 29 #clock-cells = <0>; 35 30 clock-frequency = <50000000>; 31 + clock-output-names = "periph"; 32 + }; 33 + 34 + hsspi_osc: hsspi-osc { 35 + compatible = "fixed-clock"; 36 + 37 + #clock-cells = <0>; 38 + 39 + clock-frequency = <400000000>; 40 + clock-output-names = "hsspi_osc"; 36 41 }; 37 42 }; 38 43 39 44 aliases { 45 + nflash = &nflash; 40 46 serial0 = &uart0; 41 47 serial1 = &uart1; 48 + spi0 = &lsspi; 49 + spi1 = &hsspi; 42 50 }; 43 51 44 52 cpu_intc: interrupt-controller { ··· 69 51 compatible = "simple-bus"; 70 52 ranges; 71 53 72 - clkctl: clock-controller@10000004 { 54 + periph_clk: clock-controller@10000004 { 73 55 compatible = "brcm,bcm63268-clocks"; 74 56 reg = <0x10000004 0x4>; 75 57 #clock-cells = <1>; 76 58 }; 77 59 78 - periph_cntl: syscon@10000008 { 60 + pll_cntl: syscon@10000008 { 79 61 compatible = "syscon"; 80 - reg = <0x10000000 0xc>; 62 + reg = <0x10000008 0x4>; 81 63 native-endian; 82 - }; 83 64 84 - reboot: syscon-reboot@10000008 { 85 - compatible = "syscon-reboot"; 86 - regmap = <&periph_cntl>; 87 - offset = <0x0>; 88 - mask = <0x1>; 65 + reboot { 66 + compatible = "syscon-reboot"; 67 + offset = <0x0>; 68 + mask = <0x1>; 69 + }; 89 70 }; 90 71 91 72 periph_rst: reset-controller@10000010 { ··· 105 88 interrupts = <2>, <3>; 106 89 }; 107 90 91 + wdt: watchdog@1000009c { 92 + compatible = "brcm,bcm7038-wdt"; 93 + reg = <0x1000009c 0xc>; 94 + 95 + clocks = <&periph_osc>; 96 + clock-names = "refclk"; 97 + 98 + timeout-sec = <30>; 99 + }; 100 + 108 101 uart0: serial@10000180 { 109 102 compatible = "brcm,bcm6345-uart"; 110 103 reg = <0x10000180 0x18>; ··· 122 95 
interrupt-parent = <&periph_intc>; 123 96 interrupts = <5>; 124 97 125 - clocks = <&periph_clk>; 98 + clocks = <&periph_osc>; 126 99 clock-names = "refclk"; 100 + 101 + status = "disabled"; 102 + }; 103 + 104 + nflash: nand@10000200 { 105 + #address-cells = <1>; 106 + #size-cells = <0>; 107 + compatible = "brcm,nand-bcm6368", 108 + "brcm,brcmnand-v4.0", 109 + "brcm,brcmnand"; 110 + reg = <0x10000200 0x180>, 111 + <0x10000600 0x200>, 112 + <0x100000b0 0x10>; 113 + reg-names = "nand", 114 + "nand-cache", 115 + "nand-int-base"; 116 + 117 + interrupt-parent = <&periph_intc>; 118 + interrupts = <50>; 119 + 120 + clocks = <&periph_clk BCM63268_CLK_NAND>; 121 + clock-names = "nand"; 127 122 128 123 status = "disabled"; 129 124 }; ··· 157 108 interrupt-parent = <&periph_intc>; 158 109 interrupts = <34>; 159 110 160 - clocks = <&periph_clk>; 111 + clocks = <&periph_osc>; 161 112 clock-names = "refclk"; 162 113 163 114 status = "disabled"; 115 + }; 116 + 117 + lsspi: spi@10000800 { 118 + #address-cells = <1>; 119 + #size-cells = <0>; 120 + compatible = "brcm,bcm6358-spi"; 121 + reg = <0x10000800 0x70c>; 122 + 123 + interrupt-parent = <&periph_intc>; 124 + interrupts = <80>; 125 + 126 + clocks = <&periph_clk BCM63268_CLK_SPI>; 127 + clock-names = "spi"; 128 + 129 + resets = <&periph_rst BCM63268_RST_SPI>; 130 + 131 + status = "disabled"; 132 + }; 133 + 134 + hsspi: spi@10001000 { 135 + #address-cells = <1>; 136 + #size-cells = <0>; 137 + compatible = "brcm,bcm6328-hsspi"; 138 + reg = <0x10001000 0x600>; 139 + 140 + interrupt-parent = <&periph_intc>; 141 + interrupts = <6>; 142 + 143 + clocks = <&periph_clk BCM63268_CLK_HSSPI>, 144 + <&hsspi_osc>; 145 + clock-names = "hsspi", 146 + "pll"; 147 + 148 + resets = <&periph_rst BCM63268_RST_SPI>; 149 + 150 + status = "disabled"; 151 + }; 152 + 153 + periph_pwr: power-controller@1000184c { 154 + compatible = "brcm,bcm6328-power-controller"; 155 + reg = <0x1000184c 0x4>; 156 + #power-domain-cells = <1>; 164 157 }; 165 158 166 159 
leds0: led-controller@10001900 { ··· 214 123 status = "disabled"; 215 124 }; 216 125 217 - periph_pwr: power-controller@1000184c { 218 - compatible = "brcm,bcm6328-power-controller"; 219 - reg = <0x1000184c 0x4>; 220 - #power-domain-cells = <1>; 221 - }; 222 - 223 126 ehci: usb@10002500 { 224 127 compatible = "brcm,bcm63268-ehci", "generic-ehci"; 225 128 reg = <0x10002500 0x100>; ··· 221 136 222 137 interrupt-parent = <&periph_intc>; 223 138 interrupts = <10>; 139 + 140 + phys = <&usbh 0>; 141 + phy-names = "usb"; 224 142 225 143 status = "disabled"; 226 144 }; ··· 236 148 237 149 interrupt-parent = <&periph_intc>; 238 150 interrupts = <9>; 151 + 152 + phys = <&usbh 0>; 153 + phy-names = "usb"; 154 + 155 + status = "disabled"; 156 + }; 157 + 158 + usbh: usb-phy@10002700 { 159 + compatible = "brcm,bcm63268-usbh-phy"; 160 + reg = <0x10002700 0x38>; 161 + #phy-cells = <1>; 162 + 163 + clocks = <&periph_clk BCM63268_CLK_USBH>; 164 + clock-names = "usbh"; 165 + 166 + power-domains = <&periph_pwr BCM63268_POWER_DOMAIN_USBH>; 167 + 168 + resets = <&periph_rst BCM63268_RST_USBH>; 169 + reset-names = "usbh"; 239 170 240 171 status = "disabled"; 241 172 };
+110 -14
arch/mips/boot/dts/brcm/bcm6328.dtsi
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include "dt-bindings/clock/bcm6328-clock.h" 4 + #include "dt-bindings/reset/bcm6328-reset.h" 5 + #include "dt-bindings/soc/bcm6328-pm.h" 6 + 2 7 / { 3 8 #address-cells = <1>; 4 9 #size-cells = <1>; ··· 29 24 }; 30 25 31 26 clocks { 32 - periph_clk: periph-clk { 27 + periph_osc: periph-osc { 33 28 compatible = "fixed-clock"; 34 29 #clock-cells = <0>; 35 30 clock-frequency = <50000000>; 31 + clock-output-names = "periph"; 32 + }; 33 + 34 + hsspi_osc: hsspi-osc { 35 + compatible = "fixed-clock"; 36 + #clock-cells = <0>; 37 + clock-frequency = <133333333>; 38 + clock-output-names = "hsspi_osc"; 36 39 }; 37 40 }; 38 41 39 42 aliases { 43 + nflash = &nflash; 40 44 serial0 = &uart0; 41 45 serial1 = &uart1; 46 + spi1 = &hsspi; 42 47 }; 43 48 44 49 cpu_intc: interrupt-controller { ··· 66 51 compatible = "simple-bus"; 67 52 ranges; 68 53 69 - clkctl: clock-controller@10000004 { 54 + periph_clk: clock-controller@10000004 { 70 55 compatible = "brcm,bcm6328-clocks"; 71 56 reg = <0x10000004 0x4>; 72 57 #clock-cells = <1>; ··· 90 75 interrupts = <2>, <3>; 91 76 }; 92 77 78 + wdt: watchdog@1000005c { 79 + compatible = "brcm,bcm7038-wdt"; 80 + reg = <0x1000005c 0xc>; 81 + 82 + clocks = <&periph_osc>; 83 + clock-names = "refclk"; 84 + 85 + timeout-sec = <30>; 86 + }; 87 + 88 + soft_reset: syscon@10000068 { 89 + compatible = "syscon"; 90 + reg = <0x10000068 0x4>; 91 + native-endian; 92 + 93 + reboot { 94 + compatible = "syscon-reboot"; 95 + offset = <0x0>; 96 + mask = <0x1>; 97 + }; 98 + }; 99 + 93 100 uart0: serial@10000100 { 94 101 compatible = "brcm,bcm6345-uart"; 95 102 reg = <0x10000100 0x18>; 103 + 96 104 interrupt-parent = <&periph_intc>; 97 105 interrupts = <28>; 98 - clocks = <&periph_clk>; 106 + 107 + clocks = <&periph_osc>; 99 108 clock-names = "refclk"; 109 + 100 110 status = "disabled"; 101 111 }; 102 112 103 113 uart1: serial@10000120 { 104 114 compatible = "brcm,bcm6345-uart"; 105 115 reg = <0x10000120 0x18>; 116 + 
106 117 interrupt-parent = <&periph_intc>; 107 118 interrupts = <39>; 108 - clocks = <&periph_clk>; 119 + 120 + clocks = <&periph_osc>; 109 121 clock-names = "refclk"; 122 + 110 123 status = "disabled"; 111 124 }; 112 125 113 - timer: syscon@10000040 { 114 - compatible = "syscon"; 115 - reg = <0x10000040 0x2c>; 116 - native-endian; 117 - }; 126 + nflash: nand@10000200 { 127 + #address-cells = <1>; 128 + #size-cells = <0>; 129 + compatible = "brcm,nand-bcm6368", 130 + "brcm,brcmnand-v2.2", 131 + "brcm,brcmnand"; 132 + reg = <0x10000200 0x180>, 133 + <0x10000400 0x200>, 134 + <0x10000070 0x10>; 135 + reg-names = "nand", 136 + "nand-cache", 137 + "nand-int-base"; 118 138 119 - reboot: syscon-reboot@10000068 { 120 - compatible = "syscon-reboot"; 121 - regmap = <&timer>; 122 - offset = <0x28>; 123 - mask = <0x1>; 139 + interrupt-parent = <&periph_intc>; 140 + interrupts = <0>; 141 + 142 + status = "disabled"; 124 143 }; 125 144 126 145 leds0: led-controller@10000800 { ··· 162 113 #size-cells = <0>; 163 114 compatible = "brcm,bcm6328-leds"; 164 115 reg = <0x10000800 0x24>; 116 + 117 + status = "disabled"; 118 + }; 119 + 120 + hsspi: spi@10001000 { 121 + #address-cells = <1>; 122 + #size-cells = <0>; 123 + compatible = "brcm,bcm6328-hsspi"; 124 + reg = <0x10001000 0x600>; 125 + 126 + interrupt-parent = <&periph_intc>; 127 + interrupts = <29>; 128 + 129 + clocks = <&periph_clk BCM6328_CLK_HSSPI>, 130 + <&hsspi_osc>; 131 + clock-names = "hsspi", 132 + "pll"; 133 + 134 + resets = <&periph_rst BCM6328_RST_SPI>; 135 + reset-names = "hsspi"; 136 + 165 137 status = "disabled"; 166 138 }; 167 139 ··· 196 126 compatible = "brcm,bcm6328-ehci", "generic-ehci"; 197 127 reg = <0x10002500 0x100>; 198 128 big-endian; 129 + 199 130 interrupt-parent = <&periph_intc>; 200 131 interrupts = <42>; 132 + 133 + phys = <&usbh 0>; 134 + phy-names = "usb"; 135 + 201 136 status = "disabled"; 202 137 }; 203 138 ··· 211 136 reg = <0x10002600 0x100>; 212 137 big-endian; 213 138 no-big-frame-no; 139 + 
214 140 interrupt-parent = <&periph_intc>; 215 141 interrupts = <41>; 142 + 143 + phys = <&usbh 0>; 144 + phy-names = "usb"; 145 + 146 + status = "disabled"; 147 + }; 148 + 149 + usbh: usb-phy@10002700 { 150 + compatible = "brcm,bcm6328-usbh-phy"; 151 + reg = <0x10002700 0x38>; 152 + #phy-cells = <1>; 153 + 154 + clocks = <&periph_clk BCM6328_CLK_USBH>; 155 + clock-names = "usbh"; 156 + 157 + power-domains = <&periph_pwr BCM6328_POWER_DOMAIN_USBH>; 158 + 159 + resets = <&periph_rst BCM6328_RST_USBH>; 160 + reset-names = "usbh"; 161 + 216 162 status = "disabled"; 217 163 }; 218 164 };
+1 -1
arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /dts-v1/; 3 3 4 - /include/ "bcm6358.dtsi" 4 + #include "bcm6358.dtsi" 5 5 6 6 / { 7 7 compatible = "sfr,nb4-ser", "brcm,bcm6358";
+77 -12
arch/mips/boot/dts/brcm/bcm6358.dtsi
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include "dt-bindings/clock/bcm6358-clock.h" 4 + #include "dt-bindings/reset/bcm6358-reset.h" 5 + 2 6 / { 3 7 #address-cells = <1>; 4 8 #size-cells = <1>; ··· 28 24 }; 29 25 30 26 clocks { 31 - periph_clk: periph-clk { 27 + periph_osc: periph-osc { 32 28 compatible = "fixed-clock"; 33 29 #clock-cells = <0>; 34 30 clock-frequency = <50000000>; 31 + clock-output-names = "periph"; 35 32 }; 36 33 }; 37 34 38 35 aliases { 36 + pflash = &pflash; 39 37 serial0 = &uart0; 40 38 serial1 = &uart1; 39 + spi0 = &lsspi; 41 40 }; 42 41 43 42 cpu_intc: interrupt-controller { ··· 58 51 compatible = "simple-bus"; 59 52 ranges; 60 53 61 - clkctl: clock-controller@fffe0004 { 54 + periph_clk: clock-controller@fffe0004 { 62 55 compatible = "brcm,bcm6358-clocks"; 63 56 reg = <0xfffe0004 0x4>; 64 57 #clock-cells = <1>; 65 58 }; 66 59 67 - periph_cntl: syscon@fffe0008 { 60 + pll_cntl: syscon@fffe0008 { 68 61 compatible = "syscon"; 69 - reg = <0xfffe0000 0x4>; 62 + reg = <0xfffe0008 0x4>; 70 63 native-endian; 71 - }; 72 64 73 - reboot: syscon-reboot@fffe0008 { 74 - compatible = "syscon-reboot"; 75 - regmap = <&periph_cntl>; 76 - offset = <0x0>; 77 - mask = <0x1>; 65 + reboot { 66 + compatible = "syscon-reboot"; 67 + offset = <0x0>; 68 + mask = <0x1>; 69 + }; 78 70 }; 79 71 80 72 periph_intc: interrupt-controller@fffe000c { ··· 94 88 #reset-cells = <1>; 95 89 }; 96 90 91 + wdt: watchdog@fffe005c { 92 + compatible = "brcm,bcm7038-wdt"; 93 + reg = <0xfffe005c 0xc>; 94 + 95 + clocks = <&periph_osc>; 96 + clock-names = "refclk"; 97 + 98 + timeout-sec = <30>; 99 + }; 100 + 97 101 leds0: led-controller@fffe00d0 { 98 102 #address-cells = <1>; 99 103 #size-cells = <0>; ··· 120 104 interrupt-parent = <&periph_intc>; 121 105 interrupts = <2>; 122 106 123 - clocks = <&periph_clk>; 107 + clocks = <&periph_osc>; 124 108 clock-names = "refclk"; 125 109 126 110 status = "disabled"; ··· 133 117 interrupt-parent = <&periph_intc>; 134 118 interrupts = <3>; 
135 119 136 - clocks = <&periph_clk>; 120 + clocks = <&periph_osc>; 137 121 clock-names = "refclk"; 122 + 123 + status = "disabled"; 124 + }; 125 + 126 + lsspi: spi@fffe0800 { 127 + #address-cells = <1>; 128 + #size-cells = <0>; 129 + compatible = "brcm,bcm6358-spi"; 130 + reg = <0xfffe0800 0x70c>; 131 + 132 + interrupt-parent = <&periph_intc>; 133 + interrupts = <1>; 134 + 135 + clocks = <&periph_clk BCM6358_CLK_SPI>; 136 + clock-names = "spi"; 137 + 138 + resets = <&periph_rst BCM6358_RST_SPI>; 139 + reset-names = "spi"; 138 140 139 141 status = "disabled"; 140 142 }; ··· 161 127 compatible = "brcm,bcm6358-ehci", "generic-ehci"; 162 128 reg = <0xfffe1300 0x100>; 163 129 big-endian; 130 + 164 131 interrupt-parent = <&periph_intc>; 165 132 interrupts = <10>; 133 + 134 + phys = <&usbh 0>; 135 + phy-names = "usb"; 136 + 166 137 status = "disabled"; 167 138 }; 168 139 ··· 176 137 reg = <0xfffe1400 0x100>; 177 138 big-endian; 178 139 no-big-frame-no; 140 + 179 141 interrupt-parent = <&periph_intc>; 180 142 interrupts = <5>; 143 + 144 + phys = <&usbh 0>; 145 + phy-names = "usb"; 146 + 181 147 status = "disabled"; 182 148 }; 149 + 150 + usbh: usb-phy@fffe1500 { 151 + compatible = "brcm,bcm6358-usbh-phy"; 152 + reg = <0xfffe1500 0x38>; 153 + #phy-cells = <1>; 154 + 155 + resets = <&periph_rst BCM6358_RST_USBH>; 156 + reset-names = "usbh"; 157 + 158 + status = "disabled"; 159 + }; 160 + }; 161 + 162 + pflash: nor@1e000000 { 163 + #address-cells = <1>; 164 + #size-cells = <1>; 165 + compatible = "cfi-flash"; 166 + reg = <0x1e000000 0x2000000>; 167 + bank-width = <2>; 168 + 169 + status = "disabled"; 183 170 }; 184 171 };
+1 -1
arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /dts-v1/; 3 3 4 - /include/ "bcm6362.dtsi" 4 + #include "bcm6362.dtsi" 5 5 6 6 / { 7 7 compatible = "sfr,nb6-ser", "brcm,bcm6362";
+122 -12
arch/mips/boot/dts/brcm/bcm6362.dtsi
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include "dt-bindings/clock/bcm6362-clock.h" 4 + #include "dt-bindings/reset/bcm6362-reset.h" 5 + #include "dt-bindings/soc/bcm6362-pm.h" 6 + 2 7 / { 3 8 #address-cells = <1>; 4 9 #size-cells = <1>; ··· 29 24 }; 30 25 31 26 clocks { 32 - periph_clk: periph-clk { 27 + periph_osc: periph-osc { 33 28 compatible = "fixed-clock"; 34 29 #clock-cells = <0>; 35 30 clock-frequency = <50000000>; 31 + clock-output-names = "periph"; 32 + }; 33 + 34 + hsspi_osc: hsspi-osc { 35 + compatible = "fixed-clock"; 36 + 37 + #clock-cells = <0>; 38 + 39 + clock-frequency = <400000000>; 40 + clock-output-names = "hsspi_osc"; 36 41 }; 37 42 }; 38 43 39 44 aliases { 45 + nflash = &nflash; 40 46 serial0 = &uart0; 41 47 serial1 = &uart1; 48 + spi0 = &lsspi; 49 + spi1 = &hsspi; 42 50 }; 43 51 44 52 cpu_intc: interrupt-controller { ··· 69 51 compatible = "simple-bus"; 70 52 ranges; 71 53 72 - clkctl: clock-controller@10000004 { 54 + periph_clk: clock-controller@10000004 { 73 55 compatible = "brcm,bcm6362-clocks"; 74 56 reg = <0x10000004 0x4>; 75 57 #clock-cells = <1>; 76 58 }; 77 59 78 - periph_cntl: syscon@10000008 { 60 + pll_cntl: syscon@10000008 { 79 61 compatible = "syscon"; 80 - reg = <0x10000000 0xc>; 62 + reg = <0x10000008 0x4>; 81 63 native-endian; 82 - }; 83 64 84 - reboot: syscon-reboot@10000008 { 85 - compatible = "syscon-reboot"; 86 - regmap = <&periph_cntl>; 87 - offset = <0x0>; 88 - mask = <0x1>; 65 + reboot { 66 + compatible = "syscon-reboot"; 67 + offset = <0x0>; 68 + mask = <0x1>; 69 + }; 89 70 }; 90 71 91 72 periph_rst: reset-controller@10000010 { ··· 105 88 interrupts = <2>, <3>; 106 89 }; 107 90 91 + wdt: watchdog@1000005c { 92 + compatible = "brcm,bcm7038-wdt"; 93 + reg = <0x1000005c 0xc>; 94 + 95 + clocks = <&periph_osc>; 96 + clock-names = "refclk"; 97 + 98 + timeout-sec = <30>; 99 + }; 100 + 108 101 uart0: serial@10000100 { 109 102 compatible = "brcm,bcm6345-uart"; 110 103 reg = <0x10000100 0x18>; ··· 122 95 
interrupt-parent = <&periph_intc>; 123 96 interrupts = <3>; 124 97 125 - clocks = <&periph_clk>; 98 + clocks = <&periph_osc>; 126 99 clock-names = "refclk"; 127 100 128 101 status = "disabled"; ··· 135 108 interrupt-parent = <&periph_intc>; 136 109 interrupts = <4>; 137 110 138 - clocks = <&periph_clk>; 111 + clocks = <&periph_osc>; 139 112 clock-names = "refclk"; 113 + 114 + status = "disabled"; 115 + }; 116 + 117 + nflash: nand@10000200 { 118 + #address-cells = <1>; 119 + #size-cells = <0>; 120 + compatible = "brcm,nand-bcm6368", 121 + "brcm,brcmnand-v2.2", 122 + "brcm,brcmnand"; 123 + reg = <0x10000200 0x180>, 124 + <0x10000600 0x200>, 125 + <0x10000070 0x10>; 126 + reg-names = "nand", 127 + "nand-cache", 128 + "nand-int-base"; 129 + 130 + interrupt-parent = <&periph_intc>; 131 + interrupts = <12>; 132 + 133 + clocks = <&periph_clk BCM6362_CLK_NAND>; 134 + clock-names = "nand"; 135 + 136 + status = "disabled"; 137 + }; 138 + 139 + lsspi: spi@10000800 { 140 + #address-cells = <1>; 141 + #size-cells = <0>; 142 + compatible = "brcm,bcm6358-spi"; 143 + reg = <0x10000800 0x70c>; 144 + 145 + interrupt-parent = <&periph_intc>; 146 + interrupts = <2>; 147 + 148 + clocks = <&periph_clk BCM6362_CLK_SPI>; 149 + clock-names = "spi"; 150 + 151 + resets = <&periph_rst BCM6362_RST_SPI>; 152 + reset-names = "spi"; 153 + 154 + status = "disabled"; 155 + }; 156 + 157 + hsspi: spi@10001000 { 158 + #address-cells = <1>; 159 + #size-cells = <0>; 160 + compatible = "brcm,bcm6328-hsspi"; 161 + reg = <0x10001000 0x600>; 162 + 163 + interrupt-parent = <&periph_intc>; 164 + interrupts = <5>; 165 + 166 + clocks = <&periph_clk BCM6362_CLK_HSSPI>, 167 + <&hsspi_osc>; 168 + clock-names = "hsspi", 169 + "pll"; 170 + 171 + resets = <&periph_rst BCM6362_RST_SPI>; 172 + reset-names = "hsspi"; 140 173 141 174 status = "disabled"; 142 175 }; ··· 224 137 interrupt-parent = <&periph_intc>; 225 138 interrupts = <10>; 226 139 140 + phys = <&usbh 0>; 141 + phy-names = "usb"; 142 + 227 143 status = 
"disabled"; 228 144 }; 229 145 ··· 238 148 239 149 interrupt-parent = <&periph_intc>; 240 150 interrupts = <9>; 151 + 152 + phys = <&usbh 0>; 153 + phy-names = "usb"; 154 + 155 + status = "disabled"; 156 + }; 157 + 158 + usbh: usb-phy@10002700 { 159 + compatible = "brcm,bcm6362-usbh-phy"; 160 + reg = <0x10002700 0x38>; 161 + 162 + #phy-cells = <1>; 163 + 164 + clocks = <&periph_clk BCM6362_CLK_USBH>; 165 + clock-names = "usbh"; 166 + 167 + power-domains = <&periph_pwr BCM6362_POWER_DOMAIN_USBH>; 168 + 169 + resets = <&periph_rst BCM6362_RST_USBH>; 170 + reset-names = "usbh"; 241 171 242 172 status = "disabled"; 243 173 };
+121 -12
arch/mips/boot/dts/brcm/bcm6368.dtsi
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include "dt-bindings/clock/bcm6368-clock.h" 4 + #include "dt-bindings/reset/bcm6368-reset.h" 5 + 2 6 / { 3 7 #address-cells = <1>; 4 8 #size-cells = <1>; ··· 28 24 }; 29 25 30 26 clocks { 31 - periph_clk: periph-clk { 27 + periph_osc: periph-osc { 32 28 compatible = "fixed-clock"; 33 29 #clock-cells = <0>; 34 30 clock-frequency = <50000000>; 31 + clock-output-names = "periph"; 35 32 }; 36 33 }; 37 34 38 35 aliases { 36 + nflash = &nflash; 37 + pflash = &pflash; 39 38 serial0 = &uart0; 40 39 serial1 = &uart1; 40 + spi0 = &lsspi; 41 41 }; 42 42 43 43 cpu_intc: interrupt-controller { ··· 59 51 compatible = "simple-bus"; 60 52 ranges; 61 53 62 - clkctl: clock-controller@10000004 { 54 + periph_clk: clock-controller@10000004 { 63 55 compatible = "brcm,bcm6368-clocks"; 64 56 reg = <0x10000004 0x4>; 65 57 #clock-cells = <1>; 66 58 }; 67 59 68 - periph_cntl: syscon@100000008 { 60 + pll_cntl: syscon@100000008 { 69 61 compatible = "syscon"; 70 - reg = <0x10000000 0xc>; 62 + reg = <0x10000008 0x4>; 71 63 native-endian; 72 - }; 73 64 74 - reboot: syscon-reboot@10000008 { 75 - compatible = "syscon-reboot"; 76 - regmap = <&periph_cntl>; 77 - offset = <0x0>; 78 - mask = <0x1>; 65 + reboot { 66 + compatible = "syscon-reboot"; 67 + offset = <0x0>; 68 + mask = <0x1>; 69 + }; 79 70 }; 80 71 81 72 periph_rst: reset-controller@10000010 { ··· 95 88 interrupts = <2>, <3>; 96 89 }; 97 90 91 + wdt: watchdog@1000005c { 92 + compatible = "brcm,bcm7038-wdt"; 93 + reg = <0x1000005c 0xc>; 94 + 95 + clocks = <&periph_osc>; 96 + clock-names = "refclk"; 97 + 98 + timeout-sec = <30>; 99 + }; 100 + 98 101 leds0: led-controller@100000d0 { 99 102 #address-cells = <1>; 100 103 #size-cells = <0>; 101 104 compatible = "brcm,bcm6358-leds"; 102 105 reg = <0x100000d0 0x8>; 106 + 103 107 status = "disabled"; 104 108 }; 105 109 106 110 uart0: serial@10000100 { 107 111 compatible = "brcm,bcm6345-uart"; 108 112 reg = <0x10000100 0x18>; 113 + 109 114 
interrupt-parent = <&periph_intc>; 110 115 interrupts = <2>; 111 - clocks = <&periph_clk>; 116 + 117 + clocks = <&periph_osc>; 112 118 clock-names = "refclk"; 119 + 113 120 status = "disabled"; 114 121 }; 115 122 116 123 uart1: serial@10000120 { 117 124 compatible = "brcm,bcm6345-uart"; 118 125 reg = <0x10000120 0x18>; 126 + 119 127 interrupt-parent = <&periph_intc>; 120 128 interrupts = <3>; 121 - clocks = <&periph_clk>; 129 + 130 + clocks = <&periph_osc>; 122 131 clock-names = "refclk"; 132 + 133 + status = "disabled"; 134 + }; 135 + 136 + nflash: nand@10000200 { 137 + #address-cells = <1>; 138 + #size-cells = <0>; 139 + compatible = "brcm,nand-bcm6368", 140 + "brcm,brcmnand-v2.1", 141 + "brcm,brcmnand"; 142 + reg = <0x10000200 0x180>, 143 + <0x10000600 0x200>, 144 + <0x10000070 0x10>; 145 + reg-names = "nand", 146 + "nand-cache", 147 + "nand-int-base"; 148 + 149 + interrupt-parent = <&periph_intc>; 150 + interrupts = <10>; 151 + 152 + clocks = <&periph_clk BCM6368_CLK_NAND>; 153 + clock-names = "nand"; 154 + 155 + status = "disabled"; 156 + }; 157 + 158 + lsspi: spi@10000800 { 159 + #address-cells = <1>; 160 + #size-cells = <0>; 161 + compatible = "brcm,bcm6358-spi"; 162 + reg = <0x10000800 0x70c>; 163 + 164 + interrupt-parent = <&periph_intc>; 165 + interrupts = <1>; 166 + 167 + clocks = <&periph_clk BCM6368_CLK_SPI>; 168 + clock-names = "spi"; 169 + 170 + resets = <&periph_rst BCM6368_RST_SPI>; 171 + reset-names = "spi"; 172 + 123 173 status = "disabled"; 124 174 }; 125 175 ··· 184 120 compatible = "brcm,bcm6368-ehci", "generic-ehci"; 185 121 reg = <0x10001500 0x100>; 186 122 big-endian; 123 + 187 124 interrupt-parent = <&periph_intc>; 188 125 interrupts = <7>; 126 + 127 + phys = <&usbh 0>; 128 + phy-names = "usb"; 129 + 189 130 status = "disabled"; 190 131 }; 191 132 ··· 199 130 reg = <0x10001600 0x100>; 200 131 big-endian; 201 132 no-big-frame-no; 133 + 202 134 interrupt-parent = <&periph_intc>; 203 135 interrupts = <5>; 136 + 137 + phys = <&usbh 0>; 138 + 
phy-names = "usb"; 139 + 204 140 status = "disabled"; 205 141 }; 142 + 143 + usbh: usb-phy@10001700 { 144 + compatible = "brcm,bcm6368-usbh-phy"; 145 + reg = <0x10001700 0x38>; 146 + #phy-cells = <1>; 147 + 148 + clocks = <&periph_clk BCM6368_CLK_USBH>; 149 + clock-names = "usbh"; 150 + 151 + resets = <&periph_rst BCM6368_RST_USBH>; 152 + reset-names = "usbh"; 153 + 154 + status = "disabled"; 155 + }; 156 + 157 + random: rng@10004180 { 158 + compatible = "brcm,bcm6368-rng"; 159 + reg = <0x10004180 0x14>; 160 + 161 + clocks = <&periph_clk BCM6368_CLK_IPSEC>; 162 + clock-names = "ipsec"; 163 + 164 + resets = <&periph_rst BCM6368_RST_IPSEC>; 165 + reset-names = "ipsec"; 166 + }; 167 + }; 168 + 169 + pflash: nor@18000000 { 170 + #address-cells = <1>; 171 + #size-cells = <1>; 172 + compatible = "cfi-flash"; 173 + reg = <0x18000000 0x2000000>; 174 + bank-width = <2>; 175 + 176 + status = "disabled"; 206 177 }; 207 178 };
+1 -1
arch/mips/boot/dts/brcm/bcm93384wvg.dts
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /dts-v1/; 3 3 4 - /include/ "bcm3384_zephyr.dtsi" 4 + #include "bcm3384_zephyr.dtsi" 5 5 6 6 / { 7 7 compatible = "brcm,bcm93384wvg", "brcm,bcm3384";
+1 -1
arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /dts-v1/; 3 3 4 - /include/ "bcm3384_viper.dtsi" 4 + #include "bcm3384_viper.dtsi" 5 5 6 6 / { 7 7 compatible = "brcm,bcm93384wvg-viper", "brcm,bcm3384-viper";
+1 -1
arch/mips/boot/dts/brcm/bcm96368mvwg.dts
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /dts-v1/; 3 3 4 - /include/ "bcm6368.dtsi" 4 + #include "bcm6368.dtsi" 5 5 6 6 / { 7 7 compatible = "brcm,bcm96368mvwg", "brcm,bcm6368";
+1 -1
arch/mips/boot/dts/brcm/bcm97125cbmb.dts
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /dts-v1/; 3 3 4 - /include/ "bcm7125.dtsi" 4 + #include "bcm7125.dtsi" 5 5 6 6 / { 7 7 compatible = "brcm,bcm97125cbmb", "brcm,bcm7125";
+2 -2
arch/mips/boot/dts/brcm/bcm97346dbsmb.dts
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /dts-v1/; 3 3 4 - /include/ "bcm7346.dtsi" 5 - /include/ "bcm97xxx-nand-cs1-bch24.dtsi" 4 + #include "bcm7346.dtsi" 5 + #include "bcm97xxx-nand-cs1-bch24.dtsi" 6 6 7 7 / { 8 8 compatible = "brcm,bcm97346dbsmb", "brcm,bcm7346";
+2 -2
arch/mips/boot/dts/brcm/bcm97358svmb.dts
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /dts-v1/; 3 3 4 - /include/ "bcm7358.dtsi" 5 - /include/ "bcm97xxx-nand-cs1-bch4.dtsi" 4 + #include "bcm7358.dtsi" 5 + #include "bcm97xxx-nand-cs1-bch4.dtsi" 6 6 7 7 / { 8 8 compatible = "brcm,bcm97358svmb", "brcm,bcm7358";
+1 -1
arch/mips/boot/dts/brcm/bcm97360svmb.dts
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /dts-v1/; 3 3 4 - /include/ "bcm7360.dtsi" 4 + #include "bcm7360.dtsi" 5 5 6 6 / { 7 7 compatible = "brcm,bcm97360svmb", "brcm,bcm7360";
+2 -2
arch/mips/boot/dts/brcm/bcm97362svmb.dts
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /dts-v1/; 3 3 4 - /include/ "bcm7362.dtsi" 5 - /include/ "bcm97xxx-nand-cs1-bch4.dtsi" 4 + #include "bcm7362.dtsi" 5 + #include "bcm97xxx-nand-cs1-bch4.dtsi" 6 6 7 7 / { 8 8 compatible = "brcm,bcm97362svmb", "brcm,bcm7362";
+1 -1
arch/mips/boot/dts/brcm/bcm97420c.dts
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /dts-v1/; 3 3 4 - /include/ "bcm7420.dtsi" 4 + #include "bcm7420.dtsi" 5 5 6 6 / { 7 7 compatible = "brcm,bcm97420c", "brcm,bcm7420";
+2 -2
arch/mips/boot/dts/brcm/bcm97425svmb.dts
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /dts-v1/; 3 3 4 - /include/ "bcm7425.dtsi" 5 - /include/ "bcm97xxx-nand-cs1-bch24.dtsi" 4 + #include "bcm7425.dtsi" 5 + #include "bcm97xxx-nand-cs1-bch24.dtsi" 6 6 7 7 / { 8 8 compatible = "brcm,bcm97425svmb", "brcm,bcm7425";
+2 -2
arch/mips/boot/dts/brcm/bcm97435svmb.dts
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /dts-v1/; 3 3 4 - /include/ "bcm7435.dtsi" 5 - /include/ "bcm97xxx-nand-cs1-bch24.dtsi" 4 + #include "bcm7435.dtsi" 5 + #include "bcm97xxx-nand-cs1-bch24.dtsi" 6 6 7 7 / { 8 8 compatible = "brcm,bcm97435svmb", "brcm,bcm7435";
+1 -1
arch/mips/boot/dts/brcm/bcm9ejtagprb.dts
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /dts-v1/; 3 3 4 - /include/ "bcm6328.dtsi" 4 + #include "bcm6328.dtsi" 5 5 6 6 / { 7 7 compatible = "brcm,bcm9ejtagprb", "brcm,bcm6328";
-1
arch/mips/boot/dts/ingenic/gcw0.dts
··· 345 345 346 346 spi-max-frequency = <3125000>; 347 347 spi-3wire; 348 - spi-cs-high; 349 348 350 349 reset-gpios = <&gpe 2 GPIO_ACTIVE_LOW>; 351 350
+1
arch/mips/boot/dts/loongson/Makefile
··· 1 1 # SPDX_License_Identifier: GPL_2.0 2 + dtb-$(CONFIG_MACH_LOONGSON64) += loongson64_2core_2k1000.dtb 2 3 dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_4core_ls7a.dtb 3 4 dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_4core_rs780e.dtb 4 5 dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_8core_rs780e.dtb
+243
arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + /dts-v1/; 4 + 5 + #include <dt-bindings/interrupt-controller/irq.h> 6 + 7 + / { 8 + compatible = "loongson,loongson2k1000"; 9 + 10 + #address-cells = <2>; 11 + #size-cells = <2>; 12 + 13 + cpus { 14 + #address-cells = <1>; 15 + #size-cells = <0>; 16 + 17 + cpu0: cpu@0 { 18 + device_type = "cpu"; 19 + compatible = "loongson,gs264"; 20 + reg = <0x0>; 21 + #clock-cells = <1>; 22 + clocks = <&cpu_clk>; 23 + }; 24 + }; 25 + 26 + memory { 27 + compatible = "memory"; 28 + device_type = "memory"; 29 + reg = <0x00000000 0x00200000 0x00000000 0x0ee00000>, /* 238 MB at 2 MB */ 30 + <0x00000000 0x20000000 0x00000000 0x1f000000>, /* 496 MB at 512 MB */ 31 + <0x00000001 0x10000000 0x00000001 0xb0000000>; /* 6912 MB at 4352MB */ 32 + }; 33 + 34 + cpu_clk: cpu_clk { 35 + #clock-cells = <0>; 36 + compatible = "fixed-clock"; 37 + clock-frequency = <800000000>; 38 + }; 39 + 40 + cpuintc: interrupt-controller { 41 + #address-cells = <0>; 42 + #interrupt-cells = <1>; 43 + interrupt-controller; 44 + compatible = "mti,cpu-interrupt-controller"; 45 + }; 46 + 47 + package0: bus@10000000 { 48 + compatible = "simple-bus"; 49 + #address-cells = <2>; 50 + #size-cells = <2>; 51 + ranges = <0 0x10000000 0 0x10000000 0 0x10000000 /* ioports */ 52 + 0 0x40000000 0 0x40000000 0 0x40000000 53 + 0xfe 0x00000000 0xfe 0x00000000 0 0x40000000>; 54 + 55 + liointc0: interrupt-controller@1fe11400 { 56 + compatible = "loongson,liointc-2.0"; 57 + reg = <0 0x1fe11400 0 0x40>, 58 + <0 0x1fe11040 0 0x8>, 59 + <0 0x1fe11140 0 0x8>; 60 + reg-names = "main", "isr0", "isr1"; 61 + 62 + interrupt-controller; 63 + #interrupt-cells = <2>; 64 + 65 + interrupt-parent = <&cpuintc>; 66 + interrupts = <2>; 67 + interrupt-names = "int0"; 68 + 69 + loongson,parent_int_map = <0xffffffff>, /* int0 */ 70 + <0x00000000>, /* int1 */ 71 + <0x00000000>, /* int2 */ 72 + <0x00000000>; /* int3 */ 73 + }; 74 + 75 + liointc1: interrupt-controller@1fe11440 { 76 + compatible = 
"loongson,liointc-2.0"; 77 + reg = <0 0x1fe11440 0 0x40>, 78 + <0 0x1fe11048 0 0x8>, 79 + <0 0x1fe11148 0 0x8>; 80 + reg-names = "main", "isr0", "isr1"; 81 + 82 + interrupt-controller; 83 + #interrupt-cells = <2>; 84 + 85 + interrupt-parent = <&cpuintc>; 86 + interrupts = <3>; 87 + interrupt-names = "int1"; 88 + 89 + loongson,parent_int_map = <0x00000000>, /* int0 */ 90 + <0xffffffff>, /* int1 */ 91 + <0x00000000>, /* int2 */ 92 + <0x00000000>; /* int3 */ 93 + }; 94 + 95 + uart0: serial@1fe00000 { 96 + compatible = "ns16550a"; 97 + reg = <0 0x1fe00000 0 0x8>; 98 + clock-frequency = <125000000>; 99 + interrupt-parent = <&liointc0>; 100 + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; 101 + no-loopback-test; 102 + }; 103 + 104 + pci@1a000000 { 105 + compatible = "loongson,ls2k-pci"; 106 + device_type = "pci"; 107 + #address-cells = <3>; 108 + #size-cells = <2>; 109 + #interrupt-cells = <2>; 110 + 111 + reg = <0 0x1a000000 0 0x02000000>, 112 + <0xfe 0x00000000 0 0x20000000>; 113 + 114 + ranges = <0x01000000 0x0 0x00000000 0x0 0x18000000 0x0 0x00010000>, 115 + <0x02000000 0x0 0x40000000 0x0 0x40000000 0x0 0x40000000>; 116 + 117 + ehci@4,1 { 118 + compatible = "pci0014,7a14.0", 119 + "pci0014,7a14", 120 + "pciclass0c0320", 121 + "pciclass0c03"; 122 + 123 + reg = <0x2100 0x0 0x0 0x0 0x0>; 124 + interrupts = <18 IRQ_TYPE_LEVEL_LOW>; 125 + interrupt-parent = <&liointc1>; 126 + }; 127 + 128 + ohci@4,2 { 129 + compatible = "pci0014,7a24.0", 130 + "pci0014,7a24", 131 + "pciclass0c0310", 132 + "pciclass0c03"; 133 + 134 + reg = <0x2200 0x0 0x0 0x0 0x0>; 135 + interrupts = <19 IRQ_TYPE_LEVEL_LOW>; 136 + interrupt-parent = <&liointc1>; 137 + }; 138 + 139 + sata@8,0 { 140 + compatible = "pci0014,7a08.0", 141 + "pci0014,7a08", 142 + "pciclass010601", 143 + "pciclass0106"; 144 + 145 + reg = <0x4000 0x0 0x0 0x0 0x0>; 146 + interrupts = <19 IRQ_TYPE_LEVEL_LOW>; 147 + interrupt-parent = <&liointc0>; 148 + }; 149 + 150 + pci_bridge@9,0 { 151 + compatible = "pci0014,7a19.0", 152 + "pci0014,7a19", 
153 + "pciclass060400", 154 + "pciclass0604"; 155 + 156 + reg = <0x4800 0x0 0x0 0x0 0x0>; 157 + #interrupt-cells = <1>; 158 + interrupts = <0 IRQ_TYPE_LEVEL_LOW>; 159 + interrupt-parent = <&liointc1>; 160 + interrupt-map-mask = <0 0 0 0>; 161 + interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_LOW>; 162 + external-facing; 163 + }; 164 + 165 + pci_bridge@a,0 { 166 + compatible = "pci0014,7a19.0", 167 + "pci0014,7a19", 168 + "pciclass060400", 169 + "pciclass0604"; 170 + 171 + reg = <0x5000 0x0 0x0 0x0 0x0>; 172 + #interrupt-cells = <1>; 173 + interrupts = <1 IRQ_TYPE_LEVEL_LOW>; 174 + interrupt-parent = <&liointc1>; 175 + interrupt-map-mask = <0 0 0 0>; 176 + interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_LOW>; 177 + external-facing; 178 + }; 179 + 180 + pci_bridge@b,0 { 181 + compatible = "pci0014,7a19.0", 182 + "pci0014,7a19", 183 + "pciclass060400", 184 + "pciclass0604"; 185 + 186 + reg = <0x5800 0x0 0x0 0x0 0x0>; 187 + #interrupt-cells = <1>; 188 + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; 189 + interrupt-parent = <&liointc1>; 190 + interrupt-map-mask = <0 0 0 0>; 191 + interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_LOW>; 192 + external-facing; 193 + }; 194 + 195 + pci_bridge@c,0 { 196 + compatible = "pci0014,7a19.0", 197 + "pci0014,7a19", 198 + "pciclass060400", 199 + "pciclass0604"; 200 + 201 + reg = <0x6000 0x0 0x0 0x0 0x0>; 202 + #interrupt-cells = <1>; 203 + interrupts = <3 IRQ_TYPE_LEVEL_LOW>; 204 + interrupt-parent = <&liointc1>; 205 + interrupt-map-mask = <0 0 0 0>; 206 + interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_LOW>; 207 + external-facing; 208 + }; 209 + 210 + pci_bridge@d,0 { 211 + compatible = "pci0014,7a19.0", 212 + "pci0014,7a19", 213 + "pciclass060400", 214 + "pciclass0604"; 215 + 216 + reg = <0x6800 0x0 0x0 0x0 0x0>; 217 + #interrupt-cells = <1>; 218 + interrupts = <4 IRQ_TYPE_LEVEL_LOW>; 219 + interrupt-parent = <&liointc1>; 220 + interrupt-map-mask = <0 0 0 0>; 221 + interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_LOW>; 222 
+ external-facing; 223 + }; 224 + 225 + pci_bridge@e,0 { 226 + compatible = "pci0014,7a19.0", 227 + "pci0014,7a19", 228 + "pciclass060400", 229 + "pciclass0604"; 230 + 231 + reg = <0x7000 0x0 0x0 0x0 0x0>; 232 + #interrupt-cells = <1>; 233 + interrupts = <5 IRQ_TYPE_LEVEL_LOW>; 234 + interrupt-parent = <&liointc1>; 235 + interrupt-map-mask = <0 0 0 0>; 236 + interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_LOW>; 237 + external-facing; 238 + }; 239 + 240 + }; 241 + }; 242 + }; 243 +
+10
arch/mips/boot/dts/loongson/loongson64_2core_2k1000.dts
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + /dts-v1/; 4 + 5 + #include "loongson64-2k1000.dtsi" 6 + 7 + / { 8 + compatible = "loongson,loongson64-2core-2k1000"; 9 + }; 10 +
+1 -1
arch/mips/cavium-octeon/oct_ilm.c
··· 62 62 return 0; 63 63 } 64 64 65 - DEFINE_SIMPLE_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n"); 65 + DEFINE_DEBUGFS_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n"); 66 66 67 67 static void init_debugfs(void) 68 68 {
+6 -2
arch/mips/cavium-octeon/octeon-memcpy.S
··· 150 150 EXPORT_SYMBOL(memcpy) 151 151 move v0, dst /* return value */ 152 152 __memcpy: 153 - FEXPORT(__copy_user) 154 - EXPORT_SYMBOL(__copy_user) 153 + FEXPORT(__raw_copy_from_user) 154 + EXPORT_SYMBOL(__raw_copy_from_user) 155 + FEXPORT(__raw_copy_to_user) 156 + EXPORT_SYMBOL(__raw_copy_to_user) 157 + FEXPORT(__raw_copy_in_user) 158 + EXPORT_SYMBOL(__raw_copy_in_user) 155 159 /* 156 160 * Note: dst & src may be unaligned, len may be 0 157 161 * Temps
-4
arch/mips/configs/bigsur_defconfig
··· 105 105 CONFIG_BLK_DEV_NBD=m 106 106 CONFIG_EEPROM_LEGACY=y 107 107 CONFIG_EEPROM_MAX6875=y 108 - CONFIG_IDE=y 109 - CONFIG_BLK_DEV_IDECD=y 110 - CONFIG_BLK_DEV_IDETAPE=y 111 - CONFIG_BLK_DEV_TC86C001=m 112 108 CONFIG_BLK_DEV_SD=y 113 109 CONFIG_CHR_DEV_ST=y 114 110 CONFIG_BLK_DEV_SR=y
+353
arch/mips/configs/loongson2k_defconfig
··· 1 + # CONFIG_LOCALVERSION_AUTO is not set 2 + CONFIG_KERNEL_LZMA=y 3 + CONFIG_SYSVIPC=y 4 + CONFIG_POSIX_MQUEUE=y 5 + CONFIG_AUDIT=y 6 + CONFIG_NO_HZ=y 7 + CONFIG_HIGH_RES_TIMERS=y 8 + CONFIG_PREEMPT=y 9 + CONFIG_BSD_PROCESS_ACCT=y 10 + CONFIG_BSD_PROCESS_ACCT_V3=y 11 + CONFIG_TASKSTATS=y 12 + CONFIG_TASK_DELAY_ACCT=y 13 + CONFIG_TASK_XACCT=y 14 + CONFIG_TASK_IO_ACCOUNTING=y 15 + CONFIG_MEMCG=y 16 + CONFIG_BLK_CGROUP=y 17 + CONFIG_SCHED_AUTOGROUP=y 18 + CONFIG_SYSFS_DEPRECATED=y 19 + CONFIG_RELAY=y 20 + CONFIG_BLK_DEV_INITRD=y 21 + CONFIG_EMBEDDED=y 22 + CONFIG_MACH_LOONGSON64=y 23 + # CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION is not set 24 + CONFIG_HZ_256=y 25 + CONFIG_MIPS32_O32=y 26 + CONFIG_MIPS32_N32=y 27 + CONFIG_MODULES=y 28 + CONFIG_MODULE_FORCE_LOAD=y 29 + CONFIG_MODULE_UNLOAD=y 30 + CONFIG_MODULE_FORCE_UNLOAD=y 31 + CONFIG_MODVERSIONS=y 32 + CONFIG_PARTITION_ADVANCED=y 33 + CONFIG_MQ_IOSCHED_DEADLINE=m 34 + CONFIG_IOSCHED_BFQ=y 35 + CONFIG_BFQ_GROUP_IOSCHED=y 36 + CONFIG_BINFMT_MISC=m 37 + CONFIG_KSM=y 38 + CONFIG_NET=y 39 + CONFIG_PACKET=y 40 + CONFIG_UNIX=y 41 + CONFIG_XFRM_USER=y 42 + CONFIG_NET_KEY=y 43 + CONFIG_INET=y 44 + CONFIG_IP_MULTICAST=y 45 + CONFIG_IP_ADVANCED_ROUTER=y 46 + CONFIG_IP_MULTIPLE_TABLES=y 47 + CONFIG_IP_ROUTE_MULTIPATH=y 48 + CONFIG_IP_ROUTE_VERBOSE=y 49 + CONFIG_NETFILTER=y 50 + CONFIG_NETFILTER_NETLINK_LOG=m 51 + CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 52 + CONFIG_NETFILTER_XT_TARGET_MARK=m 53 + CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 54 + CONFIG_NETFILTER_XT_MATCH_COMMENT=m 55 + CONFIG_NETFILTER_XT_MATCH_DCCP=m 56 + CONFIG_NETFILTER_XT_MATCH_ESP=m 57 + CONFIG_NETFILTER_XT_MATCH_LENGTH=m 58 + CONFIG_NETFILTER_XT_MATCH_LIMIT=m 59 + CONFIG_NETFILTER_XT_MATCH_MAC=m 60 + CONFIG_NETFILTER_XT_MATCH_MARK=m 61 + CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m 62 + CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 63 + CONFIG_NETFILTER_XT_MATCH_QUOTA=m 64 + CONFIG_NETFILTER_XT_MATCH_REALM=m 65 + CONFIG_NETFILTER_XT_MATCH_STATISTIC=m 66 + 
CONFIG_NETFILTER_XT_MATCH_STRING=m 67 + CONFIG_NETFILTER_XT_MATCH_TCPMSS=m 68 + CONFIG_IP_VS=m 69 + CONFIG_IP_NF_IPTABLES=m 70 + CONFIG_IP_NF_MATCH_AH=m 71 + CONFIG_IP_NF_MATCH_ECN=m 72 + CONFIG_IP_NF_MATCH_TTL=m 73 + CONFIG_IP_NF_FILTER=m 74 + CONFIG_IP_NF_TARGET_REJECT=m 75 + CONFIG_IP_NF_MANGLE=m 76 + CONFIG_IP_NF_TARGET_ECN=m 77 + CONFIG_IP_NF_TARGET_TTL=m 78 + CONFIG_IP_NF_RAW=m 79 + CONFIG_IP_NF_ARPTABLES=m 80 + CONFIG_IP_NF_ARPFILTER=m 81 + CONFIG_IP_NF_ARP_MANGLE=m 82 + CONFIG_IP_SCTP=m 83 + CONFIG_L2TP=m 84 + CONFIG_BRIDGE=m 85 + CONFIG_CFG80211=m 86 + CONFIG_CFG80211_WEXT=y 87 + CONFIG_MAC80211=m 88 + CONFIG_RFKILL=m 89 + CONFIG_RFKILL_INPUT=y 90 + CONFIG_PCIEPORTBUS=y 91 + CONFIG_HOTPLUG_PCI_PCIE=y 92 + CONFIG_PCIEASPM_PERFORMANCE=y 93 + CONFIG_HOTPLUG_PCI=y 94 + CONFIG_DEVTMPFS=y 95 + CONFIG_DEVTMPFS_MOUNT=y 96 + CONFIG_MTD=m 97 + CONFIG_BLK_DEV_LOOP=y 98 + CONFIG_BLK_DEV_CRYPTOLOOP=y 99 + CONFIG_BLK_DEV_RAM=y 100 + CONFIG_BLK_DEV_RAM_SIZE=8192 101 + CONFIG_RAID_ATTRS=m 102 + CONFIG_BLK_DEV_SD=y 103 + CONFIG_BLK_DEV_SR=y 104 + CONFIG_CHR_DEV_SG=y 105 + CONFIG_CHR_DEV_SCH=m 106 + CONFIG_SCSI_CONSTANTS=y 107 + CONFIG_SCSI_LOGGING=y 108 + CONFIG_SCSI_SPI_ATTRS=m 109 + CONFIG_SCSI_FC_ATTRS=m 110 + CONFIG_ISCSI_TCP=m 111 + CONFIG_MEGARAID_NEWGEN=y 112 + CONFIG_MEGARAID_MM=y 113 + CONFIG_MEGARAID_MAILBOX=y 114 + CONFIG_MEGARAID_LEGACY=y 115 + CONFIG_MEGARAID_SAS=y 116 + CONFIG_ATA=y 117 + CONFIG_SATA_AHCI=y 118 + CONFIG_PATA_ATIIXP=y 119 + CONFIG_MD=y 120 + CONFIG_BLK_DEV_MD=m 121 + CONFIG_MD_LINEAR=m 122 + CONFIG_MD_RAID0=m 123 + CONFIG_MD_RAID1=m 124 + CONFIG_MD_RAID10=m 125 + CONFIG_MD_RAID456=m 126 + CONFIG_MD_MULTIPATH=m 127 + CONFIG_BLK_DEV_DM=m 128 + CONFIG_DM_CRYPT=m 129 + CONFIG_DM_SNAPSHOT=m 130 + CONFIG_DM_MIRROR=m 131 + CONFIG_DM_ZERO=m 132 + CONFIG_TARGET_CORE=m 133 + CONFIG_TCM_IBLOCK=m 134 + CONFIG_TCM_FILEIO=m 135 + CONFIG_TCM_PSCSI=m 136 + CONFIG_LOOPBACK_TARGET=m 137 + CONFIG_ISCSI_TARGET=m 138 + CONFIG_NETDEVICES=y 139 + CONFIG_TUN=m 140 + 
# CONFIG_NET_VENDOR_3COM is not set 141 + # CONFIG_NET_VENDOR_ADAPTEC is not set 142 + # CONFIG_NET_VENDOR_ALTEON is not set 143 + # CONFIG_NET_VENDOR_AMD is not set 144 + # CONFIG_NET_VENDOR_ARC is not set 145 + # CONFIG_NET_VENDOR_ATHEROS is not set 146 + # CONFIG_NET_VENDOR_BROADCOM is not set 147 + # CONFIG_NET_VENDOR_BROCADE is not set 148 + # CONFIG_NET_VENDOR_CHELSIO is not set 149 + # CONFIG_NET_VENDOR_CIRRUS is not set 150 + # CONFIG_NET_VENDOR_CISCO is not set 151 + # CONFIG_NET_VENDOR_DEC is not set 152 + # CONFIG_NET_VENDOR_DLINK is not set 153 + # CONFIG_NET_VENDOR_EMULEX is not set 154 + # CONFIG_NET_VENDOR_I825XX is not set 155 + CONFIG_E1000=y 156 + CONFIG_E1000E=y 157 + CONFIG_IGB=y 158 + CONFIG_IXGB=y 159 + CONFIG_IXGBE=y 160 + # CONFIG_NET_VENDOR_MARVELL is not set 161 + # CONFIG_NET_VENDOR_MELLANOX is not set 162 + # CONFIG_NET_VENDOR_MICREL is not set 163 + # CONFIG_NET_VENDOR_MICROCHIP is not set 164 + # CONFIG_NET_VENDOR_MICROSEMI is not set 165 + # CONFIG_NET_VENDOR_MYRI is not set 166 + # CONFIG_NET_VENDOR_NATSEMI is not set 167 + # CONFIG_NET_VENDOR_NETERION is not set 168 + # CONFIG_NET_VENDOR_NETRONOME is not set 169 + # CONFIG_NET_VENDOR_NI is not set 170 + # CONFIG_NET_VENDOR_NVIDIA is not set 171 + # CONFIG_NET_VENDOR_OKI is not set 172 + # CONFIG_NET_VENDOR_PACKET_ENGINES is not set 173 + # CONFIG_NET_VENDOR_PENSANDO is not set 174 + # CONFIG_NET_VENDOR_QLOGIC is not set 175 + # CONFIG_NET_VENDOR_QUALCOMM is not set 176 + # CONFIG_NET_VENDOR_RDC is not set 177 + CONFIG_8139CP=y 178 + CONFIG_8139TOO=y 179 + # CONFIG_8139TOO_PIO is not set 180 + CONFIG_R8169=y 181 + # CONFIG_NET_VENDOR_RENESAS is not set 182 + # CONFIG_NET_VENDOR_ROCKER is not set 183 + # CONFIG_NET_VENDOR_SAMSUNG is not set 184 + # CONFIG_NET_VENDOR_SEEQ is not set 185 + # CONFIG_NET_VENDOR_SOLARFLARE is not set 186 + # CONFIG_NET_VENDOR_SILAN is not set 187 + # CONFIG_NET_VENDOR_SIS is not set 188 + # CONFIG_NET_VENDOR_SMSC is not set 189 + CONFIG_STMMAC_ETH=y 190 + 
# CONFIG_NET_VENDOR_SUN is not set 191 + # CONFIG_NET_VENDOR_TEHUTI is not set 192 + # CONFIG_NET_VENDOR_TI is not set 193 + # CONFIG_NET_VENDOR_TOSHIBA is not set 194 + # CONFIG_NET_VENDOR_VIA is not set 195 + # CONFIG_NET_VENDOR_WIZNET is not set 196 + CONFIG_PPP=m 197 + CONFIG_PPP_BSDCOMP=m 198 + CONFIG_PPP_DEFLATE=m 199 + CONFIG_PPP_FILTER=y 200 + CONFIG_PPP_MPPE=m 201 + CONFIG_PPP_MULTILINK=y 202 + CONFIG_PPPOE=m 203 + CONFIG_PPPOL2TP=m 204 + CONFIG_PPP_ASYNC=m 205 + CONFIG_PPP_SYNC_TTY=m 206 + CONFIG_ATH9K=m 207 + CONFIG_HOSTAP=m 208 + CONFIG_INPUT_LEDS=m 209 + CONFIG_INPUT_SPARSEKMAP=y 210 + CONFIG_INPUT_EVDEV=y 211 + # CONFIG_KEYBOARD_ATKBD is not set 212 + CONFIG_KEYBOARD_XTKBD=m 213 + # CONFIG_MOUSE_PS2 is not set 214 + CONFIG_INPUT_MISC=y 215 + CONFIG_INPUT_UINPUT=m 216 + # CONFIG_SERIO_I8042 is not set 217 + CONFIG_SERIO_SERPORT=m 218 + CONFIG_SERIO_LIBPS2=y 219 + CONFIG_SERIO_RAW=m 220 + CONFIG_LEGACY_PTY_COUNT=16 221 + CONFIG_SERIAL_8250=y 222 + # CONFIG_SERIAL_8250_16550A_VARIANTS is not set 223 + CONFIG_SERIAL_8250_CONSOLE=y 224 + CONFIG_SERIAL_8250_NR_UARTS=16 225 + CONFIG_SERIAL_8250_EXTENDED=y 226 + CONFIG_SERIAL_8250_MANY_PORTS=y 227 + CONFIG_SERIAL_8250_SHARE_IRQ=y 228 + CONFIG_SERIAL_8250_RSA=y 229 + CONFIG_SERIAL_OF_PLATFORM=y 230 + CONFIG_SERIAL_NONSTANDARD=y 231 + CONFIG_HW_RANDOM=y 232 + CONFIG_RAW_DRIVER=m 233 + CONFIG_I2C_CHARDEV=y 234 + CONFIG_I2C_PIIX4=y 235 + CONFIG_GPIO_LOONGSON=y 236 + CONFIG_SENSORS_LM75=m 237 + CONFIG_SENSORS_LM93=m 238 + CONFIG_SENSORS_W83627HF=m 239 + # CONFIG_MEDIA_CEC_SUPPORT is not set 240 + CONFIG_MEDIA_SUPPORT=m 241 + # CONFIG_MEDIA_CONTROLLER is not set 242 + CONFIG_MEDIA_USB_SUPPORT=y 243 + CONFIG_USB_VIDEO_CLASS=m 244 + CONFIG_DRM=y 245 + CONFIG_DRM_RADEON=y 246 + CONFIG_FB_RADEON=y 247 + CONFIG_LCD_CLASS_DEVICE=y 248 + CONFIG_LCD_PLATFORM=m 249 + # CONFIG_VGA_CONSOLE is not set 250 + CONFIG_FRAMEBUFFER_CONSOLE=y 251 + CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y 252 + CONFIG_LOGO=y 253 + CONFIG_SOUND=y 254 + 
CONFIG_SND=y 255 + CONFIG_SND_VERBOSE_PRINTK=y 256 + CONFIG_SND_SEQUENCER=y 257 + CONFIG_SND_SEQ_DUMMY=m 258 + # CONFIG_SND_ISA is not set 259 + CONFIG_SND_HDA_INTEL=y 260 + CONFIG_SND_HDA_HWDEP=y 261 + CONFIG_SND_HDA_PATCH_LOADER=y 262 + CONFIG_SND_HDA_CODEC_REALTEK=y 263 + CONFIG_SND_HDA_CODEC_ANALOG=y 264 + CONFIG_SND_HDA_CODEC_SIGMATEL=y 265 + CONFIG_SND_HDA_CODEC_VIA=y 266 + CONFIG_SND_HDA_CODEC_CONEXANT=y 267 + # CONFIG_SND_USB is not set 268 + CONFIG_SND_SOC=y 269 + CONFIG_HID_A4TECH=m 270 + CONFIG_HID_SUNPLUS=m 271 + CONFIG_USB=y 272 + CONFIG_USB_MON=y 273 + CONFIG_USB_XHCI_HCD=y 274 + CONFIG_USB_EHCI_HCD=y 275 + CONFIG_USB_EHCI_ROOT_HUB_TT=y 276 + CONFIG_USB_OHCI_HCD=y 277 + CONFIG_USB_UHCI_HCD=m 278 + CONFIG_USB_STORAGE=y 279 + CONFIG_USB_SERIAL=m 280 + CONFIG_USB_SERIAL_OPTION=m 281 + CONFIG_RTC_CLASS=y 282 + CONFIG_RTC_DRV_CMOS=y 283 + CONFIG_DMADEVICES=y 284 + # CONFIG_CPU_HWMON is not set 285 + CONFIG_PM_DEVFREQ=y 286 + CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y 287 + CONFIG_DEVFREQ_GOV_PERFORMANCE=y 288 + CONFIG_DEVFREQ_GOV_POWERSAVE=y 289 + CONFIG_DEVFREQ_GOV_USERSPACE=y 290 + CONFIG_EXT2_FS=y 291 + CONFIG_EXT2_FS_XATTR=y 292 + CONFIG_EXT2_FS_POSIX_ACL=y 293 + CONFIG_EXT2_FS_SECURITY=y 294 + CONFIG_EXT3_FS=y 295 + CONFIG_EXT3_FS_POSIX_ACL=y 296 + CONFIG_EXT3_FS_SECURITY=y 297 + CONFIG_XFS_FS=y 298 + CONFIG_XFS_QUOTA=y 299 + CONFIG_XFS_POSIX_ACL=y 300 + CONFIG_QUOTA=y 301 + # CONFIG_PRINT_QUOTA_WARNING is not set 302 + CONFIG_AUTOFS4_FS=y 303 + CONFIG_FUSE_FS=m 304 + CONFIG_ISO9660_FS=m 305 + CONFIG_JOLIET=y 306 + CONFIG_MSDOS_FS=y 307 + CONFIG_VFAT_FS=y 308 + CONFIG_FAT_DEFAULT_CODEPAGE=936 309 + CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" 310 + CONFIG_PROC_KCORE=y 311 + CONFIG_TMPFS=y 312 + CONFIG_TMPFS_POSIX_ACL=y 313 + CONFIG_CONFIGFS_FS=y 314 + CONFIG_CRAMFS=m 315 + CONFIG_SQUASHFS=y 316 + CONFIG_SQUASHFS_XATTR=y 317 + CONFIG_NFS_FS=m 318 + CONFIG_NFS_V3_ACL=y 319 + CONFIG_NFS_V4=m 320 + CONFIG_NFSD=m 321 + CONFIG_NFSD_V3_ACL=y 322 + CONFIG_NFSD_V4=y 323 + 
CONFIG_CIFS=m 324 + CONFIG_NLS_CODEPAGE_437=y 325 + CONFIG_NLS_CODEPAGE_936=y 326 + CONFIG_NLS_ASCII=y 327 + CONFIG_NLS_UTF8=y 328 + CONFIG_SECURITY=y 329 + CONFIG_SECURITYFS=y 330 + CONFIG_SECURITY_NETWORK=y 331 + CONFIG_SECURITY_PATH=y 332 + CONFIG_SECURITY_SELINUX=y 333 + CONFIG_SECURITY_SELINUX_BOOTPARAM=y 334 + CONFIG_SECURITY_SELINUX_DISABLE=y 335 + CONFIG_DEFAULT_SECURITY_DAC=y 336 + CONFIG_CRYPTO_SEQIV=m 337 + CONFIG_CRYPTO_HMAC=y 338 + CONFIG_CRYPTO_MD5=y 339 + CONFIG_CRYPTO_TGR192=m 340 + CONFIG_CRYPTO_WP512=m 341 + CONFIG_CRYPTO_BLOWFISH=m 342 + CONFIG_CRYPTO_CAST5=m 343 + CONFIG_CRYPTO_CAST6=m 344 + CONFIG_CRYPTO_SERPENT=m 345 + CONFIG_CRYPTO_TWOFISH=m 346 + CONFIG_CRYPTO_DEFLATE=m 347 + CONFIG_PRINTK_TIME=y 348 + CONFIG_FRAME_WARN=1024 349 + CONFIG_STRIP_ASM_SYMS=y 350 + CONFIG_MAGIC_SYSRQ=y 351 + # CONFIG_SCHED_DEBUG is not set 352 + # CONFIG_DEBUG_PREEMPT is not set 353 + # CONFIG_FTRACE is not set
+7 -2
arch/mips/configs/loongson3_defconfig
··· 26 26 CONFIG_SYSFS_DEPRECATED=y 27 27 CONFIG_RELAY=y 28 28 CONFIG_BLK_DEV_INITRD=y 29 + CONFIG_BPF_SYSCALL=y 29 30 CONFIG_EMBEDDED=y 30 31 CONFIG_PERF_EVENTS=y 31 32 CONFIG_MACH_LOONGSON64=y ··· 40 39 CONFIG_MIPS32_N32=y 41 40 CONFIG_VIRTUALIZATION=y 42 41 CONFIG_KVM=m 43 - CONFIG_KVM_MIPS_VZ=y 42 + CONFIG_KPROBES=y 44 43 CONFIG_MODULES=y 45 44 CONFIG_MODULE_FORCE_LOAD=y 46 45 CONFIG_MODULE_UNLOAD=y ··· 130 129 CONFIG_BRIDGE=m 131 130 CONFIG_VSOCKETS=m 132 131 CONFIG_VIRTIO_VSOCKETS=m 132 + CONFIG_BPF_JIT=y 133 133 CONFIG_CFG80211=m 134 134 CONFIG_CFG80211_WEXT=y 135 135 CONFIG_MAC80211=m ··· 320 318 CONFIG_USB_UHCI_HCD=m 321 319 CONFIG_USB_STORAGE=m 322 320 CONFIG_USB_SERIAL=m 321 + CONFIG_USB_SERIAL_PL2303=m 323 322 CONFIG_USB_SERIAL_OPTION=m 324 323 CONFIG_RTC_CLASS=y 325 324 CONFIG_RTC_DRV_CMOS=y ··· 408 405 CONFIG_PRINTK_TIME=y 409 406 CONFIG_STRIP_ASM_SYMS=y 410 407 CONFIG_MAGIC_SYSRQ=y 408 + CONFIG_DEBUG_FS=y 411 409 # CONFIG_SCHED_DEBUG is not set 412 410 # CONFIG_DEBUG_PREEMPT is not set 413 - # CONFIG_FTRACE is not set 411 + CONFIG_FUNCTION_TRACER=y 412 + CONFIG_FTRACE_SYSCALLS=y 414 413 CONFIG_CMDLINE_BOOL=y 415 414 CONFIG_CMDLINE="ieee754=relaxed"
-3
arch/mips/configs/malta_kvm_defconfig
··· 238 238 CONFIG_BLK_DEV_RAM=y 239 239 CONFIG_CDROM_PKTCDVD=m 240 240 CONFIG_ATA_OVER_ETH=m 241 - CONFIG_IDE=y 242 - CONFIG_BLK_DEV_IDECD=y 243 - CONFIG_BLK_DEV_TC86C001=m 244 241 CONFIG_RAID_ATTRS=m 245 242 CONFIG_BLK_DEV_SD=y 246 243 CONFIG_CHR_DEV_ST=m
-436
arch/mips/configs/malta_kvm_guest_defconfig
··· 1 - CONFIG_SYSVIPC=y 2 - CONFIG_NO_HZ=y 3 - CONFIG_HIGH_RES_TIMERS=y 4 - CONFIG_LOG_BUF_SHIFT=15 5 - CONFIG_NAMESPACES=y 6 - CONFIG_RELAY=y 7 - CONFIG_BLK_DEV_INITRD=y 8 - CONFIG_EXPERT=y 9 - # CONFIG_COMPAT_BRK is not set 10 - CONFIG_SLAB=y 11 - CONFIG_MIPS_MALTA=y 12 - CONFIG_CPU_LITTLE_ENDIAN=y 13 - CONFIG_CPU_MIPS32_R2=y 14 - CONFIG_KVM_GUEST=y 15 - CONFIG_PAGE_SIZE_16KB=y 16 - # CONFIG_MIPS_MT_SMP is not set 17 - CONFIG_HZ_100=y 18 - CONFIG_PCI=y 19 - CONFIG_MODULES=y 20 - CONFIG_MODULE_UNLOAD=y 21 - CONFIG_MODVERSIONS=y 22 - CONFIG_MODULE_SRCVERSION_ALL=y 23 - CONFIG_NET=y 24 - CONFIG_PACKET=y 25 - CONFIG_UNIX=y 26 - CONFIG_XFRM_USER=m 27 - CONFIG_NET_KEY=y 28 - CONFIG_NET_KEY_MIGRATE=y 29 - CONFIG_INET=y 30 - CONFIG_IP_MULTICAST=y 31 - CONFIG_IP_ADVANCED_ROUTER=y 32 - CONFIG_IP_MULTIPLE_TABLES=y 33 - CONFIG_IP_ROUTE_MULTIPATH=y 34 - CONFIG_IP_ROUTE_VERBOSE=y 35 - CONFIG_IP_PNP=y 36 - CONFIG_IP_PNP_DHCP=y 37 - CONFIG_IP_PNP_BOOTP=y 38 - CONFIG_NET_IPIP=m 39 - CONFIG_IP_MROUTE=y 40 - CONFIG_IP_PIMSM_V1=y 41 - CONFIG_IP_PIMSM_V2=y 42 - CONFIG_SYN_COOKIES=y 43 - CONFIG_INET_AH=m 44 - CONFIG_INET_ESP=m 45 - CONFIG_INET_IPCOMP=m 46 - CONFIG_INET_XFRM_MODE_TRANSPORT=m 47 - CONFIG_INET_XFRM_MODE_TUNNEL=m 48 - CONFIG_TCP_MD5SIG=y 49 - CONFIG_IPV6_ROUTER_PREF=y 50 - CONFIG_IPV6_ROUTE_INFO=y 51 - CONFIG_IPV6_OPTIMISTIC_DAD=y 52 - CONFIG_INET6_AH=m 53 - CONFIG_INET6_ESP=m 54 - CONFIG_INET6_IPCOMP=m 55 - CONFIG_IPV6_TUNNEL=m 56 - CONFIG_IPV6_MROUTE=y 57 - CONFIG_IPV6_PIMSM_V2=y 58 - CONFIG_NETWORK_SECMARK=y 59 - CONFIG_NETFILTER=y 60 - CONFIG_NF_CONNTRACK=m 61 - CONFIG_NF_CONNTRACK_SECMARK=y 62 - CONFIG_NF_CONNTRACK_EVENTS=y 63 - CONFIG_NF_CONNTRACK_AMANDA=m 64 - CONFIG_NF_CONNTRACK_FTP=m 65 - CONFIG_NF_CONNTRACK_H323=m 66 - CONFIG_NF_CONNTRACK_IRC=m 67 - CONFIG_NF_CONNTRACK_PPTP=m 68 - CONFIG_NF_CONNTRACK_SANE=m 69 - CONFIG_NF_CONNTRACK_SIP=m 70 - CONFIG_NF_CONNTRACK_TFTP=m 71 - CONFIG_NF_CT_NETLINK=m 72 - CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 73 - 
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m 74 - CONFIG_NETFILTER_XT_TARGET_MARK=m 75 - CONFIG_NETFILTER_XT_TARGET_NFLOG=m 76 - CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 77 - CONFIG_NETFILTER_XT_TARGET_TPROXY=m 78 - CONFIG_NETFILTER_XT_TARGET_TRACE=m 79 - CONFIG_NETFILTER_XT_TARGET_SECMARK=m 80 - CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 81 - CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m 82 - CONFIG_NETFILTER_XT_MATCH_COMMENT=m 83 - CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m 84 - CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m 85 - CONFIG_NETFILTER_XT_MATCH_CONNMARK=m 86 - CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m 87 - CONFIG_NETFILTER_XT_MATCH_DCCP=m 88 - CONFIG_NETFILTER_XT_MATCH_ESP=m 89 - CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m 90 - CONFIG_NETFILTER_XT_MATCH_HELPER=m 91 - CONFIG_NETFILTER_XT_MATCH_IPRANGE=m 92 - CONFIG_NETFILTER_XT_MATCH_LENGTH=m 93 - CONFIG_NETFILTER_XT_MATCH_LIMIT=m 94 - CONFIG_NETFILTER_XT_MATCH_MAC=m 95 - CONFIG_NETFILTER_XT_MATCH_MARK=m 96 - CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m 97 - CONFIG_NETFILTER_XT_MATCH_OWNER=m 98 - CONFIG_NETFILTER_XT_MATCH_POLICY=m 99 - CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 100 - CONFIG_NETFILTER_XT_MATCH_QUOTA=m 101 - CONFIG_NETFILTER_XT_MATCH_RATEEST=m 102 - CONFIG_NETFILTER_XT_MATCH_REALM=m 103 - CONFIG_NETFILTER_XT_MATCH_RECENT=m 104 - CONFIG_NETFILTER_XT_MATCH_SOCKET=m 105 - CONFIG_NETFILTER_XT_MATCH_STATE=m 106 - CONFIG_NETFILTER_XT_MATCH_STATISTIC=m 107 - CONFIG_NETFILTER_XT_MATCH_STRING=m 108 - CONFIG_NETFILTER_XT_MATCH_TCPMSS=m 109 - CONFIG_NETFILTER_XT_MATCH_TIME=m 110 - CONFIG_NETFILTER_XT_MATCH_U32=m 111 - CONFIG_IP_VS=m 112 - CONFIG_IP_VS_IPV6=y 113 - CONFIG_IP_VS_PROTO_TCP=y 114 - CONFIG_IP_VS_PROTO_UDP=y 115 - CONFIG_IP_VS_PROTO_ESP=y 116 - CONFIG_IP_VS_PROTO_AH=y 117 - CONFIG_IP_VS_RR=m 118 - CONFIG_IP_VS_WRR=m 119 - CONFIG_IP_VS_LC=m 120 - CONFIG_IP_VS_WLC=m 121 - CONFIG_IP_VS_LBLC=m 122 - CONFIG_IP_VS_LBLCR=m 123 - CONFIG_IP_VS_DH=m 124 - CONFIG_IP_VS_SH=m 125 - CONFIG_IP_VS_SED=m 126 - CONFIG_IP_VS_NQ=m 127 - CONFIG_IP_NF_IPTABLES=m 128 - 
CONFIG_IP_NF_MATCH_AH=m 129 - CONFIG_IP_NF_MATCH_ECN=m 130 - CONFIG_IP_NF_MATCH_TTL=m 131 - CONFIG_IP_NF_FILTER=m 132 - CONFIG_IP_NF_TARGET_REJECT=m 133 - CONFIG_IP_NF_MANGLE=m 134 - CONFIG_IP_NF_TARGET_CLUSTERIP=m 135 - CONFIG_IP_NF_TARGET_ECN=m 136 - CONFIG_IP_NF_TARGET_TTL=m 137 - CONFIG_IP_NF_RAW=m 138 - CONFIG_IP_NF_ARPTABLES=m 139 - CONFIG_IP_NF_ARPFILTER=m 140 - CONFIG_IP_NF_ARP_MANGLE=m 141 - CONFIG_IP6_NF_MATCH_AH=m 142 - CONFIG_IP6_NF_MATCH_EUI64=m 143 - CONFIG_IP6_NF_MATCH_FRAG=m 144 - CONFIG_IP6_NF_MATCH_OPTS=m 145 - CONFIG_IP6_NF_MATCH_HL=m 146 - CONFIG_IP6_NF_MATCH_IPV6HEADER=m 147 - CONFIG_IP6_NF_MATCH_MH=m 148 - CONFIG_IP6_NF_MATCH_RT=m 149 - CONFIG_IP6_NF_TARGET_HL=m 150 - CONFIG_IP6_NF_FILTER=m 151 - CONFIG_IP6_NF_TARGET_REJECT=m 152 - CONFIG_IP6_NF_MANGLE=m 153 - CONFIG_IP6_NF_RAW=m 154 - CONFIG_BRIDGE_NF_EBTABLES=m 155 - CONFIG_BRIDGE_EBT_BROUTE=m 156 - CONFIG_BRIDGE_EBT_T_FILTER=m 157 - CONFIG_BRIDGE_EBT_T_NAT=m 158 - CONFIG_BRIDGE_EBT_802_3=m 159 - CONFIG_BRIDGE_EBT_AMONG=m 160 - CONFIG_BRIDGE_EBT_ARP=m 161 - CONFIG_BRIDGE_EBT_IP=m 162 - CONFIG_BRIDGE_EBT_IP6=m 163 - CONFIG_BRIDGE_EBT_LIMIT=m 164 - CONFIG_BRIDGE_EBT_MARK=m 165 - CONFIG_BRIDGE_EBT_PKTTYPE=m 166 - CONFIG_BRIDGE_EBT_STP=m 167 - CONFIG_BRIDGE_EBT_VLAN=m 168 - CONFIG_BRIDGE_EBT_ARPREPLY=m 169 - CONFIG_BRIDGE_EBT_DNAT=m 170 - CONFIG_BRIDGE_EBT_MARK_T=m 171 - CONFIG_BRIDGE_EBT_REDIRECT=m 172 - CONFIG_BRIDGE_EBT_SNAT=m 173 - CONFIG_BRIDGE_EBT_LOG=m 174 - CONFIG_BRIDGE_EBT_NFLOG=m 175 - CONFIG_IP_SCTP=m 176 - CONFIG_BRIDGE=m 177 - CONFIG_VLAN_8021Q=m 178 - CONFIG_VLAN_8021Q_GVRP=y 179 - CONFIG_ATALK=m 180 - CONFIG_DEV_APPLETALK=m 181 - CONFIG_IPDDP=m 182 - CONFIG_IPDDP_ENCAP=y 183 - CONFIG_PHONET=m 184 - CONFIG_NET_SCHED=y 185 - CONFIG_NET_SCH_CBQ=m 186 - CONFIG_NET_SCH_HTB=m 187 - CONFIG_NET_SCH_HFSC=m 188 - CONFIG_NET_SCH_PRIO=m 189 - CONFIG_NET_SCH_RED=m 190 - CONFIG_NET_SCH_SFQ=m 191 - CONFIG_NET_SCH_TEQL=m 192 - CONFIG_NET_SCH_TBF=m 193 - CONFIG_NET_SCH_GRED=m 194 - 
CONFIG_NET_SCH_DSMARK=m 195 - CONFIG_NET_SCH_NETEM=m 196 - CONFIG_NET_SCH_INGRESS=m 197 - CONFIG_NET_CLS_BASIC=m 198 - CONFIG_NET_CLS_TCINDEX=m 199 - CONFIG_NET_CLS_ROUTE4=m 200 - CONFIG_NET_CLS_FW=m 201 - CONFIG_NET_CLS_U32=m 202 - CONFIG_NET_CLS_RSVP=m 203 - CONFIG_NET_CLS_RSVP6=m 204 - CONFIG_NET_CLS_FLOW=m 205 - CONFIG_NET_CLS_ACT=y 206 - CONFIG_NET_ACT_POLICE=y 207 - CONFIG_NET_ACT_GACT=m 208 - CONFIG_GACT_PROB=y 209 - CONFIG_NET_ACT_MIRRED=m 210 - CONFIG_NET_ACT_IPT=m 211 - CONFIG_NET_ACT_NAT=m 212 - CONFIG_NET_ACT_PEDIT=m 213 - CONFIG_NET_ACT_SIMP=m 214 - CONFIG_NET_ACT_SKBEDIT=m 215 - CONFIG_CFG80211=m 216 - CONFIG_MAC80211=m 217 - CONFIG_MAC80211_MESH=y 218 - CONFIG_RFKILL=m 219 - CONFIG_DEVTMPFS=y 220 - CONFIG_CONNECTOR=m 221 - CONFIG_MTD=y 222 - CONFIG_MTD_BLOCK=y 223 - CONFIG_MTD_OOPS=m 224 - CONFIG_MTD_CFI=y 225 - CONFIG_MTD_CFI_INTELEXT=y 226 - CONFIG_MTD_CFI_AMDSTD=y 227 - CONFIG_MTD_CFI_STAA=y 228 - CONFIG_MTD_PHYSMAP_OF=y 229 - CONFIG_MTD_UBI=m 230 - CONFIG_MTD_UBI_GLUEBI=m 231 - CONFIG_BLK_DEV_FD=m 232 - CONFIG_BLK_DEV_UMEM=m 233 - CONFIG_BLK_DEV_LOOP=m 234 - CONFIG_BLK_DEV_CRYPTOLOOP=m 235 - CONFIG_BLK_DEV_NBD=m 236 - CONFIG_BLK_DEV_RAM=y 237 - CONFIG_CDROM_PKTCDVD=m 238 - CONFIG_ATA_OVER_ETH=m 239 - CONFIG_VIRTIO_BLK=y 240 - CONFIG_IDE=y 241 - CONFIG_BLK_DEV_IDECD=y 242 - CONFIG_BLK_DEV_TC86C001=m 243 - CONFIG_RAID_ATTRS=m 244 - CONFIG_BLK_DEV_SD=y 245 - CONFIG_CHR_DEV_ST=m 246 - CONFIG_CHR_DEV_OSST=m 247 - CONFIG_BLK_DEV_SR=y 248 - CONFIG_CHR_DEV_SG=m 249 - CONFIG_SCSI_CONSTANTS=y 250 - CONFIG_SCSI_LOGGING=y 251 - CONFIG_SCSI_SCAN_ASYNC=y 252 - CONFIG_SCSI_FC_ATTRS=m 253 - CONFIG_ISCSI_TCP=m 254 - CONFIG_BLK_DEV_3W_XXXX_RAID=m 255 - CONFIG_SCSI_3W_9XXX=m 256 - CONFIG_SCSI_ACARD=m 257 - CONFIG_SCSI_AACRAID=m 258 - CONFIG_SCSI_AIC7XXX=m 259 - CONFIG_AIC7XXX_RESET_DELAY_MS=15000 260 - # CONFIG_AIC7XXX_DEBUG_ENABLE is not set 261 - CONFIG_ATA=y 262 - CONFIG_ATA_PIIX=y 263 - CONFIG_PATA_IT8213=m 264 - CONFIG_PATA_OLDPIIX=y 265 - CONFIG_PATA_MPIIX=y 
266 - CONFIG_ATA_GENERIC=y 267 - CONFIG_PATA_LEGACY=y 268 - CONFIG_MD=y 269 - CONFIG_BLK_DEV_MD=m 270 - CONFIG_MD_LINEAR=m 271 - CONFIG_MD_RAID0=m 272 - CONFIG_MD_RAID1=m 273 - CONFIG_MD_RAID10=m 274 - CONFIG_MD_RAID456=m 275 - CONFIG_MD_MULTIPATH=m 276 - CONFIG_MD_FAULTY=m 277 - CONFIG_BLK_DEV_DM=m 278 - CONFIG_DM_CRYPT=m 279 - CONFIG_DM_SNAPSHOT=m 280 - CONFIG_DM_MIRROR=m 281 - CONFIG_DM_ZERO=m 282 - CONFIG_DM_MULTIPATH=m 283 - CONFIG_NETDEVICES=y 284 - CONFIG_BONDING=m 285 - CONFIG_DUMMY=m 286 - CONFIG_EQUALIZER=m 287 - CONFIG_IFB=m 288 - CONFIG_MACVLAN=m 289 - CONFIG_TUN=m 290 - CONFIG_VETH=m 291 - CONFIG_VIRTIO_NET=y 292 - CONFIG_PCNET32=y 293 - CONFIG_CHELSIO_T3=m 294 - CONFIG_AX88796=m 295 - CONFIG_NETXEN_NIC=m 296 - CONFIG_TC35815=m 297 - CONFIG_BROADCOM_PHY=m 298 - CONFIG_CICADA_PHY=m 299 - CONFIG_DAVICOM_PHY=m 300 - CONFIG_ICPLUS_PHY=m 301 - CONFIG_LXT_PHY=m 302 - CONFIG_MARVELL_PHY=m 303 - CONFIG_QSEMI_PHY=m 304 - CONFIG_REALTEK_PHY=m 305 - CONFIG_SMSC_PHY=m 306 - CONFIG_VITESSE_PHY=m 307 - CONFIG_ATMEL=m 308 - CONFIG_PCI_ATMEL=m 309 - CONFIG_IPW2100=m 310 - CONFIG_IPW2100_MONITOR=y 311 - CONFIG_HOSTAP=m 312 - CONFIG_HOSTAP_FIRMWARE=y 313 - CONFIG_HOSTAP_FIRMWARE_NVRAM=y 314 - CONFIG_HOSTAP_PLX=m 315 - CONFIG_HOSTAP_PCI=m 316 - CONFIG_PRISM54=m 317 - CONFIG_LIBERTAS=m 318 - CONFIG_INPUT_MOUSEDEV=y 319 - CONFIG_SERIAL_8250=y 320 - CONFIG_SERIAL_8250_CONSOLE=y 321 - CONFIG_POWER_RESET=y 322 - CONFIG_POWER_RESET_PIIX4_POWEROFF=y 323 - CONFIG_POWER_RESET_SYSCON=y 324 - # CONFIG_HWMON is not set 325 - CONFIG_FB=y 326 - CONFIG_FB_CIRRUS=y 327 - # CONFIG_VGA_CONSOLE is not set 328 - CONFIG_FRAMEBUFFER_CONSOLE=y 329 - CONFIG_HID=m 330 - CONFIG_RTC_CLASS=y 331 - CONFIG_RTC_DRV_CMOS=y 332 - CONFIG_UIO=m 333 - CONFIG_UIO_CIF=m 334 - CONFIG_VIRTIO_PCI=y 335 - CONFIG_VIRTIO_BALLOON=y 336 - CONFIG_VIRTIO_MMIO=y 337 - CONFIG_EXT2_FS=y 338 - CONFIG_EXT3_FS=y 339 - CONFIG_REISERFS_FS=m 340 - CONFIG_REISERFS_PROC_INFO=y 341 - CONFIG_REISERFS_FS_XATTR=y 342 - 
CONFIG_REISERFS_FS_POSIX_ACL=y 343 - CONFIG_REISERFS_FS_SECURITY=y 344 - CONFIG_JFS_FS=m 345 - CONFIG_JFS_POSIX_ACL=y 346 - CONFIG_JFS_SECURITY=y 347 - CONFIG_XFS_FS=m 348 - CONFIG_XFS_QUOTA=y 349 - CONFIG_XFS_POSIX_ACL=y 350 - CONFIG_QUOTA=y 351 - CONFIG_QFMT_V2=y 352 - CONFIG_FUSE_FS=m 353 - CONFIG_ISO9660_FS=m 354 - CONFIG_JOLIET=y 355 - CONFIG_ZISOFS=y 356 - CONFIG_UDF_FS=m 357 - CONFIG_MSDOS_FS=m 358 - CONFIG_VFAT_FS=m 359 - CONFIG_PROC_KCORE=y 360 - CONFIG_TMPFS=y 361 - CONFIG_AFFS_FS=m 362 - CONFIG_HFS_FS=m 363 - CONFIG_HFSPLUS_FS=m 364 - CONFIG_BEFS_FS=m 365 - CONFIG_BFS_FS=m 366 - CONFIG_EFS_FS=m 367 - CONFIG_JFFS2_FS=m 368 - CONFIG_JFFS2_FS_XATTR=y 369 - CONFIG_JFFS2_COMPRESSION_OPTIONS=y 370 - CONFIG_JFFS2_RUBIN=y 371 - CONFIG_CRAMFS=m 372 - CONFIG_VXFS_FS=m 373 - CONFIG_MINIX_FS=m 374 - CONFIG_ROMFS_FS=m 375 - CONFIG_SYSV_FS=m 376 - CONFIG_UFS_FS=m 377 - CONFIG_NFS_FS=y 378 - CONFIG_ROOT_NFS=y 379 - CONFIG_NFSD=y 380 - CONFIG_NFSD_V3=y 381 - CONFIG_NLS_CODEPAGE_437=m 382 - CONFIG_NLS_CODEPAGE_737=m 383 - CONFIG_NLS_CODEPAGE_775=m 384 - CONFIG_NLS_CODEPAGE_850=m 385 - CONFIG_NLS_CODEPAGE_852=m 386 - CONFIG_NLS_CODEPAGE_855=m 387 - CONFIG_NLS_CODEPAGE_857=m 388 - CONFIG_NLS_CODEPAGE_860=m 389 - CONFIG_NLS_CODEPAGE_861=m 390 - CONFIG_NLS_CODEPAGE_862=m 391 - CONFIG_NLS_CODEPAGE_863=m 392 - CONFIG_NLS_CODEPAGE_864=m 393 - CONFIG_NLS_CODEPAGE_865=m 394 - CONFIG_NLS_CODEPAGE_866=m 395 - CONFIG_NLS_CODEPAGE_869=m 396 - CONFIG_NLS_CODEPAGE_936=m 397 - CONFIG_NLS_CODEPAGE_950=m 398 - CONFIG_NLS_CODEPAGE_932=m 399 - CONFIG_NLS_CODEPAGE_949=m 400 - CONFIG_NLS_CODEPAGE_874=m 401 - CONFIG_NLS_ISO8859_8=m 402 - CONFIG_NLS_CODEPAGE_1250=m 403 - CONFIG_NLS_CODEPAGE_1251=m 404 - CONFIG_NLS_ASCII=m 405 - CONFIG_NLS_ISO8859_1=m 406 - CONFIG_NLS_ISO8859_2=m 407 - CONFIG_NLS_ISO8859_3=m 408 - CONFIG_NLS_ISO8859_4=m 409 - CONFIG_NLS_ISO8859_5=m 410 - CONFIG_NLS_ISO8859_6=m 411 - CONFIG_NLS_ISO8859_7=m 412 - CONFIG_NLS_ISO8859_9=m 413 - CONFIG_NLS_ISO8859_13=m 414 - 
CONFIG_NLS_ISO8859_14=m 415 - CONFIG_NLS_ISO8859_15=m 416 - CONFIG_NLS_KOI8_R=m 417 - CONFIG_NLS_KOI8_U=m 418 - CONFIG_CRYPTO_CRYPTD=m 419 - CONFIG_CRYPTO_LRW=m 420 - CONFIG_CRYPTO_PCBC=m 421 - CONFIG_CRYPTO_HMAC=y 422 - CONFIG_CRYPTO_XCBC=m 423 - CONFIG_CRYPTO_MD4=m 424 - CONFIG_CRYPTO_SHA512=m 425 - CONFIG_CRYPTO_TGR192=m 426 - CONFIG_CRYPTO_WP512=m 427 - CONFIG_CRYPTO_ANUBIS=m 428 - CONFIG_CRYPTO_BLOWFISH=m 429 - CONFIG_CRYPTO_CAMELLIA=m 430 - CONFIG_CRYPTO_CAST5=m 431 - CONFIG_CRYPTO_CAST6=m 432 - CONFIG_CRYPTO_FCRYPT=m 433 - CONFIG_CRYPTO_KHAZAD=m 434 - CONFIG_CRYPTO_SERPENT=m 435 - CONFIG_CRYPTO_TEA=m 436 - CONFIG_CRYPTO_TWOFISH=m
-3
arch/mips/configs/maltaup_xpa_defconfig
··· 236 236 CONFIG_BLK_DEV_RAM=y 237 237 CONFIG_CDROM_PKTCDVD=m 238 238 CONFIG_ATA_OVER_ETH=m 239 - CONFIG_IDE=y 240 - CONFIG_BLK_DEV_IDECD=y 241 - CONFIG_BLK_DEV_TC86C001=m 242 239 CONFIG_RAID_ATTRS=m 243 240 CONFIG_BLK_DEV_SD=y 244 241 CONFIG_CHR_DEV_ST=m
-3
arch/mips/configs/rbtx49xx_defconfig
··· 44 44 CONFIG_BLK_DEV_LOOP=y 45 45 CONFIG_BLK_DEV_RAM=y 46 46 CONFIG_BLK_DEV_RAM_SIZE=8192 47 - CONFIG_IDE=y 48 - CONFIG_BLK_DEV_IDE_TX4938=y 49 - CONFIG_BLK_DEV_IDE_TX4939=y 50 47 CONFIG_NETDEVICES=y 51 48 CONFIG_NE2000=y 52 49 CONFIG_SMC91X=y
+9 -11
arch/mips/configs/sb1250_swarm_defconfig
··· 17 17 CONFIG_SMP=y 18 18 CONFIG_NR_CPUS=2 19 19 CONFIG_HZ_1000=y 20 - CONFIG_PCI=y 21 20 CONFIG_MIPS32_O32=y 22 21 CONFIG_PM=y 23 22 CONFIG_MODULES=y ··· 33 34 CONFIG_IP_PNP=y 34 35 CONFIG_IP_PNP_DHCP=y 35 36 CONFIG_IP_PNP_BOOTP=y 36 - CONFIG_INET_XFRM_MODE_TRANSPORT=m 37 - CONFIG_INET_XFRM_MODE_TUNNEL=m 38 - CONFIG_INET_XFRM_MODE_BEET=m 39 37 CONFIG_TCP_MD5SIG=y 40 38 # CONFIG_IPV6 is not set 41 39 CONFIG_NETWORK_SECMARK=y 42 40 CONFIG_CFG80211=m 43 41 CONFIG_MAC80211=m 44 42 CONFIG_RFKILL=m 43 + CONFIG_PCI=y 45 44 CONFIG_FW_LOADER=m 46 45 CONFIG_CONNECTOR=m 47 46 CONFIG_BLK_DEV_RAM=y 48 47 CONFIG_BLK_DEV_RAM_SIZE=9220 49 48 CONFIG_CDROM_PKTCDVD=m 50 49 CONFIG_ATA_OVER_ETH=m 51 - CONFIG_IDE=y 52 - CONFIG_BLK_DEV_IDECD=y 53 - CONFIG_BLK_DEV_IDETAPE=y 54 50 CONFIG_RAID_ATTRS=m 51 + CONFIG_BLK_DEV_SD=y 52 + CONFIG_BLK_DEV_SR=y 53 + CONFIG_CHR_DEV_SG=y 54 + CONFIG_SCSI_CONSTANTS=y 55 + # CONFIG_SCSI_LOWLEVEL is not set 56 + CONFIG_ATA=y 57 + # CONFIG_ATA_BMDMA is not set 58 + CONFIG_PATA_PLATFORM=y 55 59 CONFIG_NETDEVICES=y 56 60 CONFIG_MACVLAN=m 57 61 CONFIG_SB1250_MAC=y ··· 90 88 CONFIG_CRYPTO_SHA512=m 91 89 CONFIG_CRYPTO_TGR192=m 92 90 CONFIG_CRYPTO_WP512=m 93 - CONFIG_CRYPTO_ANUBIS=m 94 91 CONFIG_CRYPTO_BLOWFISH=m 95 92 CONFIG_CRYPTO_CAMELLIA=m 96 93 CONFIG_CRYPTO_CAST5=m 97 94 CONFIG_CRYPTO_CAST6=m 98 95 CONFIG_CRYPTO_DES=m 99 96 CONFIG_CRYPTO_FCRYPT=m 100 - CONFIG_CRYPTO_KHAZAD=m 101 97 CONFIG_CRYPTO_SALSA20=m 102 - CONFIG_CRYPTO_SEED=m 103 98 CONFIG_CRYPTO_SERPENT=m 104 - CONFIG_CRYPTO_TEA=m 105 99 CONFIG_CRYPTO_TWOFISH=m 106 100 CONFIG_CRYPTO_DEFLATE=m 107 101 CONFIG_CRYPTO_LZO=m
+6 -3
arch/mips/configs/workpad_defconfig
··· 26 26 # CONFIG_IPV6 is not set 27 27 CONFIG_NETWORK_SECMARK=y 28 28 CONFIG_BLK_DEV_RAM=m 29 - CONFIG_IDE=y 30 - CONFIG_BLK_DEV_IDECS=m 31 - CONFIG_IDE_GENERIC=y 29 + # CONFIG_SCSI_PROC_FS is not set 30 + # CONFIG_SCSI_LOWLEVEL is not set 31 + CONFIG_ATA=y 32 + # CONFIG_ATA_VERBOSE_ERROR is not set 33 + # CONFIG_ATA_FORCE is not set 34 + # CONFIG_ATA_BMDMA is not set 32 35 CONFIG_NETDEVICES=y 33 36 CONFIG_PCMCIA_3C574=m 34 37 CONFIG_PCMCIA_3C589=m
+2
arch/mips/crypto/.gitignore
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + poly1305-core.S
+5 -5
arch/mips/generic/board-boston.its.S
··· 1 1 / { 2 2 images { 3 - fdt@boston { 3 + fdt-boston { 4 4 description = "img,boston Device Tree"; 5 5 data = /incbin/("boot/dts/img/boston.dtb"); 6 6 type = "flat_dt"; 7 7 arch = "mips"; 8 8 compression = "none"; 9 - hash@0 { 9 + hash { 10 10 algo = "sha1"; 11 11 }; 12 12 }; 13 13 }; 14 14 15 15 configurations { 16 - conf@boston { 16 + conf-boston { 17 17 description = "Boston Linux kernel"; 18 - kernel = "kernel@0"; 19 - fdt = "fdt@boston"; 18 + kernel = "kernel"; 19 + fdt = "fdt-boston"; 20 20 }; 21 21 }; 22 22 };
+8 -8
arch/mips/generic/board-jaguar2.its.S
··· 1 1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ 2 2 / { 3 3 images { 4 - fdt@jaguar2_pcb110 { 4 + fdt-jaguar2_pcb110 { 5 5 description = "MSCC Jaguar2 PCB110 Device Tree"; 6 6 data = /incbin/("boot/dts/mscc/jaguar2_pcb110.dtb"); 7 7 type = "flat_dt"; 8 8 arch = "mips"; 9 9 compression = "none"; 10 - hash@0 { 10 + hash { 11 11 algo = "sha1"; 12 12 }; 13 13 }; 14 - fdt@jaguar2_pcb111 { 14 + fdt-jaguar2_pcb111 { 15 15 description = "MSCC Jaguar2 PCB111 Device Tree"; 16 16 data = /incbin/("boot/dts/mscc/jaguar2_pcb111.dtb"); 17 17 type = "flat_dt"; 18 18 arch = "mips"; 19 19 compression = "none"; 20 - hash@0 { 20 + hash { 21 21 algo = "sha1"; 22 22 }; 23 23 }; ··· 26 26 configurations { 27 27 pcb110 { 28 28 description = "Jaguar2 Linux kernel"; 29 - kernel = "kernel@0"; 30 - fdt = "fdt@jaguar2_pcb110"; 29 + kernel = "kernel"; 30 + fdt = "fdt-jaguar2_pcb110"; 31 31 ramdisk = "ramdisk"; 32 32 }; 33 33 pcb111 { 34 34 description = "Jaguar2 Linux kernel"; 35 - kernel = "kernel@0"; 36 - fdt = "fdt@jaguar2_pcb111"; 35 + kernel = "kernel"; 36 + fdt = "fdt-jaguar2_pcb111"; 37 37 ramdisk = "ramdisk"; 38 38 }; 39 39 };
+4 -4
arch/mips/generic/board-luton.its.S
··· 1 1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ 2 2 / { 3 3 images { 4 - fdt@luton_pcb091 { 4 + fdt-luton_pcb091 { 5 5 description = "MSCC Luton PCB091 Device Tree"; 6 6 data = /incbin/("boot/dts/mscc/luton_pcb091.dtb"); 7 7 type = "flat_dt"; 8 8 arch = "mips"; 9 9 compression = "none"; 10 - hash@0 { 10 + hash { 11 11 algo = "sha1"; 12 12 }; 13 13 }; ··· 16 16 configurations { 17 17 pcb091 { 18 18 description = "Luton Linux kernel"; 19 - kernel = "kernel@0"; 20 - fdt = "fdt@luton_pcb091"; 19 + kernel = "kernel"; 20 + fdt = "fdt-luton_pcb091"; 21 21 }; 22 22 }; 23 23 };
+5 -5
arch/mips/generic/board-ni169445.its.S
··· 1 1 / { 2 2 images { 3 - fdt@ni169445 { 3 + fdt-ni169445 { 4 4 description = "NI 169445 device tree"; 5 5 data = /incbin/("boot/dts/ni/169445.dtb"); 6 6 type = "flat_dt"; 7 7 arch = "mips"; 8 8 compression = "none"; 9 - hash@0 { 9 + hash { 10 10 algo = "sha1"; 11 11 }; 12 12 }; 13 13 }; 14 14 15 15 configurations { 16 - conf@ni169445 { 16 + conf-ni169445 { 17 17 description = "NI 169445 Linux Kernel"; 18 - kernel = "kernel@0"; 19 - fdt = "fdt@ni169445"; 18 + kernel = "kernel"; 19 + fdt = "fdt-ni169445"; 20 20 }; 21 21 }; 22 22 };
+10 -10
arch/mips/generic/board-ocelot.its.S
··· 1 1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ 2 2 / { 3 3 images { 4 - fdt@ocelot_pcb123 { 4 + fdt-ocelot_pcb123 { 5 5 description = "MSCC Ocelot PCB123 Device Tree"; 6 6 data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb"); 7 7 type = "flat_dt"; 8 8 arch = "mips"; 9 9 compression = "none"; 10 - hash@0 { 10 + hash { 11 11 algo = "sha1"; 12 12 }; 13 13 }; 14 14 15 - fdt@ocelot_pcb120 { 15 + fdt-ocelot_pcb120 { 16 16 description = "MSCC Ocelot PCB120 Device Tree"; 17 17 data = /incbin/("boot/dts/mscc/ocelot_pcb120.dtb"); 18 18 type = "flat_dt"; 19 19 arch = "mips"; 20 20 compression = "none"; 21 - hash@0 { 21 + hash { 22 22 algo = "sha1"; 23 23 }; 24 24 }; 25 25 }; 26 26 27 27 configurations { 28 - conf@ocelot_pcb123 { 28 + conf-ocelot_pcb123 { 29 29 description = "Ocelot Linux kernel"; 30 - kernel = "kernel@0"; 31 - fdt = "fdt@ocelot_pcb123"; 30 + kernel = "kernel"; 31 + fdt = "fdt-ocelot_pcb123"; 32 32 }; 33 33 34 - conf@ocelot_pcb120 { 34 + conf-ocelot_pcb120 { 35 35 description = "Ocelot Linux kernel"; 36 - kernel = "kernel@0"; 37 - fdt = "fdt@ocelot_pcb120"; 36 + kernel = "kernel"; 37 + fdt = "fdt-ocelot_pcb120"; 38 38 }; 39 39 }; 40 40 };
+4 -4
arch/mips/generic/board-serval.its.S
··· 1 1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ 2 2 / { 3 3 images { 4 - fdt@serval_pcb105 { 4 + fdt-serval_pcb105 { 5 5 description = "MSCC Serval PCB105 Device Tree"; 6 6 data = /incbin/("boot/dts/mscc/serval_pcb105.dtb"); 7 7 type = "flat_dt"; 8 8 arch = "mips"; 9 9 compression = "none"; 10 - hash@0 { 10 + hash { 11 11 algo = "sha1"; 12 12 }; 13 13 }; ··· 16 16 configurations { 17 17 pcb105 { 18 18 description = "Serval Linux kernel"; 19 - kernel = "kernel@0"; 20 - fdt = "fdt@serval_pcb105"; 19 + kernel = "kernel"; 20 + fdt = "fdt-serval_pcb105"; 21 21 ramdisk = "ramdisk"; 22 22 }; 23 23 };
+5 -5
arch/mips/generic/board-xilfpga.its.S
··· 1 1 / { 2 2 images { 3 - fdt@xilfpga { 3 + fdt-xilfpga { 4 4 description = "MIPSfpga (xilfpga) Device Tree"; 5 5 data = /incbin/("boot/dts/xilfpga/nexys4ddr.dtb"); 6 6 type = "flat_dt"; 7 7 arch = "mips"; 8 8 compression = "none"; 9 - hash@0 { 9 + hash { 10 10 algo = "sha1"; 11 11 }; 12 12 }; 13 13 }; 14 14 15 15 configurations { 16 - conf@xilfpga { 16 + conf-xilfpga { 17 17 description = "MIPSfpga Linux kernel"; 18 - kernel = "kernel@0"; 19 - fdt = "fdt@xilfpga"; 18 + kernel = "kernel"; 19 + fdt = "fdt-xilfpga"; 20 20 }; 21 21 }; 22 22 };
+5 -5
arch/mips/generic/vmlinux.its.S
··· 6 6 #address-cells = <ADDR_CELLS>; 7 7 8 8 images { 9 - kernel@0 { 9 + kernel { 10 10 description = KERNEL_NAME; 11 11 data = /incbin/(VMLINUX_BINARY); 12 12 type = "kernel"; ··· 15 15 compression = VMLINUX_COMPRESSION; 16 16 load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>; 17 17 entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>; 18 - hash@0 { 18 + hash { 19 19 algo = "sha1"; 20 20 }; 21 21 }; 22 22 }; 23 23 24 24 configurations { 25 - default = "conf@default"; 25 + default = "conf-default"; 26 26 27 - conf@default { 27 + conf-default { 28 28 description = "Generic Linux kernel"; 29 - kernel = "kernel@0"; 29 + kernel = "kernel"; 30 30 }; 31 31 }; 32 32 };
+3 -4
arch/mips/include/asm/Kbuild
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 # MIPS headers 3 - generated-y += syscall_table_32_o32.h 4 - generated-y += syscall_table_64_n32.h 5 - generated-y += syscall_table_64_n64.h 6 - generated-y += syscall_table_64_o32.h 3 + generated-y += syscall_table_n32.h 4 + generated-y += syscall_table_n64.h 5 + generated-y += syscall_table_o32.h 7 6 generated-y += unistd_nr_n32.h 8 7 generated-y += unistd_nr_n64.h 9 8 generated-y += unistd_nr_o32.h
+1 -2
arch/mips/include/asm/asmmacro.h
··· 44 44 .endm 45 45 #endif 46 46 47 - #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \ 48 - defined(CONFIG_CPU_MIPSR6) 47 + #ifdef CONFIG_CPU_HAS_DIEI 49 48 .macro local_irq_enable reg=t0 50 49 ei 51 50 irq_enable_hazard
+1 -1
arch/mips/include/asm/bootinfo.h
··· 107 107 extern char arcs_cmdline[COMMAND_LINE_SIZE]; 108 108 109 109 /* 110 - * Registers a0, a1, a3 and a4 as passed to the kernel entry by firmware 110 + * Registers a0, a1, a2 and a3 as passed to the kernel entry by firmware 111 111 */ 112 112 extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; 113 113
+39 -16
arch/mips/include/asm/div64.h
··· 1 1 /* 2 - * Copyright (C) 2000, 2004 Maciej W. Rozycki 2 + * Copyright (C) 2000, 2004, 2021 Maciej W. Rozycki 3 3 * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org) 4 4 * 5 5 * This file is subject to the terms and conditions of the GNU General Public ··· 9 9 #ifndef __ASM_DIV64_H 10 10 #define __ASM_DIV64_H 11 11 12 - #include <asm-generic/div64.h> 12 + #include <asm/bitsperlong.h> 13 13 14 - #if BITS_PER_LONG == 64 15 - 16 - #include <linux/types.h> 14 + #if BITS_PER_LONG == 32 17 15 18 16 /* 19 17 * No traps on overflows for any of these... 20 18 */ 21 19 22 - #define __div64_32(n, base) \ 23 - ({ \ 20 + #define do_div64_32(res, high, low, base) ({ \ 24 21 unsigned long __cf, __tmp, __tmp2, __i; \ 25 22 unsigned long __quot32, __mod32; \ 26 - unsigned long __high, __low; \ 27 - unsigned long long __n; \ 28 23 \ 29 - __high = *__n >> 32; \ 30 - __low = __n; \ 31 24 __asm__( \ 32 25 " .set push \n" \ 33 26 " .set noat \n" \ ··· 44 51 " subu %0, %0, %z6 \n" \ 45 52 " addiu %2, %2, 1 \n" \ 46 53 "3: \n" \ 47 - " bnez %4, 0b\n\t" \ 48 - " srl %5, %1, 0x1f\n\t" \ 54 + " bnez %4, 0b \n" \ 55 + " srl %5, %1, 0x1f \n" \ 49 56 " .set pop" \ 50 57 : "=&r" (__mod32), "=&r" (__tmp), \ 51 58 "=&r" (__quot32), "=&r" (__cf), \ 52 59 "=&r" (__i), "=&r" (__tmp2) \ 53 - : "Jr" (base), "0" (__high), "1" (__low)); \ 60 + : "Jr" (base), "0" (high), "1" (low)); \ 54 61 \ 55 - (__n) = __quot32; \ 62 + (res) = __quot32; \ 56 63 __mod32; \ 57 64 }) 58 65 59 - #endif /* BITS_PER_LONG == 64 */ 66 + #define __div64_32(n, base) ({ \ 67 + unsigned long __upper, __low, __high, __radix; \ 68 + unsigned long long __quot; \ 69 + unsigned long long __div; \ 70 + unsigned long __mod; \ 71 + \ 72 + __div = (*n); \ 73 + __radix = (base); \ 74 + \ 75 + __high = __div >> 32; \ 76 + __low = __div; \ 77 + \ 78 + if (__high < __radix) { \ 79 + __upper = __high; \ 80 + __high = 0; \ 81 + } else { \ 82 + __upper = __high % __radix; \ 83 + __high /= __radix; \ 84 + } \ 85 + \ 86 + __mod = 
do_div64_32(__low, __upper, __low, __radix); \ 87 + \ 88 + __quot = __high; \ 89 + __quot = __quot << 32 | __low; \ 90 + (*n) = __quot; \ 91 + __mod; \ 92 + }) 93 + 94 + #endif /* BITS_PER_LONG == 32 */ 95 + 96 + #include <asm-generic/div64.h> 60 97 61 98 #endif /* __ASM_DIV64_H */
+13 -1
arch/mips/include/asm/io.h
··· 100 100 * almost all conceivable cases a device driver should not be using 101 101 * this function 102 102 */ 103 - static inline unsigned long virt_to_phys(volatile const void *address) 103 + static inline unsigned long __virt_to_phys_nodebug(volatile const void *address) 104 104 { 105 105 return __pa(address); 106 + } 107 + 108 + #ifdef CONFIG_DEBUG_VIRTUAL 109 + extern phys_addr_t __virt_to_phys(volatile const void *x); 110 + #else 111 + #define __virt_to_phys(x) __virt_to_phys_nodebug(x) 112 + #endif 113 + 114 + #define virt_to_phys virt_to_phys 115 + static inline phys_addr_t virt_to_phys(const volatile void *x) 116 + { 117 + return __virt_to_phys(x); 106 118 } 107 119 108 120 /*
-238
arch/mips/include/asm/kvm_host.h
··· 88 88 89 89 #define KVM_HALT_POLL_NS_DEFAULT 500000 90 90 91 - #ifdef CONFIG_KVM_MIPS_VZ 92 91 extern unsigned long GUESTID_MASK; 93 92 extern unsigned long GUESTID_FIRST_VERSION; 94 93 extern unsigned long GUESTID_VERSION_MASK; 95 - #endif 96 94 97 - 98 - /* 99 - * Special address that contains the comm page, used for reducing # of traps 100 - * This needs to be within 32Kb of 0x0 (so the zero register can be used), but 101 - * preferably not at 0x0 so that most kernel NULL pointer dereferences can be 102 - * caught. 103 - */ 104 - #define KVM_GUEST_COMMPAGE_ADDR ((PAGE_SIZE > 0x8000) ? 0 : \ 105 - (0x8000 - PAGE_SIZE)) 106 - 107 - #define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \ 108 - ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0)) 109 - 110 - #define KVM_GUEST_KUSEG 0x00000000UL 111 - #define KVM_GUEST_KSEG0 0x40000000UL 112 - #define KVM_GUEST_KSEG1 0x40000000UL 113 - #define KVM_GUEST_KSEG23 0x60000000UL 114 - #define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0xe0000000) 115 - #define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff) 116 - 117 - #define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0) 118 - #define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1) 119 - #define KVM_GUEST_CKSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23) 120 - 121 - /* 122 - * Map an address to a certain kernel segment 123 - */ 124 - #define KVM_GUEST_KSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0) 125 - #define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1) 126 - #define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23) 127 - 128 - #define KVM_INVALID_PAGE 0xdeadbeef 129 95 #define KVM_INVALID_ADDR 0xdeadbeef 130 96 131 97 /* ··· 131 165 u64 fpe_exits; 132 166 u64 msa_disabled_exits; 133 167 u64 flush_dcache_exits; 134 - #ifdef CONFIG_KVM_MIPS_VZ 135 168 u64 vz_gpsi_exits; 136 169 u64 vz_gsfc_exits; 137 170 u64 
vz_hc_exits; ··· 141 176 u64 vz_resvd_exits; 142 177 #ifdef CONFIG_CPU_LOONGSON64 143 178 u64 vz_cpucfg_exits; 144 - #endif 145 179 #endif 146 180 u64 halt_successful_poll; 147 181 u64 halt_attempted_poll; ··· 267 303 EMULATE_HYPERCALL, /* HYPCALL instruction */ 268 304 }; 269 305 270 - #define mips3_paddr_to_tlbpfn(x) \ 271 - (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME) 272 - #define mips3_tlbpfn_to_paddr(x) \ 273 - ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT) 274 - 275 - #define MIPS3_PG_SHIFT 6 276 - #define MIPS3_PG_FRAME 0x3fffffc0 277 - 278 306 #if defined(CONFIG_64BIT) 279 307 #define VPN2_MASK GENMASK(cpu_vmbits - 1, 13) 280 308 #else ··· 293 337 #define KVM_MIPS_AUX_FPU 0x1 294 338 #define KVM_MIPS_AUX_MSA 0x2 295 339 296 - #define KVM_MIPS_GUEST_TLB_SIZE 64 297 340 struct kvm_vcpu_arch { 298 341 void *guest_ebase; 299 342 int (*vcpu_run)(struct kvm_vcpu *vcpu); ··· 325 370 /* COP0 State */ 326 371 struct mips_coproc *cop0; 327 372 328 - /* Host KSEG0 address of the EI/DI offset */ 329 - void *kseg0_commpage; 330 - 331 373 /* Resume PC after MMIO completion */ 332 374 unsigned long io_pc; 333 375 /* GPR used as IO source/target */ ··· 350 398 /* Bitmask of pending exceptions to be cleared */ 351 399 unsigned long pending_exceptions_clr; 352 400 353 - /* S/W Based TLB for guest */ 354 - struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE]; 355 - 356 - /* Guest kernel/user [partial] mm */ 357 - struct mm_struct guest_kernel_mm, guest_user_mm; 358 - 359 - /* Guest ASID of last user mode execution */ 360 - unsigned int last_user_gasid; 361 - 362 401 /* Cache some mmu pages needed inside spinlock regions */ 363 402 struct kvm_mmu_memory_cache mmu_page_cache; 364 403 365 - #ifdef CONFIG_KVM_MIPS_VZ 366 404 /* vcpu's vzguestid is different on each host cpu in an smp system */ 367 405 u32 vzguestid[NR_CPUS]; 368 406 ··· 363 421 364 422 /* emulated guest MAAR registers */ 365 423 unsigned long maar[6]; 366 - #endif 367 424 368 425 /* 
Last CPU the VCPU state was loaded on */ 369 426 int last_sched_cpu; ··· 592 651 __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \ 593 652 __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type) 594 653 595 - #ifndef CONFIG_KVM_MIPS_VZ 596 - 597 - /* 598 - * T&E (trap & emulate software based virtualisation) 599 - * We generate the common accessors operating exclusively on the saved context 600 - * in RAM. 601 - */ 602 - 603 - #define __BUILD_KVM_RW_HW __BUILD_KVM_RW_SW 604 - #define __BUILD_KVM_SET_HW __BUILD_KVM_SET_SW 605 - #define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_ATOMIC_SW 606 - 607 - #else 608 - 609 654 /* 610 655 * VZ (hardware assisted virtualisation) 611 656 * These macros use the active guest state in VZ mode (hardware registers), ··· 623 696 * Races must be handled explicitly. 624 697 */ 625 698 #define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_SET_HW 626 - 627 - #endif 628 699 629 700 /* 630 701 * Define accessors for CP0 registers that are accessible to the guest. These ··· 799 874 void kvm_lose_fpu(struct kvm_vcpu *vcpu); 800 875 801 876 /* TLB handling */ 802 - u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu); 803 - 804 - u32 kvm_get_user_asid(struct kvm_vcpu *vcpu); 805 - 806 - u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu); 807 - 808 - #ifdef CONFIG_KVM_MIPS_VZ 809 877 int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr, 810 878 struct kvm_vcpu *vcpu, bool write_fault); 811 - #endif 812 - extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr, 813 - struct kvm_vcpu *vcpu, 814 - bool write_fault); 815 879 816 - extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, 817 - struct kvm_vcpu *vcpu); 818 - 819 - extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, 820 - struct kvm_mips_tlb *tlb, 821 - unsigned long gva, 822 - bool write_fault); 823 - 824 - extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, 825 - u32 *opc, 826 - struct kvm_vcpu *vcpu, 827 - bool write_fault); 828 - 829 - extern 
void kvm_mips_dump_host_tlbs(void); 830 - extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu); 831 - extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi, 832 - bool user, bool kernel); 833 - 834 - extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, 835 - unsigned long entryhi); 836 - 837 - #ifdef CONFIG_KVM_MIPS_VZ 838 880 int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi); 839 881 int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva, 840 882 unsigned long *gpa); ··· 815 923 void kvm_loongson_clear_guest_vtlb(void); 816 924 void kvm_loongson_clear_guest_ftlb(void); 817 925 #endif 818 - #endif 819 - 820 - void kvm_mips_suspend_mm(int cpu); 821 - void kvm_mips_resume_mm(int cpu); 822 926 823 927 /* MMU handling */ 824 928 825 - /** 826 - * enum kvm_mips_flush - Types of MMU flushes. 827 - * @KMF_USER: Flush guest user virtual memory mappings. 828 - * Guest USeg only. 829 - * @KMF_KERN: Flush guest kernel virtual memory mappings. 830 - * Guest USeg and KSeg2/3. 831 - * @KMF_GPA: Flush guest physical memory mappings. 832 - * Also includes KSeg0 if KMF_KERN is set. 
833 - */ 834 - enum kvm_mips_flush { 835 - KMF_USER = 0x0, 836 - KMF_KERN = 0x1, 837 - KMF_GPA = 0x2, 838 - }; 839 - void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags); 840 929 bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn); 841 930 int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn); 842 931 pgd_t *kvm_pgd_alloc(void); 843 932 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); 844 - void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr, 845 - bool user); 846 - void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu); 847 - void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu); 848 - 849 - enum kvm_mips_fault_result { 850 - KVM_MIPS_MAPPED = 0, 851 - KVM_MIPS_GVA, 852 - KVM_MIPS_GPA, 853 - KVM_MIPS_TLB, 854 - KVM_MIPS_TLBINV, 855 - KVM_MIPS_TLBMOD, 856 - }; 857 - enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, 858 - unsigned long gva, 859 - bool write); 860 933 861 934 #define KVM_ARCH_WANT_MMU_NOTIFIER 862 935 int kvm_unmap_hva_range(struct kvm *kvm, ··· 831 974 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); 832 975 833 976 /* Emulation */ 834 - int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); 835 977 enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause); 836 978 int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); 837 979 int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); ··· 862 1006 return false; 863 1007 } 864 1008 865 - extern enum emulation_result kvm_mips_emulate_inst(u32 cause, 866 - u32 *opc, 867 - struct kvm_vcpu *vcpu); 868 - 869 - long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu); 870 - 871 - extern enum emulation_result kvm_mips_emulate_syscall(u32 cause, 872 - u32 *opc, 873 - struct kvm_vcpu *vcpu); 874 - 875 - extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause, 876 - u32 *opc, 877 - struct kvm_vcpu *vcpu); 878 - 879 - extern enum 
emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause, 880 - u32 *opc, 881 - struct kvm_vcpu *vcpu); 882 - 883 - extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause, 884 - u32 *opc, 885 - struct kvm_vcpu *vcpu); 886 - 887 - extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause, 888 - u32 *opc, 889 - struct kvm_vcpu *vcpu); 890 - 891 - extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause, 892 - u32 *opc, 893 - struct kvm_vcpu *vcpu); 894 - 895 - extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause, 896 - u32 *opc, 897 - struct kvm_vcpu *vcpu); 898 - 899 - extern enum emulation_result kvm_mips_handle_ri(u32 cause, 900 - u32 *opc, 901 - struct kvm_vcpu *vcpu); 902 - 903 - extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause, 904 - u32 *opc, 905 - struct kvm_vcpu *vcpu); 906 - 907 - extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause, 908 - u32 *opc, 909 - struct kvm_vcpu *vcpu); 910 - 911 - extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause, 912 - u32 *opc, 913 - struct kvm_vcpu *vcpu); 914 - 915 - extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause, 916 - u32 *opc, 917 - struct kvm_vcpu *vcpu); 918 - 919 - extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause, 920 - u32 *opc, 921 - struct kvm_vcpu *vcpu); 922 - 923 - extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause, 924 - u32 *opc, 925 - struct kvm_vcpu *vcpu); 926 - 927 1009 extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu); 928 1010 929 1011 u32 kvm_mips_read_count(struct kvm_vcpu *vcpu); ··· 881 1087 int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before, 882 1088 u32 count, int min_drift); 883 1089 884 - #ifdef CONFIG_KVM_MIPS_VZ 885 1090 void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu); 886 1091 void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu); 887 - #else 888 - static inline void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) {} 889 - static inline 
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {} 890 - #endif 891 1092 892 - enum emulation_result kvm_mips_check_privilege(u32 cause, 893 - u32 *opc, 894 - struct kvm_vcpu *vcpu); 895 - 896 - enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst, 897 - u32 *opc, 898 - u32 cause, 899 - struct kvm_vcpu *vcpu); 900 - enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, 901 - u32 *opc, 902 - u32 cause, 903 - struct kvm_vcpu *vcpu); 904 1093 enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, 905 1094 u32 cause, 906 1095 struct kvm_vcpu *vcpu); ··· 894 1117 /* COP0 */ 895 1118 enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu); 896 1119 897 - unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu); 898 - unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu); 899 - unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu); 900 - unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu); 901 - 902 1120 /* Hypercalls (hypcall.c) */ 903 1121 904 1122 enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu, 905 1123 union mips_instruction inst); 906 1124 int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu); 907 - 908 - /* Dynamic binary translation */ 909 - extern int kvm_mips_trans_cache_index(union mips_instruction inst, 910 - u32 *opc, struct kvm_vcpu *vcpu); 911 - extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc, 912 - struct kvm_vcpu *vcpu); 913 - extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc, 914 - struct kvm_vcpu *vcpu); 915 - extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc, 916 - struct kvm_vcpu *vcpu); 917 1125 918 1126 /* Misc */ 919 1127 extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
+8
arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
··· 157 157 .macro smp_slave_setup 158 158 .endm 159 159 160 + #define USE_KEXEC_SMP_WAIT_FINAL 161 + .macro kexec_smp_wait_final 162 + .set push 163 + .set noreorder 164 + synci 0($0) 165 + .set pop 166 + .endm 167 + 160 168 #endif /* __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H */
-12
arch/mips/include/asm/mach-generic/spaces.h
··· 30 30 #endif /* __ASSEMBLY__ */ 31 31 32 32 #ifdef CONFIG_32BIT 33 - #ifdef CONFIG_KVM_GUEST 34 - #define CAC_BASE _AC(0x40000000, UL) 35 - #else 36 33 #define CAC_BASE _AC(0x80000000, UL) 37 - #endif 38 34 #ifndef IO_BASE 39 35 #define IO_BASE _AC(0xa0000000, UL) 40 36 #endif ··· 39 43 #endif 40 44 41 45 #ifndef MAP_BASE 42 - #ifdef CONFIG_KVM_GUEST 43 - #define MAP_BASE _AC(0x60000000, UL) 44 - #else 45 46 #define MAP_BASE _AC(0xc0000000, UL) 46 - #endif 47 47 #endif 48 48 49 49 /* ··· 92 100 #endif 93 101 94 102 #ifndef FIXADDR_TOP 95 - #ifdef CONFIG_KVM_GUEST 96 - #define FIXADDR_TOP ((unsigned long)(long)(int)0x7ffe0000) 97 - #else 98 103 #define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000) 99 - #endif 100 104 #endif 101 105 102 106 #endif /* __ASM_MACH_GENERIC_SPACES_H */
-27
arch/mips/include/asm/mach-loongson64/boot_param.h
··· 198 198 VIRTUAL = 3 199 199 }; 200 200 201 - struct loongson_system_configuration { 202 - u32 nr_cpus; 203 - u32 nr_nodes; 204 - int cores_per_node; 205 - int cores_per_package; 206 - u16 boot_cpu_id; 207 - u16 reserved_cpus_mask; 208 - enum loongson_cpu_type cputype; 209 - enum loongson_bridge_type bridgetype; 210 - u64 ht_control_base; 211 - u64 pci_mem_start_addr; 212 - u64 pci_mem_end_addr; 213 - u64 pci_io_base; 214 - u64 restart_addr; 215 - u64 poweroff_addr; 216 - u64 suspend_addr; 217 - u64 vgabios_addr; 218 - u32 dma_mask_bits; 219 - char ecname[32]; 220 - u32 nr_uarts; 221 - struct uart_device uarts[MAX_UARTS]; 222 - u32 nr_sensors; 223 - struct sensor_device sensors[MAX_SENSORS]; 224 - u64 workarounds; 225 - void (*early_config)(void); 226 - }; 227 - 228 201 extern struct efi_memory_map_loongson *loongson_memmap; 229 202 extern struct loongson_system_configuration loongson_sysconf; 230 203
+1
arch/mips/include/asm/mach-loongson64/builtin_dtbs.h
··· 8 8 #ifndef __ASM_MACH_LOONGSON64_BUILTIN_DTBS_H_ 9 9 #define __ASM_MACH_LOONGSON64_BUILTIN_DTBS_H_ 10 10 11 + extern u32 __dtb_loongson64_2core_2k1000_begin[]; 11 12 extern u32 __dtb_loongson64c_4core_ls7a_begin[]; 12 13 extern u32 __dtb_loongson64c_4core_rs780e_begin[]; 13 14 extern u32 __dtb_loongson64c_8core_rs780e_begin[];
+27
arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
··· 75 75 .set pop 76 76 .endm 77 77 78 + #define USE_KEXEC_SMP_WAIT_FINAL 79 + .macro kexec_smp_wait_final 80 + /* s0:prid s1:initfn */ 81 + /* a0:base t1:cpuid t2:node t9:count */ 82 + mfc0 t1, CP0_EBASE 83 + andi t1, MIPS_EBASE_CPUNUM 84 + dins a0, t1, 8, 2 /* insert core id*/ 85 + dext t2, t1, 2, 2 86 + dins a0, t2, 44, 2 /* insert node id */ 87 + mfc0 s0, CP0_PRID 88 + andi s0, s0, (PRID_IMP_MASK | PRID_REV_MASK) 89 + beq s0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3B_R1), 1f 90 + beq s0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3B_R2), 1f 91 + b 2f /* Loongson-3A1000/3A2000/3A3000/3A4000 */ 92 + 1: dins a0, t2, 14, 2 /* Loongson-3B1000/3B1500 need bit 15~14 */ 93 + 2: li t9, 0x100 /* wait for init loop */ 94 + 3: addiu t9, -1 /* limit mailbox access */ 95 + bnez t9, 3b 96 + lw s1, 0x20(a0) /* check PC as an indicator */ 97 + beqz s1, 2b 98 + ld s1, 0x20(a0) /* get PC via mailbox reg0 */ 99 + ld sp, 0x28(a0) /* get SP via mailbox reg1 */ 100 + ld gp, 0x30(a0) /* get GP via mailbox reg2 */ 101 + ld a1, 0x38(a0) 102 + jr s1 /* jump to initial PC */ 103 + .endm 104 + 78 105 #endif /* __ASM_MACH_LOONGSON64_KERNEL_ENTRY_H */
+26 -1
arch/mips/include/asm/mach-loongson64/loongson.h
··· 12 12 #include <linux/irq.h> 13 13 #include <boot_param.h> 14 14 15 + enum loongson_fw_interface { 16 + LOONGSON_LEFI, 17 + LOONGSON_DTB, 18 + }; 19 + 20 + /* machine-specific boot configuration */ 21 + struct loongson_system_configuration { 22 + enum loongson_fw_interface fw_interface; 23 + u32 nr_cpus; 24 + u32 nr_nodes; 25 + int cores_per_node; 26 + int cores_per_package; 27 + u16 boot_cpu_id; 28 + u16 reserved_cpus_mask; 29 + enum loongson_cpu_type cputype; 30 + enum loongson_bridge_type bridgetype; 31 + u64 restart_addr; 32 + u64 poweroff_addr; 33 + u64 suspend_addr; 34 + u64 vgabios_addr; 35 + u32 dma_mask_bits; 36 + u64 workarounds; 37 + void (*early_config)(void); 38 + }; 15 39 16 40 /* machine-specific reboot/halt operation */ 17 41 extern void mach_prepare_reboot(void); ··· 47 23 extern const struct plat_smp_ops loongson3_smp_ops; 48 24 49 25 /* loongson-specific command line, env and memory initialization */ 50 - extern void __init prom_init_env(void); 26 + extern void __init prom_dtb_init_env(void); 27 + extern void __init prom_lefi_init_env(void); 51 28 extern void __init szmem(unsigned int node); 52 29 extern void *loongson_fdt_blob; 53 30
+4 -3
arch/mips/include/asm/mach-ralink/mt7621.h
··· 24 24 #define CHIP_REV_VER_SHIFT 8 25 25 #define CHIP_REV_ECO_MASK 0xf 26 26 27 - #define MT7621_DRAM_BASE 0x0 28 - #define MT7621_DDR2_SIZE_MIN 32 29 - #define MT7621_DDR2_SIZE_MAX 256 27 + #define MT7621_LOWMEM_BASE 0x0 28 + #define MT7621_LOWMEM_MAX_SIZE 0x1C000000 29 + #define MT7621_HIGHMEM_BASE 0x20000000 30 + #define MT7621_HIGHMEM_SIZE 0x4000000 30 31 31 32 #define MT7621_CHIP_NAME0 0x3637544D 32 33 #define MT7621_CHIP_NAME1 0x20203132
+22 -1
arch/mips/include/asm/mips-cps.h
··· 10 10 #include <linux/io.h> 11 11 #include <linux/types.h> 12 12 13 + #include <asm/mips-boards/launch.h> 14 + 13 15 extern unsigned long __cps_access_bad_size(void) 14 16 __compiletime_error("Bad size for CPS accessor"); 15 17 ··· 167 165 */ 168 166 static inline unsigned int mips_cps_numcores(unsigned int cluster) 169 167 { 168 + unsigned int ncores; 169 + 170 170 if (!mips_cm_present()) 171 171 return 0; 172 172 173 173 /* Add one before masking to handle 0xff indicating no cores */ 174 - return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES; 174 + ncores = (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES; 175 + 176 + if (IS_ENABLED(CONFIG_SOC_MT7621)) { 177 + struct cpulaunch *launch; 178 + 179 + /* 180 + * Ralink MT7621S SoC is single core, but the GCR_CONFIG method 181 + * always reports 2 cores. Check the second core's LAUNCH_FREADY 182 + * flag to detect if the second core is missing. This method 183 + * only works before the core has been started. 184 + */ 185 + launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH); 186 + launch += 2; /* MT7621 has 2 VPEs per core */ 187 + if (!(launch->flags & LAUNCH_FREADY)) 188 + ncores = 1; 189 + } 190 + 191 + return ncores; 175 192 } 176 193 177 194 /**
+1 -1
arch/mips/include/asm/octeon/cvmx-address.h
··· 152 152 153 153 /* physical mem address */ 154 154 struct { 155 - /* techically, <47:40> are dont-cares */ 155 + /* technically, <47:40> are dont-cares */ 156 156 uint64_t zeroes:24; 157 157 /* the hardware ignores <39:36> in Octeon I */ 158 158 uint64_t unaddr:4;
+2
arch/mips/include/asm/octeon/cvmx-bootinfo.h
··· 298 298 CVMX_BOARD_TYPE_UBNT_E200 = 20003, 299 299 CVMX_BOARD_TYPE_UBNT_E220 = 20005, 300 300 CVMX_BOARD_TYPE_CUST_DSR1000N = 20006, 301 + CVMX_BOARD_TYPE_UBNT_E300 = 20300, 301 302 CVMX_BOARD_TYPE_KONTRON_S1901 = 21901, 302 303 CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000, 303 304 ··· 402 401 ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E200) 403 402 ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E220) 404 403 ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DSR1000N) 404 + ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E300) 405 405 ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KONTRON_S1901) 406 406 ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX) 407 407 }
+8 -1
arch/mips/include/asm/page.h
··· 210 210 * also affect MIPS so we keep this one until GCC 3.x has been retired 211 211 * before we can apply https://patchwork.linux-mips.org/patch/1541/ 212 212 */ 213 + #define __pa_symbol_nodebug(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) 214 + 215 + #ifdef CONFIG_DEBUG_VIRTUAL 216 + extern phys_addr_t __phys_addr_symbol(unsigned long x); 217 + #else 218 + #define __phys_addr_symbol(x) __pa_symbol_nodebug(x) 219 + #endif 213 220 214 221 #ifndef __pa_symbol 215 - #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) 222 + #define __pa_symbol(x) __phys_addr_symbol((unsigned long)(x)) 216 223 #endif 217 224 218 225 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-1
arch/mips/include/asm/pci.h
··· 38 38 struct resource *io_resource; 39 39 unsigned long io_offset; 40 40 unsigned long io_map_base; 41 - struct resource *busn_resource; 42 41 43 42 #ifndef CONFIG_PCI_DOMAINS_GENERIC 44 43 unsigned int index;
-9
arch/mips/include/asm/processor.h
··· 32 32 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); 33 33 34 34 #ifdef CONFIG_32BIT 35 - #ifdef CONFIG_KVM_GUEST 36 - /* User space process size is limited to 1GB in KVM Guest Mode */ 37 - #define TASK_SIZE 0x3fff8000UL 38 - #else 39 35 /* 40 36 * User space process size: 2GB. This is hardcoded into a few places, 41 37 * so don't change it unless you know what you are doing. 42 38 */ 43 39 #define TASK_SIZE 0x80000000UL 44 - #endif 45 40 46 41 #define STACK_TOP_MAX TASK_SIZE 47 42 ··· 220 225 #else 221 226 #define COP2_INIT 222 227 #endif 223 - 224 - typedef struct { 225 - unsigned long seg; 226 - } mm_segment_t; 227 228 228 229 #ifdef CONFIG_CPU_HAS_MSA 229 230 # define ARCH_MIN_TASKALIGN 16
-6
arch/mips/include/asm/thread_info.h
··· 28 28 unsigned long tp_value; /* thread pointer */ 29 29 __u32 cpu; /* current CPU */ 30 30 int preempt_count; /* 0 => preemptable, <0 => BUG */ 31 - mm_segment_t addr_limit; /* 32 - * thread address space limit: 33 - * 0x7fffffff for user-thead 34 - * 0xffffffff for kernel-thread 35 - */ 36 31 struct pt_regs *regs; 37 32 long syscall; /* syscall number */ 38 33 }; ··· 41 46 .flags = _TIF_FIXADE, \ 42 47 .cpu = 0, \ 43 48 .preempt_count = INIT_PREEMPT_COUNT, \ 44 - .addr_limit = KERNEL_DS, \ 45 49 } 46 50 47 51 /*
+223 -381
arch/mips/include/asm/uaccess.h
··· 16 16 #include <asm/asm-eva.h> 17 17 #include <asm/extable.h> 18 18 19 - /* 20 - * The fs value determines whether argument validity checking should be 21 - * performed or not. If get_fs() == USER_DS, checking is performed, with 22 - * get_fs() == KERNEL_DS, checking is bypassed. 23 - * 24 - * For historical reasons, these macros are grossly misnamed. 25 - */ 26 19 #ifdef CONFIG_32BIT 27 20 28 - #ifdef CONFIG_KVM_GUEST 29 - #define __UA_LIMIT 0x40000000UL 30 - #else 31 21 #define __UA_LIMIT 0x80000000UL 32 - #endif 33 22 34 23 #define __UA_ADDR ".word" 35 24 #define __UA_LA "la" ··· 41 52 #define __UA_t1 "$13" 42 53 43 54 #endif /* CONFIG_64BIT */ 44 - 45 - /* 46 - * USER_DS is a bitmask that has the bits set that may not be set in a valid 47 - * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but 48 - * the arithmetic we're doing only works if the limit is a power of two, so 49 - * we use 0x80000000 here on 32-bit kernels. If a process passes an invalid 50 - * address in this range it's the process's problem, not ours :-) 51 - */ 52 - 53 - #ifdef CONFIG_KVM_GUEST 54 - #define KERNEL_DS ((mm_segment_t) { 0x80000000UL }) 55 - #define USER_DS ((mm_segment_t) { 0xC0000000UL }) 56 - #else 57 - #define KERNEL_DS ((mm_segment_t) { 0UL }) 58 - #define USER_DS ((mm_segment_t) { __UA_LIMIT }) 59 - #endif 60 - 61 - #define get_fs() (current_thread_info()->addr_limit) 62 - #define set_fs(x) (current_thread_info()->addr_limit = (x)) 63 - 64 - #define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) 65 - 66 - /* 67 - * eva_kernel_access() - determine whether kernel memory access on an EVA system 68 - * 69 - * Determines whether memory accesses should be performed to kernel memory 70 - * on a system using Extended Virtual Addressing (EVA). 71 - * 72 - * Return: true if a kernel memory access on an EVA system, else false. 
73 - */ 74 - static inline bool eva_kernel_access(void) 75 - { 76 - if (!IS_ENABLED(CONFIG_EVA)) 77 - return false; 78 - 79 - return uaccess_kernel(); 80 - } 81 55 82 56 /* 83 57 * Is a address valid? This does a straightforward calculation rather ··· 79 127 static inline int __access_ok(const void __user *p, unsigned long size) 80 128 { 81 129 unsigned long addr = (unsigned long)p; 82 - return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0; 130 + unsigned long end = addr + size - !!size; 131 + 132 + return (__UA_LIMIT & (addr | end | __ua_size(size))) == 0; 83 133 } 84 134 85 135 #define access_ok(addr, size) \ ··· 104 150 * 105 151 * Returns zero on success, or -EFAULT on error. 106 152 */ 107 - #define put_user(x,ptr) \ 108 - __put_user_check((x), (ptr), sizeof(*(ptr))) 153 + #define put_user(x, ptr) \ 154 + ({ \ 155 + __typeof__(*(ptr)) __user *__p = (ptr); \ 156 + \ 157 + might_fault(); \ 158 + access_ok(__p, sizeof(*__p)) ? __put_user((x), __p) : -EFAULT; \ 159 + }) 109 160 110 161 /* 111 162 * get_user: - Get a simple variable from user space. ··· 130 171 * Returns zero on success, or -EFAULT on error. 131 172 * On error, the variable @x is set to zero. 132 173 */ 133 - #define get_user(x,ptr) \ 134 - __get_user_check((x), (ptr), sizeof(*(ptr))) 174 + #define get_user(x, ptr) \ 175 + ({ \ 176 + const __typeof__(*(ptr)) __user *__p = (ptr); \ 177 + \ 178 + might_fault(); \ 179 + access_ok(__p, sizeof(*__p)) ? __get_user((x), __p) : \ 180 + ((x) = 0, -EFAULT); \ 181 + }) 135 182 136 183 /* 137 184 * __put_user: - Write a simple value into user space, with less checking. ··· 159 194 * 160 195 * Returns zero on success, or -EFAULT on error. 
161 196 */ 162 - #define __put_user(x,ptr) \ 163 - __put_user_nocheck((x), (ptr), sizeof(*(ptr))) 197 + #define __put_user(x, ptr) \ 198 + ({ \ 199 + __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \ 200 + __typeof__(*(ptr)) __pu_val = (x); \ 201 + int __pu_err = 0; \ 202 + \ 203 + __chk_user_ptr(__pu_ptr); \ 204 + switch (sizeof(*__pu_ptr)) { \ 205 + case 1: \ 206 + __put_data_asm(user_sb, __pu_ptr); \ 207 + break; \ 208 + case 2: \ 209 + __put_data_asm(user_sh, __pu_ptr); \ 210 + break; \ 211 + case 4: \ 212 + __put_data_asm(user_sw, __pu_ptr); \ 213 + break; \ 214 + case 8: \ 215 + __PUT_DW(user_sd, __pu_ptr); \ 216 + break; \ 217 + default: \ 218 + BUILD_BUG(); \ 219 + } \ 220 + \ 221 + __pu_err; \ 222 + }) 164 223 165 224 /* 166 225 * __get_user: - Get a simple variable from user space, with less checking. ··· 207 218 * Returns zero on success, or -EFAULT on error. 208 219 * On error, the variable @x is set to zero. 209 220 */ 210 - #define __get_user(x,ptr) \ 211 - __get_user_nocheck((x), (ptr), sizeof(*(ptr))) 221 + #define __get_user(x, ptr) \ 222 + ({ \ 223 + const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ 224 + int __gu_err = 0; \ 225 + \ 226 + __chk_user_ptr(__gu_ptr); \ 227 + switch (sizeof(*__gu_ptr)) { \ 228 + case 1: \ 229 + __get_data_asm((x), user_lb, __gu_ptr); \ 230 + break; \ 231 + case 2: \ 232 + __get_data_asm((x), user_lh, __gu_ptr); \ 233 + break; \ 234 + case 4: \ 235 + __get_data_asm((x), user_lw, __gu_ptr); \ 236 + break; \ 237 + case 8: \ 238 + __GET_DW((x), user_ld, __gu_ptr); \ 239 + break; \ 240 + default: \ 241 + BUILD_BUG(); \ 242 + } \ 243 + \ 244 + __gu_err; \ 245 + }) 212 246 213 247 struct __large_struct { unsigned long buf[100]; }; 214 248 #define __m(x) (*(struct __large_struct __user *)(x)) 215 - 216 - /* 217 - * Yuck. We need two variants, one for 64bit operation and one 218 - * for 32 bit mode and old iron. 
219 - */ 220 - #ifndef CONFIG_EVA 221 - #define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr) 222 - #else 223 - /* 224 - * Kernel specific functions for EVA. We need to use normal load instructions 225 - * to read data from kernel when operating in EVA mode. We use these macros to 226 - * avoid redefining __get_user_asm for EVA. 227 - */ 228 - #undef _loadd 229 - #undef _loadw 230 - #undef _loadh 231 - #undef _loadb 232 - #ifdef CONFIG_32BIT 233 - #define _loadd _loadw 234 - #else 235 - #define _loadd(reg, addr) "ld " reg ", " addr 236 - #endif 237 - #define _loadw(reg, addr) "lw " reg ", " addr 238 - #define _loadh(reg, addr) "lh " reg ", " addr 239 - #define _loadb(reg, addr) "lb " reg ", " addr 240 - 241 - #define __get_kernel_common(val, size, ptr) \ 242 - do { \ 243 - switch (size) { \ 244 - case 1: __get_data_asm(val, _loadb, ptr); break; \ 245 - case 2: __get_data_asm(val, _loadh, ptr); break; \ 246 - case 4: __get_data_asm(val, _loadw, ptr); break; \ 247 - case 8: __GET_DW(val, _loadd, ptr); break; \ 248 - default: __get_user_unknown(); break; \ 249 - } \ 250 - } while (0) 251 - #endif 252 249 253 250 #ifdef CONFIG_32BIT 254 251 #define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr) ··· 242 267 #ifdef CONFIG_64BIT 243 268 #define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr) 244 269 #endif 245 - 246 - extern void __get_user_unknown(void); 247 - 248 - #define __get_user_common(val, size, ptr) \ 249 - do { \ 250 - switch (size) { \ 251 - case 1: __get_data_asm(val, user_lb, ptr); break; \ 252 - case 2: __get_data_asm(val, user_lh, ptr); break; \ 253 - case 4: __get_data_asm(val, user_lw, ptr); break; \ 254 - case 8: __GET_DW(val, user_ld, ptr); break; \ 255 - default: __get_user_unknown(); break; \ 256 - } \ 257 - } while (0) 258 - 259 - #define __get_user_nocheck(x, ptr, size) \ 260 - ({ \ 261 - int __gu_err; \ 262 - \ 263 - if (eva_kernel_access()) { \ 264 - __get_kernel_common((x), size, ptr); \ 265 - } else 
{ \ 266 - __chk_user_ptr(ptr); \ 267 - __get_user_common((x), size, ptr); \ 268 - } \ 269 - __gu_err; \ 270 - }) 271 - 272 - #define __get_user_check(x, ptr, size) \ 273 - ({ \ 274 - int __gu_err = -EFAULT; \ 275 - const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ 276 - \ 277 - might_fault(); \ 278 - if (likely(access_ok( __gu_ptr, size))) { \ 279 - if (eva_kernel_access()) \ 280 - __get_kernel_common((x), size, __gu_ptr); \ 281 - else \ 282 - __get_user_common((x), size, __gu_ptr); \ 283 - } else \ 284 - (x) = 0; \ 285 - \ 286 - __gu_err; \ 287 - }) 288 270 289 271 #define __get_data_asm(val, insn, addr) \ 290 272 { \ ··· 296 364 (val) = __gu_tmp.t; \ 297 365 } 298 366 299 - #ifndef CONFIG_EVA 300 - #define __put_kernel_common(ptr, size) __put_user_common(ptr, size) 301 - #else 302 - /* 303 - * Kernel specific functions for EVA. We need to use normal load instructions 304 - * to read data from kernel when operating in EVA mode. We use these macros to 305 - * avoid redefining __get_data_asm for EVA. 
306 - */ 307 - #undef _stored 308 - #undef _storew 309 - #undef _storeh 310 - #undef _storeb 311 - #ifdef CONFIG_32BIT 312 - #define _stored _storew 313 - #else 314 - #define _stored(reg, addr) "ld " reg ", " addr 315 - #endif 367 + #define HAVE_GET_KERNEL_NOFAULT 316 368 317 - #define _storew(reg, addr) "sw " reg ", " addr 318 - #define _storeh(reg, addr) "sh " reg ", " addr 319 - #define _storeb(reg, addr) "sb " reg ", " addr 320 - 321 - #define __put_kernel_common(ptr, size) \ 369 + #define __get_kernel_nofault(dst, src, type, err_label) \ 322 370 do { \ 323 - switch (size) { \ 324 - case 1: __put_data_asm(_storeb, ptr); break; \ 325 - case 2: __put_data_asm(_storeh, ptr); break; \ 326 - case 4: __put_data_asm(_storew, ptr); break; \ 327 - case 8: __PUT_DW(_stored, ptr); break; \ 328 - default: __put_user_unknown(); break; \ 371 + int __gu_err; \ 372 + \ 373 + switch (sizeof(type)) { \ 374 + case 1: \ 375 + __get_data_asm(*(type *)(dst), kernel_lb, \ 376 + (__force type *)(src)); \ 377 + break; \ 378 + case 2: \ 379 + __get_data_asm(*(type *)(dst), kernel_lh, \ 380 + (__force type *)(src)); \ 381 + break; \ 382 + case 4: \ 383 + __get_data_asm(*(type *)(dst), kernel_lw, \ 384 + (__force type *)(src)); \ 385 + break; \ 386 + case 8: \ 387 + __GET_DW(*(type *)(dst), kernel_ld, \ 388 + (__force type *)(src)); \ 389 + break; \ 390 + default: \ 391 + BUILD_BUG(); \ 392 + break; \ 329 393 } \ 330 - } while(0) 331 - #endif 394 + if (unlikely(__gu_err)) \ 395 + goto err_label; \ 396 + } while (0) 332 397 333 398 /* 334 399 * Yuck. 
We need two variants, one for 64bit operation and one ··· 337 408 #ifdef CONFIG_64BIT 338 409 #define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr) 339 410 #endif 340 - 341 - #define __put_user_common(ptr, size) \ 342 - do { \ 343 - switch (size) { \ 344 - case 1: __put_data_asm(user_sb, ptr); break; \ 345 - case 2: __put_data_asm(user_sh, ptr); break; \ 346 - case 4: __put_data_asm(user_sw, ptr); break; \ 347 - case 8: __PUT_DW(user_sd, ptr); break; \ 348 - default: __put_user_unknown(); break; \ 349 - } \ 350 - } while (0) 351 - 352 - #define __put_user_nocheck(x, ptr, size) \ 353 - ({ \ 354 - __typeof__(*(ptr)) __pu_val; \ 355 - int __pu_err = 0; \ 356 - \ 357 - __pu_val = (x); \ 358 - if (eva_kernel_access()) { \ 359 - __put_kernel_common(ptr, size); \ 360 - } else { \ 361 - __chk_user_ptr(ptr); \ 362 - __put_user_common(ptr, size); \ 363 - } \ 364 - __pu_err; \ 365 - }) 366 - 367 - #define __put_user_check(x, ptr, size) \ 368 - ({ \ 369 - __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ 370 - __typeof__(*(ptr)) __pu_val = (x); \ 371 - int __pu_err = -EFAULT; \ 372 - \ 373 - might_fault(); \ 374 - if (likely(access_ok( __pu_addr, size))) { \ 375 - if (eva_kernel_access()) \ 376 - __put_kernel_common(__pu_addr, size); \ 377 - else \ 378 - __put_user_common(__pu_addr, size); \ 379 - } \ 380 - \ 381 - __pu_err; \ 382 - }) 383 411 384 412 #define __put_data_asm(insn, ptr) \ 385 413 { \ ··· 376 490 "i" (-EFAULT)); \ 377 491 } 378 492 379 - extern void __put_user_unknown(void); 493 + #define __put_kernel_nofault(dst, src, type, err_label) \ 494 + do { \ 495 + type __pu_val; \ 496 + int __pu_err = 0; \ 497 + \ 498 + __pu_val = *(__force type *)(src); \ 499 + switch (sizeof(type)) { \ 500 + case 1: \ 501 + __put_data_asm(kernel_sb, (type *)(dst)); \ 502 + break; \ 503 + case 2: \ 504 + __put_data_asm(kernel_sh, (type *)(dst)); \ 505 + break; \ 506 + case 4: \ 507 + __put_data_asm(kernel_sw, (type *)(dst)) \ 508 + break; \ 509 + case 8: \ 510 + __PUT_DW(kernel_sd, (type 
*)(dst)); \ 511 + break; \ 512 + default: \ 513 + BUILD_BUG(); \ 514 + break; \ 515 + } \ 516 + if (unlikely(__pu_err)) \ 517 + goto err_label; \ 518 + } while (0) 519 + 380 520 381 521 /* 382 522 * We're generating jump to subroutines which will be outside the range of ··· 426 514 #define DADDI_SCRATCH "$0" 427 515 #endif 428 516 429 - extern size_t __copy_user(void *__to, const void *__from, size_t __n); 430 - 431 - #define __invoke_copy_from(func, to, from, n) \ 432 - ({ \ 433 - register void *__cu_to_r __asm__("$4"); \ 434 - register const void __user *__cu_from_r __asm__("$5"); \ 435 - register long __cu_len_r __asm__("$6"); \ 436 - \ 437 - __cu_to_r = (to); \ 438 - __cu_from_r = (from); \ 439 - __cu_len_r = (n); \ 440 - __asm__ __volatile__( \ 441 - ".set\tnoreorder\n\t" \ 442 - __MODULE_JAL(func) \ 443 - ".set\tnoat\n\t" \ 444 - __UA_ADDU "\t$1, %1, %2\n\t" \ 445 - ".set\tat\n\t" \ 446 - ".set\treorder" \ 447 - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ 448 - : \ 449 - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ 450 - DADDI_SCRATCH, "memory"); \ 451 - __cu_len_r; \ 452 - }) 453 - 454 - #define __invoke_copy_to(func, to, from, n) \ 455 - ({ \ 456 - register void __user *__cu_to_r __asm__("$4"); \ 457 - register const void *__cu_from_r __asm__("$5"); \ 458 - register long __cu_len_r __asm__("$6"); \ 459 - \ 460 - __cu_to_r = (to); \ 461 - __cu_from_r = (from); \ 462 - __cu_len_r = (n); \ 463 - __asm__ __volatile__( \ 464 - __MODULE_JAL(func) \ 465 - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ 466 - : \ 467 - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ 468 - DADDI_SCRATCH, "memory"); \ 469 - __cu_len_r; \ 470 - }) 471 - 472 - #define __invoke_copy_from_kernel(to, from, n) \ 473 - __invoke_copy_from(__copy_user, to, from, n) 474 - 475 - #define __invoke_copy_to_kernel(to, from, n) \ 476 - __invoke_copy_to(__copy_user, to, from, n) 477 - 478 - #define ___invoke_copy_in_kernel(to, from, n) 
\ 479 - __invoke_copy_from(__copy_user, to, from, n) 480 - 481 - #ifndef CONFIG_EVA 482 - #define __invoke_copy_from_user(to, from, n) \ 483 - __invoke_copy_from(__copy_user, to, from, n) 484 - 485 - #define __invoke_copy_to_user(to, from, n) \ 486 - __invoke_copy_to(__copy_user, to, from, n) 487 - 488 - #define ___invoke_copy_in_user(to, from, n) \ 489 - __invoke_copy_from(__copy_user, to, from, n) 490 - 491 - #else 492 - 493 - /* EVA specific functions */ 494 - 495 - extern size_t __copy_from_user_eva(void *__to, const void *__from, 496 - size_t __n); 497 - extern size_t __copy_to_user_eva(void *__to, const void *__from, 498 - size_t __n); 499 - extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); 500 - 501 - /* 502 - * Source or destination address is in userland. We need to go through 503 - * the TLB 504 - */ 505 - #define __invoke_copy_from_user(to, from, n) \ 506 - __invoke_copy_from(__copy_from_user_eva, to, from, n) 507 - 508 - #define __invoke_copy_to_user(to, from, n) \ 509 - __invoke_copy_to(__copy_to_user_eva, to, from, n) 510 - 511 - #define ___invoke_copy_in_user(to, from, n) \ 512 - __invoke_copy_from(__copy_in_user_eva, to, from, n) 513 - 514 - #endif /* CONFIG_EVA */ 515 - 516 - static inline unsigned long 517 - raw_copy_to_user(void __user *to, const void *from, unsigned long n) 518 - { 519 - if (eva_kernel_access()) 520 - return __invoke_copy_to_kernel(to, from, n); 521 - else 522 - return __invoke_copy_to_user(to, from, n); 523 - } 517 + extern size_t __raw_copy_from_user(void *__to, const void *__from, size_t __n); 518 + extern size_t __raw_copy_to_user(void *__to, const void *__from, size_t __n); 519 + extern size_t __raw_copy_in_user(void *__to, const void *__from, size_t __n); 524 520 525 521 static inline unsigned long 526 522 raw_copy_from_user(void *to, const void __user *from, unsigned long n) 527 523 { 528 - if (eva_kernel_access()) 529 - return __invoke_copy_from_kernel(to, from, n); 530 - else 531 - return 
__invoke_copy_from_user(to, from, n); 524 + register void *__cu_to_r __asm__("$4"); 525 + register const void __user *__cu_from_r __asm__("$5"); 526 + register long __cu_len_r __asm__("$6"); 527 + 528 + __cu_to_r = to; 529 + __cu_from_r = from; 530 + __cu_len_r = n; 531 + 532 + __asm__ __volatile__( 533 + ".set\tnoreorder\n\t" 534 + __MODULE_JAL(__raw_copy_from_user) 535 + ".set\tnoat\n\t" 536 + __UA_ADDU "\t$1, %1, %2\n\t" 537 + ".set\tat\n\t" 538 + ".set\treorder" 539 + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) 540 + : 541 + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", 542 + DADDI_SCRATCH, "memory"); 543 + 544 + return __cu_len_r; 545 + } 546 + 547 + static inline unsigned long 548 + raw_copy_to_user(void __user *to, const void *from, unsigned long n) 549 + { 550 + register void __user *__cu_to_r __asm__("$4"); 551 + register const void *__cu_from_r __asm__("$5"); 552 + register long __cu_len_r __asm__("$6"); 553 + 554 + __cu_to_r = (to); 555 + __cu_from_r = (from); 556 + __cu_len_r = (n); 557 + 558 + __asm__ __volatile__( 559 + __MODULE_JAL(__raw_copy_to_user) 560 + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) 561 + : 562 + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", 563 + DADDI_SCRATCH, "memory"); 564 + 565 + return __cu_len_r; 532 566 } 533 567 534 568 #define INLINE_COPY_FROM_USER 535 569 #define INLINE_COPY_TO_USER 536 570 537 571 static inline unsigned long 538 - raw_copy_in_user(void __user*to, const void __user *from, unsigned long n) 572 + raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) 539 573 { 540 - if (eva_kernel_access()) 541 - return ___invoke_copy_in_kernel(to, from, n); 542 - else 543 - return ___invoke_copy_in_user(to, from, n); 574 + register void __user *__cu_to_r __asm__("$4"); 575 + register const void __user *__cu_from_r __asm__("$5"); 576 + register long __cu_len_r __asm__("$6"); 577 + 578 + __cu_to_r = to; 579 + __cu_from_r = from; 580 + 
__cu_len_r = n; 581 + 582 + __asm__ __volatile__( 583 + ".set\tnoreorder\n\t" 584 + __MODULE_JAL(__raw_copy_in_user) 585 + ".set\tnoat\n\t" 586 + __UA_ADDU "\t$1, %1, %2\n\t" 587 + ".set\tat\n\t" 588 + ".set\treorder" 589 + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) 590 + : 591 + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", 592 + DADDI_SCRATCH, "memory"); 593 + return __cu_len_r; 544 594 } 545 595 546 - extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size); 547 596 extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size); 548 597 549 598 /* ··· 530 657 #define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31" 531 658 #endif /* CONFIG_CPU_MICROMIPS */ 532 659 533 - if (eva_kernel_access()) { 534 - __asm__ __volatile__( 535 - "move\t$4, %1\n\t" 536 - "move\t$5, $0\n\t" 537 - "move\t$6, %2\n\t" 538 - __MODULE_JAL(__bzero_kernel) 539 - "move\t%0, $6" 540 - : "=r" (res) 541 - : "r" (addr), "r" (size) 542 - : bzero_clobbers); 543 - } else { 544 - might_fault(); 545 - __asm__ __volatile__( 546 - "move\t$4, %1\n\t" 547 - "move\t$5, $0\n\t" 548 - "move\t$6, %2\n\t" 549 - __MODULE_JAL(__bzero) 550 - "move\t%0, $6" 551 - : "=r" (res) 552 - : "r" (addr), "r" (size) 553 - : bzero_clobbers); 554 - } 660 + might_fault(); 661 + __asm__ __volatile__( 662 + "move\t$4, %1\n\t" 663 + "move\t$5, $0\n\t" 664 + "move\t$6, %2\n\t" 665 + __MODULE_JAL(__bzero) 666 + "move\t%0, $6" 667 + : "=r" (res) 668 + : "r" (addr), "r" (size) 669 + : bzero_clobbers); 555 670 556 671 return res; 557 672 } ··· 553 692 __cl_size; \ 554 693 }) 555 694 556 - extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len); 557 695 extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len); 558 696 559 697 /* ··· 578 718 { 579 719 long res; 580 720 581 - if (eva_kernel_access()) { 582 - __asm__ __volatile__( 583 - "move\t$4, %1\n\t" 584 - "move\t$5, %2\n\t" 585 - "move\t$6, 
%3\n\t" 586 - __MODULE_JAL(__strncpy_from_kernel_asm) 587 - "move\t%0, $2" 588 - : "=r" (res) 589 - : "r" (__to), "r" (__from), "r" (__len) 590 - : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); 591 - } else { 592 - might_fault(); 593 - __asm__ __volatile__( 594 - "move\t$4, %1\n\t" 595 - "move\t$5, %2\n\t" 596 - "move\t$6, %3\n\t" 597 - __MODULE_JAL(__strncpy_from_user_asm) 598 - "move\t%0, $2" 599 - : "=r" (res) 600 - : "r" (__to), "r" (__from), "r" (__len) 601 - : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); 602 - } 721 + if (!access_ok(__from, __len)) 722 + return -EFAULT; 723 + 724 + might_fault(); 725 + __asm__ __volatile__( 726 + "move\t$4, %1\n\t" 727 + "move\t$5, %2\n\t" 728 + "move\t$6, %3\n\t" 729 + __MODULE_JAL(__strncpy_from_user_asm) 730 + "move\t%0, $2" 731 + : "=r" (res) 732 + : "r" (__to), "r" (__from), "r" (__len) 733 + : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); 603 734 604 735 return res; 605 736 } 606 737 607 - extern long __strnlen_kernel_asm(const char __user *s, long n); 608 738 extern long __strnlen_user_asm(const char __user *s, long n); 609 739 610 740 /* ··· 614 764 { 615 765 long res; 616 766 767 + if (!access_ok(s, 1)) 768 + return 0; 769 + 617 770 might_fault(); 618 - if (eva_kernel_access()) { 619 - __asm__ __volatile__( 620 - "move\t$4, %1\n\t" 621 - "move\t$5, %2\n\t" 622 - __MODULE_JAL(__strnlen_kernel_asm) 623 - "move\t%0, $2" 624 - : "=r" (res) 625 - : "r" (s), "r" (n) 626 - : "$2", "$4", "$5", __UA_t0, "$31"); 627 - } else { 628 - __asm__ __volatile__( 629 - "move\t$4, %1\n\t" 630 - "move\t$5, %2\n\t" 631 - __MODULE_JAL(__strnlen_user_asm) 632 - "move\t%0, $2" 633 - : "=r" (res) 634 - : "r" (s), "r" (n) 635 - : "$2", "$4", "$5", __UA_t0, "$31"); 636 - } 771 + __asm__ __volatile__( 772 + "move\t$4, %1\n\t" 773 + "move\t$5, %2\n\t" 774 + __MODULE_JAL(__strnlen_user_asm) 775 + "move\t%0, $2" 776 + : "=r" (res) 777 + : "r" (s), "r" (n) 778 + : "$2", "$4", "$5", __UA_t0, "$31"); 637 779 638 780 
return res; 639 781 }
+21 -5
arch/mips/include/asm/vdso/gettimeofday.h
··· 20 20 21 21 #define VDSO_HAS_CLOCK_GETRES 1 22 22 23 + #if MIPS_ISA_REV < 6 24 + #define VDSO_SYSCALL_CLOBBERS "hi", "lo", 25 + #else 26 + #define VDSO_SYSCALL_CLOBBERS 27 + #endif 28 + 23 29 static __always_inline long gettimeofday_fallback( 24 30 struct __kernel_old_timeval *_tv, 25 31 struct timezone *_tz) ··· 41 35 : "=r" (ret), "=r" (error) 42 36 : "r" (tv), "r" (tz), "r" (nr) 43 37 : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", 44 - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); 38 + "$14", "$15", "$24", "$25", 39 + VDSO_SYSCALL_CLOBBERS 40 + "memory"); 45 41 46 42 return error ? -ret : ret; 47 43 } ··· 67 59 : "=r" (ret), "=r" (error) 68 60 : "r" (clkid), "r" (ts), "r" (nr) 69 61 : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", 70 - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); 62 + "$14", "$15", "$24", "$25", 63 + VDSO_SYSCALL_CLOBBERS 64 + "memory"); 71 65 72 66 return error ? -ret : ret; 73 67 } ··· 93 83 : "=r" (ret), "=r" (error) 94 84 : "r" (clkid), "r" (ts), "r" (nr) 95 85 : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", 96 - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); 86 + "$14", "$15", "$24", "$25", 87 + VDSO_SYSCALL_CLOBBERS 88 + "memory"); 97 89 98 90 return error ? -ret : ret; 99 91 } ··· 117 105 : "=r" (ret), "=r" (error) 118 106 : "r" (clkid), "r" (ts), "r" (nr) 119 107 : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", 120 - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); 108 + "$14", "$15", "$24", "$25", 109 + VDSO_SYSCALL_CLOBBERS 110 + "memory"); 121 111 122 112 return error ? -ret : ret; 123 113 } ··· 139 125 : "=r" (ret), "=r" (error) 140 126 : "r" (clkid), "r" (ts), "r" (nr) 141 127 : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", 142 - "$14", "$15", "$24", "$25", "hi", "lo", "memory"); 128 + "$14", "$15", "$24", "$25", 129 + VDSO_SYSCALL_CLOBBERS 130 + "memory"); 143 131 144 132 return error ? -ret : ret; 145 133 }
+4 -4
arch/mips/kernel/Makefile
··· 17 17 endif 18 18 19 19 ifdef CONFIG_FUNCTION_TRACER 20 - CFLAGS_REMOVE_ftrace.o = -pg 21 - CFLAGS_REMOVE_early_printk.o = -pg 22 - CFLAGS_REMOVE_perf_event.o = -pg 23 - CFLAGS_REMOVE_perf_event_mipsxx.o = -pg 20 + CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) 21 + CFLAGS_REMOVE_early_printk.o = $(CC_FLAGS_FTRACE) 22 + CFLAGS_REMOVE_perf_event.o = $(CC_FLAGS_FTRACE) 23 + CFLAGS_REMOVE_perf_event_mipsxx.o = $(CC_FLAGS_FTRACE) 24 24 endif 25 25 26 26 obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
+19
arch/mips/kernel/access-helper.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #include <linux/uaccess.h> 4 + 5 + static inline int __get_addr(unsigned long *a, unsigned long *p, bool user) 6 + { 7 + return user ? get_user(*a, (unsigned long __user *)p) : 8 + get_kernel_nofault(*a, p); 9 + } 10 + 11 + static inline int __get_inst16(u16 *i, u16 *p, bool user) 12 + { 13 + return user ? get_user(*i, (u16 __user *)p) : get_kernel_nofault(*i, p); 14 + } 15 + 16 + static inline int __get_inst32(u32 *i, u32 *p, bool user) 17 + { 18 + return user ? get_user(*i, (u32 __user *)p) : get_kernel_nofault(*i, p); 19 + }
-1
arch/mips/kernel/asm-offsets.c
··· 98 98 OFFSET(TI_TP_VALUE, thread_info, tp_value); 99 99 OFFSET(TI_CPU, thread_info, cpu); 100 100 OFFSET(TI_PRE_COUNT, thread_info, preempt_count); 101 - OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); 102 101 OFFSET(TI_REGS, thread_info, regs); 103 102 DEFINE(_THREAD_SIZE, THREAD_SIZE); 104 103 DEFINE(_THREAD_MASK, THREAD_MASK);
-4
arch/mips/kernel/cevt-r4k.c
··· 195 195 unsigned int delta; 196 196 unsigned int cnt; 197 197 198 - #ifdef CONFIG_KVM_GUEST 199 - return 1; 200 - #endif 201 - 202 198 /* 203 199 * IP7 already pending? Try to clear it by acking the timer. 204 200 */
-3
arch/mips/kernel/cpu-probe.c
··· 1752 1752 set_isa(c, MIPS_CPU_ISA_M64R2); 1753 1753 break; 1754 1754 } 1755 - c->writecombine = _CACHE_UNCACHED_ACCELERATED; 1756 1755 c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT | 1757 1756 MIPS_ASE_LOONGSON_EXT2); 1758 1757 break; ··· 1781 1782 * register, we correct it here. 1782 1783 */ 1783 1784 c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; 1784 - c->writecombine = _CACHE_UNCACHED_ACCELERATED; 1785 1785 c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | 1786 1786 MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); 1787 1787 c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */ ··· 1791 1793 set_elf_platform(cpu, "loongson3a"); 1792 1794 set_isa(c, MIPS_CPU_ISA_M64R2); 1793 1795 decode_cpucfg(c); 1794 - c->writecombine = _CACHE_UNCACHED_ACCELERATED; 1795 1796 break; 1796 1797 default: 1797 1798 panic("Unknown Loongson Processor ID!");
-8
arch/mips/kernel/ftrace.c
··· 90 90 unsigned int new_code2) 91 91 { 92 92 int faulted; 93 - mm_segment_t old_fs; 94 93 95 94 safe_store_code(new_code1, ip, faulted); 96 95 if (unlikely(faulted)) ··· 101 102 return -EFAULT; 102 103 103 104 ip -= 4; 104 - old_fs = get_fs(); 105 - set_fs(KERNEL_DS); 106 105 flush_icache_range(ip, ip + 8); 107 - set_fs(old_fs); 108 106 109 107 return 0; 110 108 } ··· 110 114 unsigned int new_code2) 111 115 { 112 116 int faulted; 113 - mm_segment_t old_fs; 114 117 115 118 ip += 4; 116 119 safe_store_code(new_code2, ip, faulted); ··· 121 126 if (unlikely(faulted)) 122 127 return -EFAULT; 123 128 124 - old_fs = get_fs(); 125 - set_fs(KERNEL_DS); 126 129 flush_icache_range(ip, ip + 8); 127 - set_fs(old_fs); 128 130 129 131 return 0; 130 132 }
-2
arch/mips/kernel/process.c
··· 124 124 /* kernel thread */ 125 125 unsigned long status = p->thread.cp0_status; 126 126 memset(childregs, 0, sizeof(struct pt_regs)); 127 - ti->addr_limit = KERNEL_DS; 128 127 p->thread.reg16 = usp; /* fn */ 129 128 p->thread.reg17 = kthread_arg; 130 129 p->thread.reg29 = childksp; ··· 144 145 childregs->regs[2] = 0; /* Child gets zero as return value */ 145 146 if (usp) 146 147 childregs->regs[29] = usp; 147 - ti->addr_limit = USER_DS; 148 148 149 149 p->thread.reg29 = (unsigned long) childregs; 150 150 p->thread.reg31 = (unsigned long) ret_from_fork;
+4 -5
arch/mips/kernel/relocate_kernel.S
··· 11 11 #include <asm/stackframe.h> 12 12 #include <asm/addrspace.h> 13 13 14 + #include <kernel-entry-init.h> 15 + 14 16 LEAF(relocate_new_kernel) 15 17 PTR_L a0, arg0 16 18 PTR_L a1, arg1 ··· 127 125 1: LONG_L s0, (t0) 128 126 bne s0, zero,1b 129 127 130 - #ifdef CONFIG_CPU_CAVIUM_OCTEON 131 - .set push 132 - .set noreorder 133 - synci 0($0) 134 - .set pop 128 + #ifdef USE_KEXEC_SMP_WAIT_FINAL 129 + kexec_smp_wait_final 135 130 #else 136 131 sync 137 132 #endif
+3 -5
arch/mips/kernel/scall32-o32.S
··· 48 48 * We intentionally keep the kernel stack a little below the top of 49 49 * userspace so we don't have to do a slower byte accurate check here. 50 50 */ 51 - lw t5, TI_ADDR_LIMIT($28) 52 51 addu t4, t0, 32 53 - and t5, t4 54 - bltz t5, bad_stack # -> sp is bad 52 + bltz t4, bad_stack # -> sp is bad 55 53 56 54 /* 57 55 * Ok, copy the args from the luser stack to the kernel stack. ··· 215 217 #define sys_sched_getaffinity mipsmt_sys_sched_getaffinity 216 218 #endif /* CONFIG_MIPS_MT_FPAFF */ 217 219 220 + #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) 218 221 #define __SYSCALL(nr, entry) PTR entry 219 222 .align 2 220 223 .type sys_call_table, @object 221 224 EXPORT(sys_call_table) 222 - #include <asm/syscall_table_32_o32.h> 223 - #undef __SYSCALL 225 + #include <asm/syscall_table_o32.h>
+1 -2
arch/mips/kernel/scall64-n32.S
··· 104 104 #define __SYSCALL(nr, entry) PTR entry 105 105 .type sysn32_call_table, @object 106 106 EXPORT(sysn32_call_table) 107 - #include <asm/syscall_table_64_n32.h> 108 - #undef __SYSCALL 107 + #include <asm/syscall_table_n32.h>
+1 -2
arch/mips/kernel/scall64-n64.S
··· 113 113 .align 3 114 114 .type sys_call_table, @object 115 115 EXPORT(sys_call_table) 116 - #include <asm/syscall_table_64_n64.h> 117 - #undef __SYSCALL 116 + #include <asm/syscall_table_n64.h>
+2 -2
arch/mips/kernel/scall64-o32.S
··· 213 213 jr ra 214 214 END(sys32_syscall) 215 215 216 + #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) 216 217 #define __SYSCALL(nr, entry) PTR entry 217 218 .align 3 218 219 .type sys32_call_table,@object 219 220 EXPORT(sys32_call_table) 220 - #include <asm/syscall_table_64_o32.h> 221 - #undef __SYSCALL 221 + #include <asm/syscall_table_o32.h>
+16 -9
arch/mips/kernel/smp-bmips.c
··· 134 134 if (!board_ebase_setup) 135 135 board_ebase_setup = &bmips_ebase_setup; 136 136 137 - __cpu_number_map[boot_cpu] = 0; 138 - __cpu_logical_map[0] = boot_cpu; 137 + if (max_cpus > 1) { 138 + __cpu_number_map[boot_cpu] = 0; 139 + __cpu_logical_map[0] = boot_cpu; 139 140 140 - for (i = 0; i < max_cpus; i++) { 141 - if (i != boot_cpu) { 142 - __cpu_number_map[i] = cpu; 143 - __cpu_logical_map[cpu] = i; 144 - cpu++; 141 + for (i = 0; i < max_cpus; i++) { 142 + if (i != boot_cpu) { 143 + __cpu_number_map[i] = cpu; 144 + __cpu_logical_map[cpu] = i; 145 + cpu++; 146 + } 147 + set_cpu_possible(i, 1); 148 + set_cpu_present(i, 1); 145 149 } 146 - set_cpu_possible(i, 1); 147 - set_cpu_present(i, 1); 150 + } else { 151 + __cpu_number_map[0] = boot_cpu; 152 + __cpu_logical_map[0] = 0; 153 + set_cpu_possible(0, 1); 154 + set_cpu_present(0, 1); 148 155 } 149 156 } 150 157
+4 -4
arch/mips/kernel/spinlock_test.c
··· 35 35 return 0; 36 36 } 37 37 38 - DEFINE_SIMPLE_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n"); 38 + DEFINE_DEBUGFS_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n"); 39 39 40 40 41 41 ··· 114 114 return 0; 115 115 } 116 116 117 - DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n"); 117 + DEFINE_DEBUGFS_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n"); 118 118 119 119 static int __init spinlock_test(void) 120 120 { 121 - debugfs_create_file("spin_single", S_IRUGO, mips_debugfs_dir, NULL, 121 + debugfs_create_file_unsafe("spin_single", S_IRUGO, mips_debugfs_dir, NULL, 122 122 &fops_ss); 123 - debugfs_create_file("spin_multi", S_IRUGO, mips_debugfs_dir, NULL, 123 + debugfs_create_file_unsafe("spin_multi", S_IRUGO, mips_debugfs_dir, NULL, 124 124 &fops_multi); 125 125 return 0; 126 126 }
+10 -31
arch/mips/kernel/syscalls/Makefile
··· 8 8 syscalln32 := $(src)/syscall_n32.tbl 9 9 syscalln64 := $(src)/syscall_n64.tbl 10 10 syscallo32 := $(src)/syscall_o32.tbl 11 - syshdr := $(srctree)/$(src)/syscallhdr.sh 11 + syshdr := $(srctree)/scripts/syscallhdr.sh 12 12 sysnr := $(srctree)/$(src)/syscallnr.sh 13 - systbl := $(srctree)/$(src)/syscalltbl.sh 13 + systbl := $(srctree)/scripts/syscalltbl.sh 14 14 15 15 quiet_cmd_syshdr = SYSHDR $@ 16 - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ 17 - '$(syshdr_abis_$(basetarget))' \ 18 - '$(syshdr_pfx_$(basetarget))' \ 19 - '$(syshdr_offset_$(basetarget))' 16 + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --offset __NR_Linux $< $@ 20 17 21 18 quiet_cmd_sysnr = SYSNR $@ 22 19 cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \ ··· 22 25 '$(sysnr_offset_$(basetarget))' 23 26 24 27 quiet_cmd_systbl = SYSTBL $@ 25 - cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ 26 - '$(systbl_abis_$(basetarget))' \ 27 - '$(systbl_abi_$(basetarget))' \ 28 - '$(systbl_offset_$(basetarget))' 28 + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ 29 29 30 - syshdr_offset_unistd_n32 := __NR_Linux 31 30 $(uapi)/unistd_n32.h: $(syscalln32) $(syshdr) FORCE 32 31 $(call if_changed,syshdr) 33 32 34 - syshdr_offset_unistd_n64 := __NR_Linux 35 33 $(uapi)/unistd_n64.h: $(syscalln64) $(syshdr) FORCE 36 34 $(call if_changed,syshdr) 37 35 38 - syshdr_offset_unistd_o32 := __NR_Linux 39 36 $(uapi)/unistd_o32.h: $(syscallo32) $(syshdr) FORCE 40 37 $(call if_changed,syshdr) 41 38 ··· 48 57 $(kapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr) FORCE 49 58 $(call if_changed,sysnr) 50 59 51 - systbl_abi_syscall_table_32_o32 := 32_o32 52 - systbl_offset_syscall_table_32_o32 := 4000 53 - $(kapi)/syscall_table_32_o32.h: $(syscallo32) $(systbl) FORCE 60 + $(kapi)/syscall_table_n32.h: $(syscalln32) $(systbl) FORCE 54 61 $(call if_changed,systbl) 55 62 56 - systbl_abi_syscall_table_64_n32 := 64_n32 57 - systbl_offset_syscall_table_64_n32 := 6000 58 - $(kapi)/syscall_table_64_n32.h: $(syscalln32) $(systbl) FORCE 
63 + $(kapi)/syscall_table_n64.h: $(syscalln64) $(systbl) FORCE 59 64 $(call if_changed,systbl) 60 65 61 - systbl_abi_syscall_table_64_n64 := 64_n64 62 - systbl_offset_syscall_table_64_n64 := 5000 63 - $(kapi)/syscall_table_64_n64.h: $(syscalln64) $(systbl) FORCE 64 - $(call if_changed,systbl) 65 - 66 - systbl_abi_syscall_table_64_o32 := 64_o32 67 - systbl_offset_syscall_table_64_o32 := 4000 68 - $(kapi)/syscall_table_64_o32.h: $(syscallo32) $(systbl) FORCE 66 + $(kapi)/syscall_table_o32.h: $(syscallo32) $(systbl) FORCE 69 67 $(call if_changed,systbl) 70 68 71 69 uapisyshdr-y += unistd_n32.h \ 72 70 unistd_n64.h \ 73 71 unistd_o32.h 74 - kapisyshdr-y += syscall_table_32_o32.h \ 75 - syscall_table_64_n32.h \ 76 - syscall_table_64_n64.h \ 77 - syscall_table_64_o32.h \ 72 + kapisyshdr-y += syscall_table_n32.h \ 73 + syscall_table_n64.h \ 74 + syscall_table_o32.h \ 78 75 unistd_nr_n32.h \ 79 76 unistd_nr_n64.h \ 80 77 unistd_nr_o32.h
-36
arch/mips/kernel/syscalls/syscallhdr.sh
··· 1 - #!/bin/sh 2 - # SPDX-License-Identifier: GPL-2.0 3 - 4 - in="$1" 5 - out="$2" 6 - my_abis=`echo "($3)" | tr ',' '|'` 7 - prefix="$4" 8 - offset="$5" 9 - 10 - fileguard=_UAPI_ASM_MIPS_`basename "$out" | sed \ 11 - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ 12 - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` 13 - grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( 14 - printf "#ifndef %s\n" "${fileguard}" 15 - printf "#define %s\n" "${fileguard}" 16 - printf "\n" 17 - 18 - nxt=0 19 - while read nr abi name entry compat ; do 20 - if [ -z "$offset" ]; then 21 - printf "#define __NR_%s%s\t%s\n" \ 22 - "${prefix}" "${name}" "${nr}" 23 - else 24 - printf "#define __NR_%s%s\t(%s + %s)\n" \ 25 - "${prefix}" "${name}" "${offset}" "${nr}" 26 - fi 27 - nxt=$((nr+1)) 28 - done 29 - 30 - printf "\n" 31 - printf "#ifdef __KERNEL__\n" 32 - printf "#define __NR_syscalls\t%s\n" "${nxt}" 33 - printf "#endif\n" 34 - printf "\n" 35 - printf "#endif /* %s */\n" "${fileguard}" 36 - ) > "$out"
-36
arch/mips/kernel/syscalls/syscalltbl.sh
··· 1 - #!/bin/sh 2 - # SPDX-License-Identifier: GPL-2.0 3 - 4 - in="$1" 5 - out="$2" 6 - my_abis=`echo "($3)" | tr ',' '|'` 7 - my_abi="$4" 8 - offset="$5" 9 - 10 - emit() { 11 - t_nxt="$1" 12 - t_nr="$2" 13 - t_entry="$3" 14 - 15 - while [ $t_nxt -lt $t_nr ]; do 16 - printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}" 17 - t_nxt=$((t_nxt+1)) 18 - done 19 - printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}" 20 - } 21 - 22 - grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( 23 - nxt=0 24 - if [ -z "$offset" ]; then 25 - offset=0 26 - fi 27 - 28 - while read nr abi name entry compat ; do 29 - if [ "$my_abi" = "64_o32" ] && [ ! -z "$compat" ]; then 30 - emit $((nxt+offset)) $((nr+offset)) $compat 31 - else 32 - emit $((nxt+offset)) $((nr+offset)) $entry 33 - fi 34 - nxt=$((nr+1)) 35 - done 36 - ) > "$out"
+46 -59
arch/mips/kernel/traps.c
··· 72 72 73 73 #include <asm/mach-loongson64/cpucfg-emul.h> 74 74 75 + #include "access-helper.h" 76 + 75 77 extern void check_wait(void); 76 78 extern asmlinkage void rollback_handle_int(void); 77 79 extern asmlinkage void handle_int(void); ··· 110 108 void (*board_ebase_setup)(void); 111 109 void(*board_cache_error_setup)(void); 112 110 113 - static void show_raw_backtrace(unsigned long reg29, const char *loglvl) 111 + static void show_raw_backtrace(unsigned long reg29, const char *loglvl, 112 + bool user) 114 113 { 115 114 unsigned long *sp = (unsigned long *)(reg29 & ~3); 116 115 unsigned long addr; ··· 121 118 printk("%s\n", loglvl); 122 119 #endif 123 120 while (!kstack_end(sp)) { 124 - unsigned long __user *p = 125 - (unsigned long __user *)(unsigned long)sp++; 126 - if (__get_user(addr, p)) { 121 + if (__get_addr(&addr, sp++, user)) { 127 122 printk("%s (Bad stack address)", loglvl); 128 123 break; 129 124 } ··· 142 141 #endif 143 142 144 143 static void show_backtrace(struct task_struct *task, const struct pt_regs *regs, 145 - const char *loglvl) 144 + const char *loglvl, bool user) 146 145 { 147 146 unsigned long sp = regs->regs[29]; 148 147 unsigned long ra = regs->regs[31]; ··· 152 151 task = current; 153 152 154 153 if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) { 155 - show_raw_backtrace(sp, loglvl); 154 + show_raw_backtrace(sp, loglvl, user); 156 155 return; 157 156 } 158 157 printk("%sCall Trace:\n", loglvl); ··· 168 167 * with at least a bit of error checking ... 
169 168 */ 170 169 static void show_stacktrace(struct task_struct *task, 171 - const struct pt_regs *regs, const char *loglvl) 170 + const struct pt_regs *regs, const char *loglvl, bool user) 172 171 { 173 172 const int field = 2 * sizeof(unsigned long); 174 - long stackdata; 173 + unsigned long stackdata; 175 174 int i; 176 - unsigned long __user *sp = (unsigned long __user *)regs->regs[29]; 175 + unsigned long *sp = (unsigned long *)regs->regs[29]; 177 176 178 177 printk("%sStack :", loglvl); 179 178 i = 0; ··· 187 186 break; 188 187 } 189 188 190 - if (__get_user(stackdata, sp++)) { 189 + if (__get_addr(&stackdata, sp++, user)) { 191 190 pr_cont(" (Bad stack address)"); 192 191 break; 193 192 } ··· 196 195 i++; 197 196 } 198 197 pr_cont("\n"); 199 - show_backtrace(task, regs, loglvl); 198 + show_backtrace(task, regs, loglvl, user); 200 199 } 201 200 202 201 void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) 203 202 { 204 203 struct pt_regs regs; 205 - mm_segment_t old_fs = get_fs(); 206 204 207 205 regs.cp0_status = KSU_KERNEL; 208 206 if (sp) { ··· 217 217 prepare_frametrace(&regs); 218 218 } 219 219 } 220 - /* 221 - * show_stack() deals exclusively with kernel mode, so be sure to access 222 - * the stack in the kernel (not user) address space. 223 - */ 224 - set_fs(KERNEL_DS); 225 - show_stacktrace(task, &regs, loglvl); 226 - set_fs(old_fs); 220 + show_stacktrace(task, &regs, loglvl, false); 227 221 } 228 222 229 - static void show_code(unsigned int __user *pc) 223 + static void show_code(void *pc, bool user) 230 224 { 231 225 long i; 232 - unsigned short __user *pc16 = NULL; 226 + unsigned short *pc16 = NULL; 233 227 234 228 printk("Code:"); 235 229 236 230 if ((unsigned long)pc & 1) 237 - pc16 = (unsigned short __user *)((unsigned long)pc & ~1); 231 + pc16 = (u16 *)((unsigned long)pc & ~1); 232 + 238 233 for(i = -3 ; i < 6 ; i++) { 239 - unsigned int insn; 240 - if (pc16 ? 
__get_user(insn, pc16 + i) : __get_user(insn, pc + i)) { 241 - pr_cont(" (Bad address in epc)\n"); 242 - break; 234 + if (pc16) { 235 + u16 insn16; 236 + 237 + if (__get_inst16(&insn16, pc16 + i, user)) 238 + goto bad_address; 239 + 240 + pr_cont("%c%04x%c", (i?' ':'<'), insn16, (i?' ':'>')); 241 + } else { 242 + u32 insn32; 243 + 244 + if (__get_inst32(&insn32, (u32 *)pc + i, user)) 245 + goto bad_address; 246 + 247 + pr_cont("%c%08x%c", (i?' ':'<'), insn32, (i?' ':'>')); 243 248 } 244 - pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>')); 245 249 } 246 250 pr_cont("\n"); 251 + return; 252 + 253 + bad_address: 254 + pr_cont(" (Bad address in epc)\n\n"); 247 255 } 248 256 249 257 static void __show_regs(const struct pt_regs *regs) ··· 364 356 void show_registers(struct pt_regs *regs) 365 357 { 366 358 const int field = 2 * sizeof(unsigned long); 367 - mm_segment_t old_fs = get_fs(); 368 359 369 360 __show_regs(regs); 370 361 print_modules(); ··· 378 371 printk("*HwTLS: %0*lx\n", field, tls); 379 372 } 380 373 381 - if (!user_mode(regs)) 382 - /* Necessary for getting the correct stack content */ 383 - set_fs(KERNEL_DS); 384 - show_stacktrace(current, regs, KERN_DEFAULT); 385 - show_code((unsigned int __user *) regs->cp0_epc); 374 + show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs)); 375 + show_code((void *)regs->cp0_epc, user_mode(regs)); 386 376 printk("\n"); 387 - set_fs(old_fs); 388 377 } 389 378 390 379 static DEFINE_RAW_SPINLOCK(die_lock); ··· 1025 1022 unsigned long epc = msk_isa16_mode(exception_epc(regs)); 1026 1023 unsigned int opcode, bcode; 1027 1024 enum ctx_state prev_state; 1028 - mm_segment_t seg; 1029 - 1030 - seg = get_fs(); 1031 - if (!user_mode(regs)) 1032 - set_fs(KERNEL_DS); 1025 + bool user = user_mode(regs); 1033 1026 1034 1027 prev_state = exception_enter(); 1035 1028 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; 1036 1029 if (get_isa16_mode(regs->cp0_epc)) { 1037 1030 u16 instr[2]; 1038 1031 1039 - if 
(__get_user(instr[0], (u16 __user *)epc)) 1032 + if (__get_inst16(&instr[0], (u16 *)epc, user)) 1040 1033 goto out_sigsegv; 1041 1034 1042 1035 if (!cpu_has_mmips) { ··· 1043 1044 bcode = instr[0] & 0xf; 1044 1045 } else { 1045 1046 /* 32-bit microMIPS BREAK */ 1046 - if (__get_user(instr[1], (u16 __user *)(epc + 2))) 1047 + if (__get_inst16(&instr[1], (u16 *)(epc + 2), user)) 1047 1048 goto out_sigsegv; 1048 1049 opcode = (instr[0] << 16) | instr[1]; 1049 1050 bcode = (opcode >> 6) & ((1 << 20) - 1); 1050 1051 } 1051 1052 } else { 1052 - if (__get_user(opcode, (unsigned int __user *)epc)) 1053 + if (__get_inst32(&opcode, (u32 *)epc, user)) 1053 1054 goto out_sigsegv; 1054 1055 bcode = (opcode >> 6) & ((1 << 20) - 1); 1055 1056 } ··· 1099 1100 do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break"); 1100 1101 1101 1102 out: 1102 - set_fs(seg); 1103 1103 exception_exit(prev_state); 1104 1104 return; 1105 1105 ··· 1112 1114 u32 opcode, tcode = 0; 1113 1115 enum ctx_state prev_state; 1114 1116 u16 instr[2]; 1115 - mm_segment_t seg; 1117 + bool user = user_mode(regs); 1116 1118 unsigned long epc = msk_isa16_mode(exception_epc(regs)); 1117 - 1118 - seg = get_fs(); 1119 - if (!user_mode(regs)) 1120 - set_fs(KERNEL_DS); 1121 1119 1122 1120 prev_state = exception_enter(); 1123 1121 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; 1124 1122 if (get_isa16_mode(regs->cp0_epc)) { 1125 - if (__get_user(instr[0], (u16 __user *)(epc + 0)) || 1126 - __get_user(instr[1], (u16 __user *)(epc + 2))) 1123 + if (__get_inst16(&instr[0], (u16 *)(epc + 0), user) || 1124 + __get_inst16(&instr[1], (u16 *)(epc + 2), user)) 1127 1125 goto out_sigsegv; 1128 1126 opcode = (instr[0] << 16) | instr[1]; 1129 1127 /* Immediate versions don't provide a code. 
*/ 1130 1128 if (!(opcode & OPCODE)) 1131 1129 tcode = (opcode >> 12) & ((1 << 4) - 1); 1132 1130 } else { 1133 - if (__get_user(opcode, (u32 __user *)epc)) 1131 + if (__get_inst32(&opcode, (u32 *)epc, user)) 1134 1132 goto out_sigsegv; 1135 1133 /* Immediate versions don't provide a code. */ 1136 1134 if (!(opcode & OPCODE)) ··· 1136 1142 do_trap_or_bp(regs, tcode, 0, "Trap"); 1137 1143 1138 1144 out: 1139 - set_fs(seg); 1140 1145 exception_exit(prev_state); 1141 1146 return; 1142 1147 ··· 1584 1591 { 1585 1592 int multi_match = regs->cp0_status & ST0_TS; 1586 1593 enum ctx_state prev_state; 1587 - mm_segment_t old_fs = get_fs(); 1588 1594 1589 1595 prev_state = exception_enter(); 1590 1596 show_regs(regs); ··· 1594 1602 dump_tlb_all(); 1595 1603 } 1596 1604 1597 - if (!user_mode(regs)) 1598 - set_fs(KERNEL_DS); 1599 - 1600 - show_code((unsigned int __user *) regs->cp0_epc); 1601 - 1602 - set_fs(old_fs); 1605 + show_code((void *)regs->cp0_epc, user_mode(regs)); 1603 1606 1604 1607 /* 1605 1608 * Some chips may have other causes of machine check (e.g. SB1
+75 -130
arch/mips/kernel/unaligned.c
··· 93 93 #include <asm/mmu_context.h> 94 94 #include <linux/uaccess.h> 95 95 96 + #include "access-helper.h" 97 + 96 98 enum { 97 99 UNALIGNED_ACTION_QUIET, 98 100 UNALIGNED_ACTION_SIGNAL, ··· 109 107 extern void show_registers(struct pt_regs *regs); 110 108 111 109 static void emulate_load_store_insn(struct pt_regs *regs, 112 - void __user *addr, unsigned int __user *pc) 110 + void __user *addr, unsigned int *pc) 113 111 { 114 112 unsigned long origpc, orig31, value; 115 113 union mips_instruction insn; 116 114 unsigned int res; 117 - #ifdef CONFIG_EVA 118 - mm_segment_t seg; 119 - #endif 115 + bool user = user_mode(regs); 116 + 120 117 origpc = (unsigned long)pc; 121 118 orig31 = regs->regs[31]; 122 119 ··· 124 123 /* 125 124 * This load never faults. 126 125 */ 127 - __get_user(insn.word, pc); 126 + __get_inst32(&insn.word, pc, user); 128 127 129 128 switch (insn.i_format.opcode) { 130 129 /* ··· 164 163 if (insn.dsp_format.func == lx_op) { 165 164 switch (insn.dsp_format.op) { 166 165 case lwx_op: 167 - if (!access_ok(addr, 4)) 166 + if (user && !access_ok(addr, 4)) 168 167 goto sigbus; 169 168 LoadW(addr, value, res); 170 169 if (res) ··· 173 172 regs->regs[insn.dsp_format.rd] = value; 174 173 break; 175 174 case lhx_op: 176 - if (!access_ok(addr, 2)) 175 + if (user && !access_ok(addr, 2)) 177 176 goto sigbus; 178 177 LoadHW(addr, value, res); 179 178 if (res) ··· 192 191 * memory, so we need to "switch" the address limit to 193 192 * user space, so that address check can work properly. 
194 193 */ 195 - seg = force_uaccess_begin(); 196 194 switch (insn.spec3_format.func) { 197 195 case lhe_op: 198 - if (!access_ok(addr, 2)) { 199 - force_uaccess_end(seg); 196 + if (!access_ok(addr, 2)) 200 197 goto sigbus; 201 - } 202 198 LoadHWE(addr, value, res); 203 - if (res) { 204 - force_uaccess_end(seg); 199 + if (res) 205 200 goto fault; 206 - } 207 201 compute_return_epc(regs); 208 202 regs->regs[insn.spec3_format.rt] = value; 209 203 break; 210 204 case lwe_op: 211 - if (!access_ok(addr, 4)) { 212 - force_uaccess_end(seg); 205 + if (!access_ok(addr, 4)) 213 206 goto sigbus; 214 - } 215 207 LoadWE(addr, value, res); 216 - if (res) { 217 - force_uaccess_end(seg); 208 + if (res) 218 209 goto fault; 219 - } 220 210 compute_return_epc(regs); 221 211 regs->regs[insn.spec3_format.rt] = value; 222 212 break; 223 213 case lhue_op: 224 - if (!access_ok(addr, 2)) { 225 - force_uaccess_end(seg); 214 + if (!access_ok(addr, 2)) 226 215 goto sigbus; 227 - } 228 216 LoadHWUE(addr, value, res); 229 - if (res) { 230 - force_uaccess_end(seg); 217 + if (res) 231 218 goto fault; 232 - } 233 219 compute_return_epc(regs); 234 220 regs->regs[insn.spec3_format.rt] = value; 235 221 break; 236 222 case she_op: 237 - if (!access_ok(addr, 2)) { 238 - force_uaccess_end(seg); 223 + if (!access_ok(addr, 2)) 239 224 goto sigbus; 240 - } 241 225 compute_return_epc(regs); 242 226 value = regs->regs[insn.spec3_format.rt]; 243 227 StoreHWE(addr, value, res); 244 - if (res) { 245 - force_uaccess_end(seg); 228 + if (res) 246 229 goto fault; 247 - } 248 230 break; 249 231 case swe_op: 250 - if (!access_ok(addr, 4)) { 251 - force_uaccess_end(seg); 232 + if (!access_ok(addr, 4)) 252 233 goto sigbus; 253 - } 254 234 compute_return_epc(regs); 255 235 value = regs->regs[insn.spec3_format.rt]; 256 236 StoreWE(addr, value, res); 257 - if (res) { 258 - force_uaccess_end(seg); 237 + if (res) 259 238 goto fault; 260 - } 261 239 break; 262 240 default: 263 - force_uaccess_end(seg); 264 241 goto sigill; 
265 242 } 266 - force_uaccess_end(seg); 267 243 } 268 244 #endif 269 245 break; 270 246 case lh_op: 271 - if (!access_ok(addr, 2)) 247 + if (user && !access_ok(addr, 2)) 272 248 goto sigbus; 273 249 274 - if (IS_ENABLED(CONFIG_EVA)) { 275 - if (uaccess_kernel()) 276 - LoadHW(addr, value, res); 277 - else 278 - LoadHWE(addr, value, res); 279 - } else { 250 + if (IS_ENABLED(CONFIG_EVA) && user) 251 + LoadHWE(addr, value, res); 252 + else 280 253 LoadHW(addr, value, res); 281 - } 282 254 283 255 if (res) 284 256 goto fault; ··· 260 286 break; 261 287 262 288 case lw_op: 263 - if (!access_ok(addr, 4)) 289 + if (user && !access_ok(addr, 4)) 264 290 goto sigbus; 265 291 266 - if (IS_ENABLED(CONFIG_EVA)) { 267 - if (uaccess_kernel()) 268 - LoadW(addr, value, res); 269 - else 270 - LoadWE(addr, value, res); 271 - } else { 292 + if (IS_ENABLED(CONFIG_EVA) && user) 293 + LoadWE(addr, value, res); 294 + else 272 295 LoadW(addr, value, res); 273 - } 274 296 275 297 if (res) 276 298 goto fault; ··· 275 305 break; 276 306 277 307 case lhu_op: 278 - if (!access_ok(addr, 2)) 308 + if (user && !access_ok(addr, 2)) 279 309 goto sigbus; 280 310 281 - if (IS_ENABLED(CONFIG_EVA)) { 282 - if (uaccess_kernel()) 283 - LoadHWU(addr, value, res); 284 - else 285 - LoadHWUE(addr, value, res); 286 - } else { 311 + if (IS_ENABLED(CONFIG_EVA) && user) 312 + LoadHWUE(addr, value, res); 313 + else 287 314 LoadHWU(addr, value, res); 288 - } 289 315 290 316 if (res) 291 317 goto fault; ··· 298 332 * would blow up, so for now we don't handle unaligned 64-bit 299 333 * instructions on 32-bit kernels. 300 334 */ 301 - if (!access_ok(addr, 4)) 335 + if (user && !access_ok(addr, 4)) 302 336 goto sigbus; 303 337 304 338 LoadWU(addr, value, res); ··· 321 355 * would blow up, so for now we don't handle unaligned 64-bit 322 356 * instructions on 32-bit kernels. 
323 357 */ 324 - if (!access_ok(addr, 8)) 358 + if (user && !access_ok(addr, 8)) 325 359 goto sigbus; 326 360 327 361 LoadDW(addr, value, res); ··· 336 370 goto sigill; 337 371 338 372 case sh_op: 339 - if (!access_ok(addr, 2)) 373 + if (user && !access_ok(addr, 2)) 340 374 goto sigbus; 341 375 342 376 compute_return_epc(regs); 343 377 value = regs->regs[insn.i_format.rt]; 344 378 345 - if (IS_ENABLED(CONFIG_EVA)) { 346 - if (uaccess_kernel()) 347 - StoreHW(addr, value, res); 348 - else 349 - StoreHWE(addr, value, res); 350 - } else { 379 + if (IS_ENABLED(CONFIG_EVA) && user) 380 + StoreHWE(addr, value, res); 381 + else 351 382 StoreHW(addr, value, res); 352 - } 353 383 354 384 if (res) 355 385 goto fault; 356 386 break; 357 387 358 388 case sw_op: 359 - if (!access_ok(addr, 4)) 389 + if (user && !access_ok(addr, 4)) 360 390 goto sigbus; 361 391 362 392 compute_return_epc(regs); 363 393 value = regs->regs[insn.i_format.rt]; 364 394 365 - if (IS_ENABLED(CONFIG_EVA)) { 366 - if (uaccess_kernel()) 367 - StoreW(addr, value, res); 368 - else 369 - StoreWE(addr, value, res); 370 - } else { 395 + if (IS_ENABLED(CONFIG_EVA) && user) 396 + StoreWE(addr, value, res); 397 + else 371 398 StoreW(addr, value, res); 372 - } 373 399 374 400 if (res) 375 401 goto fault; ··· 376 418 * would blow up, so for now we don't handle unaligned 64-bit 377 419 * instructions on 32-bit kernels. 
378 420 */ 379 - if (!access_ok(addr, 8)) 421 + if (user && !access_ok(addr, 8)) 380 422 goto sigbus; 381 423 382 424 compute_return_epc(regs); ··· 584 626 unsigned long origpc, contpc; 585 627 union mips_instruction insn; 586 628 struct mm_decoded_insn mminsn; 629 + bool user = user_mode(regs); 587 630 588 631 origpc = regs->cp0_epc; 589 632 orig31 = regs->regs[31]; ··· 648 689 if (reg == 31) 649 690 goto sigbus; 650 691 651 - if (!access_ok(addr, 8)) 692 + if (user && !access_ok(addr, 8)) 652 693 goto sigbus; 653 694 654 695 LoadW(addr, value, res); ··· 667 708 if (reg == 31) 668 709 goto sigbus; 669 710 670 - if (!access_ok(addr, 8)) 711 + if (user && !access_ok(addr, 8)) 671 712 goto sigbus; 672 713 673 714 value = regs->regs[reg]; ··· 687 728 if (reg == 31) 688 729 goto sigbus; 689 730 690 - if (!access_ok(addr, 16)) 731 + if (user && !access_ok(addr, 16)) 691 732 goto sigbus; 692 733 693 734 LoadDW(addr, value, res); ··· 710 751 if (reg == 31) 711 752 goto sigbus; 712 753 713 - if (!access_ok(addr, 16)) 754 + if (user && !access_ok(addr, 16)) 714 755 goto sigbus; 715 756 716 757 value = regs->regs[reg]; ··· 733 774 if ((rvar > 9) || !reg) 734 775 goto sigill; 735 776 if (reg & 0x10) { 736 - if (!access_ok(addr, 4 * (rvar + 1))) 777 + if (user && !access_ok(addr, 4 * (rvar + 1))) 737 778 goto sigbus; 738 779 } else { 739 - if (!access_ok(addr, 4 * rvar)) 780 + if (user && !access_ok(addr, 4 * rvar)) 740 781 goto sigbus; 741 782 } 742 783 if (rvar == 9) ··· 769 810 if ((rvar > 9) || !reg) 770 811 goto sigill; 771 812 if (reg & 0x10) { 772 - if (!access_ok(addr, 4 * (rvar + 1))) 813 + if (user && !access_ok(addr, 4 * (rvar + 1))) 773 814 goto sigbus; 774 815 } else { 775 - if (!access_ok(addr, 4 * rvar)) 816 + if (user && !access_ok(addr, 4 * rvar)) 776 817 goto sigbus; 777 818 } 778 819 if (rvar == 9) ··· 806 847 if ((rvar > 9) || !reg) 807 848 goto sigill; 808 849 if (reg & 0x10) { 809 - if (!access_ok(addr, 8 * (rvar + 1))) 850 + if (user && !access_ok(addr, 
8 * (rvar + 1))) 810 851 goto sigbus; 811 852 } else { 812 - if (!access_ok(addr, 8 * rvar)) 853 + if (user && !access_ok(addr, 8 * rvar)) 813 854 goto sigbus; 814 855 } 815 856 if (rvar == 9) ··· 847 888 if ((rvar > 9) || !reg) 848 889 goto sigill; 849 890 if (reg & 0x10) { 850 - if (!access_ok(addr, 8 * (rvar + 1))) 891 + if (user && !access_ok(addr, 8 * (rvar + 1))) 851 892 goto sigbus; 852 893 } else { 853 - if (!access_ok(addr, 8 * rvar)) 894 + if (user && !access_ok(addr, 8 * rvar)) 854 895 goto sigbus; 855 896 } 856 897 if (rvar == 9) ··· 969 1010 case mm_lwm16_op: 970 1011 reg = insn.mm16_m_format.rlist; 971 1012 rvar = reg + 1; 972 - if (!access_ok(addr, 4 * rvar)) 1013 + if (user && !access_ok(addr, 4 * rvar)) 973 1014 goto sigbus; 974 1015 975 1016 for (i = 16; rvar; rvar--, i++) { ··· 989 1030 case mm_swm16_op: 990 1031 reg = insn.mm16_m_format.rlist; 991 1032 rvar = reg + 1; 992 - if (!access_ok(addr, 4 * rvar)) 1033 + if (user && !access_ok(addr, 4 * rvar)) 993 1034 goto sigbus; 994 1035 995 1036 for (i = 16; rvar; rvar--, i++) { ··· 1043 1084 } 1044 1085 1045 1086 loadHW: 1046 - if (!access_ok(addr, 2)) 1087 + if (user && !access_ok(addr, 2)) 1047 1088 goto sigbus; 1048 1089 1049 1090 LoadHW(addr, value, res); ··· 1053 1094 goto success; 1054 1095 1055 1096 loadHWU: 1056 - if (!access_ok(addr, 2)) 1097 + if (user && !access_ok(addr, 2)) 1057 1098 goto sigbus; 1058 1099 1059 1100 LoadHWU(addr, value, res); ··· 1063 1104 goto success; 1064 1105 1065 1106 loadW: 1066 - if (!access_ok(addr, 4)) 1107 + if (user && !access_ok(addr, 4)) 1067 1108 goto sigbus; 1068 1109 1069 1110 LoadW(addr, value, res); ··· 1081 1122 * would blow up, so for now we don't handle unaligned 64-bit 1082 1123 * instructions on 32-bit kernels. 
1083 1124 */ 1084 - if (!access_ok(addr, 4)) 1125 + if (user && !access_ok(addr, 4)) 1085 1126 goto sigbus; 1086 1127 1087 1128 LoadWU(addr, value, res); ··· 1103 1144 * would blow up, so for now we don't handle unaligned 64-bit 1104 1145 * instructions on 32-bit kernels. 1105 1146 */ 1106 - if (!access_ok(addr, 8)) 1147 + if (user && !access_ok(addr, 8)) 1107 1148 goto sigbus; 1108 1149 1109 1150 LoadDW(addr, value, res); ··· 1117 1158 goto sigill; 1118 1159 1119 1160 storeHW: 1120 - if (!access_ok(addr, 2)) 1161 + if (user && !access_ok(addr, 2)) 1121 1162 goto sigbus; 1122 1163 1123 1164 value = regs->regs[reg]; ··· 1127 1168 goto success; 1128 1169 1129 1170 storeW: 1130 - if (!access_ok(addr, 4)) 1171 + if (user && !access_ok(addr, 4)) 1131 1172 goto sigbus; 1132 1173 1133 1174 value = regs->regs[reg]; ··· 1145 1186 * would blow up, so for now we don't handle unaligned 64-bit 1146 1187 * instructions on 32-bit kernels. 1147 1188 */ 1148 - if (!access_ok(addr, 8)) 1189 + if (user && !access_ok(addr, 8)) 1149 1190 goto sigbus; 1150 1191 1151 1192 value = regs->regs[reg]; ··· 1202 1243 union mips16e_instruction mips16inst, oldinst; 1203 1244 unsigned int opcode; 1204 1245 int extended = 0; 1246 + bool user = user_mode(regs); 1205 1247 1206 1248 origpc = regs->cp0_epc; 1207 1249 orig31 = regs->regs[31]; ··· 1304 1344 goto sigbus; 1305 1345 1306 1346 case MIPS16e_lh_op: 1307 - if (!access_ok(addr, 2)) 1347 + if (user && !access_ok(addr, 2)) 1308 1348 goto sigbus; 1309 1349 1310 1350 LoadHW(addr, value, res); ··· 1315 1355 break; 1316 1356 1317 1357 case MIPS16e_lhu_op: 1318 - if (!access_ok(addr, 2)) 1358 + if (user && !access_ok(addr, 2)) 1319 1359 goto sigbus; 1320 1360 1321 1361 LoadHWU(addr, value, res); ··· 1328 1368 case MIPS16e_lw_op: 1329 1369 case MIPS16e_lwpc_op: 1330 1370 case MIPS16e_lwsp_op: 1331 - if (!access_ok(addr, 4)) 1371 + if (user && !access_ok(addr, 4)) 1332 1372 goto sigbus; 1333 1373 1334 1374 LoadW(addr, value, res); ··· 1347 1387 * would 
blow up, so for now we don't handle unaligned 64-bit 1348 1388 * instructions on 32-bit kernels. 1349 1389 */ 1350 - if (!access_ok(addr, 4)) 1390 + if (user && !access_ok(addr, 4)) 1351 1391 goto sigbus; 1352 1392 1353 1393 LoadWU(addr, value, res); ··· 1371 1411 * would blow up, so for now we don't handle unaligned 64-bit 1372 1412 * instructions on 32-bit kernels. 1373 1413 */ 1374 - if (!access_ok(addr, 8)) 1414 + if (user && !access_ok(addr, 8)) 1375 1415 goto sigbus; 1376 1416 1377 1417 LoadDW(addr, value, res); ··· 1386 1426 goto sigill; 1387 1427 1388 1428 case MIPS16e_sh_op: 1389 - if (!access_ok(addr, 2)) 1429 + if (user && !access_ok(addr, 2)) 1390 1430 goto sigbus; 1391 1431 1392 1432 MIPS16e_compute_return_epc(regs, &oldinst); ··· 1399 1439 case MIPS16e_sw_op: 1400 1440 case MIPS16e_swsp_op: 1401 1441 case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */ 1402 - if (!access_ok(addr, 4)) 1442 + if (user && !access_ok(addr, 4)) 1403 1443 goto sigbus; 1404 1444 1405 1445 MIPS16e_compute_return_epc(regs, &oldinst); ··· 1419 1459 * would blow up, so for now we don't handle unaligned 64-bit 1420 1460 * instructions on 32-bit kernels. 
1421 1461 */ 1422 - if (!access_ok(addr, 8)) 1462 + if (user && !access_ok(addr, 8)) 1423 1463 goto sigbus; 1424 1464 1425 1465 MIPS16e_compute_return_epc(regs, &oldinst); ··· 1475 1515 asmlinkage void do_ade(struct pt_regs *regs) 1476 1516 { 1477 1517 enum ctx_state prev_state; 1478 - unsigned int __user *pc; 1479 - mm_segment_t seg; 1518 + unsigned int *pc; 1480 1519 1481 1520 prev_state = exception_enter(); 1482 1521 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, ··· 1510 1551 show_registers(regs); 1511 1552 1512 1553 if (cpu_has_mmips) { 1513 - seg = get_fs(); 1514 - if (!user_mode(regs)) 1515 - set_fs(KERNEL_DS); 1516 1554 emulate_load_store_microMIPS(regs, 1517 1555 (void __user *)regs->cp0_badvaddr); 1518 - set_fs(seg); 1519 - 1520 1556 return; 1521 1557 } 1522 1558 1523 1559 if (cpu_has_mips16) { 1524 - seg = get_fs(); 1525 - if (!user_mode(regs)) 1526 - set_fs(KERNEL_DS); 1527 1560 emulate_load_store_MIPS16e(regs, 1528 1561 (void __user *)regs->cp0_badvaddr); 1529 - set_fs(seg); 1530 - 1531 1562 return; 1532 1563 } 1533 1564 ··· 1526 1577 1527 1578 if (unaligned_action == UNALIGNED_ACTION_SHOW) 1528 1579 show_registers(regs); 1529 - pc = (unsigned int __user *)exception_epc(regs); 1580 + pc = (unsigned int *)exception_epc(regs); 1530 1581 1531 - seg = get_fs(); 1532 - if (!user_mode(regs)) 1533 - set_fs(KERNEL_DS); 1534 1582 emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc); 1535 - set_fs(seg); 1536 1583 1537 1584 return; 1538 1585
+3 -2
arch/mips/kernel/vdso.c
··· 90 90 { 91 91 struct mips_vdso_image *image = current->thread.abi->vdso; 92 92 struct mm_struct *mm = current->mm; 93 - unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn; 93 + unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base; 94 94 struct vm_area_struct *vma; 95 95 int ret; 96 96 ··· 158 158 159 159 /* Map GIC user page. */ 160 160 if (gic_size) { 161 - gic_pfn = virt_to_phys(mips_gic_base + MIPS_GIC_USER_OFS) >> PAGE_SHIFT; 161 + gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS; 162 + gic_pfn = virt_to_phys((void *)gic_base) >> PAGE_SHIFT; 162 163 163 164 ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size, 164 165 pgprot_noncached(vma->vm_page_prot));
-34
arch/mips/kvm/Kconfig
··· 30 30 help 31 31 Support for hosting Guest kernels. 32 32 33 - choice 34 - prompt "Virtualization mode" 35 - depends on KVM 36 - default KVM_MIPS_TE 37 - 38 - config KVM_MIPS_TE 39 - bool "Trap & Emulate" 40 - depends on CPU_MIPS32_R2 41 - help 42 - Use trap and emulate to virtualize 32-bit guests in user mode. This 43 - does not require any special hardware Virtualization support beyond 44 - standard MIPS32 r2 or later, but it does require the guest kernel 45 - to be configured with CONFIG_KVM_GUEST=y so that it resides in the 46 - user address segment. 47 - 48 - config KVM_MIPS_VZ 49 - bool "MIPS Virtualization (VZ) ASE" 50 - help 51 - Use the MIPS Virtualization (VZ) ASE to virtualize guests. This 52 - supports running unmodified guest kernels (with CONFIG_KVM_GUEST=n), 53 - but requires hardware support. 54 - 55 - endchoice 56 - 57 - config KVM_MIPS_DYN_TRANS 58 - bool "KVM/MIPS: Dynamic binary translation to reduce traps" 59 - depends on KVM_MIPS_TE 60 - default y 61 - help 62 - When running in Trap & Emulate mode patch privileged 63 - instructions to reduce the number of traps. 64 - 65 - If unsure, say Y. 66 - 67 33 config KVM_MIPS_DEBUG_COP0_COUNTERS 68 34 bool "Maintain counters for COP0 accesses" 69 35 depends on KVM
+1 -6
arch/mips/kvm/Makefile
··· 9 9 common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o 10 10 11 11 kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \ 12 - interrupt.o stats.o commpage.o \ 12 + interrupt.o stats.o \ 13 13 fpu.o 14 14 kvm-objs += hypcall.o 15 15 kvm-objs += mmu.o ··· 17 17 kvm-objs += loongson_ipi.o 18 18 endif 19 19 20 - ifdef CONFIG_KVM_MIPS_VZ 21 20 kvm-objs += vz.o 22 - else 23 - kvm-objs += dyntrans.o 24 - kvm-objs += trap_emul.o 25 - endif 26 21 obj-$(CONFIG_KVM) += kvm.o 27 22 obj-y += callback.o tlb.o
-32
arch/mips/kvm/commpage.c
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * commpage, currently used for Virtual COP0 registers. 7 - * Mapped into the guest kernel @ KVM_GUEST_COMMPAGE_ADDR. 8 - * 9 - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 10 - * Authors: Sanjay Lal <sanjayl@kymasys.com> 11 - */ 12 - 13 - #include <linux/errno.h> 14 - #include <linux/err.h> 15 - #include <linux/vmalloc.h> 16 - #include <linux/fs.h> 17 - #include <linux/memblock.h> 18 - #include <asm/page.h> 19 - #include <asm/cacheflush.h> 20 - #include <asm/mmu_context.h> 21 - 22 - #include <linux/kvm_host.h> 23 - 24 - #include "commpage.h" 25 - 26 - void kvm_mips_commpage_init(struct kvm_vcpu *vcpu) 27 - { 28 - struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage; 29 - 30 - /* Specific init values for fields */ 31 - vcpu->arch.cop0 = &page->cop0; 32 - }
-24
arch/mips/kvm/commpage.h
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * KVM/MIPS: commpage: mapped into get kernel space 7 - * 8 - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 9 - * Authors: Sanjay Lal <sanjayl@kymasys.com> 10 - */ 11 - 12 - #ifndef __KVM_MIPS_COMMPAGE_H__ 13 - #define __KVM_MIPS_COMMPAGE_H__ 14 - 15 - struct kvm_mips_commpage { 16 - /* COP0 state is mapped into Guest kernel via commpage */ 17 - struct mips_coproc cop0; 18 - }; 19 - 20 - #define KVM_MIPS_COMM_EIDI_OFFSET 0x0 21 - 22 - extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu); 23 - 24 - #endif /* __KVM_MIPS_COMMPAGE_H__ */
-143
arch/mips/kvm/dyntrans.c
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * KVM/MIPS: Binary Patching for privileged instructions, reduces traps. 7 - * 8 - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 9 - * Authors: Sanjay Lal <sanjayl@kymasys.com> 10 - */ 11 - 12 - #include <linux/errno.h> 13 - #include <linux/err.h> 14 - #include <linux/highmem.h> 15 - #include <linux/kvm_host.h> 16 - #include <linux/uaccess.h> 17 - #include <linux/vmalloc.h> 18 - #include <linux/fs.h> 19 - #include <linux/memblock.h> 20 - #include <asm/cacheflush.h> 21 - 22 - #include "commpage.h" 23 - 24 - /** 25 - * kvm_mips_trans_replace() - Replace trapping instruction in guest memory. 26 - * @vcpu: Virtual CPU. 27 - * @opc: PC of instruction to replace. 28 - * @replace: Instruction to write 29 - */ 30 - static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc, 31 - union mips_instruction replace) 32 - { 33 - unsigned long vaddr = (unsigned long)opc; 34 - int err; 35 - 36 - retry: 37 - /* The GVA page table is still active so use the Linux TLB handlers */ 38 - kvm_trap_emul_gva_lockless_begin(vcpu); 39 - err = put_user(replace.word, opc); 40 - kvm_trap_emul_gva_lockless_end(vcpu); 41 - 42 - if (unlikely(err)) { 43 - /* 44 - * We write protect clean pages in GVA page table so normal 45 - * Linux TLB mod handler doesn't silently dirty the page. 46 - * Its also possible we raced with a GVA invalidation. 47 - * Try to force the page to become dirty. 48 - */ 49 - err = kvm_trap_emul_gva_fault(vcpu, vaddr, true); 50 - if (unlikely(err)) { 51 - kvm_info("%s: Address unwriteable: %p\n", 52 - __func__, opc); 53 - return -EFAULT; 54 - } 55 - 56 - /* 57 - * Try again. This will likely trigger a TLB refill, which will 58 - * fetch the new dirty entry from the GVA page table, which 59 - * should then succeed. 
60 - */ 61 - goto retry; 62 - } 63 - __local_flush_icache_user_range(vaddr, vaddr + 4); 64 - 65 - return 0; 66 - } 67 - 68 - int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc, 69 - struct kvm_vcpu *vcpu) 70 - { 71 - union mips_instruction nop_inst = { 0 }; 72 - 73 - /* Replace the CACHE instruction, with a NOP */ 74 - return kvm_mips_trans_replace(vcpu, opc, nop_inst); 75 - } 76 - 77 - /* 78 - * Address based CACHE instructions are transformed into synci(s). A little 79 - * heavy for just D-cache invalidates, but avoids an expensive trap 80 - */ 81 - int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc, 82 - struct kvm_vcpu *vcpu) 83 - { 84 - union mips_instruction synci_inst = { 0 }; 85 - 86 - synci_inst.i_format.opcode = bcond_op; 87 - synci_inst.i_format.rs = inst.i_format.rs; 88 - synci_inst.i_format.rt = synci_op; 89 - if (cpu_has_mips_r6) 90 - synci_inst.i_format.simmediate = inst.spec3_format.simmediate; 91 - else 92 - synci_inst.i_format.simmediate = inst.i_format.simmediate; 93 - 94 - return kvm_mips_trans_replace(vcpu, opc, synci_inst); 95 - } 96 - 97 - int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc, 98 - struct kvm_vcpu *vcpu) 99 - { 100 - union mips_instruction mfc0_inst = { 0 }; 101 - u32 rd, sel; 102 - 103 - rd = inst.c0r_format.rd; 104 - sel = inst.c0r_format.sel; 105 - 106 - if (rd == MIPS_CP0_ERRCTL && sel == 0) { 107 - mfc0_inst.r_format.opcode = spec_op; 108 - mfc0_inst.r_format.rd = inst.c0r_format.rt; 109 - mfc0_inst.r_format.func = add_op; 110 - } else { 111 - mfc0_inst.i_format.opcode = lw_op; 112 - mfc0_inst.i_format.rt = inst.c0r_format.rt; 113 - mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR | 114 - offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]); 115 - #ifdef CONFIG_CPU_BIG_ENDIAN 116 - if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8) 117 - mfc0_inst.i_format.simmediate |= 4; 118 - #endif 119 - } 120 - 121 - return kvm_mips_trans_replace(vcpu, opc, mfc0_inst); 122 - } 123 - 124 - 
int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc, 125 - struct kvm_vcpu *vcpu) 126 - { 127 - union mips_instruction mtc0_inst = { 0 }; 128 - u32 rd, sel; 129 - 130 - rd = inst.c0r_format.rd; 131 - sel = inst.c0r_format.sel; 132 - 133 - mtc0_inst.i_format.opcode = sw_op; 134 - mtc0_inst.i_format.rt = inst.c0r_format.rt; 135 - mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR | 136 - offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]); 137 - #ifdef CONFIG_CPU_BIG_ENDIAN 138 - if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8) 139 - mtc0_inst.i_format.simmediate |= 4; 140 - #endif 141 - 142 - return kvm_mips_trans_replace(vcpu, opc, mtc0_inst); 143 - }
+16 -1656
arch/mips/kvm/emulate.c
··· 30 30 #define CONFIG_MIPS_MT 31 31 32 32 #include "interrupt.h" 33 - #include "commpage.h" 34 33 35 34 #include "trace.h" 36 35 ··· 275 276 *out = vcpu->arch.host_cp0_badinstr; 276 277 return 0; 277 278 } else { 278 - return kvm_get_inst(opc, vcpu, out); 279 + WARN_ONCE(1, "CPU doesn't have BadInstr register\n"); 280 + return -EINVAL; 279 281 } 280 282 } 281 283 ··· 297 297 *out = vcpu->arch.host_cp0_badinstrp; 298 298 return 0; 299 299 } else { 300 - return kvm_get_inst(opc, vcpu, out); 300 + WARN_ONCE(1, "CPU doesn't have BadInstrp register\n"); 301 + return -EINVAL; 301 302 } 302 303 } 303 304 ··· 722 721 * preemption until the new value is written to prevent restore of a 723 722 * GTOffset corresponding to the old CP0_Compare value. 724 723 */ 725 - if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0) { 724 + if (delta > 0) { 726 725 preempt_disable(); 727 726 write_c0_gtoffset(compare - read_c0_count()); 728 727 back_to_back_c0_hazard(); ··· 735 734 736 735 if (ack) 737 736 kvm_mips_callbacks->dequeue_timer_int(vcpu); 738 - else if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) 737 + else 739 738 /* 740 739 * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so 741 740 * preserve guest CP0_Cause.TI if we don't want to ack it. ··· 744 743 745 744 kvm_write_c0_guest_compare(cop0, compare); 746 745 747 - if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { 748 - if (delta > 0) 749 - preempt_enable(); 746 + if (delta > 0) 747 + preempt_enable(); 750 748 751 - back_to_back_c0_hazard(); 749 + back_to_back_c0_hazard(); 752 750 753 - if (!ack && cause & CAUSEF_TI) 754 - kvm_write_c0_guest_cause(cop0, cause); 755 - } 751 + if (!ack && cause & CAUSEF_TI) 752 + kvm_write_c0_guest_cause(cop0, cause); 756 753 757 754 /* resume_hrtimer() takes care of timer interrupts > count */ 758 755 if (!dc) ··· 761 762 * until after the new CP0_Compare is written, otherwise new guest 762 763 * CP0_Count could hit new guest CP0_Compare. 
763 764 */ 764 - if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta <= 0) 765 + if (delta <= 0) 765 766 write_c0_gtoffset(compare - read_c0_count()); 766 767 } 767 768 ··· 942 943 return HRTIMER_RESTART; 943 944 } 944 945 945 - enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) 946 - { 947 - struct mips_coproc *cop0 = vcpu->arch.cop0; 948 - enum emulation_result er = EMULATE_DONE; 949 - 950 - if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { 951 - kvm_clear_c0_guest_status(cop0, ST0_ERL); 952 - vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); 953 - } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { 954 - kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, 955 - kvm_read_c0_guest_epc(cop0)); 956 - kvm_clear_c0_guest_status(cop0, ST0_EXL); 957 - vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); 958 - 959 - } else { 960 - kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", 961 - vcpu->arch.pc); 962 - er = EMULATE_FAIL; 963 - } 964 - 965 - return er; 966 - } 967 - 968 946 enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) 969 947 { 970 948 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, ··· 965 989 } 966 990 967 991 return EMULATE_DONE; 968 - } 969 - 970 - static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu, 971 - unsigned long entryhi) 972 - { 973 - struct mips_coproc *cop0 = vcpu->arch.cop0; 974 - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; 975 - int cpu, i; 976 - u32 nasid = entryhi & KVM_ENTRYHI_ASID; 977 - 978 - if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) { 979 - trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) & 980 - KVM_ENTRYHI_ASID, nasid); 981 - 982 - /* 983 - * Flush entries from the GVA page tables. 984 - * Guest user page table will get flushed lazily on re-entry to 985 - * guest user if the guest ASID actually changes. 986 - */ 987 - kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN); 988 - 989 - /* 990 - * Regenerate/invalidate kernel MMU context. 
991 - * The user MMU context will be regenerated lazily on re-entry 992 - * to guest user if the guest ASID actually changes. 993 - */ 994 - preempt_disable(); 995 - cpu = smp_processor_id(); 996 - get_new_mmu_context(kern_mm); 997 - for_each_possible_cpu(i) 998 - if (i != cpu) 999 - set_cpu_context(i, kern_mm, 0); 1000 - preempt_enable(); 1001 - } 1002 - kvm_write_c0_guest_entryhi(cop0, entryhi); 1003 - } 1004 - 1005 - enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) 1006 - { 1007 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1008 - struct kvm_mips_tlb *tlb; 1009 - unsigned long pc = vcpu->arch.pc; 1010 - int index; 1011 - 1012 - index = kvm_read_c0_guest_index(cop0); 1013 - if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { 1014 - /* UNDEFINED */ 1015 - kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index); 1016 - index &= KVM_MIPS_GUEST_TLB_SIZE - 1; 1017 - } 1018 - 1019 - tlb = &vcpu->arch.guest_tlb[index]; 1020 - kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask); 1021 - kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]); 1022 - kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]); 1023 - kvm_mips_change_entryhi(vcpu, tlb->tlb_hi); 1024 - 1025 - return EMULATE_DONE; 1026 - } 1027 - 1028 - /** 1029 - * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map. 1030 - * @vcpu: VCPU with changed mappings. 1031 - * @tlb: TLB entry being removed. 1032 - * 1033 - * This is called to indicate a single change in guest MMU mappings, so that we 1034 - * can arrange TLB flushes on this and other CPUs. 
1035 - */ 1036 - static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, 1037 - struct kvm_mips_tlb *tlb) 1038 - { 1039 - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; 1040 - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; 1041 - int cpu, i; 1042 - bool user; 1043 - 1044 - /* No need to flush for entries which are already invalid */ 1045 - if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V)) 1046 - return; 1047 - /* Don't touch host kernel page tables or TLB mappings */ 1048 - if ((unsigned long)tlb->tlb_hi > 0x7fffffff) 1049 - return; 1050 - /* User address space doesn't need flushing for KSeg2/3 changes */ 1051 - user = tlb->tlb_hi < KVM_GUEST_KSEG0; 1052 - 1053 - preempt_disable(); 1054 - 1055 - /* Invalidate page table entries */ 1056 - kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user); 1057 - 1058 - /* 1059 - * Probe the shadow host TLB for the entry being overwritten, if one 1060 - * matches, invalidate it 1061 - */ 1062 - kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true); 1063 - 1064 - /* Invalidate the whole ASID on other CPUs */ 1065 - cpu = smp_processor_id(); 1066 - for_each_possible_cpu(i) { 1067 - if (i == cpu) 1068 - continue; 1069 - if (user) 1070 - set_cpu_context(i, user_mm, 0); 1071 - set_cpu_context(i, kern_mm, 0); 1072 - } 1073 - 1074 - preempt_enable(); 1075 - } 1076 - 1077 - /* Write Guest TLB Entry @ Index */ 1078 - enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) 1079 - { 1080 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1081 - int index = kvm_read_c0_guest_index(cop0); 1082 - struct kvm_mips_tlb *tlb = NULL; 1083 - unsigned long pc = vcpu->arch.pc; 1084 - 1085 - if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { 1086 - kvm_debug("%s: illegal index: %d\n", __func__, index); 1087 - kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", 1088 - pc, index, kvm_read_c0_guest_entryhi(cop0), 1089 - kvm_read_c0_guest_entrylo0(cop0), 1090 - 
kvm_read_c0_guest_entrylo1(cop0), 1091 - kvm_read_c0_guest_pagemask(cop0)); 1092 - index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; 1093 - } 1094 - 1095 - tlb = &vcpu->arch.guest_tlb[index]; 1096 - 1097 - kvm_mips_invalidate_guest_tlb(vcpu, tlb); 1098 - 1099 - tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); 1100 - tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); 1101 - tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0); 1102 - tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0); 1103 - 1104 - kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", 1105 - pc, index, kvm_read_c0_guest_entryhi(cop0), 1106 - kvm_read_c0_guest_entrylo0(cop0), 1107 - kvm_read_c0_guest_entrylo1(cop0), 1108 - kvm_read_c0_guest_pagemask(cop0)); 1109 - 1110 - return EMULATE_DONE; 1111 - } 1112 - 1113 - /* Write Guest TLB Entry @ Random Index */ 1114 - enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) 1115 - { 1116 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1117 - struct kvm_mips_tlb *tlb = NULL; 1118 - unsigned long pc = vcpu->arch.pc; 1119 - int index; 1120 - 1121 - index = prandom_u32_max(KVM_MIPS_GUEST_TLB_SIZE); 1122 - tlb = &vcpu->arch.guest_tlb[index]; 1123 - 1124 - kvm_mips_invalidate_guest_tlb(vcpu, tlb); 1125 - 1126 - tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); 1127 - tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); 1128 - tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0); 1129 - tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0); 1130 - 1131 - kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", 1132 - pc, index, kvm_read_c0_guest_entryhi(cop0), 1133 - kvm_read_c0_guest_entrylo0(cop0), 1134 - kvm_read_c0_guest_entrylo1(cop0)); 1135 - 1136 - return EMULATE_DONE; 1137 - } 1138 - 1139 - enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) 1140 - { 1141 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1142 - long entryhi = kvm_read_c0_guest_entryhi(cop0); 1143 - unsigned long pc = 
vcpu->arch.pc; 1144 - int index = -1; 1145 - 1146 - index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); 1147 - 1148 - kvm_write_c0_guest_index(cop0, index); 1149 - 1150 - kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, 1151 - index); 1152 - 1153 - return EMULATE_DONE; 1154 - } 1155 - 1156 - /** 1157 - * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1 1158 - * @vcpu: Virtual CPU. 1159 - * 1160 - * Finds the mask of bits which are writable in the guest's Config1 CP0 1161 - * register, by userland (currently read-only to the guest). 1162 - */ 1163 - unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu) 1164 - { 1165 - unsigned int mask = 0; 1166 - 1167 - /* Permit FPU to be present if FPU is supported */ 1168 - if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) 1169 - mask |= MIPS_CONF1_FP; 1170 - 1171 - return mask; 1172 - } 1173 - 1174 - /** 1175 - * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3 1176 - * @vcpu: Virtual CPU. 1177 - * 1178 - * Finds the mask of bits which are writable in the guest's Config3 CP0 1179 - * register, by userland (currently read-only to the guest). 1180 - */ 1181 - unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu) 1182 - { 1183 - /* Config4 and ULRI are optional */ 1184 - unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI; 1185 - 1186 - /* Permit MSA to be present if MSA is supported */ 1187 - if (kvm_mips_guest_can_have_msa(&vcpu->arch)) 1188 - mask |= MIPS_CONF3_MSA; 1189 - 1190 - return mask; 1191 - } 1192 - 1193 - /** 1194 - * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4 1195 - * @vcpu: Virtual CPU. 1196 - * 1197 - * Finds the mask of bits which are writable in the guest's Config4 CP0 1198 - * register, by userland (currently read-only to the guest). 
1199 - */ 1200 - unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu) 1201 - { 1202 - /* Config5 is optional */ 1203 - unsigned int mask = MIPS_CONF_M; 1204 - 1205 - /* KScrExist */ 1206 - mask |= 0xfc << MIPS_CONF4_KSCREXIST_SHIFT; 1207 - 1208 - return mask; 1209 - } 1210 - 1211 - /** 1212 - * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5 1213 - * @vcpu: Virtual CPU. 1214 - * 1215 - * Finds the mask of bits which are writable in the guest's Config5 CP0 1216 - * register, by the guest itself. 1217 - */ 1218 - unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu) 1219 - { 1220 - unsigned int mask = 0; 1221 - 1222 - /* Permit MSAEn changes if MSA supported and enabled */ 1223 - if (kvm_mips_guest_has_msa(&vcpu->arch)) 1224 - mask |= MIPS_CONF5_MSAEN; 1225 - 1226 - /* 1227 - * Permit guest FPU mode changes if FPU is enabled and the relevant 1228 - * feature exists according to FIR register. 1229 - */ 1230 - if (kvm_mips_guest_has_fpu(&vcpu->arch)) { 1231 - if (cpu_has_fre) 1232 - mask |= MIPS_CONF5_FRE; 1233 - /* We don't support UFR or UFE */ 1234 - } 1235 - 1236 - return mask; 1237 - } 1238 - 1239 - enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, 1240 - u32 *opc, u32 cause, 1241 - struct kvm_vcpu *vcpu) 1242 - { 1243 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1244 - enum emulation_result er = EMULATE_DONE; 1245 - u32 rt, rd, sel; 1246 - unsigned long curr_pc; 1247 - 1248 - /* 1249 - * Update PC and hold onto current PC in case there is 1250 - * an error and we want to rollback the PC 1251 - */ 1252 - curr_pc = vcpu->arch.pc; 1253 - er = update_pc(vcpu, cause); 1254 - if (er == EMULATE_FAIL) 1255 - return er; 1256 - 1257 - if (inst.co_format.co) { 1258 - switch (inst.co_format.func) { 1259 - case tlbr_op: /* Read indexed TLB entry */ 1260 - er = kvm_mips_emul_tlbr(vcpu); 1261 - break; 1262 - case tlbwi_op: /* Write indexed */ 1263 - er = kvm_mips_emul_tlbwi(vcpu); 1264 - break; 1265 - case tlbwr_op: /* 
Write random */ 1266 - er = kvm_mips_emul_tlbwr(vcpu); 1267 - break; 1268 - case tlbp_op: /* TLB Probe */ 1269 - er = kvm_mips_emul_tlbp(vcpu); 1270 - break; 1271 - case rfe_op: 1272 - kvm_err("!!!COP0_RFE!!!\n"); 1273 - break; 1274 - case eret_op: 1275 - er = kvm_mips_emul_eret(vcpu); 1276 - goto dont_update_pc; 1277 - case wait_op: 1278 - er = kvm_mips_emul_wait(vcpu); 1279 - break; 1280 - case hypcall_op: 1281 - er = kvm_mips_emul_hypcall(vcpu, inst); 1282 - break; 1283 - } 1284 - } else { 1285 - rt = inst.c0r_format.rt; 1286 - rd = inst.c0r_format.rd; 1287 - sel = inst.c0r_format.sel; 1288 - 1289 - switch (inst.c0r_format.rs) { 1290 - case mfc_op: 1291 - #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS 1292 - cop0->stat[rd][sel]++; 1293 - #endif 1294 - /* Get reg */ 1295 - if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { 1296 - vcpu->arch.gprs[rt] = 1297 - (s32)kvm_mips_read_count(vcpu); 1298 - } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { 1299 - vcpu->arch.gprs[rt] = 0x0; 1300 - #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1301 - kvm_mips_trans_mfc0(inst, opc, vcpu); 1302 - #endif 1303 - } else { 1304 - vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel]; 1305 - 1306 - #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1307 - kvm_mips_trans_mfc0(inst, opc, vcpu); 1308 - #endif 1309 - } 1310 - 1311 - trace_kvm_hwr(vcpu, KVM_TRACE_MFC0, 1312 - KVM_TRACE_COP0(rd, sel), 1313 - vcpu->arch.gprs[rt]); 1314 - break; 1315 - 1316 - case dmfc_op: 1317 - vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; 1318 - 1319 - trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0, 1320 - KVM_TRACE_COP0(rd, sel), 1321 - vcpu->arch.gprs[rt]); 1322 - break; 1323 - 1324 - case mtc_op: 1325 - #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS 1326 - cop0->stat[rd][sel]++; 1327 - #endif 1328 - trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, 1329 - KVM_TRACE_COP0(rd, sel), 1330 - vcpu->arch.gprs[rt]); 1331 - 1332 - if ((rd == MIPS_CP0_TLB_INDEX) 1333 - && (vcpu->arch.gprs[rt] >= 1334 - KVM_MIPS_GUEST_TLB_SIZE)) { 1335 - kvm_err("Invalid TLB Index: %ld", 1336 - 
vcpu->arch.gprs[rt]); 1337 - er = EMULATE_FAIL; 1338 - break; 1339 - } 1340 - if ((rd == MIPS_CP0_PRID) && (sel == 1)) { 1341 - /* 1342 - * Preserve core number, and keep the exception 1343 - * base in guest KSeg0. 1344 - */ 1345 - kvm_change_c0_guest_ebase(cop0, 0x1ffff000, 1346 - vcpu->arch.gprs[rt]); 1347 - } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { 1348 - kvm_mips_change_entryhi(vcpu, 1349 - vcpu->arch.gprs[rt]); 1350 - } 1351 - /* Are we writing to COUNT */ 1352 - else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { 1353 - kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); 1354 - goto done; 1355 - } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) { 1356 - /* If we are writing to COMPARE */ 1357 - /* Clear pending timer interrupt, if any */ 1358 - kvm_mips_write_compare(vcpu, 1359 - vcpu->arch.gprs[rt], 1360 - true); 1361 - } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { 1362 - unsigned int old_val, val, change; 1363 - 1364 - old_val = kvm_read_c0_guest_status(cop0); 1365 - val = vcpu->arch.gprs[rt]; 1366 - change = val ^ old_val; 1367 - 1368 - /* Make sure that the NMI bit is never set */ 1369 - val &= ~ST0_NMI; 1370 - 1371 - /* 1372 - * Don't allow CU1 or FR to be set unless FPU 1373 - * capability enabled and exists in guest 1374 - * configuration. 1375 - */ 1376 - if (!kvm_mips_guest_has_fpu(&vcpu->arch)) 1377 - val &= ~(ST0_CU1 | ST0_FR); 1378 - 1379 - /* 1380 - * Also don't allow FR to be set if host doesn't 1381 - * support it. 1382 - */ 1383 - if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64)) 1384 - val &= ~ST0_FR; 1385 - 1386 - 1387 - /* Handle changes in FPU mode */ 1388 - preempt_disable(); 1389 - 1390 - /* 1391 - * FPU and Vector register state is made 1392 - * UNPREDICTABLE by a change of FR, so don't 1393 - * even bother saving it. 
1394 - */ 1395 - if (change & ST0_FR) 1396 - kvm_drop_fpu(vcpu); 1397 - 1398 - /* 1399 - * If MSA state is already live, it is undefined 1400 - * how it interacts with FR=0 FPU state, and we 1401 - * don't want to hit reserved instruction 1402 - * exceptions trying to save the MSA state later 1403 - * when CU=1 && FR=1, so play it safe and save 1404 - * it first. 1405 - */ 1406 - if (change & ST0_CU1 && !(val & ST0_FR) && 1407 - vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) 1408 - kvm_lose_fpu(vcpu); 1409 - 1410 - /* 1411 - * Propagate CU1 (FPU enable) changes 1412 - * immediately if the FPU context is already 1413 - * loaded. When disabling we leave the context 1414 - * loaded so it can be quickly enabled again in 1415 - * the near future. 1416 - */ 1417 - if (change & ST0_CU1 && 1418 - vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) 1419 - change_c0_status(ST0_CU1, val); 1420 - 1421 - preempt_enable(); 1422 - 1423 - kvm_write_c0_guest_status(cop0, val); 1424 - 1425 - #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1426 - /* 1427 - * If FPU present, we need CU1/FR bits to take 1428 - * effect fairly soon. 1429 - */ 1430 - if (!kvm_mips_guest_has_fpu(&vcpu->arch)) 1431 - kvm_mips_trans_mtc0(inst, opc, vcpu); 1432 - #endif 1433 - } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) { 1434 - unsigned int old_val, val, change, wrmask; 1435 - 1436 - old_val = kvm_read_c0_guest_config5(cop0); 1437 - val = vcpu->arch.gprs[rt]; 1438 - 1439 - /* Only a few bits are writable in Config5 */ 1440 - wrmask = kvm_mips_config5_wrmask(vcpu); 1441 - change = (val ^ old_val) & wrmask; 1442 - val = old_val ^ change; 1443 - 1444 - 1445 - /* Handle changes in FPU/MSA modes */ 1446 - preempt_disable(); 1447 - 1448 - /* 1449 - * Propagate FRE changes immediately if the FPU 1450 - * context is already loaded. 
1451 - */ 1452 - if (change & MIPS_CONF5_FRE && 1453 - vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) 1454 - change_c0_config5(MIPS_CONF5_FRE, val); 1455 - 1456 - /* 1457 - * Propagate MSAEn changes immediately if the 1458 - * MSA context is already loaded. When disabling 1459 - * we leave the context loaded so it can be 1460 - * quickly enabled again in the near future. 1461 - */ 1462 - if (change & MIPS_CONF5_MSAEN && 1463 - vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) 1464 - change_c0_config5(MIPS_CONF5_MSAEN, 1465 - val); 1466 - 1467 - preempt_enable(); 1468 - 1469 - kvm_write_c0_guest_config5(cop0, val); 1470 - } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { 1471 - u32 old_cause, new_cause; 1472 - 1473 - old_cause = kvm_read_c0_guest_cause(cop0); 1474 - new_cause = vcpu->arch.gprs[rt]; 1475 - /* Update R/W bits */ 1476 - kvm_change_c0_guest_cause(cop0, 0x08800300, 1477 - new_cause); 1478 - /* DC bit enabling/disabling timer? */ 1479 - if ((old_cause ^ new_cause) & CAUSEF_DC) { 1480 - if (new_cause & CAUSEF_DC) 1481 - kvm_mips_count_disable_cause(vcpu); 1482 - else 1483 - kvm_mips_count_enable_cause(vcpu); 1484 - } 1485 - } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) { 1486 - u32 mask = MIPS_HWRENA_CPUNUM | 1487 - MIPS_HWRENA_SYNCISTEP | 1488 - MIPS_HWRENA_CC | 1489 - MIPS_HWRENA_CCRES; 1490 - 1491 - if (kvm_read_c0_guest_config3(cop0) & 1492 - MIPS_CONF3_ULRI) 1493 - mask |= MIPS_HWRENA_ULR; 1494 - cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask; 1495 - } else { 1496 - cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; 1497 - #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1498 - kvm_mips_trans_mtc0(inst, opc, vcpu); 1499 - #endif 1500 - } 1501 - break; 1502 - 1503 - case dmtc_op: 1504 - kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", 1505 - vcpu->arch.pc, rt, rd, sel); 1506 - trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0, 1507 - KVM_TRACE_COP0(rd, sel), 1508 - vcpu->arch.gprs[rt]); 1509 - er = EMULATE_FAIL; 1510 - break; 1511 - 1512 - case mfmc0_op: 1513 - #ifdef 
KVM_MIPS_DEBUG_COP0_COUNTERS 1514 - cop0->stat[MIPS_CP0_STATUS][0]++; 1515 - #endif 1516 - if (rt != 0) 1517 - vcpu->arch.gprs[rt] = 1518 - kvm_read_c0_guest_status(cop0); 1519 - /* EI */ 1520 - if (inst.mfmc0_format.sc) { 1521 - kvm_debug("[%#lx] mfmc0_op: EI\n", 1522 - vcpu->arch.pc); 1523 - kvm_set_c0_guest_status(cop0, ST0_IE); 1524 - } else { 1525 - kvm_debug("[%#lx] mfmc0_op: DI\n", 1526 - vcpu->arch.pc); 1527 - kvm_clear_c0_guest_status(cop0, ST0_IE); 1528 - } 1529 - 1530 - break; 1531 - 1532 - case wrpgpr_op: 1533 - { 1534 - u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf; 1535 - u32 pss = 1536 - (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; 1537 - /* 1538 - * We don't support any shadow register sets, so 1539 - * SRSCtl[PSS] == SRSCtl[CSS] = 0 1540 - */ 1541 - if (css || pss) { 1542 - er = EMULATE_FAIL; 1543 - break; 1544 - } 1545 - kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd, 1546 - vcpu->arch.gprs[rt]); 1547 - vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; 1548 - } 1549 - break; 1550 - default: 1551 - kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", 1552 - vcpu->arch.pc, inst.c0r_format.rs); 1553 - er = EMULATE_FAIL; 1554 - break; 1555 - } 1556 - } 1557 - 1558 - done: 1559 - /* Rollback PC only if emulation was unsuccessful */ 1560 - if (er == EMULATE_FAIL) 1561 - vcpu->arch.pc = curr_pc; 1562 - 1563 - dont_update_pc: 1564 - /* 1565 - * This is for special instructions whose emulation 1566 - * updates the PC, so do not overwrite the PC under 1567 - * any circumstances 1568 - */ 1569 - 1570 - return er; 1571 992 } 1572 993 1573 994 enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, ··· 996 1623 goto out_fail; 997 1624 998 1625 switch (inst.i_format.opcode) { 999 - #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) 1626 + #if defined(CONFIG_64BIT) 1000 1627 case sd_op: 1001 1628 run->mmio.len = 8; 1002 1629 *(u64 *)data = vcpu->arch.gprs[rt]; ··· 1094 1721 vcpu->arch.gprs[rt], *(u32 *)data); 1095 1722 break; 1096 1723 
1097 - #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) 1724 + #if defined(CONFIG_64BIT) 1098 1725 case sdl_op: 1099 1726 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( 1100 1727 vcpu->arch.host_cp0_badvaddr) & (~0x7); ··· 1301 1928 1302 1929 vcpu->mmio_needed = 2; /* signed */ 1303 1930 switch (op) { 1304 - #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) 1931 + #if defined(CONFIG_64BIT) 1305 1932 case ld_op: 1306 1933 run->mmio.len = 8; 1307 1934 break; ··· 1376 2003 } 1377 2004 break; 1378 2005 1379 - #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) 2006 + #if defined(CONFIG_64BIT) 1380 2007 case ldl_op: 1381 2008 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( 1382 2009 vcpu->arch.host_cp0_badvaddr) & (~0x7); ··· 1506 2133 } 1507 2134 1508 2135 return EMULATE_DO_MMIO; 1509 - } 1510 - 1511 - #ifndef CONFIG_KVM_MIPS_VZ 1512 - static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long), 1513 - unsigned long curr_pc, 1514 - unsigned long addr, 1515 - struct kvm_vcpu *vcpu, 1516 - u32 cause) 1517 - { 1518 - int err; 1519 - 1520 - for (;;) { 1521 - /* Carefully attempt the cache operation */ 1522 - kvm_trap_emul_gva_lockless_begin(vcpu); 1523 - err = fn(addr); 1524 - kvm_trap_emul_gva_lockless_end(vcpu); 1525 - 1526 - if (likely(!err)) 1527 - return EMULATE_DONE; 1528 - 1529 - /* 1530 - * Try to handle the fault and retry, maybe we just raced with a 1531 - * GVA invalidation. 
1532 - */ 1533 - switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) { 1534 - case KVM_MIPS_GVA: 1535 - case KVM_MIPS_GPA: 1536 - /* bad virtual or physical address */ 1537 - return EMULATE_FAIL; 1538 - case KVM_MIPS_TLB: 1539 - /* no matching guest TLB */ 1540 - vcpu->arch.host_cp0_badvaddr = addr; 1541 - vcpu->arch.pc = curr_pc; 1542 - kvm_mips_emulate_tlbmiss_ld(cause, NULL, vcpu); 1543 - return EMULATE_EXCEPT; 1544 - case KVM_MIPS_TLBINV: 1545 - /* invalid matching guest TLB */ 1546 - vcpu->arch.host_cp0_badvaddr = addr; 1547 - vcpu->arch.pc = curr_pc; 1548 - kvm_mips_emulate_tlbinv_ld(cause, NULL, vcpu); 1549 - return EMULATE_EXCEPT; 1550 - default: 1551 - break; 1552 - } 1553 - } 1554 - } 1555 - 1556 - enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst, 1557 - u32 *opc, u32 cause, 1558 - struct kvm_vcpu *vcpu) 1559 - { 1560 - enum emulation_result er = EMULATE_DONE; 1561 - u32 cache, op_inst, op, base; 1562 - s16 offset; 1563 - struct kvm_vcpu_arch *arch = &vcpu->arch; 1564 - unsigned long va; 1565 - unsigned long curr_pc; 1566 - 1567 - /* 1568 - * Update PC and hold onto current PC in case there is 1569 - * an error and we want to rollback the PC 1570 - */ 1571 - curr_pc = vcpu->arch.pc; 1572 - er = update_pc(vcpu, cause); 1573 - if (er == EMULATE_FAIL) 1574 - return er; 1575 - 1576 - base = inst.i_format.rs; 1577 - op_inst = inst.i_format.rt; 1578 - if (cpu_has_mips_r6) 1579 - offset = inst.spec3_format.simmediate; 1580 - else 1581 - offset = inst.i_format.simmediate; 1582 - cache = op_inst & CacheOp_Cache; 1583 - op = op_inst & CacheOp_Op; 1584 - 1585 - va = arch->gprs[base] + offset; 1586 - 1587 - kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", 1588 - cache, op, base, arch->gprs[base], offset); 1589 - 1590 - /* 1591 - * Treat INDEX_INV as a nop, basically issued by Linux on startup to 1592 - * invalidate the caches entirely by stepping through all the 1593 - * ways/indexes 1594 - */ 1595 - if (op == 
Index_Writeback_Inv) { 1596 - kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", 1597 - vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, 1598 - arch->gprs[base], offset); 1599 - 1600 - if (cache == Cache_D) { 1601 - #ifdef CONFIG_CPU_R4K_CACHE_TLB 1602 - r4k_blast_dcache(); 1603 - #else 1604 - switch (boot_cpu_type()) { 1605 - case CPU_CAVIUM_OCTEON3: 1606 - /* locally flush icache */ 1607 - local_flush_icache_range(0, 0); 1608 - break; 1609 - default: 1610 - __flush_cache_all(); 1611 - break; 1612 - } 1613 - #endif 1614 - } else if (cache == Cache_I) { 1615 - #ifdef CONFIG_CPU_R4K_CACHE_TLB 1616 - r4k_blast_icache(); 1617 - #else 1618 - switch (boot_cpu_type()) { 1619 - case CPU_CAVIUM_OCTEON3: 1620 - /* locally flush icache */ 1621 - local_flush_icache_range(0, 0); 1622 - break; 1623 - default: 1624 - flush_icache_all(); 1625 - break; 1626 - } 1627 - #endif 1628 - } else { 1629 - kvm_err("%s: unsupported CACHE INDEX operation\n", 1630 - __func__); 1631 - return EMULATE_FAIL; 1632 - } 1633 - 1634 - #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1635 - kvm_mips_trans_cache_index(inst, opc, vcpu); 1636 - #endif 1637 - goto done; 1638 - } 1639 - 1640 - /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */ 1641 - if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) { 1642 - /* 1643 - * Perform the dcache part of icache synchronisation on the 1644 - * guest's behalf. 
1645 - */ 1646 - er = kvm_mips_guest_cache_op(protected_writeback_dcache_line, 1647 - curr_pc, va, vcpu, cause); 1648 - if (er != EMULATE_DONE) 1649 - goto done; 1650 - #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1651 - /* 1652 - * Replace the CACHE instruction, with a SYNCI, not the same, 1653 - * but avoids a trap 1654 - */ 1655 - kvm_mips_trans_cache_va(inst, opc, vcpu); 1656 - #endif 1657 - } else if (op_inst == Hit_Invalidate_I) { 1658 - /* Perform the icache synchronisation on the guest's behalf */ 1659 - er = kvm_mips_guest_cache_op(protected_writeback_dcache_line, 1660 - curr_pc, va, vcpu, cause); 1661 - if (er != EMULATE_DONE) 1662 - goto done; 1663 - er = kvm_mips_guest_cache_op(protected_flush_icache_line, 1664 - curr_pc, va, vcpu, cause); 1665 - if (er != EMULATE_DONE) 1666 - goto done; 1667 - 1668 - #ifdef CONFIG_KVM_MIPS_DYN_TRANS 1669 - /* Replace the CACHE instruction, with a SYNCI */ 1670 - kvm_mips_trans_cache_va(inst, opc, vcpu); 1671 - #endif 1672 - } else { 1673 - kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", 1674 - cache, op, base, arch->gprs[base], offset); 1675 - er = EMULATE_FAIL; 1676 - } 1677 - 1678 - done: 1679 - /* Rollback PC only if emulation was unsuccessful */ 1680 - if (er == EMULATE_FAIL) 1681 - vcpu->arch.pc = curr_pc; 1682 - /* Guest exception needs guest to resume */ 1683 - if (er == EMULATE_EXCEPT) 1684 - er = EMULATE_DONE; 1685 - 1686 - return er; 1687 - } 1688 - 1689 - enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc, 1690 - struct kvm_vcpu *vcpu) 1691 - { 1692 - union mips_instruction inst; 1693 - enum emulation_result er = EMULATE_DONE; 1694 - int err; 1695 - 1696 - /* Fetch the instruction. 
*/ 1697 - if (cause & CAUSEF_BD) 1698 - opc += 1; 1699 - err = kvm_get_badinstr(opc, vcpu, &inst.word); 1700 - if (err) 1701 - return EMULATE_FAIL; 1702 - 1703 - switch (inst.r_format.opcode) { 1704 - case cop0_op: 1705 - er = kvm_mips_emulate_CP0(inst, opc, cause, vcpu); 1706 - break; 1707 - 1708 - #ifndef CONFIG_CPU_MIPSR6 1709 - case cache_op: 1710 - ++vcpu->stat.cache_exits; 1711 - trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); 1712 - er = kvm_mips_emulate_cache(inst, opc, cause, vcpu); 1713 - break; 1714 - #else 1715 - case spec3_op: 1716 - switch (inst.spec3_format.func) { 1717 - case cache6_op: 1718 - ++vcpu->stat.cache_exits; 1719 - trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); 1720 - er = kvm_mips_emulate_cache(inst, opc, cause, 1721 - vcpu); 1722 - break; 1723 - default: 1724 - goto unknown; 1725 - } 1726 - break; 1727 - unknown: 1728 - #endif 1729 - 1730 - default: 1731 - kvm_err("Instruction emulation not supported (%p/%#x)\n", opc, 1732 - inst.word); 1733 - kvm_arch_vcpu_dump_regs(vcpu); 1734 - er = EMULATE_FAIL; 1735 - break; 1736 - } 1737 - 1738 - return er; 1739 - } 1740 - #endif /* CONFIG_KVM_MIPS_VZ */ 1741 - 1742 - /** 1743 - * kvm_mips_guest_exception_base() - Find guest exception vector base address. 1744 - * 1745 - * Returns: The base address of the current guest exception vector, taking 1746 - * both Guest.CP0_Status.BEV and Guest.CP0_EBase into account. 
1747 - */ 1748 - long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu) 1749 - { 1750 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1751 - 1752 - if (kvm_read_c0_guest_status(cop0) & ST0_BEV) 1753 - return KVM_GUEST_CKSEG1ADDR(0x1fc00200); 1754 - else 1755 - return kvm_read_c0_guest_ebase(cop0) & MIPS_EBASE_BASE; 1756 - } 1757 - 1758 - enum emulation_result kvm_mips_emulate_syscall(u32 cause, 1759 - u32 *opc, 1760 - struct kvm_vcpu *vcpu) 1761 - { 1762 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1763 - struct kvm_vcpu_arch *arch = &vcpu->arch; 1764 - enum emulation_result er = EMULATE_DONE; 1765 - 1766 - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1767 - /* save old pc */ 1768 - kvm_write_c0_guest_epc(cop0, arch->pc); 1769 - kvm_set_c0_guest_status(cop0, ST0_EXL); 1770 - 1771 - if (cause & CAUSEF_BD) 1772 - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 1773 - else 1774 - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 1775 - 1776 - kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc); 1777 - 1778 - kvm_change_c0_guest_cause(cop0, (0xff), 1779 - (EXCCODE_SYS << CAUSEB_EXCCODE)); 1780 - 1781 - /* Set PC to the exception entry point */ 1782 - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 1783 - 1784 - } else { 1785 - kvm_err("Trying to deliver SYSCALL when EXL is already set\n"); 1786 - er = EMULATE_FAIL; 1787 - } 1788 - 1789 - return er; 1790 - } 1791 - 1792 - enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause, 1793 - u32 *opc, 1794 - struct kvm_vcpu *vcpu) 1795 - { 1796 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1797 - struct kvm_vcpu_arch *arch = &vcpu->arch; 1798 - unsigned long entryhi = (vcpu->arch. 
host_cp0_badvaddr & VPN2_MASK) | 1799 - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); 1800 - 1801 - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1802 - /* save old pc */ 1803 - kvm_write_c0_guest_epc(cop0, arch->pc); 1804 - kvm_set_c0_guest_status(cop0, ST0_EXL); 1805 - 1806 - if (cause & CAUSEF_BD) 1807 - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 1808 - else 1809 - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 1810 - 1811 - kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n", 1812 - arch->pc); 1813 - 1814 - /* set pc to the exception entry point */ 1815 - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; 1816 - 1817 - } else { 1818 - kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", 1819 - arch->pc); 1820 - 1821 - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 1822 - } 1823 - 1824 - kvm_change_c0_guest_cause(cop0, (0xff), 1825 - (EXCCODE_TLBL << CAUSEB_EXCCODE)); 1826 - 1827 - /* setup badvaddr, context and entryhi registers for the guest */ 1828 - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 1829 - /* XXXKYMA: is the context register used by linux??? 
*/ 1830 - kvm_write_c0_guest_entryhi(cop0, entryhi); 1831 - 1832 - return EMULATE_DONE; 1833 - } 1834 - 1835 - enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause, 1836 - u32 *opc, 1837 - struct kvm_vcpu *vcpu) 1838 - { 1839 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1840 - struct kvm_vcpu_arch *arch = &vcpu->arch; 1841 - unsigned long entryhi = 1842 - (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1843 - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); 1844 - 1845 - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1846 - /* save old pc */ 1847 - kvm_write_c0_guest_epc(cop0, arch->pc); 1848 - kvm_set_c0_guest_status(cop0, ST0_EXL); 1849 - 1850 - if (cause & CAUSEF_BD) 1851 - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 1852 - else 1853 - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 1854 - 1855 - kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", 1856 - arch->pc); 1857 - } else { 1858 - kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", 1859 - arch->pc); 1860 - } 1861 - 1862 - /* set pc to the exception entry point */ 1863 - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 1864 - 1865 - kvm_change_c0_guest_cause(cop0, (0xff), 1866 - (EXCCODE_TLBL << CAUSEB_EXCCODE)); 1867 - 1868 - /* setup badvaddr, context and entryhi registers for the guest */ 1869 - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 1870 - /* XXXKYMA: is the context register used by linux??? 
*/ 1871 - kvm_write_c0_guest_entryhi(cop0, entryhi); 1872 - 1873 - return EMULATE_DONE; 1874 - } 1875 - 1876 - enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause, 1877 - u32 *opc, 1878 - struct kvm_vcpu *vcpu) 1879 - { 1880 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1881 - struct kvm_vcpu_arch *arch = &vcpu->arch; 1882 - unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1883 - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); 1884 - 1885 - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1886 - /* save old pc */ 1887 - kvm_write_c0_guest_epc(cop0, arch->pc); 1888 - kvm_set_c0_guest_status(cop0, ST0_EXL); 1889 - 1890 - if (cause & CAUSEF_BD) 1891 - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 1892 - else 1893 - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 1894 - 1895 - kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", 1896 - arch->pc); 1897 - 1898 - /* Set PC to the exception entry point */ 1899 - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; 1900 - } else { 1901 - kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", 1902 - arch->pc); 1903 - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 1904 - } 1905 - 1906 - kvm_change_c0_guest_cause(cop0, (0xff), 1907 - (EXCCODE_TLBS << CAUSEB_EXCCODE)); 1908 - 1909 - /* setup badvaddr, context and entryhi registers for the guest */ 1910 - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 1911 - /* XXXKYMA: is the context register used by linux??? 
*/ 1912 - kvm_write_c0_guest_entryhi(cop0, entryhi); 1913 - 1914 - return EMULATE_DONE; 1915 - } 1916 - 1917 - enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause, 1918 - u32 *opc, 1919 - struct kvm_vcpu *vcpu) 1920 - { 1921 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1922 - struct kvm_vcpu_arch *arch = &vcpu->arch; 1923 - unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1924 - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); 1925 - 1926 - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1927 - /* save old pc */ 1928 - kvm_write_c0_guest_epc(cop0, arch->pc); 1929 - kvm_set_c0_guest_status(cop0, ST0_EXL); 1930 - 1931 - if (cause & CAUSEF_BD) 1932 - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 1933 - else 1934 - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 1935 - 1936 - kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", 1937 - arch->pc); 1938 - } else { 1939 - kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", 1940 - arch->pc); 1941 - } 1942 - 1943 - /* Set PC to the exception entry point */ 1944 - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 1945 - 1946 - kvm_change_c0_guest_cause(cop0, (0xff), 1947 - (EXCCODE_TLBS << CAUSEB_EXCCODE)); 1948 - 1949 - /* setup badvaddr, context and entryhi registers for the guest */ 1950 - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 1951 - /* XXXKYMA: is the context register used by linux??? 
*/ 1952 - kvm_write_c0_guest_entryhi(cop0, entryhi); 1953 - 1954 - return EMULATE_DONE; 1955 - } 1956 - 1957 - enum emulation_result kvm_mips_emulate_tlbmod(u32 cause, 1958 - u32 *opc, 1959 - struct kvm_vcpu *vcpu) 1960 - { 1961 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1962 - unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1963 - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); 1964 - struct kvm_vcpu_arch *arch = &vcpu->arch; 1965 - 1966 - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1967 - /* save old pc */ 1968 - kvm_write_c0_guest_epc(cop0, arch->pc); 1969 - kvm_set_c0_guest_status(cop0, ST0_EXL); 1970 - 1971 - if (cause & CAUSEF_BD) 1972 - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 1973 - else 1974 - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 1975 - 1976 - kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", 1977 - arch->pc); 1978 - } else { 1979 - kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", 1980 - arch->pc); 1981 - } 1982 - 1983 - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 1984 - 1985 - kvm_change_c0_guest_cause(cop0, (0xff), 1986 - (EXCCODE_MOD << CAUSEB_EXCCODE)); 1987 - 1988 - /* setup badvaddr, context and entryhi registers for the guest */ 1989 - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 1990 - /* XXXKYMA: is the context register used by linux??? 
*/ 1991 - kvm_write_c0_guest_entryhi(cop0, entryhi); 1992 - 1993 - return EMULATE_DONE; 1994 - } 1995 - 1996 - enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause, 1997 - u32 *opc, 1998 - struct kvm_vcpu *vcpu) 1999 - { 2000 - struct mips_coproc *cop0 = vcpu->arch.cop0; 2001 - struct kvm_vcpu_arch *arch = &vcpu->arch; 2002 - 2003 - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2004 - /* save old pc */ 2005 - kvm_write_c0_guest_epc(cop0, arch->pc); 2006 - kvm_set_c0_guest_status(cop0, ST0_EXL); 2007 - 2008 - if (cause & CAUSEF_BD) 2009 - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2010 - else 2011 - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2012 - 2013 - } 2014 - 2015 - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2016 - 2017 - kvm_change_c0_guest_cause(cop0, (0xff), 2018 - (EXCCODE_CPU << CAUSEB_EXCCODE)); 2019 - kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); 2020 - 2021 - return EMULATE_DONE; 2022 - } 2023 - 2024 - enum emulation_result kvm_mips_emulate_ri_exc(u32 cause, 2025 - u32 *opc, 2026 - struct kvm_vcpu *vcpu) 2027 - { 2028 - struct mips_coproc *cop0 = vcpu->arch.cop0; 2029 - struct kvm_vcpu_arch *arch = &vcpu->arch; 2030 - enum emulation_result er = EMULATE_DONE; 2031 - 2032 - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2033 - /* save old pc */ 2034 - kvm_write_c0_guest_epc(cop0, arch->pc); 2035 - kvm_set_c0_guest_status(cop0, ST0_EXL); 2036 - 2037 - if (cause & CAUSEF_BD) 2038 - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2039 - else 2040 - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2041 - 2042 - kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); 2043 - 2044 - kvm_change_c0_guest_cause(cop0, (0xff), 2045 - (EXCCODE_RI << CAUSEB_EXCCODE)); 2046 - 2047 - /* Set PC to the exception entry point */ 2048 - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2049 - 2050 - } else { 2051 - kvm_err("Trying to deliver RI when EXL is already set\n"); 2052 - er = EMULATE_FAIL; 2053 - } 2054 - 2055 - return er; 2056 
- } 2057 - 2058 - enum emulation_result kvm_mips_emulate_bp_exc(u32 cause, 2059 - u32 *opc, 2060 - struct kvm_vcpu *vcpu) 2061 - { 2062 - struct mips_coproc *cop0 = vcpu->arch.cop0; 2063 - struct kvm_vcpu_arch *arch = &vcpu->arch; 2064 - enum emulation_result er = EMULATE_DONE; 2065 - 2066 - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2067 - /* save old pc */ 2068 - kvm_write_c0_guest_epc(cop0, arch->pc); 2069 - kvm_set_c0_guest_status(cop0, ST0_EXL); 2070 - 2071 - if (cause & CAUSEF_BD) 2072 - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2073 - else 2074 - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2075 - 2076 - kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); 2077 - 2078 - kvm_change_c0_guest_cause(cop0, (0xff), 2079 - (EXCCODE_BP << CAUSEB_EXCCODE)); 2080 - 2081 - /* Set PC to the exception entry point */ 2082 - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2083 - 2084 - } else { 2085 - kvm_err("Trying to deliver BP when EXL is already set\n"); 2086 - er = EMULATE_FAIL; 2087 - } 2088 - 2089 - return er; 2090 - } 2091 - 2092 - enum emulation_result kvm_mips_emulate_trap_exc(u32 cause, 2093 - u32 *opc, 2094 - struct kvm_vcpu *vcpu) 2095 - { 2096 - struct mips_coproc *cop0 = vcpu->arch.cop0; 2097 - struct kvm_vcpu_arch *arch = &vcpu->arch; 2098 - enum emulation_result er = EMULATE_DONE; 2099 - 2100 - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2101 - /* save old pc */ 2102 - kvm_write_c0_guest_epc(cop0, arch->pc); 2103 - kvm_set_c0_guest_status(cop0, ST0_EXL); 2104 - 2105 - if (cause & CAUSEF_BD) 2106 - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2107 - else 2108 - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2109 - 2110 - kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); 2111 - 2112 - kvm_change_c0_guest_cause(cop0, (0xff), 2113 - (EXCCODE_TR << CAUSEB_EXCCODE)); 2114 - 2115 - /* Set PC to the exception entry point */ 2116 - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2117 - 2118 - } else { 2119 - kvm_err("Trying to deliver 
TRAP when EXL is already set\n"); 2120 - er = EMULATE_FAIL; 2121 - } 2122 - 2123 - return er; 2124 - } 2125 - 2126 - enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause, 2127 - u32 *opc, 2128 - struct kvm_vcpu *vcpu) 2129 - { 2130 - struct mips_coproc *cop0 = vcpu->arch.cop0; 2131 - struct kvm_vcpu_arch *arch = &vcpu->arch; 2132 - enum emulation_result er = EMULATE_DONE; 2133 - 2134 - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2135 - /* save old pc */ 2136 - kvm_write_c0_guest_epc(cop0, arch->pc); 2137 - kvm_set_c0_guest_status(cop0, ST0_EXL); 2138 - 2139 - if (cause & CAUSEF_BD) 2140 - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2141 - else 2142 - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2143 - 2144 - kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); 2145 - 2146 - kvm_change_c0_guest_cause(cop0, (0xff), 2147 - (EXCCODE_MSAFPE << CAUSEB_EXCCODE)); 2148 - 2149 - /* Set PC to the exception entry point */ 2150 - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2151 - 2152 - } else { 2153 - kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); 2154 - er = EMULATE_FAIL; 2155 - } 2156 - 2157 - return er; 2158 - } 2159 - 2160 - enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause, 2161 - u32 *opc, 2162 - struct kvm_vcpu *vcpu) 2163 - { 2164 - struct mips_coproc *cop0 = vcpu->arch.cop0; 2165 - struct kvm_vcpu_arch *arch = &vcpu->arch; 2166 - enum emulation_result er = EMULATE_DONE; 2167 - 2168 - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2169 - /* save old pc */ 2170 - kvm_write_c0_guest_epc(cop0, arch->pc); 2171 - kvm_set_c0_guest_status(cop0, ST0_EXL); 2172 - 2173 - if (cause & CAUSEF_BD) 2174 - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2175 - else 2176 - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2177 - 2178 - kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); 2179 - 2180 - kvm_change_c0_guest_cause(cop0, (0xff), 2181 - (EXCCODE_FPE << CAUSEB_EXCCODE)); 2182 - 2183 - /* Set PC to the exception entry point */ 2184 - 
arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2185 - 2186 - } else { 2187 - kvm_err("Trying to deliver FPE when EXL is already set\n"); 2188 - er = EMULATE_FAIL; 2189 - } 2190 - 2191 - return er; 2192 - } 2193 - 2194 - enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause, 2195 - u32 *opc, 2196 - struct kvm_vcpu *vcpu) 2197 - { 2198 - struct mips_coproc *cop0 = vcpu->arch.cop0; 2199 - struct kvm_vcpu_arch *arch = &vcpu->arch; 2200 - enum emulation_result er = EMULATE_DONE; 2201 - 2202 - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 2203 - /* save old pc */ 2204 - kvm_write_c0_guest_epc(cop0, arch->pc); 2205 - kvm_set_c0_guest_status(cop0, ST0_EXL); 2206 - 2207 - if (cause & CAUSEF_BD) 2208 - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 2209 - else 2210 - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 2211 - 2212 - kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); 2213 - 2214 - kvm_change_c0_guest_cause(cop0, (0xff), 2215 - (EXCCODE_MSADIS << CAUSEB_EXCCODE)); 2216 - 2217 - /* Set PC to the exception entry point */ 2218 - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 2219 - 2220 - } else { 2221 - kvm_err("Trying to deliver MSADIS when EXL is already set\n"); 2222 - er = EMULATE_FAIL; 2223 - } 2224 - 2225 - return er; 2226 - } 2227 - 2228 - enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc, 2229 - struct kvm_vcpu *vcpu) 2230 - { 2231 - struct mips_coproc *cop0 = vcpu->arch.cop0; 2232 - struct kvm_vcpu_arch *arch = &vcpu->arch; 2233 - enum emulation_result er = EMULATE_DONE; 2234 - unsigned long curr_pc; 2235 - union mips_instruction inst; 2236 - int err; 2237 - 2238 - /* 2239 - * Update PC and hold onto current PC in case there is 2240 - * an error and we want to rollback the PC 2241 - */ 2242 - curr_pc = vcpu->arch.pc; 2243 - er = update_pc(vcpu, cause); 2244 - if (er == EMULATE_FAIL) 2245 - return er; 2246 - 2247 - /* Fetch the instruction. 
*/ 2248 - if (cause & CAUSEF_BD) 2249 - opc += 1; 2250 - err = kvm_get_badinstr(opc, vcpu, &inst.word); 2251 - if (err) { 2252 - kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err); 2253 - return EMULATE_FAIL; 2254 - } 2255 - 2256 - if (inst.r_format.opcode == spec3_op && 2257 - inst.r_format.func == rdhwr_op && 2258 - inst.r_format.rs == 0 && 2259 - (inst.r_format.re >> 3) == 0) { 2260 - int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); 2261 - int rd = inst.r_format.rd; 2262 - int rt = inst.r_format.rt; 2263 - int sel = inst.r_format.re & 0x7; 2264 - 2265 - /* If usermode, check RDHWR rd is allowed by guest HWREna */ 2266 - if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) { 2267 - kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n", 2268 - rd, opc); 2269 - goto emulate_ri; 2270 - } 2271 - switch (rd) { 2272 - case MIPS_HWR_CPUNUM: /* CPU number */ 2273 - arch->gprs[rt] = vcpu->vcpu_id; 2274 - break; 2275 - case MIPS_HWR_SYNCISTEP: /* SYNCI length */ 2276 - arch->gprs[rt] = min(current_cpu_data.dcache.linesz, 2277 - current_cpu_data.icache.linesz); 2278 - break; 2279 - case MIPS_HWR_CC: /* Read count register */ 2280 - arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu); 2281 - break; 2282 - case MIPS_HWR_CCRES: /* Count register resolution */ 2283 - switch (current_cpu_data.cputype) { 2284 - case CPU_20KC: 2285 - case CPU_25KF: 2286 - arch->gprs[rt] = 1; 2287 - break; 2288 - default: 2289 - arch->gprs[rt] = 2; 2290 - } 2291 - break; 2292 - case MIPS_HWR_ULR: /* Read UserLocal register */ 2293 - arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); 2294 - break; 2295 - 2296 - default: 2297 - kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc); 2298 - goto emulate_ri; 2299 - } 2300 - 2301 - trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel), 2302 - vcpu->arch.gprs[rt]); 2303 - } else { 2304 - kvm_debug("Emulate RI not supported @ %p: %#x\n", 2305 - opc, inst.word); 2306 - goto emulate_ri; 2307 - } 2308 - 2309 - return EMULATE_DONE; 2310 - 2311 - 
emulate_ri: 2312 - /* 2313 - * Rollback PC (if in branch delay slot then the PC already points to 2314 - * branch target), and pass the RI exception to the guest OS. 2315 - */ 2316 - vcpu->arch.pc = curr_pc; 2317 - return kvm_mips_emulate_ri_exc(cause, opc, vcpu); 2318 2136 } 2319 2137 2320 2138 enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu) ··· 1648 3084 } 1649 3085 1650 3086 done: 1651 - return er; 1652 - } 1653 - 1654 - static enum emulation_result kvm_mips_emulate_exc(u32 cause, 1655 - u32 *opc, 1656 - struct kvm_vcpu *vcpu) 1657 - { 1658 - u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; 1659 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1660 - struct kvm_vcpu_arch *arch = &vcpu->arch; 1661 - enum emulation_result er = EMULATE_DONE; 1662 - 1663 - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1664 - /* save old pc */ 1665 - kvm_write_c0_guest_epc(cop0, arch->pc); 1666 - kvm_set_c0_guest_status(cop0, ST0_EXL); 1667 - 1668 - if (cause & CAUSEF_BD) 1669 - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 1670 - else 1671 - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 1672 - 1673 - kvm_change_c0_guest_cause(cop0, (0xff), 1674 - (exccode << CAUSEB_EXCCODE)); 1675 - 1676 - /* Set PC to the exception entry point */ 1677 - arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; 1678 - kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); 1679 - 1680 - kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", 1681 - exccode, kvm_read_c0_guest_epc(cop0), 1682 - kvm_read_c0_guest_badvaddr(cop0)); 1683 - } else { 1684 - kvm_err("Trying to deliver EXC when EXL is already set\n"); 1685 - er = EMULATE_FAIL; 1686 - } 1687 - 1688 - return er; 1689 - } 1690 - 1691 - enum emulation_result kvm_mips_check_privilege(u32 cause, 1692 - u32 *opc, 1693 - struct kvm_vcpu *vcpu) 1694 - { 1695 - enum emulation_result er = EMULATE_DONE; 1696 - u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; 1697 - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; 1698 
- 1699 - int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); 1700 - 1701 - if (usermode) { 1702 - switch (exccode) { 1703 - case EXCCODE_INT: 1704 - case EXCCODE_SYS: 1705 - case EXCCODE_BP: 1706 - case EXCCODE_RI: 1707 - case EXCCODE_TR: 1708 - case EXCCODE_MSAFPE: 1709 - case EXCCODE_FPE: 1710 - case EXCCODE_MSADIS: 1711 - break; 1712 - 1713 - case EXCCODE_CPU: 1714 - if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0) 1715 - er = EMULATE_PRIV_FAIL; 1716 - break; 1717 - 1718 - case EXCCODE_MOD: 1719 - break; 1720 - 1721 - case EXCCODE_TLBL: 1722 - /* 1723 - * We we are accessing Guest kernel space, then send an 1724 - * address error exception to the guest 1725 - */ 1726 - if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { 1727 - kvm_debug("%s: LD MISS @ %#lx\n", __func__, 1728 - badvaddr); 1729 - cause &= ~0xff; 1730 - cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE); 1731 - er = EMULATE_PRIV_FAIL; 1732 - } 1733 - break; 1734 - 1735 - case EXCCODE_TLBS: 1736 - /* 1737 - * We we are accessing Guest kernel space, then send an 1738 - * address error exception to the guest 1739 - */ 1740 - if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { 1741 - kvm_debug("%s: ST MISS @ %#lx\n", __func__, 1742 - badvaddr); 1743 - cause &= ~0xff; 1744 - cause |= (EXCCODE_ADES << CAUSEB_EXCCODE); 1745 - er = EMULATE_PRIV_FAIL; 1746 - } 1747 - break; 1748 - 1749 - case EXCCODE_ADES: 1750 - kvm_debug("%s: address error ST @ %#lx\n", __func__, 1751 - badvaddr); 1752 - if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { 1753 - cause &= ~0xff; 1754 - cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE); 1755 - } 1756 - er = EMULATE_PRIV_FAIL; 1757 - break; 1758 - case EXCCODE_ADEL: 1759 - kvm_debug("%s: address error LD @ %#lx\n", __func__, 1760 - badvaddr); 1761 - if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { 1762 - cause &= ~0xff; 1763 - cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE); 1764 - } 1765 - er = EMULATE_PRIV_FAIL; 1766 - break; 1767 - default: 1768 - er = EMULATE_PRIV_FAIL; 1769 - break; 1770 - } 
1771 - } 1772 - 1773 - if (er == EMULATE_PRIV_FAIL) 1774 - kvm_mips_emulate_exc(cause, opc, vcpu); 1775 - 1776 - return er; 1777 - } 1778 - 1779 - /* 1780 - * User Address (UA) fault, this could happen if 1781 - * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this 1782 - * case we pass on the fault to the guest kernel and let it handle it. 1783 - * (2) TLB entry is present in the Guest TLB but not in the shadow, in this 1784 - * case we inject the TLB from the Guest TLB into the shadow host TLB 1785 - */ 1786 - enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, 1787 - u32 *opc, 1788 - struct kvm_vcpu *vcpu, 1789 - bool write_fault) 1790 - { 1791 - enum emulation_result er = EMULATE_DONE; 1792 - u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; 1793 - unsigned long va = vcpu->arch.host_cp0_badvaddr; 1794 - int index; 1795 - 1796 - kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n", 1797 - vcpu->arch.host_cp0_badvaddr); 1798 - 1799 - /* 1800 - * KVM would not have got the exception if this entry was valid in the 1801 - * shadow host TLB. Check the Guest TLB, if the entry is not there then 1802 - * send the guest an exception. The guest exc handler should then inject 1803 - * an entry into the guest TLB. 
1804 - */ 1805 - index = kvm_mips_guest_tlb_lookup(vcpu, 1806 - (va & VPN2_MASK) | 1807 - (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & 1808 - KVM_ENTRYHI_ASID)); 1809 - if (index < 0) { 1810 - if (exccode == EXCCODE_TLBL) { 1811 - er = kvm_mips_emulate_tlbmiss_ld(cause, opc, vcpu); 1812 - } else if (exccode == EXCCODE_TLBS) { 1813 - er = kvm_mips_emulate_tlbmiss_st(cause, opc, vcpu); 1814 - } else { 1815 - kvm_err("%s: invalid exc code: %d\n", __func__, 1816 - exccode); 1817 - er = EMULATE_FAIL; 1818 - } 1819 - } else { 1820 - struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; 1821 - 1822 - /* 1823 - * Check if the entry is valid, if not then setup a TLB invalid 1824 - * exception to the guest 1825 - */ 1826 - if (!TLB_IS_VALID(*tlb, va)) { 1827 - if (exccode == EXCCODE_TLBL) { 1828 - er = kvm_mips_emulate_tlbinv_ld(cause, opc, 1829 - vcpu); 1830 - } else if (exccode == EXCCODE_TLBS) { 1831 - er = kvm_mips_emulate_tlbinv_st(cause, opc, 1832 - vcpu); 1833 - } else { 1834 - kvm_err("%s: invalid exc code: %d\n", __func__, 1835 - exccode); 1836 - er = EMULATE_FAIL; 1837 - } 1838 - } else { 1839 - kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", 1840 - tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]); 1841 - /* 1842 - * OK we have a Guest TLB entry, now inject it into the 1843 - * shadow host TLB 1844 - */ 1845 - if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va, 1846 - write_fault)) { 1847 - kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", 1848 - __func__, va, index, vcpu, 1849 - read_c0_entryhi()); 1850 - er = EMULATE_FAIL; 1851 - } 1852 - } 1853 - } 1854 - 1855 3087 return er; 1856 3088 }
-33
arch/mips/kvm/entry.c
··· 305 305 UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1); 306 306 UASM_i_MTC0(&p, T0, C0_EPC); 307 307 308 - #ifdef CONFIG_KVM_MIPS_VZ 309 308 /* Save normal linux process pgd (VZ guarantees pgd_reg is set) */ 310 309 if (cpu_has_ldpte) 311 310 UASM_i_MFC0(&p, K0, C0_PWBASE); ··· 366 367 /* Set the root ASID for the Guest */ 367 368 UASM_i_ADDIU(&p, T1, S0, 368 369 offsetof(struct kvm, arch.gpa_mm.context.asid)); 369 - #else 370 - /* Set the ASID for the Guest Kernel or User */ 371 - UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1); 372 - UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]), 373 - T0); 374 - uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL); 375 - uasm_i_xori(&p, T0, T0, KSU_USER); 376 - uasm_il_bnez(&p, &r, T0, label_kernel_asid); 377 - UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch, 378 - guest_kernel_mm.context.asid)); 379 - /* else user */ 380 - UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch, 381 - guest_user_mm.context.asid)); 382 - uasm_l_kernel_asid(&l, p); 383 - #endif 384 370 385 371 /* t1: contains the base of the ASID array, need to get the cpu id */ 386 372 /* smp_processor_id */ ··· 390 406 uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID); 391 407 #endif 392 408 393 - #ifndef CONFIG_KVM_MIPS_VZ 394 - /* 395 - * Set up KVM T&E GVA pgd. 
396 - * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD(): 397 - * - call tlbmiss_handler_setup_pgd(mm->pgd) 398 - * - but skips write into CP0_PWBase for now 399 - */ 400 - UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) - 401 - (int)offsetof(struct mm_struct, context.asid), T1); 402 - 403 - UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd); 404 - uasm_i_jalr(&p, RA, T9); 405 - uasm_i_mtc0(&p, K0, C0_ENTRYHI); 406 - #else 407 409 /* Set up KVM VZ root ASID (!guestid) */ 408 410 uasm_i_mtc0(&p, K0, C0_ENTRYHI); 409 411 skip_asid_restore: 410 - #endif 411 412 uasm_i_ehb(&p); 412 413 413 414 /* Disable RDHWR access */ ··· 689 720 uasm_l_msa_1(&l, p); 690 721 } 691 722 692 - #ifdef CONFIG_KVM_MIPS_VZ 693 723 /* Restore host ASID */ 694 724 if (!cpu_has_guestid) { 695 725 UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi), ··· 732 764 MIPS_GCTL1_RID_WIDTH); 733 765 uasm_i_mtc0(&p, T0, C0_GUESTCTL1); 734 766 } 735 - #endif 736 767 737 768 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ 738 769 uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
+2 -121
arch/mips/kvm/interrupt.c
··· 21 21 22 22 #include "interrupt.h" 23 23 24 - void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority) 25 - { 26 - set_bit(priority, &vcpu->arch.pending_exceptions); 27 - } 28 - 29 - void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority) 30 - { 31 - clear_bit(priority, &vcpu->arch.pending_exceptions); 32 - } 33 - 34 - void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu) 35 - { 36 - /* 37 - * Cause bits to reflect the pending timer interrupt, 38 - * the EXC code will be set when we are actually 39 - * delivering the interrupt: 40 - */ 41 - kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); 42 - 43 - /* Queue up an INT exception for the core */ 44 - kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER); 45 - 46 - } 47 - 48 - void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu) 49 - { 50 - kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); 51 - kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); 52 - } 53 - 54 - void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, 55 - struct kvm_mips_interrupt *irq) 56 - { 57 - int intr = (int)irq->irq; 58 - 59 - /* 60 - * Cause bits to reflect the pending IO interrupt, 61 - * the EXC code will be set when we are actually 62 - * delivering the interrupt: 63 - */ 64 - kvm_set_c0_guest_cause(vcpu->arch.cop0, 1 << (intr + 8)); 65 - kvm_mips_queue_irq(vcpu, kvm_irq_to_priority(intr)); 66 - } 67 - 68 - void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, 69 - struct kvm_mips_interrupt *irq) 70 - { 71 - int intr = (int)irq->irq; 72 - 73 - kvm_clear_c0_guest_cause(vcpu->arch.cop0, 1 << (-intr + 8)); 74 - kvm_mips_dequeue_irq(vcpu, kvm_irq_to_priority(-intr)); 75 - } 76 - 77 - /* Deliver the interrupt of the corresponding priority, if possible. 
*/ 78 - int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, 79 - u32 cause) 80 - { 81 - int allowed = 0; 82 - u32 exccode, ie; 83 - 84 - struct kvm_vcpu_arch *arch = &vcpu->arch; 85 - struct mips_coproc *cop0 = vcpu->arch.cop0; 86 - 87 - if (priority == MIPS_EXC_MAX) 88 - return 0; 89 - 90 - ie = 1 << (kvm_priority_to_irq[priority] + 8); 91 - if ((kvm_read_c0_guest_status(cop0) & ST0_IE) 92 - && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) 93 - && (kvm_read_c0_guest_status(cop0) & ie)) { 94 - allowed = 1; 95 - exccode = EXCCODE_INT; 96 - } 97 - 98 - /* Are we allowed to deliver the interrupt ??? */ 99 - if (allowed) { 100 - if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 101 - /* save old pc */ 102 - kvm_write_c0_guest_epc(cop0, arch->pc); 103 - kvm_set_c0_guest_status(cop0, ST0_EXL); 104 - 105 - if (cause & CAUSEF_BD) 106 - kvm_set_c0_guest_cause(cop0, CAUSEF_BD); 107 - else 108 - kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); 109 - 110 - kvm_debug("Delivering INT @ pc %#lx\n", arch->pc); 111 - 112 - } else 113 - kvm_err("Trying to deliver interrupt when EXL is already set\n"); 114 - 115 - kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE, 116 - (exccode << CAUSEB_EXCCODE)); 117 - 118 - /* XXXSL Set PC to the interrupt exception entry point */ 119 - arch->pc = kvm_mips_guest_exception_base(vcpu); 120 - if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV) 121 - arch->pc += 0x200; 122 - else 123 - arch->pc += 0x180; 124 - 125 - clear_bit(priority, &vcpu->arch.pending_exceptions); 126 - } 127 - 128 - return allowed; 129 - } 130 - 131 - int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, 132 - u32 cause) 133 - { 134 - return 1; 135 - } 136 - 137 24 void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause) 138 25 { 139 26 unsigned long *pending = &vcpu->arch.pending_exceptions; ··· 32 145 33 146 priority = __ffs(*pending_clr); 34 147 while (priority <= MIPS_EXC_MAX) { 35 - if (kvm_mips_callbacks->irq_clear(vcpu, 
priority, cause)) { 36 - if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE) 37 - break; 38 - } 148 + kvm_mips_callbacks->irq_clear(vcpu, priority, cause); 39 149 40 150 priority = find_next_bit(pending_clr, 41 151 BITS_PER_BYTE * sizeof(*pending_clr), ··· 41 157 42 158 priority = __ffs(*pending); 43 159 while (priority <= MIPS_EXC_MAX) { 44 - if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) { 45 - if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE) 46 - break; 47 - } 160 + kvm_mips_callbacks->irq_deliver(vcpu, priority, cause); 48 161 49 162 priority = find_next_bit(pending, 50 163 BITS_PER_BYTE * sizeof(*pending),
-20
arch/mips/kvm/interrupt.h
··· 31 31 32 32 #define C_TI (_ULCAST_(1) << 30) 33 33 34 - #ifdef CONFIG_KVM_MIPS_VZ 35 - #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (1) 36 - #define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (1) 37 - #else 38 - #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) 39 - #define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0) 40 - #endif 41 - 42 34 extern u32 *kvm_priority_to_irq; 43 35 u32 kvm_irq_to_priority(u32 irq); 44 36 45 - void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority); 46 - void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority); 47 37 int kvm_mips_pending_timer(struct kvm_vcpu *vcpu); 48 38 49 - void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu); 50 - void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu); 51 - void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, 52 - struct kvm_mips_interrupt *irq); 53 - void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, 54 - struct kvm_mips_interrupt *irq); 55 - int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, 56 - u32 cause); 57 - int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, 58 - u32 cause); 59 39 void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause);
+2 -66
arch/mips/kvm/mips.c
··· 30 30 #include <linux/kvm_host.h> 31 31 32 32 #include "interrupt.h" 33 - #include "commpage.h" 34 33 35 34 #define CREATE_TRACE_POINTS 36 35 #include "trace.h" ··· 57 58 VCPU_STAT("fpe", fpe_exits), 58 59 VCPU_STAT("msa_disabled", msa_disabled_exits), 59 60 VCPU_STAT("flush_dcache", flush_dcache_exits), 60 - #ifdef CONFIG_KVM_MIPS_VZ 61 61 VCPU_STAT("vz_gpsi", vz_gpsi_exits), 62 62 VCPU_STAT("vz_gsfc", vz_gsfc_exits), 63 63 VCPU_STAT("vz_hc", vz_hc_exits), ··· 67 69 VCPU_STAT("vz_resvd", vz_resvd_exits), 68 70 #ifdef CONFIG_CPU_LOONGSON64 69 71 VCPU_STAT("vz_cpucfg", vz_cpucfg_exits), 70 - #endif 71 72 #endif 72 73 VCPU_STAT("halt_successful_poll", halt_successful_poll), 73 74 VCPU_STAT("halt_attempted_poll", halt_attempted_poll), ··· 136 139 switch (type) { 137 140 case KVM_VM_MIPS_AUTO: 138 141 break; 139 - #ifdef CONFIG_KVM_MIPS_VZ 140 142 case KVM_VM_MIPS_VZ: 141 - #else 142 - case KVM_VM_MIPS_TE: 143 - #endif 144 143 break; 145 144 default: 146 145 /* Unsupported KVM type */ ··· 354 361 355 362 /* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */ 356 363 refill_start = gebase; 357 - if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT)) 364 + if (IS_ENABLED(CONFIG_64BIT)) 358 365 refill_start += 0x080; 359 366 refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler); 360 367 ··· 390 397 flush_icache_range((unsigned long)gebase, 391 398 (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); 392 399 393 - /* 394 - * Allocate comm page for guest kernel, a TLB will be reserved for 395 - * mapping GVA @ 0xFFFF8000 to this page 396 - */ 397 - vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); 398 - 399 - if (!vcpu->arch.kseg0_commpage) { 400 - err = -ENOMEM; 401 - goto out_free_gebase; 402 - } 403 - 404 - kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); 405 - kvm_mips_commpage_init(vcpu); 406 - 407 400 /* Init */ 408 401 vcpu->arch.last_sched_cpu = -1; 409 402 vcpu->arch.last_exec_cpu = -1; ··· 397 418 /* 
Initial guest state */ 398 419 err = kvm_mips_callbacks->vcpu_setup(vcpu); 399 420 if (err) 400 - goto out_free_commpage; 421 + goto out_free_gebase; 401 422 402 423 return 0; 403 424 404 - out_free_commpage: 405 - kfree(vcpu->arch.kseg0_commpage); 406 425 out_free_gebase: 407 426 kfree(gebase); 408 427 out_uninit_vcpu: ··· 416 439 417 440 kvm_mmu_free_memory_caches(vcpu); 418 441 kfree(vcpu->arch.guest_ebase); 419 - kfree(vcpu->arch.kseg0_commpage); 420 442 421 443 kvm_mips_callbacks->vcpu_uninit(vcpu); 422 444 } ··· 1188 1212 1189 1213 vcpu->mode = OUTSIDE_GUEST_MODE; 1190 1214 1191 - /* re-enable HTW before enabling interrupts */ 1192 - if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) 1193 - htw_start(); 1194 - 1195 1215 /* Set a default exit reason */ 1196 1216 run->exit_reason = KVM_EXIT_UNKNOWN; 1197 1217 run->ready_for_interrupt_injection = 1; ··· 1203 1231 kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n", 1204 1232 cause, opc, run, vcpu); 1205 1233 trace_kvm_exit(vcpu, exccode); 1206 - 1207 - if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { 1208 - /* 1209 - * Do a privilege check, if in UM most of these exit conditions 1210 - * end up causing an exception to be delivered to the Guest 1211 - * Kernel 1212 - */ 1213 - er = kvm_mips_check_privilege(cause, opc, vcpu); 1214 - if (er == EMULATE_PRIV_FAIL) { 1215 - goto skip_emul; 1216 - } else if (er == EMULATE_FAIL) { 1217 - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1218 - ret = RESUME_HOST; 1219 - goto skip_emul; 1220 - } 1221 - } 1222 1234 1223 1235 switch (exccode) { 1224 1236 case EXCCODE_INT: ··· 1313 1357 1314 1358 } 1315 1359 1316 - skip_emul: 1317 1360 local_irq_disable(); 1318 1361 1319 1362 if (ret == RESUME_GUEST) ··· 1361 1406 read_c0_config5() & MIPS_CONF5_MSAEN) 1362 1407 __kvm_restore_msacsr(&vcpu->arch); 1363 1408 } 1364 - 1365 - /* Disable HTW before returning to guest or host */ 1366 - if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) 1367 - htw_stop(); 1368 - 1369 1409 return ret; 1370 1410 } 
1371 1411 ··· 1379 1429 * FR=0 FPU state, and we don't want to hit reserved instruction 1380 1430 * exceptions trying to save the MSA state later when CU=1 && FR=1, so 1381 1431 * play it safe and save it first. 1382 - * 1383 - * In theory we shouldn't ever hit this case since kvm_lose_fpu() should 1384 - * get called when guest CU1 is set, however we can't trust the guest 1385 - * not to clobber the status register directly via the commpage. 1386 1432 */ 1387 1433 if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) && 1388 1434 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) ··· 1499 1553 1500 1554 preempt_disable(); 1501 1555 if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { 1502 - if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { 1503 - set_c0_config5(MIPS_CONF5_MSAEN); 1504 - enable_fpu_hazard(); 1505 - } 1506 - 1507 1556 __kvm_save_msa(&vcpu->arch); 1508 1557 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA); 1509 1558 ··· 1510 1569 } 1511 1570 vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA); 1512 1571 } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { 1513 - if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { 1514 - set_c0_status(ST0_CU1); 1515 - enable_fpu_hazard(); 1516 - } 1517 - 1518 1572 __kvm_save_fpu(&vcpu->arch); 1519 1573 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; 1520 1574 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
-405
arch/mips/kvm/mmu.c
··· 756 756 return err; 757 757 } 758 758 759 - static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu, 760 - unsigned long addr) 761 - { 762 - struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; 763 - pgd_t *pgdp; 764 - int ret; 765 - 766 - /* We need a minimum of cached pages ready for page table creation */ 767 - ret = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES); 768 - if (ret) 769 - return NULL; 770 - 771 - if (KVM_GUEST_KERNEL_MODE(vcpu)) 772 - pgdp = vcpu->arch.guest_kernel_mm.pgd; 773 - else 774 - pgdp = vcpu->arch.guest_user_mm.pgd; 775 - 776 - return kvm_mips_walk_pgd(pgdp, memcache, addr); 777 - } 778 - 779 - void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr, 780 - bool user) 781 - { 782 - pgd_t *pgdp; 783 - pte_t *ptep; 784 - 785 - addr &= PAGE_MASK << 1; 786 - 787 - pgdp = vcpu->arch.guest_kernel_mm.pgd; 788 - ptep = kvm_mips_walk_pgd(pgdp, NULL, addr); 789 - if (ptep) { 790 - ptep[0] = pfn_pte(0, __pgprot(0)); 791 - ptep[1] = pfn_pte(0, __pgprot(0)); 792 - } 793 - 794 - if (user) { 795 - pgdp = vcpu->arch.guest_user_mm.pgd; 796 - ptep = kvm_mips_walk_pgd(pgdp, NULL, addr); 797 - if (ptep) { 798 - ptep[0] = pfn_pte(0, __pgprot(0)); 799 - ptep[1] = pfn_pte(0, __pgprot(0)); 800 - } 801 - } 802 - } 803 - 804 - /* 805 - * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}. 806 - * Flush a range of guest physical address space from the VM's GPA page tables. 807 - */ 808 - 809 - static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva, 810 - unsigned long end_gva) 811 - { 812 - int i_min = pte_index(start_gva); 813 - int i_max = pte_index(end_gva); 814 - bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1); 815 - int i; 816 - 817 - /* 818 - * There's no freeing to do, so there's no point clearing individual 819 - * entries unless only part of the last level page table needs flushing. 
820 - */ 821 - if (safe_to_remove) 822 - return true; 823 - 824 - for (i = i_min; i <= i_max; ++i) { 825 - if (!pte_present(pte[i])) 826 - continue; 827 - 828 - set_pte(pte + i, __pte(0)); 829 - } 830 - return false; 831 - } 832 - 833 - static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva, 834 - unsigned long end_gva) 835 - { 836 - pte_t *pte; 837 - unsigned long end = ~0ul; 838 - int i_min = pmd_index(start_gva); 839 - int i_max = pmd_index(end_gva); 840 - bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1); 841 - int i; 842 - 843 - for (i = i_min; i <= i_max; ++i, start_gva = 0) { 844 - if (!pmd_present(pmd[i])) 845 - continue; 846 - 847 - pte = pte_offset_kernel(pmd + i, 0); 848 - if (i == i_max) 849 - end = end_gva; 850 - 851 - if (kvm_mips_flush_gva_pte(pte, start_gva, end)) { 852 - pmd_clear(pmd + i); 853 - pte_free_kernel(NULL, pte); 854 - } else { 855 - safe_to_remove = false; 856 - } 857 - } 858 - return safe_to_remove; 859 - } 860 - 861 - static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva, 862 - unsigned long end_gva) 863 - { 864 - pmd_t *pmd; 865 - unsigned long end = ~0ul; 866 - int i_min = pud_index(start_gva); 867 - int i_max = pud_index(end_gva); 868 - bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1); 869 - int i; 870 - 871 - for (i = i_min; i <= i_max; ++i, start_gva = 0) { 872 - if (!pud_present(pud[i])) 873 - continue; 874 - 875 - pmd = pmd_offset(pud + i, 0); 876 - if (i == i_max) 877 - end = end_gva; 878 - 879 - if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) { 880 - pud_clear(pud + i); 881 - pmd_free(NULL, pmd); 882 - } else { 883 - safe_to_remove = false; 884 - } 885 - } 886 - return safe_to_remove; 887 - } 888 - 889 - static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva, 890 - unsigned long end_gva) 891 - { 892 - p4d_t *p4d; 893 - pud_t *pud; 894 - unsigned long end = ~0ul; 895 - int i_min = pgd_index(start_gva); 896 - int i_max = pgd_index(end_gva); 897 - 
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1); 898 - int i; 899 - 900 - for (i = i_min; i <= i_max; ++i, start_gva = 0) { 901 - if (!pgd_present(pgd[i])) 902 - continue; 903 - 904 - p4d = p4d_offset(pgd, 0); 905 - pud = pud_offset(p4d + i, 0); 906 - if (i == i_max) 907 - end = end_gva; 908 - 909 - if (kvm_mips_flush_gva_pud(pud, start_gva, end)) { 910 - pgd_clear(pgd + i); 911 - pud_free(NULL, pud); 912 - } else { 913 - safe_to_remove = false; 914 - } 915 - } 916 - return safe_to_remove; 917 - } 918 - 919 - void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags) 920 - { 921 - if (flags & KMF_GPA) { 922 - /* all of guest virtual address space could be affected */ 923 - if (flags & KMF_KERN) 924 - /* useg, kseg0, seg2/3 */ 925 - kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff); 926 - else 927 - /* useg */ 928 - kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff); 929 - } else { 930 - /* useg */ 931 - kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff); 932 - 933 - /* kseg2/3 */ 934 - if (flags & KMF_KERN) 935 - kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff); 936 - } 937 - } 938 - 939 - static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte) 940 - { 941 - /* 942 - * Don't leak writeable but clean entries from GPA page tables. We don't 943 - * want the normal Linux tlbmod handler to handle dirtying when KVM 944 - * accesses guest memory. 
945 - */ 946 - if (!pte_dirty(pte)) 947 - pte = pte_wrprotect(pte); 948 - 949 - return pte; 950 - } 951 - 952 - static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo) 953 - { 954 - /* Guest EntryLo overrides host EntryLo */ 955 - if (!(entrylo & ENTRYLO_D)) 956 - pte = pte_mkclean(pte); 957 - 958 - return kvm_mips_gpa_pte_to_gva_unmapped(pte); 959 - } 960 - 961 - #ifdef CONFIG_KVM_MIPS_VZ 962 759 int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr, 963 760 struct kvm_vcpu *vcpu, 964 761 bool write_fault) ··· 768 971 769 972 /* Invalidate this entry in the TLB */ 770 973 return kvm_vz_host_tlb_inv(vcpu, badvaddr); 771 - } 772 - #endif 773 - 774 - /* XXXKYMA: Must be called with interrupts disabled */ 775 - int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, 776 - struct kvm_vcpu *vcpu, 777 - bool write_fault) 778 - { 779 - unsigned long gpa; 780 - pte_t pte_gpa[2], *ptep_gva; 781 - int idx; 782 - 783 - if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { 784 - kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); 785 - kvm_mips_dump_host_tlbs(); 786 - return -1; 787 - } 788 - 789 - /* Get the GPA page table entry */ 790 - gpa = KVM_GUEST_CPHYSADDR(badvaddr); 791 - idx = (badvaddr >> PAGE_SHIFT) & 1; 792 - if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx], 793 - &pte_gpa[!idx]) < 0) 794 - return -1; 795 - 796 - /* Get the GVA page table entry */ 797 - ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE); 798 - if (!ptep_gva) { 799 - kvm_err("No ptep for gva %lx\n", badvaddr); 800 - return -1; 801 - } 802 - 803 - /* Copy a pair of entries from GPA page table to GVA page table */ 804 - ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]); 805 - ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]); 806 - 807 - /* Invalidate this entry in the TLB, guest kernel ASID only */ 808 - kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true); 809 - return 0; 810 - } 811 - 812 - int 
kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, 813 - struct kvm_mips_tlb *tlb, 814 - unsigned long gva, 815 - bool write_fault) 816 - { 817 - struct kvm *kvm = vcpu->kvm; 818 - long tlb_lo[2]; 819 - pte_t pte_gpa[2], *ptep_buddy, *ptep_gva; 820 - unsigned int idx = TLB_LO_IDX(*tlb, gva); 821 - bool kernel = KVM_GUEST_KERNEL_MODE(vcpu); 822 - 823 - tlb_lo[0] = tlb->tlb_lo[0]; 824 - tlb_lo[1] = tlb->tlb_lo[1]; 825 - 826 - /* 827 - * The commpage address must not be mapped to anything else if the guest 828 - * TLB contains entries nearby, or commpage accesses will break. 829 - */ 830 - if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1))) 831 - tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0; 832 - 833 - /* Get the GPA page table entry */ 834 - if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]), 835 - write_fault, &pte_gpa[idx], NULL) < 0) 836 - return -1; 837 - 838 - /* And its GVA buddy's GPA page table entry if it also exists */ 839 - pte_gpa[!idx] = pfn_pte(0, __pgprot(0)); 840 - if (tlb_lo[!idx] & ENTRYLO_V) { 841 - spin_lock(&kvm->mmu_lock); 842 - ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL, 843 - mips3_tlbpfn_to_paddr(tlb_lo[!idx])); 844 - if (ptep_buddy) 845 - pte_gpa[!idx] = *ptep_buddy; 846 - spin_unlock(&kvm->mmu_lock); 847 - } 848 - 849 - /* Get the GVA page table entry pair */ 850 - ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE); 851 - if (!ptep_gva) { 852 - kvm_err("No ptep for gva %lx\n", gva); 853 - return -1; 854 - } 855 - 856 - /* Copy a pair of entries from GPA page table to GVA page table */ 857 - ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]); 858 - ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]); 859 - 860 - /* Invalidate this entry in the TLB, current guest mode ASID only */ 861 - kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel); 862 - 863 - kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, 864 - tlb->tlb_lo[0], 
tlb->tlb_lo[1]); 865 - 866 - return 0; 867 - } 868 - 869 - int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, 870 - struct kvm_vcpu *vcpu) 871 - { 872 - kvm_pfn_t pfn; 873 - pte_t *ptep; 874 - pgprot_t prot; 875 - 876 - ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr); 877 - if (!ptep) { 878 - kvm_err("No ptep for commpage %lx\n", badvaddr); 879 - return -1; 880 - } 881 - 882 - pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage)); 883 - /* Also set valid and dirty, so refill handler doesn't have to */ 884 - prot = vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED); 885 - *ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, prot))); 886 - 887 - /* Invalidate this entry in the TLB, guest kernel ASID only */ 888 - kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true); 889 - return 0; 890 974 } 891 975 892 976 /** ··· 830 1152 kvm_mips_callbacks->vcpu_put(vcpu, cpu); 831 1153 832 1154 local_irq_restore(flags); 833 - } 834 - 835 - /** 836 - * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault. 837 - * @vcpu: Virtual CPU. 838 - * @gva: Guest virtual address to be accessed. 839 - * @write: True if write attempted (must be dirtied and made writable). 840 - * 841 - * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and 842 - * dirtying the page if @write so that guest instructions can be modified. 843 - * 844 - * Returns: KVM_MIPS_MAPPED on success. 845 - * KVM_MIPS_GVA if bad guest virtual address. 846 - * KVM_MIPS_GPA if bad guest physical address. 847 - * KVM_MIPS_TLB if guest TLB not present. 848 - * KVM_MIPS_TLBINV if guest TLB present but not valid. 849 - * KVM_MIPS_TLBMOD if guest TLB read only. 
850 - */ 851 - enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, 852 - unsigned long gva, 853 - bool write) 854 - { 855 - struct mips_coproc *cop0 = vcpu->arch.cop0; 856 - struct kvm_mips_tlb *tlb; 857 - int index; 858 - 859 - if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) { 860 - if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0) 861 - return KVM_MIPS_GPA; 862 - } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) || 863 - KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) { 864 - /* Address should be in the guest TLB */ 865 - index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) | 866 - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID)); 867 - if (index < 0) 868 - return KVM_MIPS_TLB; 869 - tlb = &vcpu->arch.guest_tlb[index]; 870 - 871 - /* Entry should be valid, and dirty for writes */ 872 - if (!TLB_IS_VALID(*tlb, gva)) 873 - return KVM_MIPS_TLBINV; 874 - if (write && !TLB_IS_DIRTY(*tlb, gva)) 875 - return KVM_MIPS_TLBMOD; 876 - 877 - if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write)) 878 - return KVM_MIPS_GPA; 879 - } else { 880 - return KVM_MIPS_GVA; 881 - } 882 - 883 - return KVM_MIPS_MAPPED; 884 - } 885 - 886 - int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) 887 - { 888 - int err; 889 - 890 - if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ), 891 - "Expect BadInstr/BadInstrP registers to be used with VZ\n")) 892 - return -EINVAL; 893 - 894 - retry: 895 - kvm_trap_emul_gva_lockless_begin(vcpu); 896 - err = get_user(*out, opc); 897 - kvm_trap_emul_gva_lockless_end(vcpu); 898 - 899 - if (unlikely(err)) { 900 - /* 901 - * Try to handle the fault, maybe we just raced with a GVA 902 - * invalidation. 903 - */ 904 - err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc, 905 - false); 906 - if (unlikely(err)) { 907 - kvm_err("%s: illegal address: %p\n", 908 - __func__, opc); 909 - return -EFAULT; 910 - } 911 - 912 - /* Hopefully it'll work now */ 913 - goto retry; 914 - } 915 - return 0; 916 1155 }
-174
arch/mips/kvm/tlb.c
··· 30 30 #include <asm/r4kcache.h> 31 31 #define CONFIG_MIPS_MT 32 32 33 - #define KVM_GUEST_PC_TLB 0 34 - #define KVM_GUEST_SP_TLB 1 35 - 36 - #ifdef CONFIG_KVM_MIPS_VZ 37 33 unsigned long GUESTID_MASK; 38 34 EXPORT_SYMBOL_GPL(GUESTID_MASK); 39 35 unsigned long GUESTID_FIRST_VERSION; ··· 46 50 else 47 51 return cpu_asid(smp_processor_id(), gpa_mm); 48 52 } 49 - #endif 50 - 51 - static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) 52 - { 53 - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; 54 - int cpu = smp_processor_id(); 55 - 56 - return cpu_asid(cpu, kern_mm); 57 - } 58 - 59 - static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) 60 - { 61 - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; 62 - int cpu = smp_processor_id(); 63 - 64 - return cpu_asid(cpu, user_mm); 65 - } 66 - 67 - /* Structure defining an tlb entry data set. */ 68 - 69 - void kvm_mips_dump_host_tlbs(void) 70 - { 71 - unsigned long flags; 72 - 73 - local_irq_save(flags); 74 - 75 - kvm_info("HOST TLBs:\n"); 76 - dump_tlb_regs(); 77 - pr_info("\n"); 78 - dump_tlb_all(); 79 - 80 - local_irq_restore(flags); 81 - } 82 - EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs); 83 - 84 - void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) 85 - { 86 - struct mips_coproc *cop0 = vcpu->arch.cop0; 87 - struct kvm_mips_tlb tlb; 88 - int i; 89 - 90 - kvm_info("Guest TLBs:\n"); 91 - kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0)); 92 - 93 - for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { 94 - tlb = vcpu->arch.guest_tlb[i]; 95 - kvm_info("TLB%c%3d Hi 0x%08lx ", 96 - (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V 97 - ? ' ' : '*', 98 - i, tlb.tlb_hi); 99 - kvm_info("Lo0=0x%09llx %c%c attr %lx ", 100 - (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]), 101 - (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ', 102 - (tlb.tlb_lo[0] & ENTRYLO_G) ? 
'G' : ' ', 103 - (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT); 104 - kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n", 105 - (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]), 106 - (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ', 107 - (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ', 108 - (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT, 109 - tlb.tlb_mask); 110 - } 111 - } 112 - EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs); 113 - 114 - int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) 115 - { 116 - int i; 117 - int index = -1; 118 - struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; 119 - 120 - for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { 121 - if (TLB_HI_VPN2_HIT(tlb[i], entryhi) && 122 - TLB_HI_ASID_HIT(tlb[i], entryhi)) { 123 - index = i; 124 - break; 125 - } 126 - } 127 - 128 - kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n", 129 - __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]); 130 - 131 - return index; 132 - } 133 - EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup); 134 53 135 54 static int _kvm_mips_host_tlb_inv(unsigned long entryhi) 136 55 { ··· 73 162 74 163 return idx; 75 164 } 76 - 77 - int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va, 78 - bool user, bool kernel) 79 - { 80 - /* 81 - * Initialize idx_user and idx_kernel to workaround bogus 82 - * maybe-initialized warning when using GCC 6. 83 - */ 84 - int idx_user = 0, idx_kernel = 0; 85 - unsigned long flags, old_entryhi; 86 - 87 - local_irq_save(flags); 88 - 89 - old_entryhi = read_c0_entryhi(); 90 - 91 - if (user) 92 - idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) | 93 - kvm_mips_get_user_asid(vcpu)); 94 - if (kernel) 95 - idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) | 96 - kvm_mips_get_kernel_asid(vcpu)); 97 - 98 - write_c0_entryhi(old_entryhi); 99 - mtc0_tlbw_hazard(); 100 - 101 - local_irq_restore(flags); 102 - 103 - /* 104 - * We don't want to get reserved instruction exceptions for missing tlb 105 - * entries. 
106 - */ 107 - if (cpu_has_vtag_icache) 108 - flush_icache_all(); 109 - 110 - if (user && idx_user >= 0) 111 - kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n", 112 - __func__, (va & VPN2_MASK) | 113 - kvm_mips_get_user_asid(vcpu), idx_user); 114 - if (kernel && idx_kernel >= 0) 115 - kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n", 116 - __func__, (va & VPN2_MASK) | 117 - kvm_mips_get_kernel_asid(vcpu), idx_kernel); 118 - 119 - return 0; 120 - } 121 - EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv); 122 - 123 - #ifdef CONFIG_KVM_MIPS_VZ 124 165 125 166 /* GuestID management */ 126 167 ··· 524 661 } 525 662 EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb); 526 663 #endif 527 - 528 - #endif 529 - 530 - /** 531 - * kvm_mips_suspend_mm() - Suspend the active mm. 532 - * @cpu The CPU we're running on. 533 - * 534 - * Suspend the active_mm, ready for a switch to a KVM guest virtual address 535 - * space. This is left active for the duration of guest context, including time 536 - * with interrupts enabled, so we need to be careful not to confuse e.g. cache 537 - * management IPIs. 538 - * 539 - * kvm_mips_resume_mm() should be called before context switching to a different 540 - * process so we don't need to worry about reference counting. 541 - * 542 - * This needs to be in static kernel code to avoid exporting init_mm. 543 - */ 544 - void kvm_mips_suspend_mm(int cpu) 545 - { 546 - cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm)); 547 - current->active_mm = &init_mm; 548 - } 549 - EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm); 550 - 551 - /** 552 - * kvm_mips_resume_mm() - Resume the current process mm. 553 - * @cpu The CPU we're running on. 554 - * 555 - * Resume the mm of the current process, after a switch back from a KVM guest 556 - * virtual address space (see kvm_mips_suspend_mm()). 
557 - */ 558 - void kvm_mips_resume_mm(int cpu) 559 - { 560 - cpumask_set_cpu(cpu, mm_cpumask(current->mm)); 561 - current->active_mm = current->mm; 562 - } 563 - EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);
-1306
arch/mips/kvm/trap_emul.c
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel 7 - * 8 - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 9 - * Authors: Sanjay Lal <sanjayl@kymasys.com> 10 - */ 11 - 12 - #include <linux/errno.h> 13 - #include <linux/err.h> 14 - #include <linux/kvm_host.h> 15 - #include <linux/log2.h> 16 - #include <linux/uaccess.h> 17 - #include <linux/vmalloc.h> 18 - #include <asm/mmu_context.h> 19 - #include <asm/pgalloc.h> 20 - 21 - #include "interrupt.h" 22 - 23 - static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) 24 - { 25 - gpa_t gpa; 26 - gva_t kseg = KSEGX(gva); 27 - gva_t gkseg = KVM_GUEST_KSEGX(gva); 28 - 29 - if ((kseg == CKSEG0) || (kseg == CKSEG1)) 30 - gpa = CPHYSADDR(gva); 31 - else if (gkseg == KVM_GUEST_KSEG0) 32 - gpa = KVM_GUEST_CPHYSADDR(gva); 33 - else { 34 - kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); 35 - kvm_mips_dump_host_tlbs(); 36 - gpa = KVM_INVALID_ADDR; 37 - } 38 - 39 - kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa); 40 - 41 - return gpa; 42 - } 43 - 44 - static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu) 45 - { 46 - u32 __user *opc = (u32 __user *) vcpu->arch.pc; 47 - u32 cause = vcpu->arch.host_cp0_cause; 48 - u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; 49 - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; 50 - u32 inst = 0; 51 - 52 - /* 53 - * Fetch the instruction. 
54 - */ 55 - if (cause & CAUSEF_BD) 56 - opc += 1; 57 - kvm_get_badinstr(opc, vcpu, &inst); 58 - 59 - kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n", 60 - exccode, opc, inst, badvaddr, 61 - kvm_read_c0_guest_status(vcpu->arch.cop0)); 62 - kvm_arch_vcpu_dump_regs(vcpu); 63 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 64 - return RESUME_HOST; 65 - } 66 - 67 - static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) 68 - { 69 - struct mips_coproc *cop0 = vcpu->arch.cop0; 70 - u32 __user *opc = (u32 __user *) vcpu->arch.pc; 71 - u32 cause = vcpu->arch.host_cp0_cause; 72 - enum emulation_result er = EMULATE_DONE; 73 - int ret = RESUME_GUEST; 74 - 75 - if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { 76 - /* FPU Unusable */ 77 - if (!kvm_mips_guest_has_fpu(&vcpu->arch) || 78 - (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) { 79 - /* 80 - * Unusable/no FPU in guest: 81 - * deliver guest COP1 Unusable Exception 82 - */ 83 - er = kvm_mips_emulate_fpu_exc(cause, opc, vcpu); 84 - } else { 85 - /* Restore FPU state */ 86 - kvm_own_fpu(vcpu); 87 - er = EMULATE_DONE; 88 - } 89 - } else { 90 - er = kvm_mips_emulate_inst(cause, opc, vcpu); 91 - } 92 - 93 - switch (er) { 94 - case EMULATE_DONE: 95 - ret = RESUME_GUEST; 96 - break; 97 - 98 - case EMULATE_FAIL: 99 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 100 - ret = RESUME_HOST; 101 - break; 102 - 103 - case EMULATE_WAIT: 104 - vcpu->run->exit_reason = KVM_EXIT_INTR; 105 - ret = RESUME_HOST; 106 - break; 107 - 108 - case EMULATE_HYPERCALL: 109 - ret = kvm_mips_handle_hypcall(vcpu); 110 - break; 111 - 112 - default: 113 - BUG(); 114 - } 115 - return ret; 116 - } 117 - 118 - static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_vcpu *vcpu) 119 - { 120 - enum emulation_result er; 121 - union mips_instruction inst; 122 - int err; 123 - 124 - /* A code fetch fault doesn't count as an MMIO */ 125 - if (kvm_is_ifetch_fault(&vcpu->arch)) { 126 - 
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 127 - return RESUME_HOST; 128 - } 129 - 130 - /* Fetch the instruction. */ 131 - if (cause & CAUSEF_BD) 132 - opc += 1; 133 - err = kvm_get_badinstr(opc, vcpu, &inst.word); 134 - if (err) { 135 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 136 - return RESUME_HOST; 137 - } 138 - 139 - /* Emulate the load */ 140 - er = kvm_mips_emulate_load(inst, cause, vcpu); 141 - if (er == EMULATE_FAIL) { 142 - kvm_err("Emulate load from MMIO space failed\n"); 143 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 144 - } else { 145 - vcpu->run->exit_reason = KVM_EXIT_MMIO; 146 - } 147 - return RESUME_HOST; 148 - } 149 - 150 - static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_vcpu *vcpu) 151 - { 152 - enum emulation_result er; 153 - union mips_instruction inst; 154 - int err; 155 - 156 - /* Fetch the instruction. */ 157 - if (cause & CAUSEF_BD) 158 - opc += 1; 159 - err = kvm_get_badinstr(opc, vcpu, &inst.word); 160 - if (err) { 161 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 162 - return RESUME_HOST; 163 - } 164 - 165 - /* Emulate the store */ 166 - er = kvm_mips_emulate_store(inst, cause, vcpu); 167 - if (er == EMULATE_FAIL) { 168 - kvm_err("Emulate store to MMIO space failed\n"); 169 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 170 - } else { 171 - vcpu->run->exit_reason = KVM_EXIT_MMIO; 172 - } 173 - return RESUME_HOST; 174 - } 175 - 176 - static int kvm_mips_bad_access(u32 cause, u32 *opc, 177 - struct kvm_vcpu *vcpu, bool store) 178 - { 179 - if (store) 180 - return kvm_mips_bad_store(cause, opc, vcpu); 181 - else 182 - return kvm_mips_bad_load(cause, opc, vcpu); 183 - } 184 - 185 - static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) 186 - { 187 - struct mips_coproc *cop0 = vcpu->arch.cop0; 188 - u32 __user *opc = (u32 __user *) vcpu->arch.pc; 189 - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; 190 - u32 cause = vcpu->arch.host_cp0_cause; 191 - struct kvm_mips_tlb *tlb; 
192 - unsigned long entryhi; 193 - int index; 194 - 195 - if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 196 - || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { 197 - /* 198 - * First find the mapping in the guest TLB. If the failure to 199 - * write was due to the guest TLB, it should be up to the guest 200 - * to handle it. 201 - */ 202 - entryhi = (badvaddr & VPN2_MASK) | 203 - (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); 204 - index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); 205 - 206 - /* 207 - * These should never happen. 208 - * They would indicate stale host TLB entries. 209 - */ 210 - if (unlikely(index < 0)) { 211 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 212 - return RESUME_HOST; 213 - } 214 - tlb = vcpu->arch.guest_tlb + index; 215 - if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) { 216 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 217 - return RESUME_HOST; 218 - } 219 - 220 - /* 221 - * Guest entry not dirty? That would explain the TLB modified 222 - * exception. Relay that on to the guest so it can handle it. 
223 - */ 224 - if (!TLB_IS_DIRTY(*tlb, badvaddr)) { 225 - kvm_mips_emulate_tlbmod(cause, opc, vcpu); 226 - return RESUME_GUEST; 227 - } 228 - 229 - if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr, 230 - true)) 231 - /* Not writable, needs handling as MMIO */ 232 - return kvm_mips_bad_store(cause, opc, vcpu); 233 - return RESUME_GUEST; 234 - } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { 235 - if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0) 236 - /* Not writable, needs handling as MMIO */ 237 - return kvm_mips_bad_store(cause, opc, vcpu); 238 - return RESUME_GUEST; 239 - } else { 240 - /* host kernel addresses are all handled as MMIO */ 241 - return kvm_mips_bad_store(cause, opc, vcpu); 242 - } 243 - } 244 - 245 - static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store) 246 - { 247 - struct kvm_run *run = vcpu->run; 248 - u32 __user *opc = (u32 __user *) vcpu->arch.pc; 249 - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; 250 - u32 cause = vcpu->arch.host_cp0_cause; 251 - enum emulation_result er = EMULATE_DONE; 252 - int ret = RESUME_GUEST; 253 - 254 - if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) 255 - && KVM_GUEST_KERNEL_MODE(vcpu)) { 256 - if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) { 257 - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 258 - ret = RESUME_HOST; 259 - } 260 - } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 261 - || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { 262 - kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n", 263 - store ? "ST" : "LD", cause, opc, badvaddr); 264 - 265 - /* 266 - * User Address (UA) fault, this could happen if 267 - * (1) TLB entry not present/valid in both Guest and shadow host 268 - * TLBs, in this case we pass on the fault to the guest 269 - * kernel and let it handle it. 
270 - * (2) TLB entry is present in the Guest TLB but not in the 271 - * shadow, in this case we inject the TLB from the Guest TLB 272 - * into the shadow host TLB 273 - */ 274 - 275 - er = kvm_mips_handle_tlbmiss(cause, opc, vcpu, store); 276 - if (er == EMULATE_DONE) 277 - ret = RESUME_GUEST; 278 - else { 279 - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 280 - ret = RESUME_HOST; 281 - } 282 - } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { 283 - /* 284 - * All KSEG0 faults are handled by KVM, as the guest kernel does 285 - * not expect to ever get them 286 - */ 287 - if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0) 288 - ret = kvm_mips_bad_access(cause, opc, vcpu, store); 289 - } else if (KVM_GUEST_KERNEL_MODE(vcpu) 290 - && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { 291 - /* 292 - * With EVA we may get a TLB exception instead of an address 293 - * error when the guest performs MMIO to KSeg1 addresses. 294 - */ 295 - ret = kvm_mips_bad_access(cause, opc, vcpu, store); 296 - } else { 297 - kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n", 298 - store ? 
"ST" : "LD", cause, opc, badvaddr); 299 - kvm_mips_dump_host_tlbs(); 300 - kvm_arch_vcpu_dump_regs(vcpu); 301 - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 302 - ret = RESUME_HOST; 303 - } 304 - return ret; 305 - } 306 - 307 - static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) 308 - { 309 - return kvm_trap_emul_handle_tlb_miss(vcpu, true); 310 - } 311 - 312 - static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) 313 - { 314 - return kvm_trap_emul_handle_tlb_miss(vcpu, false); 315 - } 316 - 317 - static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) 318 - { 319 - u32 __user *opc = (u32 __user *) vcpu->arch.pc; 320 - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; 321 - u32 cause = vcpu->arch.host_cp0_cause; 322 - int ret = RESUME_GUEST; 323 - 324 - if (KVM_GUEST_KERNEL_MODE(vcpu) 325 - && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { 326 - ret = kvm_mips_bad_store(cause, opc, vcpu); 327 - } else { 328 - kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n", 329 - cause, opc, badvaddr); 330 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 331 - ret = RESUME_HOST; 332 - } 333 - return ret; 334 - } 335 - 336 - static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) 337 - { 338 - u32 __user *opc = (u32 __user *) vcpu->arch.pc; 339 - unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; 340 - u32 cause = vcpu->arch.host_cp0_cause; 341 - int ret = RESUME_GUEST; 342 - 343 - if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) { 344 - ret = kvm_mips_bad_load(cause, opc, vcpu); 345 - } else { 346 - kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n", 347 - cause, opc, badvaddr); 348 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 349 - ret = RESUME_HOST; 350 - } 351 - return ret; 352 - } 353 - 354 - static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu) 355 - { 356 - u32 __user *opc = (u32 __user *) vcpu->arch.pc; 357 - u32 cause = 
vcpu->arch.host_cp0_cause; 358 - enum emulation_result er = EMULATE_DONE; 359 - int ret = RESUME_GUEST; 360 - 361 - er = kvm_mips_emulate_syscall(cause, opc, vcpu); 362 - if (er == EMULATE_DONE) 363 - ret = RESUME_GUEST; 364 - else { 365 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 366 - ret = RESUME_HOST; 367 - } 368 - return ret; 369 - } 370 - 371 - static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu) 372 - { 373 - u32 __user *opc = (u32 __user *) vcpu->arch.pc; 374 - u32 cause = vcpu->arch.host_cp0_cause; 375 - enum emulation_result er = EMULATE_DONE; 376 - int ret = RESUME_GUEST; 377 - 378 - er = kvm_mips_handle_ri(cause, opc, vcpu); 379 - if (er == EMULATE_DONE) 380 - ret = RESUME_GUEST; 381 - else { 382 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 383 - ret = RESUME_HOST; 384 - } 385 - return ret; 386 - } 387 - 388 - static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) 389 - { 390 - u32 __user *opc = (u32 __user *) vcpu->arch.pc; 391 - u32 cause = vcpu->arch.host_cp0_cause; 392 - enum emulation_result er = EMULATE_DONE; 393 - int ret = RESUME_GUEST; 394 - 395 - er = kvm_mips_emulate_bp_exc(cause, opc, vcpu); 396 - if (er == EMULATE_DONE) 397 - ret = RESUME_GUEST; 398 - else { 399 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 400 - ret = RESUME_HOST; 401 - } 402 - return ret; 403 - } 404 - 405 - static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu) 406 - { 407 - u32 __user *opc = (u32 __user *)vcpu->arch.pc; 408 - u32 cause = vcpu->arch.host_cp0_cause; 409 - enum emulation_result er = EMULATE_DONE; 410 - int ret = RESUME_GUEST; 411 - 412 - er = kvm_mips_emulate_trap_exc(cause, opc, vcpu); 413 - if (er == EMULATE_DONE) { 414 - ret = RESUME_GUEST; 415 - } else { 416 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 417 - ret = RESUME_HOST; 418 - } 419 - return ret; 420 - } 421 - 422 - static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu) 423 - { 424 - u32 __user *opc = (u32 __user *)vcpu->arch.pc; 425 - u32 
cause = vcpu->arch.host_cp0_cause; 426 - enum emulation_result er = EMULATE_DONE; 427 - int ret = RESUME_GUEST; 428 - 429 - er = kvm_mips_emulate_msafpe_exc(cause, opc, vcpu); 430 - if (er == EMULATE_DONE) { 431 - ret = RESUME_GUEST; 432 - } else { 433 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 434 - ret = RESUME_HOST; 435 - } 436 - return ret; 437 - } 438 - 439 - static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu) 440 - { 441 - u32 __user *opc = (u32 __user *)vcpu->arch.pc; 442 - u32 cause = vcpu->arch.host_cp0_cause; 443 - enum emulation_result er = EMULATE_DONE; 444 - int ret = RESUME_GUEST; 445 - 446 - er = kvm_mips_emulate_fpe_exc(cause, opc, vcpu); 447 - if (er == EMULATE_DONE) { 448 - ret = RESUME_GUEST; 449 - } else { 450 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 451 - ret = RESUME_HOST; 452 - } 453 - return ret; 454 - } 455 - 456 - /** 457 - * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root. 458 - * @vcpu: Virtual CPU context. 459 - * 460 - * Handle when the guest attempts to use MSA when it is disabled. 
461 - */ 462 - static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu) 463 - { 464 - struct mips_coproc *cop0 = vcpu->arch.cop0; 465 - u32 __user *opc = (u32 __user *) vcpu->arch.pc; 466 - u32 cause = vcpu->arch.host_cp0_cause; 467 - enum emulation_result er = EMULATE_DONE; 468 - int ret = RESUME_GUEST; 469 - 470 - if (!kvm_mips_guest_has_msa(&vcpu->arch) || 471 - (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) { 472 - /* 473 - * No MSA in guest, or FPU enabled and not in FR=1 mode, 474 - * guest reserved instruction exception 475 - */ 476 - er = kvm_mips_emulate_ri_exc(cause, opc, vcpu); 477 - } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) { 478 - /* MSA disabled by guest, guest MSA disabled exception */ 479 - er = kvm_mips_emulate_msadis_exc(cause, opc, vcpu); 480 - } else { 481 - /* Restore MSA/FPU state */ 482 - kvm_own_msa(vcpu); 483 - er = EMULATE_DONE; 484 - } 485 - 486 - switch (er) { 487 - case EMULATE_DONE: 488 - ret = RESUME_GUEST; 489 - break; 490 - 491 - case EMULATE_FAIL: 492 - vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 493 - ret = RESUME_HOST; 494 - break; 495 - 496 - default: 497 - BUG(); 498 - } 499 - return ret; 500 - } 501 - 502 - static int kvm_trap_emul_hardware_enable(void) 503 - { 504 - return 0; 505 - } 506 - 507 - static void kvm_trap_emul_hardware_disable(void) 508 - { 509 - } 510 - 511 - static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext) 512 - { 513 - int r; 514 - 515 - switch (ext) { 516 - case KVM_CAP_MIPS_TE: 517 - r = 1; 518 - break; 519 - case KVM_CAP_IOEVENTFD: 520 - r = 1; 521 - break; 522 - default: 523 - r = 0; 524 - break; 525 - } 526 - 527 - return r; 528 - } 529 - 530 - static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu) 531 - { 532 - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; 533 - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; 534 - 535 - /* 536 - * Allocate GVA -> HPA page tables. 
537 - * MIPS doesn't use the mm_struct pointer argument. 538 - */ 539 - kern_mm->pgd = pgd_alloc(kern_mm); 540 - if (!kern_mm->pgd) 541 - return -ENOMEM; 542 - 543 - user_mm->pgd = pgd_alloc(user_mm); 544 - if (!user_mm->pgd) { 545 - pgd_free(kern_mm, kern_mm->pgd); 546 - return -ENOMEM; 547 - } 548 - 549 - return 0; 550 - } 551 - 552 - static void kvm_mips_emul_free_gva_pt(pgd_t *pgd) 553 - { 554 - /* Don't free host kernel page tables copied from init_mm.pgd */ 555 - const unsigned long end = 0x80000000; 556 - unsigned long pgd_va, pud_va, pmd_va; 557 - p4d_t *p4d; 558 - pud_t *pud; 559 - pmd_t *pmd; 560 - pte_t *pte; 561 - int i, j, k; 562 - 563 - for (i = 0; i < USER_PTRS_PER_PGD; i++) { 564 - if (pgd_none(pgd[i])) 565 - continue; 566 - 567 - pgd_va = (unsigned long)i << PGDIR_SHIFT; 568 - if (pgd_va >= end) 569 - break; 570 - p4d = p4d_offset(pgd, 0); 571 - pud = pud_offset(p4d + i, 0); 572 - for (j = 0; j < PTRS_PER_PUD; j++) { 573 - if (pud_none(pud[j])) 574 - continue; 575 - 576 - pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT); 577 - if (pud_va >= end) 578 - break; 579 - pmd = pmd_offset(pud + j, 0); 580 - for (k = 0; k < PTRS_PER_PMD; k++) { 581 - if (pmd_none(pmd[k])) 582 - continue; 583 - 584 - pmd_va = pud_va | (k << PMD_SHIFT); 585 - if (pmd_va >= end) 586 - break; 587 - pte = pte_offset_kernel(pmd + k, 0); 588 - pte_free_kernel(NULL, pte); 589 - } 590 - pmd_free(NULL, pmd); 591 - } 592 - pud_free(NULL, pud); 593 - } 594 - pgd_free(NULL, pgd); 595 - } 596 - 597 - static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu) 598 - { 599 - kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd); 600 - kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd); 601 - } 602 - 603 - static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) 604 - { 605 - struct mips_coproc *cop0 = vcpu->arch.cop0; 606 - u32 config, config1; 607 - int vcpu_id = vcpu->vcpu_id; 608 - 609 - /* Start off the timer at 100 MHz */ 610 - kvm_mips_init_count(vcpu, 100*1000*1000); 
611 - 612 - /* 613 - * Arch specific stuff, set up config registers properly so that the 614 - * guest will come up as expected 615 - */ 616 - #ifndef CONFIG_CPU_MIPSR6 617 - /* r2-r5, simulate a MIPS 24kc */ 618 - kvm_write_c0_guest_prid(cop0, 0x00019300); 619 - #else 620 - /* r6+, simulate a generic QEMU machine */ 621 - kvm_write_c0_guest_prid(cop0, 0x00010000); 622 - #endif 623 - /* 624 - * Have config1, Cacheable, noncoherent, write-back, write allocate. 625 - * Endianness, arch revision & virtually tagged icache should match 626 - * host. 627 - */ 628 - config = read_c0_config() & MIPS_CONF_AR; 629 - config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB; 630 - #ifdef CONFIG_CPU_BIG_ENDIAN 631 - config |= CONF_BE; 632 - #endif 633 - if (cpu_has_vtag_icache) 634 - config |= MIPS_CONF_VI; 635 - kvm_write_c0_guest_config(cop0, config); 636 - 637 - /* Read the cache characteristics from the host Config1 Register */ 638 - config1 = (read_c0_config1() & ~0x7f); 639 - 640 - /* DCache line size not correctly reported in Config1 on Octeon CPUs */ 641 - if (cpu_dcache_line_size()) { 642 - config1 &= ~MIPS_CONF1_DL; 643 - config1 |= ((ilog2(cpu_dcache_line_size()) - 1) << 644 - MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL; 645 - } 646 - 647 - /* Set up MMU size */ 648 - config1 &= ~(0x3f << 25); 649 - config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25); 650 - 651 - /* We unset some bits that we aren't emulating */ 652 - config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC | 653 - MIPS_CONF1_WR | MIPS_CONF1_CA); 654 - kvm_write_c0_guest_config1(cop0, config1); 655 - 656 - /* Have config3, no tertiary/secondary caches implemented */ 657 - kvm_write_c0_guest_config2(cop0, MIPS_CONF_M); 658 - /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */ 659 - 660 - /* Have config4, UserLocal */ 661 - kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI); 662 - 663 - /* Have config5 */ 664 - kvm_write_c0_guest_config4(cop0, MIPS_CONF_M); 665 - 666 - /* No config6 */ 
667 - kvm_write_c0_guest_config5(cop0, 0); 668 - 669 - /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ 670 - kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); 671 - 672 - /* Status */ 673 - kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL); 674 - 675 - /* 676 - * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) 677 - */ 678 - kvm_write_c0_guest_intctl(cop0, 0xFC000000); 679 - 680 - /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ 681 - kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | 682 - (vcpu_id & MIPS_EBASE_CPUNUM)); 683 - 684 - /* Put PC at guest reset vector */ 685 - vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000); 686 - 687 - return 0; 688 - } 689 - 690 - static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm) 691 - { 692 - /* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */ 693 - kvm_flush_remote_tlbs(kvm); 694 - } 695 - 696 - static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm, 697 - const struct kvm_memory_slot *slot) 698 - { 699 - kvm_trap_emul_flush_shadow_all(kvm); 700 - } 701 - 702 - static u64 kvm_trap_emul_get_one_regs[] = { 703 - KVM_REG_MIPS_CP0_INDEX, 704 - KVM_REG_MIPS_CP0_ENTRYLO0, 705 - KVM_REG_MIPS_CP0_ENTRYLO1, 706 - KVM_REG_MIPS_CP0_CONTEXT, 707 - KVM_REG_MIPS_CP0_USERLOCAL, 708 - KVM_REG_MIPS_CP0_PAGEMASK, 709 - KVM_REG_MIPS_CP0_WIRED, 710 - KVM_REG_MIPS_CP0_HWRENA, 711 - KVM_REG_MIPS_CP0_BADVADDR, 712 - KVM_REG_MIPS_CP0_COUNT, 713 - KVM_REG_MIPS_CP0_ENTRYHI, 714 - KVM_REG_MIPS_CP0_COMPARE, 715 - KVM_REG_MIPS_CP0_STATUS, 716 - KVM_REG_MIPS_CP0_INTCTL, 717 - KVM_REG_MIPS_CP0_CAUSE, 718 - KVM_REG_MIPS_CP0_EPC, 719 - KVM_REG_MIPS_CP0_PRID, 720 - KVM_REG_MIPS_CP0_EBASE, 721 - KVM_REG_MIPS_CP0_CONFIG, 722 - KVM_REG_MIPS_CP0_CONFIG1, 723 - KVM_REG_MIPS_CP0_CONFIG2, 724 - KVM_REG_MIPS_CP0_CONFIG3, 725 - KVM_REG_MIPS_CP0_CONFIG4, 726 - KVM_REG_MIPS_CP0_CONFIG5, 727 - KVM_REG_MIPS_CP0_CONFIG7, 728 - KVM_REG_MIPS_CP0_ERROREPC, 729 - KVM_REG_MIPS_CP0_KSCRATCH1, 730 - 
KVM_REG_MIPS_CP0_KSCRATCH2, 731 - KVM_REG_MIPS_CP0_KSCRATCH3, 732 - KVM_REG_MIPS_CP0_KSCRATCH4, 733 - KVM_REG_MIPS_CP0_KSCRATCH5, 734 - KVM_REG_MIPS_CP0_KSCRATCH6, 735 - 736 - KVM_REG_MIPS_COUNT_CTL, 737 - KVM_REG_MIPS_COUNT_RESUME, 738 - KVM_REG_MIPS_COUNT_HZ, 739 - }; 740 - 741 - static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu) 742 - { 743 - return ARRAY_SIZE(kvm_trap_emul_get_one_regs); 744 - } 745 - 746 - static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu, 747 - u64 __user *indices) 748 - { 749 - if (copy_to_user(indices, kvm_trap_emul_get_one_regs, 750 - sizeof(kvm_trap_emul_get_one_regs))) 751 - return -EFAULT; 752 - indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs); 753 - 754 - return 0; 755 - } 756 - 757 - static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu, 758 - const struct kvm_one_reg *reg, 759 - s64 *v) 760 - { 761 - struct mips_coproc *cop0 = vcpu->arch.cop0; 762 - 763 - switch (reg->id) { 764 - case KVM_REG_MIPS_CP0_INDEX: 765 - *v = (long)kvm_read_c0_guest_index(cop0); 766 - break; 767 - case KVM_REG_MIPS_CP0_ENTRYLO0: 768 - *v = kvm_read_c0_guest_entrylo0(cop0); 769 - break; 770 - case KVM_REG_MIPS_CP0_ENTRYLO1: 771 - *v = kvm_read_c0_guest_entrylo1(cop0); 772 - break; 773 - case KVM_REG_MIPS_CP0_CONTEXT: 774 - *v = (long)kvm_read_c0_guest_context(cop0); 775 - break; 776 - case KVM_REG_MIPS_CP0_USERLOCAL: 777 - *v = (long)kvm_read_c0_guest_userlocal(cop0); 778 - break; 779 - case KVM_REG_MIPS_CP0_PAGEMASK: 780 - *v = (long)kvm_read_c0_guest_pagemask(cop0); 781 - break; 782 - case KVM_REG_MIPS_CP0_WIRED: 783 - *v = (long)kvm_read_c0_guest_wired(cop0); 784 - break; 785 - case KVM_REG_MIPS_CP0_HWRENA: 786 - *v = (long)kvm_read_c0_guest_hwrena(cop0); 787 - break; 788 - case KVM_REG_MIPS_CP0_BADVADDR: 789 - *v = (long)kvm_read_c0_guest_badvaddr(cop0); 790 - break; 791 - case KVM_REG_MIPS_CP0_ENTRYHI: 792 - *v = (long)kvm_read_c0_guest_entryhi(cop0); 793 - break; 794 - case KVM_REG_MIPS_CP0_COMPARE: 795 - *v = 
(long)kvm_read_c0_guest_compare(cop0); 796 - break; 797 - case KVM_REG_MIPS_CP0_STATUS: 798 - *v = (long)kvm_read_c0_guest_status(cop0); 799 - break; 800 - case KVM_REG_MIPS_CP0_INTCTL: 801 - *v = (long)kvm_read_c0_guest_intctl(cop0); 802 - break; 803 - case KVM_REG_MIPS_CP0_CAUSE: 804 - *v = (long)kvm_read_c0_guest_cause(cop0); 805 - break; 806 - case KVM_REG_MIPS_CP0_EPC: 807 - *v = (long)kvm_read_c0_guest_epc(cop0); 808 - break; 809 - case KVM_REG_MIPS_CP0_PRID: 810 - *v = (long)kvm_read_c0_guest_prid(cop0); 811 - break; 812 - case KVM_REG_MIPS_CP0_EBASE: 813 - *v = (long)kvm_read_c0_guest_ebase(cop0); 814 - break; 815 - case KVM_REG_MIPS_CP0_CONFIG: 816 - *v = (long)kvm_read_c0_guest_config(cop0); 817 - break; 818 - case KVM_REG_MIPS_CP0_CONFIG1: 819 - *v = (long)kvm_read_c0_guest_config1(cop0); 820 - break; 821 - case KVM_REG_MIPS_CP0_CONFIG2: 822 - *v = (long)kvm_read_c0_guest_config2(cop0); 823 - break; 824 - case KVM_REG_MIPS_CP0_CONFIG3: 825 - *v = (long)kvm_read_c0_guest_config3(cop0); 826 - break; 827 - case KVM_REG_MIPS_CP0_CONFIG4: 828 - *v = (long)kvm_read_c0_guest_config4(cop0); 829 - break; 830 - case KVM_REG_MIPS_CP0_CONFIG5: 831 - *v = (long)kvm_read_c0_guest_config5(cop0); 832 - break; 833 - case KVM_REG_MIPS_CP0_CONFIG7: 834 - *v = (long)kvm_read_c0_guest_config7(cop0); 835 - break; 836 - case KVM_REG_MIPS_CP0_COUNT: 837 - *v = kvm_mips_read_count(vcpu); 838 - break; 839 - case KVM_REG_MIPS_COUNT_CTL: 840 - *v = vcpu->arch.count_ctl; 841 - break; 842 - case KVM_REG_MIPS_COUNT_RESUME: 843 - *v = ktime_to_ns(vcpu->arch.count_resume); 844 - break; 845 - case KVM_REG_MIPS_COUNT_HZ: 846 - *v = vcpu->arch.count_hz; 847 - break; 848 - case KVM_REG_MIPS_CP0_ERROREPC: 849 - *v = (long)kvm_read_c0_guest_errorepc(cop0); 850 - break; 851 - case KVM_REG_MIPS_CP0_KSCRATCH1: 852 - *v = (long)kvm_read_c0_guest_kscratch1(cop0); 853 - break; 854 - case KVM_REG_MIPS_CP0_KSCRATCH2: 855 - *v = (long)kvm_read_c0_guest_kscratch2(cop0); 856 - break; 857 - case 
KVM_REG_MIPS_CP0_KSCRATCH3: 858 - *v = (long)kvm_read_c0_guest_kscratch3(cop0); 859 - break; 860 - case KVM_REG_MIPS_CP0_KSCRATCH4: 861 - *v = (long)kvm_read_c0_guest_kscratch4(cop0); 862 - break; 863 - case KVM_REG_MIPS_CP0_KSCRATCH5: 864 - *v = (long)kvm_read_c0_guest_kscratch5(cop0); 865 - break; 866 - case KVM_REG_MIPS_CP0_KSCRATCH6: 867 - *v = (long)kvm_read_c0_guest_kscratch6(cop0); 868 - break; 869 - default: 870 - return -EINVAL; 871 - } 872 - return 0; 873 - } 874 - 875 - static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, 876 - const struct kvm_one_reg *reg, 877 - s64 v) 878 - { 879 - struct mips_coproc *cop0 = vcpu->arch.cop0; 880 - int ret = 0; 881 - unsigned int cur, change; 882 - 883 - switch (reg->id) { 884 - case KVM_REG_MIPS_CP0_INDEX: 885 - kvm_write_c0_guest_index(cop0, v); 886 - break; 887 - case KVM_REG_MIPS_CP0_ENTRYLO0: 888 - kvm_write_c0_guest_entrylo0(cop0, v); 889 - break; 890 - case KVM_REG_MIPS_CP0_ENTRYLO1: 891 - kvm_write_c0_guest_entrylo1(cop0, v); 892 - break; 893 - case KVM_REG_MIPS_CP0_CONTEXT: 894 - kvm_write_c0_guest_context(cop0, v); 895 - break; 896 - case KVM_REG_MIPS_CP0_USERLOCAL: 897 - kvm_write_c0_guest_userlocal(cop0, v); 898 - break; 899 - case KVM_REG_MIPS_CP0_PAGEMASK: 900 - kvm_write_c0_guest_pagemask(cop0, v); 901 - break; 902 - case KVM_REG_MIPS_CP0_WIRED: 903 - kvm_write_c0_guest_wired(cop0, v); 904 - break; 905 - case KVM_REG_MIPS_CP0_HWRENA: 906 - kvm_write_c0_guest_hwrena(cop0, v); 907 - break; 908 - case KVM_REG_MIPS_CP0_BADVADDR: 909 - kvm_write_c0_guest_badvaddr(cop0, v); 910 - break; 911 - case KVM_REG_MIPS_CP0_ENTRYHI: 912 - kvm_write_c0_guest_entryhi(cop0, v); 913 - break; 914 - case KVM_REG_MIPS_CP0_STATUS: 915 - kvm_write_c0_guest_status(cop0, v); 916 - break; 917 - case KVM_REG_MIPS_CP0_INTCTL: 918 - /* No VInt, so no VS, read-only for now */ 919 - break; 920 - case KVM_REG_MIPS_CP0_EPC: 921 - kvm_write_c0_guest_epc(cop0, v); 922 - break; 923 - case KVM_REG_MIPS_CP0_PRID: 924 - 
kvm_write_c0_guest_prid(cop0, v); 925 - break; 926 - case KVM_REG_MIPS_CP0_EBASE: 927 - /* 928 - * Allow core number to be written, but the exception base must 929 - * remain in guest KSeg0. 930 - */ 931 - kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM, 932 - v); 933 - break; 934 - case KVM_REG_MIPS_CP0_COUNT: 935 - kvm_mips_write_count(vcpu, v); 936 - break; 937 - case KVM_REG_MIPS_CP0_COMPARE: 938 - kvm_mips_write_compare(vcpu, v, false); 939 - break; 940 - case KVM_REG_MIPS_CP0_CAUSE: 941 - /* 942 - * If the timer is stopped or started (DC bit) it must look 943 - * atomic with changes to the interrupt pending bits (TI, IRQ5). 944 - * A timer interrupt should not happen in between. 945 - */ 946 - if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) { 947 - if (v & CAUSEF_DC) { 948 - /* disable timer first */ 949 - kvm_mips_count_disable_cause(vcpu); 950 - kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC, 951 - v); 952 - } else { 953 - /* enable timer last */ 954 - kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC, 955 - v); 956 - kvm_mips_count_enable_cause(vcpu); 957 - } 958 - } else { 959 - kvm_write_c0_guest_cause(cop0, v); 960 - } 961 - break; 962 - case KVM_REG_MIPS_CP0_CONFIG: 963 - /* read-only for now */ 964 - break; 965 - case KVM_REG_MIPS_CP0_CONFIG1: 966 - cur = kvm_read_c0_guest_config1(cop0); 967 - change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu); 968 - if (change) { 969 - v = cur ^ change; 970 - kvm_write_c0_guest_config1(cop0, v); 971 - } 972 - break; 973 - case KVM_REG_MIPS_CP0_CONFIG2: 974 - /* read-only for now */ 975 - break; 976 - case KVM_REG_MIPS_CP0_CONFIG3: 977 - cur = kvm_read_c0_guest_config3(cop0); 978 - change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu); 979 - if (change) { 980 - v = cur ^ change; 981 - kvm_write_c0_guest_config3(cop0, v); 982 - } 983 - break; 984 - case KVM_REG_MIPS_CP0_CONFIG4: 985 - cur = kvm_read_c0_guest_config4(cop0); 986 - change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu); 987 - if (change) { 988 
- v = cur ^ change; 989 - kvm_write_c0_guest_config4(cop0, v); 990 - } 991 - break; 992 - case KVM_REG_MIPS_CP0_CONFIG5: 993 - cur = kvm_read_c0_guest_config5(cop0); 994 - change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu); 995 - if (change) { 996 - v = cur ^ change; 997 - kvm_write_c0_guest_config5(cop0, v); 998 - } 999 - break; 1000 - case KVM_REG_MIPS_CP0_CONFIG7: 1001 - /* writes ignored */ 1002 - break; 1003 - case KVM_REG_MIPS_COUNT_CTL: 1004 - ret = kvm_mips_set_count_ctl(vcpu, v); 1005 - break; 1006 - case KVM_REG_MIPS_COUNT_RESUME: 1007 - ret = kvm_mips_set_count_resume(vcpu, v); 1008 - break; 1009 - case KVM_REG_MIPS_COUNT_HZ: 1010 - ret = kvm_mips_set_count_hz(vcpu, v); 1011 - break; 1012 - case KVM_REG_MIPS_CP0_ERROREPC: 1013 - kvm_write_c0_guest_errorepc(cop0, v); 1014 - break; 1015 - case KVM_REG_MIPS_CP0_KSCRATCH1: 1016 - kvm_write_c0_guest_kscratch1(cop0, v); 1017 - break; 1018 - case KVM_REG_MIPS_CP0_KSCRATCH2: 1019 - kvm_write_c0_guest_kscratch2(cop0, v); 1020 - break; 1021 - case KVM_REG_MIPS_CP0_KSCRATCH3: 1022 - kvm_write_c0_guest_kscratch3(cop0, v); 1023 - break; 1024 - case KVM_REG_MIPS_CP0_KSCRATCH4: 1025 - kvm_write_c0_guest_kscratch4(cop0, v); 1026 - break; 1027 - case KVM_REG_MIPS_CP0_KSCRATCH5: 1028 - kvm_write_c0_guest_kscratch5(cop0, v); 1029 - break; 1030 - case KVM_REG_MIPS_CP0_KSCRATCH6: 1031 - kvm_write_c0_guest_kscratch6(cop0, v); 1032 - break; 1033 - default: 1034 - return -EINVAL; 1035 - } 1036 - return ret; 1037 - } 1038 - 1039 - static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 1040 - { 1041 - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; 1042 - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; 1043 - struct mm_struct *mm; 1044 - 1045 - /* 1046 - * Were we in guest context? If so, restore the appropriate ASID based 1047 - * on the mode of the Guest (Kernel/User). 1048 - */ 1049 - if (current->flags & PF_VCPU) { 1050 - mm = KVM_GUEST_KERNEL_MODE(vcpu) ? 
kern_mm : user_mm; 1051 - check_switch_mmu_context(mm); 1052 - kvm_mips_suspend_mm(cpu); 1053 - ehb(); 1054 - } 1055 - 1056 - return 0; 1057 - } 1058 - 1059 - static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu) 1060 - { 1061 - kvm_lose_fpu(vcpu); 1062 - 1063 - if (current->flags & PF_VCPU) { 1064 - /* Restore normal Linux process memory map */ 1065 - check_switch_mmu_context(current->mm); 1066 - kvm_mips_resume_mm(cpu); 1067 - ehb(); 1068 - } 1069 - 1070 - return 0; 1071 - } 1072 - 1073 - static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu, 1074 - bool reload_asid) 1075 - { 1076 - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; 1077 - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; 1078 - struct mm_struct *mm; 1079 - int i; 1080 - 1081 - if (likely(!kvm_request_pending(vcpu))) 1082 - return; 1083 - 1084 - if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { 1085 - /* 1086 - * Both kernel & user GVA mappings must be invalidated. The 1087 - * caller is just about to check whether the ASID is stale 1088 - * anyway so no need to reload it here. 1089 - */ 1090 - kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN); 1091 - kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER); 1092 - for_each_possible_cpu(i) { 1093 - set_cpu_context(i, kern_mm, 0); 1094 - set_cpu_context(i, user_mm, 0); 1095 - } 1096 - 1097 - /* Generate new ASID for current mode */ 1098 - if (reload_asid) { 1099 - mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm; 1100 - get_new_mmu_context(mm); 1101 - htw_stop(); 1102 - write_c0_entryhi(cpu_asid(cpu, mm)); 1103 - TLBMISS_HANDLER_SETUP_PGD(mm->pgd); 1104 - htw_start(); 1105 - } 1106 - } 1107 - } 1108 - 1109 - /** 1110 - * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space. 1111 - * @vcpu: VCPU pointer. 
1112 - * 1113 - * Call before a GVA space access outside of guest mode, to ensure that 1114 - * asynchronous TLB flush requests are handled or delayed until completion of 1115 - * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()). 1116 - * 1117 - * Should be called with IRQs already enabled. 1118 - */ 1119 - void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu) 1120 - { 1121 - /* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */ 1122 - WARN_ON_ONCE(irqs_disabled()); 1123 - 1124 - /* 1125 - * The caller is about to access the GVA space, so we set the mode to 1126 - * force TLB flush requests to send an IPI, and also disable IRQs to 1127 - * delay IPI handling until kvm_trap_emul_gva_lockless_end(). 1128 - */ 1129 - local_irq_disable(); 1130 - 1131 - /* 1132 - * Make sure the read of VCPU requests is not reordered ahead of the 1133 - * write to vcpu->mode, or we could miss a TLB flush request while 1134 - * the requester sees the VCPU as outside of guest mode and not needing 1135 - * an IPI. 1136 - */ 1137 - smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES); 1138 - 1139 - /* 1140 - * If a TLB flush has been requested (potentially while 1141 - * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it 1142 - * before accessing the GVA space, and be sure to reload the ASID if 1143 - * necessary as it'll be immediately used. 1144 - * 1145 - * TLB flush requests after this check will trigger an IPI due to the 1146 - * mode change above, which will be delayed due to IRQs disabled. 1147 - */ 1148 - kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true); 1149 - } 1150 - 1151 - /** 1152 - * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space. 1153 - * @vcpu: VCPU pointer. 1154 - * 1155 - * Called after a GVA space access outside of guest mode. Should have a matching 1156 - * call to kvm_trap_emul_gva_lockless_begin(). 
1157 - */ 1158 - void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu) 1159 - { 1160 - /* 1161 - * Make sure the write to vcpu->mode is not reordered in front of GVA 1162 - * accesses, or a TLB flush requester may not think it necessary to send 1163 - * an IPI. 1164 - */ 1165 - smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE); 1166 - 1167 - /* 1168 - * Now that the access to GVA space is complete, its safe for pending 1169 - * TLB flush request IPIs to be handled (which indicates completion). 1170 - */ 1171 - local_irq_enable(); 1172 - } 1173 - 1174 - static void kvm_trap_emul_vcpu_reenter(struct kvm_vcpu *vcpu) 1175 - { 1176 - struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; 1177 - struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; 1178 - struct mm_struct *mm; 1179 - struct mips_coproc *cop0 = vcpu->arch.cop0; 1180 - int i, cpu = smp_processor_id(); 1181 - unsigned int gasid; 1182 - 1183 - /* 1184 - * No need to reload ASID, IRQs are disabled already so there's no rush, 1185 - * and we'll check if we need to regenerate below anyway before 1186 - * re-entering the guest. 1187 - */ 1188 - kvm_trap_emul_check_requests(vcpu, cpu, false); 1189 - 1190 - if (KVM_GUEST_KERNEL_MODE(vcpu)) { 1191 - mm = kern_mm; 1192 - } else { 1193 - mm = user_mm; 1194 - 1195 - /* 1196 - * Lazy host ASID regeneration / PT flush for guest user mode. 1197 - * If the guest ASID has changed since the last guest usermode 1198 - * execution, invalidate the stale TLB entries and flush GVA PT 1199 - * entries too. 1200 - */ 1201 - gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID; 1202 - if (gasid != vcpu->arch.last_user_gasid) { 1203 - kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER); 1204 - for_each_possible_cpu(i) 1205 - set_cpu_context(i, user_mm, 0); 1206 - vcpu->arch.last_user_gasid = gasid; 1207 - } 1208 - } 1209 - 1210 - /* 1211 - * Check if ASID is stale. This may happen due to a TLB flush request or 1212 - * a lazy user MM invalidation. 
1213 - */ 1214 - check_mmu_context(mm); 1215 - } 1216 - 1217 - static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu) 1218 - { 1219 - int cpu = smp_processor_id(); 1220 - int r; 1221 - 1222 - /* Check if we have any exceptions/interrupts pending */ 1223 - kvm_mips_deliver_interrupts(vcpu, 1224 - kvm_read_c0_guest_cause(vcpu->arch.cop0)); 1225 - 1226 - kvm_trap_emul_vcpu_reenter(vcpu); 1227 - 1228 - /* 1229 - * We use user accessors to access guest memory, but we don't want to 1230 - * invoke Linux page faulting. 1231 - */ 1232 - pagefault_disable(); 1233 - 1234 - /* Disable hardware page table walking while in guest */ 1235 - htw_stop(); 1236 - 1237 - /* 1238 - * While in guest context we're in the guest's address space, not the 1239 - * host process address space, so we need to be careful not to confuse 1240 - * e.g. cache management IPIs. 1241 - */ 1242 - kvm_mips_suspend_mm(cpu); 1243 - 1244 - r = vcpu->arch.vcpu_run(vcpu); 1245 - 1246 - /* We may have migrated while handling guest exits */ 1247 - cpu = smp_processor_id(); 1248 - 1249 - /* Restore normal Linux process memory map */ 1250 - check_switch_mmu_context(current->mm); 1251 - kvm_mips_resume_mm(cpu); 1252 - 1253 - htw_start(); 1254 - 1255 - pagefault_enable(); 1256 - 1257 - return r; 1258 - } 1259 - 1260 - static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { 1261 - /* exit handlers */ 1262 - .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, 1263 - .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod, 1264 - .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss, 1265 - .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss, 1266 - .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st, 1267 - .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld, 1268 - .handle_syscall = kvm_trap_emul_handle_syscall, 1269 - .handle_res_inst = kvm_trap_emul_handle_res_inst, 1270 - .handle_break = kvm_trap_emul_handle_break, 1271 - .handle_trap = kvm_trap_emul_handle_trap, 1272 - .handle_msa_fpe = 
kvm_trap_emul_handle_msa_fpe, 1273 - .handle_fpe = kvm_trap_emul_handle_fpe, 1274 - .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled, 1275 - .handle_guest_exit = kvm_trap_emul_no_handler, 1276 - 1277 - .hardware_enable = kvm_trap_emul_hardware_enable, 1278 - .hardware_disable = kvm_trap_emul_hardware_disable, 1279 - .check_extension = kvm_trap_emul_check_extension, 1280 - .vcpu_init = kvm_trap_emul_vcpu_init, 1281 - .vcpu_uninit = kvm_trap_emul_vcpu_uninit, 1282 - .vcpu_setup = kvm_trap_emul_vcpu_setup, 1283 - .flush_shadow_all = kvm_trap_emul_flush_shadow_all, 1284 - .flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot, 1285 - .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb, 1286 - .queue_timer_int = kvm_mips_queue_timer_int_cb, 1287 - .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb, 1288 - .queue_io_int = kvm_mips_queue_io_int_cb, 1289 - .dequeue_io_int = kvm_mips_dequeue_io_int_cb, 1290 - .irq_deliver = kvm_mips_irq_deliver_cb, 1291 - .irq_clear = kvm_mips_irq_clear_cb, 1292 - .num_regs = kvm_trap_emul_num_regs, 1293 - .copy_reg_indices = kvm_trap_emul_copy_reg_indices, 1294 - .get_one_reg = kvm_trap_emul_get_one_reg, 1295 - .set_one_reg = kvm_trap_emul_set_one_reg, 1296 - .vcpu_load = kvm_trap_emul_vcpu_load, 1297 - .vcpu_put = kvm_trap_emul_vcpu_put, 1298 - .vcpu_run = kvm_trap_emul_vcpu_run, 1299 - .vcpu_reenter = kvm_trap_emul_vcpu_reenter, 1300 - }; 1301 - 1302 - int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) 1303 - { 1304 - *install_callbacks = &kvm_trap_emul_callbacks; 1305 - return 0; 1306 - }
+2 -3
arch/mips/kvm/vz.c
··· 292 292 switch (priority) { 293 293 case MIPS_EXC_INT_TIMER: 294 294 /* 295 - * Call to kvm_write_c0_guest_compare() clears Cause.TI in 296 - * kvm_mips_emulate_CP0(). Explicitly clear irq associated with 297 - * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not 295 + * Explicitly clear irq associated with Cause.IP[IPTI] 296 + * if GuestCtl2 virtual interrupt register not 298 297 * supported or if not using GuestCtl2 Hardware Clear. 299 298 */ 300 299 if (cpu_has_guestctl2) {
+17 -11
arch/mips/lib/memcpy.S
··· 661 661 EXPORT_SYMBOL(memcpy) 662 662 move v0, dst /* return value */ 663 663 .L__memcpy: 664 - FEXPORT(__copy_user) 665 - EXPORT_SYMBOL(__copy_user) 664 + #ifndef CONFIG_EVA 665 + FEXPORT(__raw_copy_from_user) 666 + EXPORT_SYMBOL(__raw_copy_from_user) 667 + FEXPORT(__raw_copy_to_user) 668 + EXPORT_SYMBOL(__raw_copy_to_user) 669 + FEXPORT(__raw_copy_in_user) 670 + EXPORT_SYMBOL(__raw_copy_in_user) 671 + #endif 666 672 /* Legacy Mode, user <-> user */ 667 673 __BUILD_COPY_USER LEGACY_MODE USEROP USEROP 668 674 ··· 687 681 * __copy_from_user (EVA) 688 682 */ 689 683 690 - LEAF(__copy_from_user_eva) 691 - EXPORT_SYMBOL(__copy_from_user_eva) 684 + LEAF(__raw_copy_from_user) 685 + EXPORT_SYMBOL(__raw_copy_from_user) 692 686 __BUILD_COPY_USER EVA_MODE USEROP KERNELOP 693 - END(__copy_from_user_eva) 687 + END(__raw_copy_from_user) 694 688 695 689 696 690 ··· 698 692 * __copy_to_user (EVA) 699 693 */ 700 694 701 - LEAF(__copy_to_user_eva) 702 - EXPORT_SYMBOL(__copy_to_user_eva) 695 + LEAF(__raw_copy_to_user) 696 + EXPORT_SYMBOL(__raw_copy_to_user) 703 697 __BUILD_COPY_USER EVA_MODE KERNELOP USEROP 704 - END(__copy_to_user_eva) 698 + END(__raw_copy_to_user) 705 699 706 700 /* 707 701 * __copy_in_user (EVA) 708 702 */ 709 703 710 - LEAF(__copy_in_user_eva) 711 - EXPORT_SYMBOL(__copy_in_user_eva) 704 + LEAF(__raw_copy_in_user) 705 + EXPORT_SYMBOL(__raw_copy_in_user) 712 706 __BUILD_COPY_USER EVA_MODE USEROP USEROP 713 - END(__copy_in_user_eva) 707 + END(__raw_copy_in_user) 714 708 715 709 #endif
-3
arch/mips/lib/memset.S
··· 314 314 #ifndef CONFIG_EVA 315 315 FEXPORT(__bzero) 316 316 EXPORT_SYMBOL(__bzero) 317 - #else 318 - FEXPORT(__bzero_kernel) 319 - EXPORT_SYMBOL(__bzero_kernel) 320 317 #endif 321 318 __BUILD_BZERO LEGACY_MODE 322 319
+14 -34
arch/mips/lib/strncpy_user.S
··· 29 29 * it happens at most some bytes of the exceptions handlers will be copied. 30 30 */ 31 31 32 - .macro __BUILD_STRNCPY_ASM func 33 - LEAF(__strncpy_from_\func\()_asm) 34 - LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? 35 - and v0, a1 36 - bnez v0, .Lfault\@ 37 - 32 + LEAF(__strncpy_from_user_asm) 38 33 move t0, zero 39 34 move v1, a1 40 - .ifeqs "\func","kernel" 41 - 1: EX(lbu, v0, (v1), .Lfault\@) 42 - .else 43 - 1: EX(lbue, v0, (v1), .Lfault\@) 44 - .endif 35 + #ifdef CONFIG_EVA 36 + .set push 37 + .set eva 38 + 1: EX(lbue, v0, (v1), .Lfault) 39 + .set pop 40 + #else 41 + 1: EX(lbu, v0, (v1), .Lfault) 42 + #endif 45 43 PTR_ADDIU v1, 1 46 44 R10KCBARRIER(0(ra)) 47 45 sb v0, (a0) ··· 49 51 bne t0, a2, 1b 50 52 2: PTR_ADDU v0, a1, t0 51 53 xor v0, a1 52 - bltz v0, .Lfault\@ 54 + bltz v0, .Lfault 53 55 move v0, t0 54 56 jr ra # return n 55 - END(__strncpy_from_\func\()_asm) 57 + END(__strncpy_from_user_asm) 56 58 57 - .Lfault\@: 59 + .Lfault: 58 60 li v0, -EFAULT 59 61 jr ra 60 62 61 63 .section __ex_table,"a" 62 - PTR 1b, .Lfault\@ 64 + PTR 1b, .Lfault 63 65 .previous 64 66 65 - .endm 66 - 67 - #ifndef CONFIG_EVA 68 - /* Set aliases */ 69 - .global __strncpy_from_user_asm 70 - .set __strncpy_from_user_asm, __strncpy_from_kernel_asm 71 - EXPORT_SYMBOL(__strncpy_from_user_asm) 72 - #endif 73 - 74 - __BUILD_STRNCPY_ASM kernel 75 - EXPORT_SYMBOL(__strncpy_from_kernel_asm) 76 - 77 - #ifdef CONFIG_EVA 78 - .set push 79 - .set eva 80 - __BUILD_STRNCPY_ASM user 81 - .set pop 82 - EXPORT_SYMBOL(__strncpy_from_user_asm) 83 - #endif 67 + EXPORT_SYMBOL(__strncpy_from_user_asm)
+12 -32
arch/mips/lib/strnlen_user.S
··· 26 26 * bytes. There's nothing secret there. On 64-bit accessing beyond 27 27 * the maximum is a tad hairier ... 28 28 */ 29 - .macro __BUILD_STRNLEN_ASM func 30 - LEAF(__strnlen_\func\()_asm) 31 - LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? 32 - and v0, a0 33 - bnez v0, .Lfault\@ 34 - 29 + LEAF(__strnlen_user_asm) 35 30 move v0, a0 36 31 PTR_ADDU a1, a0 # stop pointer 37 32 1: ··· 35 40 li AT, 1 36 41 #endif 37 42 beq v0, a1, 1f # limit reached? 38 - .ifeqs "\func", "kernel" 39 - EX(lb, t0, (v0), .Lfault\@) 40 - .else 41 - EX(lbe, t0, (v0), .Lfault\@) 42 - .endif 43 + #ifdef CONFIG_EVA 44 + .set push 45 + .set eva 46 + EX(lbe, t0, (v0), .Lfault) 47 + .set pop 48 + #else 49 + EX(lb, t0, (v0), .Lfault) 50 + #endif 43 51 .set noreorder 44 52 bnez t0, 1b 45 53 1: ··· 55 57 .set reorder 56 58 PTR_SUBU v0, a0 57 59 jr ra 58 - END(__strnlen_\func\()_asm) 60 + END(__strnlen_user_asm) 59 61 60 - .Lfault\@: 62 + .Lfault: 61 63 move v0, zero 62 64 jr ra 63 - .endm 64 65 65 - #ifndef CONFIG_EVA 66 - /* Set aliases */ 67 - .global __strnlen_user_asm 68 - .set __strnlen_user_asm, __strnlen_kernel_asm 69 - EXPORT_SYMBOL(__strnlen_user_asm) 70 - #endif 71 - 72 - __BUILD_STRNLEN_ASM kernel 73 - EXPORT_SYMBOL(__strnlen_kernel_asm) 74 - 75 - #ifdef CONFIG_EVA 76 - 77 - .set push 78 - .set eva 79 - __BUILD_STRNLEN_ASM user 80 - .set pop 81 - EXPORT_SYMBOL(__strnlen_user_asm) 82 - #endif 66 + EXPORT_SYMBOL(__strnlen_user_asm)
+1 -1
arch/mips/loongson64/Makefile
··· 2 2 # 3 3 # Makefile for Loongson-3 family machines 4 4 # 5 - obj-$(CONFIG_MACH_LOONGSON64) += cop2-ex.o platform.o dma.o \ 5 + obj-$(CONFIG_MACH_LOONGSON64) += cop2-ex.o dma.o \ 6 6 setup.o init.o env.o time.o reset.o \ 7 7 8 8 obj-$(CONFIG_SMP) += smp.o
+12 -21
arch/mips/loongson64/env.c
··· 43 43 return "Generic Loongson64 System"; 44 44 } 45 45 46 - void __init prom_init_env(void) 46 + 47 + void __init prom_dtb_init_env(void) 48 + { 49 + if ((fw_arg2 < CKSEG0 || fw_arg2 > CKSEG1) 50 + && (fw_arg2 < XKPHYS || fw_arg2 > XKSEG)) 51 + 52 + loongson_fdt_blob = __dtb_loongson64_2core_2k1000_begin; 53 + else 54 + loongson_fdt_blob = (void *)fw_arg2; 55 + } 56 + 57 + void __init prom_lefi_init_env(void) 47 58 { 48 59 struct boot_params *boot_p; 49 60 struct loongson_params *loongson_p; ··· 106 95 loongson_freqctrl[1] = 0x900010001fe001d0; 107 96 loongson_freqctrl[2] = 0x900020001fe001d0; 108 97 loongson_freqctrl[3] = 0x900030001fe001d0; 109 - loongson_sysconf.ht_control_base = 0x90000EFDFB000000; 110 98 loongson_sysconf.workarounds = WORKAROUND_CPUFREQ; 111 99 break; 112 100 case Legacy_3B: ··· 128 118 loongson_freqctrl[1] = 0x900020001fe001d0; 129 119 loongson_freqctrl[2] = 0x900040001fe001d0; 130 120 loongson_freqctrl[3] = 0x900060001fe001d0; 131 - loongson_sysconf.ht_control_base = 0x90001EFDFB000000; 132 121 loongson_sysconf.workarounds = WORKAROUND_CPUHOTPLUG; 133 122 break; 134 123 default: ··· 145 136 loongson_sysconf.cores_per_node - 1) / 146 137 loongson_sysconf.cores_per_node; 147 138 148 - loongson_sysconf.pci_mem_start_addr = eirq_source->pci_mem_start_addr; 149 - loongson_sysconf.pci_mem_end_addr = eirq_source->pci_mem_end_addr; 150 - loongson_sysconf.pci_io_base = eirq_source->pci_io_start_addr; 151 139 loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits; 152 140 if (loongson_sysconf.dma_mask_bits < 32 || 153 141 loongson_sysconf.dma_mask_bits > 64) ··· 159 153 loongson_sysconf.poweroff_addr, loongson_sysconf.restart_addr, 160 154 loongson_sysconf.vgabios_addr); 161 155 162 - memset(loongson_sysconf.ecname, 0, 32); 163 - if (esys->has_ec) 164 - memcpy(loongson_sysconf.ecname, esys->ec_name, 32); 165 156 loongson_sysconf.workarounds |= esys->workarounds; 166 157 167 - loongson_sysconf.nr_uarts = esys->nr_uarts; 168 - if 
(esys->nr_uarts < 1 || esys->nr_uarts > MAX_UARTS) 169 - loongson_sysconf.nr_uarts = 1; 170 - memcpy(loongson_sysconf.uarts, esys->uarts, 171 - sizeof(struct uart_device) * loongson_sysconf.nr_uarts); 172 - 173 - loongson_sysconf.nr_sensors = esys->nr_sensors; 174 - if (loongson_sysconf.nr_sensors > MAX_SENSORS) 175 - loongson_sysconf.nr_sensors = 0; 176 - if (loongson_sysconf.nr_sensors) 177 - memcpy(loongson_sysconf.sensors, esys->sensors, 178 - sizeof(struct sensor_device) * loongson_sysconf.nr_sensors); 179 158 pr_info("CpuClock = %u\n", cpu_clock_freq); 180 159 181 160 /* Read the ID of PCI host bridge to detect bridge type */
+19 -4
arch/mips/loongson64/init.c
··· 52 52 static unsigned long num_physpages; 53 53 u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size; 54 54 55 + /* Otherwise come from DTB */ 56 + if (loongson_sysconf.fw_interface != LOONGSON_LEFI) 57 + return; 58 + 55 59 /* Parse memory information and activate */ 56 60 for (i = 0; i < loongson_memmap->nr_map; i++) { 57 61 node_id = loongson_memmap->map[i].node_id; ··· 98 94 void __init prom_init(void) 99 95 { 100 96 fw_init_cmdline(); 101 - prom_init_env(); 97 + 98 + if (fw_arg2 == 0 || (fdt_magic(fw_arg2) == FDT_MAGIC)) { 99 + loongson_sysconf.fw_interface = LOONGSON_DTB; 100 + prom_dtb_init_env(); 101 + } else { 102 + loongson_sysconf.fw_interface = LOONGSON_LEFI; 103 + prom_lefi_init_env(); 104 + } 102 105 103 106 /* init base address of io space */ 104 107 set_io_port_base(PCI_IOBASE); 105 108 106 - loongson_sysconf.early_config(); 109 + if (loongson_sysconf.early_config) 110 + loongson_sysconf.early_config(); 107 111 108 112 #ifdef CONFIG_NUMA 109 113 prom_init_numa_memory(); ··· 120 108 #endif 121 109 122 110 /* Hardcode to CPU UART 0 */ 123 - setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE + 0x1e0), 0, 1024); 111 + if ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R) 112 + setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE), 0, 1024); 113 + else 114 + setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE + 0x1e0), 0, 1024); 124 115 125 116 register_smp_ops(&loongson3_smp_ops); 126 117 board_nmi_handler_setup = mips_nmi_setup; ··· 141 126 return -ENOMEM; 142 127 143 128 range->fwnode = fwnode; 144 - range->size = size; 129 + range->size = size = round_up(size, PAGE_SIZE); 145 130 range->hw_start = hw_start; 146 131 range->flags = LOGIC_PIO_CPU_MMIO; 147 132
+14 -3
arch/mips/loongson64/numa.c
··· 27 27 #include <boot_param.h> 28 28 #include <loongson.h> 29 29 30 - static struct pglist_data prealloc__node_data[MAX_NUMNODES]; 31 30 unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES]; 32 31 EXPORT_SYMBOL(__node_distances); 33 32 struct pglist_data *__node_data[MAX_NUMNODES]; ··· 83 84 84 85 static void __init node_mem_init(unsigned int node) 85 86 { 87 + struct pglist_data *nd; 86 88 unsigned long node_addrspace_offset; 87 89 unsigned long start_pfn, end_pfn; 90 + unsigned long nd_pa; 91 + int tnid; 92 + const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); 88 93 89 94 node_addrspace_offset = nid_to_addrbase(node); 90 95 pr_info("Node%d's addrspace_offset is 0x%lx\n", ··· 98 95 pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n", 99 96 node, start_pfn, end_pfn); 100 97 101 - __node_data[node] = prealloc__node_data + node; 102 - 98 + nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, node); 99 + if (!nd_pa) 100 + panic("Cannot allocate %zu bytes for node %d data\n", 101 + nd_size, node); 102 + nd = __va(nd_pa); 103 + memset(nd, 0, sizeof(struct pglist_data)); 104 + tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); 105 + if (tnid != node) 106 + pr_info("NODE_DATA(%d) on node %d\n", node, tnid); 107 + __node_data[node] = nd; 103 108 NODE_DATA(node)->node_start_pfn = start_pfn; 104 109 NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; 105 110
-42
arch/mips/loongson64/platform.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 2 - /* 3 - * Copyright (C) 2009 Lemote Inc. 4 - * Author: Wu Zhangjin, wuzhangjin@gmail.com 5 - * Xiang Yu, xiangy@lemote.com 6 - * Chen Huacai, chenhc@lemote.com 7 - */ 8 - 9 - #include <linux/err.h> 10 - #include <linux/slab.h> 11 - #include <linux/platform_device.h> 12 - #include <asm/bootinfo.h> 13 - #include <boot_param.h> 14 - #include <loongson_hwmon.h> 15 - #include <workarounds.h> 16 - 17 - static int __init loongson3_platform_init(void) 18 - { 19 - int i; 20 - struct platform_device *pdev; 21 - 22 - if (loongson_sysconf.ecname[0] != '\0') 23 - platform_device_register_simple(loongson_sysconf.ecname, -1, NULL, 0); 24 - 25 - for (i = 0; i < loongson_sysconf.nr_sensors; i++) { 26 - if (loongson_sysconf.sensors[i].type > SENSOR_FAN) 27 - continue; 28 - 29 - pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL); 30 - if (!pdev) 31 - return -ENOMEM; 32 - 33 - pdev->name = loongson_sysconf.sensors[i].name; 34 - pdev->id = loongson_sysconf.sensors[i].id; 35 - pdev->dev.platform_data = &loongson_sysconf.sensors[i]; 36 - platform_device_register(pdev); 37 - } 38 - 39 - return 0; 40 - } 41 - 42 - arch_initcall(loongson3_platform_init);
+113
arch/mips/loongson64/reset.c
··· 6 6 * Copyright (C) 2009 Lemote, Inc. 7 7 * Author: Zhangjin Wu, wuzhangjin@gmail.com 8 8 */ 9 + #include <linux/cpu.h> 10 + #include <linux/delay.h> 9 11 #include <linux/init.h> 12 + #include <linux/kexec.h> 10 13 #include <linux/pm.h> 14 + #include <linux/slab.h> 11 15 16 + #include <asm/bootinfo.h> 12 17 #include <asm/idle.h> 13 18 #include <asm/reboot.h> 14 19 ··· 52 47 } 53 48 } 54 49 50 + #ifdef CONFIG_KEXEC 51 + 52 + /* 0X80000000~0X80200000 is safe */ 53 + #define MAX_ARGS 64 54 + #define KEXEC_CTRL_CODE 0xFFFFFFFF80100000UL 55 + #define KEXEC_ARGV_ADDR 0xFFFFFFFF80108000UL 56 + #define KEXEC_ARGV_SIZE COMMAND_LINE_SIZE 57 + #define KEXEC_ENVP_SIZE 4800 58 + 59 + static int kexec_argc; 60 + static int kdump_argc; 61 + static void *kexec_argv; 62 + static void *kdump_argv; 63 + static void *kexec_envp; 64 + 65 + static int loongson_kexec_prepare(struct kimage *image) 66 + { 67 + int i, argc = 0; 68 + unsigned int *argv; 69 + char *str, *ptr, *bootloader = "kexec"; 70 + 71 + /* argv at offset 0, argv[] at offset KEXEC_ARGV_SIZE/2 */ 72 + if (image->type == KEXEC_TYPE_DEFAULT) 73 + argv = (unsigned int *)kexec_argv; 74 + else 75 + argv = (unsigned int *)kdump_argv; 76 + 77 + argv[argc++] = (unsigned int)(KEXEC_ARGV_ADDR + KEXEC_ARGV_SIZE/2); 78 + 79 + for (i = 0; i < image->nr_segments; i++) { 80 + if (!strncmp(bootloader, (char *)image->segment[i].buf, 81 + strlen(bootloader))) { 82 + /* 83 + * convert command line string to array 84 + * of parameters (as bootloader does). 
85 + */ 86 + int offt; 87 + str = (char *)argv + KEXEC_ARGV_SIZE/2; 88 + memcpy(str, image->segment[i].buf, KEXEC_ARGV_SIZE/2); 89 + ptr = strchr(str, ' '); 90 + 91 + while (ptr && (argc < MAX_ARGS)) { 92 + *ptr = '\0'; 93 + if (ptr[1] != ' ') { 94 + offt = (int)(ptr - str + 1); 95 + argv[argc] = KEXEC_ARGV_ADDR + KEXEC_ARGV_SIZE/2 + offt; 96 + argc++; 97 + } 98 + ptr = strchr(ptr + 1, ' '); 99 + } 100 + break; 101 + } 102 + } 103 + 104 + if (image->type == KEXEC_TYPE_DEFAULT) 105 + kexec_argc = argc; 106 + else 107 + kdump_argc = argc; 108 + 109 + /* kexec/kdump need a safe page to save reboot_code_buffer */ 110 + image->control_code_page = virt_to_page((void *)KEXEC_CTRL_CODE); 111 + 112 + return 0; 113 + } 114 + 115 + static void loongson_kexec_shutdown(void) 116 + { 117 + #ifdef CONFIG_SMP 118 + int cpu; 119 + 120 + /* All CPUs go to reboot_code_buffer */ 121 + for_each_possible_cpu(cpu) 122 + if (!cpu_online(cpu)) 123 + cpu_device_up(get_cpu_device(cpu)); 124 + #endif 125 + kexec_args[0] = kexec_argc; 126 + kexec_args[1] = fw_arg1; 127 + kexec_args[2] = fw_arg2; 128 + secondary_kexec_args[0] = TO_UNCAC(0x3ff01000); 129 + memcpy((void *)fw_arg1, kexec_argv, KEXEC_ARGV_SIZE); 130 + memcpy((void *)fw_arg2, kexec_envp, KEXEC_ENVP_SIZE); 131 + } 132 + 133 + static void loongson_crash_shutdown(struct pt_regs *regs) 134 + { 135 + default_machine_crash_shutdown(regs); 136 + kexec_args[0] = kdump_argc; 137 + kexec_args[1] = fw_arg1; 138 + kexec_args[2] = fw_arg2; 139 + secondary_kexec_args[0] = TO_UNCAC(0x3ff01000); 140 + memcpy((void *)fw_arg1, kdump_argv, KEXEC_ARGV_SIZE); 141 + memcpy((void *)fw_arg2, kexec_envp, KEXEC_ENVP_SIZE); 142 + } 143 + 144 + #endif 145 + 55 146 static int __init mips_reboot_setup(void) 56 147 { 57 148 _machine_restart = loongson_restart; 58 149 _machine_halt = loongson_halt; 59 150 pm_power_off = loongson_poweroff; 151 + 152 + #ifdef CONFIG_KEXEC 153 + kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL); 154 + kdump_argv = 
kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL); 155 + kexec_envp = kmalloc(KEXEC_ENVP_SIZE, GFP_KERNEL); 156 + fw_arg1 = KEXEC_ARGV_ADDR; 157 + memcpy(kexec_envp, (void *)fw_arg2, KEXEC_ENVP_SIZE); 158 + 159 + _machine_kexec_prepare = loongson_kexec_prepare; 160 + _machine_kexec_shutdown = loongson_kexec_shutdown; 161 + _machine_crash_shutdown = loongson_crash_shutdown; 162 + #endif 60 163 61 164 return 0; 62 165 }
+24
arch/mips/loongson64/time.c
··· 11 11 #include <asm/hpet.h> 12 12 13 13 #include <loongson.h> 14 + #include <linux/clk.h> 15 + #include <linux/of_clk.h> 14 16 15 17 void __init plat_time_init(void) 16 18 { 19 + struct clk *clk; 20 + struct device_node *np; 21 + 22 + if (loongson_sysconf.fw_interface == LOONGSON_DTB) { 23 + of_clk_init(NULL); 24 + 25 + np = of_get_cpu_node(0, NULL); 26 + if (!np) { 27 + pr_err("Failed to get CPU node\n"); 28 + return; 29 + } 30 + 31 + clk = of_clk_get(np, 0); 32 + if (IS_ERR(clk)) { 33 + pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk)); 34 + return; 35 + } 36 + 37 + cpu_clock_freq = clk_get_rate(clk); 38 + clk_put(clk); 39 + } 40 + 17 41 /* setup mips r4k timer */ 18 42 mips_hpt_frequency = cpu_clock_freq / 2; 19 43
+6
arch/mips/mm/Makefile
··· 22 22 obj-y += uasm-mips.o 23 23 endif 24 24 25 + ifndef CONFIG_EVA 26 + obj-y += maccess.o 27 + endif 28 + 25 29 obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o 26 30 obj-$(CONFIG_64BIT) += ioremap64.o pgtable-64.o 27 31 obj-$(CONFIG_HIGHMEM) += highmem.o ··· 44 40 obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o 45 41 obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o 46 42 obj-$(CONFIG_SCACHE_DEBUGFS) += sc-debugfs.o 43 + 44 + obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
+10
arch/mips/mm/maccess.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + 3 + #include <linux/uaccess.h> 4 + #include <linux/kernel.h> 5 + 6 + bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) 7 + { 8 + /* highest bit set means kernel space */ 9 + return (unsigned long)unsafe_src >> (BITS_PER_LONG - 1); 10 + }
+56
arch/mips/mm/physaddr.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/bug.h> 3 + #include <linux/export.h> 4 + #include <linux/types.h> 5 + #include <linux/mmdebug.h> 6 + #include <linux/mm.h> 7 + 8 + #include <asm/sections.h> 9 + #include <asm/io.h> 10 + #include <asm/page.h> 11 + #include <asm/dma.h> 12 + 13 + static inline bool __debug_virt_addr_valid(unsigned long x) 14 + { 15 + /* high_memory does not get immediately defined, and there 16 + * are early callers of __pa() against PAGE_OFFSET 17 + */ 18 + if (!high_memory && x >= PAGE_OFFSET) 19 + return true; 20 + 21 + if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory) 22 + return true; 23 + 24 + /* 25 + * MAX_DMA_ADDRESS is a virtual address that may not correspond to an 26 + * actual physical address. Enough code relies on 27 + * virt_to_phys(MAX_DMA_ADDRESS) that we just need to work around it 28 + * and always return true. 29 + */ 30 + if (x == MAX_DMA_ADDRESS) 31 + return true; 32 + 33 + return false; 34 + } 35 + 36 + phys_addr_t __virt_to_phys(volatile const void *x) 37 + { 38 + WARN(!__debug_virt_addr_valid((unsigned long)x), 39 + "virt_to_phys used for non-linear address: %pK (%pS)\n", 40 + x, x); 41 + 42 + return __virt_to_phys_nodebug(x); 43 + } 44 + EXPORT_SYMBOL(__virt_to_phys); 45 + 46 + phys_addr_t __phys_addr_symbol(unsigned long x) 47 + { 48 + /* This is bounds checking against the kernel image only. 49 + * __pa_symbol should only be used on kernel symbol addresses. 50 + */ 51 + VIRTUAL_BUG_ON(x < (unsigned long)_text || 52 + x > (unsigned long)_end); 53 + 54 + return __pa_symbol_nodebug(x); 55 + } 56 + EXPORT_SYMBOL(__phys_addr_symbol);
+5 -4
arch/mips/mm/tlbex.c
··· 849 849 /* Clear lower 23 bits of context. */ 850 850 uasm_i_dins(p, ptr, 0, 0, 23); 851 851 852 - /* 1 0 1 0 1 << 6 xkphys cached */ 853 - uasm_i_ori(p, ptr, ptr, 0x540); 852 + /* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */ 853 + uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53)); 854 854 uasm_i_drotr(p, ptr, ptr, 11); 855 855 #elif defined(CONFIG_SMP) 856 856 UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG); ··· 1165 1165 1166 1166 if (pgd_reg == -1) { 1167 1167 vmalloc_branch_delay_filled = 1; 1168 - /* 1 0 1 0 1 << 6 xkphys cached */ 1169 - uasm_i_ori(p, ptr, ptr, 0x540); 1168 + /* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */ 1169 + uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53)); 1170 + 1170 1171 uasm_i_drotr(p, ptr, ptr, 11); 1171 1172 } 1172 1173
+1 -5
arch/mips/mti-malta/Platform
··· 2 2 # MIPS Malta board 3 3 # 4 4 cflags-$(CONFIG_MIPS_MALTA) += -I$(srctree)/arch/mips/include/asm/mach-malta 5 - ifdef CONFIG_KVM_GUEST 6 - load-$(CONFIG_MIPS_MALTA) += 0x0000000040100000 7 - else 8 - load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000 9 - endif 5 + load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000 10 6 all-$(CONFIG_MIPS_MALTA) := $(COMPRESSION_FNAME).bin
-5
arch/mips/mti-malta/malta-time.c
··· 66 66 int secs; 67 67 u64 giccount = 0, gicstart = 0; 68 68 69 - #if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ 70 - mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000; 71 - return; 72 - #endif 73 - 74 69 local_irq_save(flags); 75 70 76 71 if (mips_gic_present())
+1
arch/mips/pci/pci-ar2315.c
··· 31 31 #include <linux/platform_device.h> 32 32 #include <linux/kernel.h> 33 33 #include <linux/init.h> 34 + #include <linux/dma-direct.h> 34 35 #include <linux/mm.h> 35 36 #include <linux/delay.h> 36 37 #include <linux/bitops.h>
+10 -13
arch/mips/pci/pci-legacy.c
··· 89 89 hose->mem_resource, hose->mem_offset); 90 90 pci_add_resource_offset(&resources, 91 91 hose->io_resource, hose->io_offset); 92 - pci_add_resource(&resources, hose->busn_resource); 93 92 list_splice_init(&resources, &bridge->windows); 94 93 bridge->dev.parent = NULL; 95 94 bridge->sysdata = hose; ··· 139 140 struct of_pci_range range; 140 141 struct of_pci_range_parser parser; 141 142 142 - pr_info("PCI host bridge %pOF ranges:\n", node); 143 143 hose->of_node = node; 144 144 145 145 if (of_pci_range_parser_init(&parser, node)) ··· 149 151 150 152 switch (range.flags & IORESOURCE_TYPE_BITS) { 151 153 case IORESOURCE_IO: 152 - pr_info(" IO 0x%016llx..0x%016llx\n", 153 - range.cpu_addr, 154 - range.cpu_addr + range.size - 1); 155 154 hose->io_map_base = 156 155 (unsigned long)ioremap(range.cpu_addr, 157 156 range.size); 158 157 res = hose->io_resource; 159 158 break; 160 159 case IORESOURCE_MEM: 161 - pr_info(" MEM 0x%016llx..0x%016llx\n", 162 - range.cpu_addr, 163 - range.cpu_addr + range.size - 1); 164 160 res = hose->mem_resource; 165 161 break; 166 162 } 167 - if (res != NULL) 168 - of_pci_range_to_resource(&range, node, res); 163 + if (res != NULL) { 164 + res->name = node->full_name; 165 + res->flags = range.flags; 166 + res->start = range.cpu_addr; 167 + res->end = range.cpu_addr + range.size - 1; 168 + res->parent = res->child = res->sibling = NULL; 169 + } 169 170 } 170 171 } 171 172 ··· 249 252 250 253 pci_read_config_word(dev, PCI_COMMAND, &cmd); 251 254 old_cmd = cmd; 252 - for (idx=0; idx < PCI_NUM_RESOURCES; idx++) { 255 + for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) { 253 256 /* Only set up the requested stuff */ 254 257 if (!(mask & (1<<idx))) 255 258 continue; ··· 279 282 280 283 int pcibios_enable_device(struct pci_dev *dev, int mask) 281 284 { 282 - int err; 285 + int err = pcibios_enable_resources(dev, mask); 283 286 284 - if ((err = pcibios_enable_resources(dev, mask)) < 0) 287 + if (err < 0) 285 288 return err; 286 289 287 290 return 
pcibios_plat_dev_init(dev);
+3 -2
arch/mips/pci/pci-mt7620.c
··· 30 30 #define RALINK_GPIOMODE 0x60 31 31 32 32 #define PPLL_CFG1 0x9c 33 + #define PPLL_LD BIT(23) 33 34 34 35 #define PPLL_DRV 0xa0 35 36 #define PDRV_SW_SET BIT(31) ··· 240 239 rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1); 241 240 mdelay(100); 242 241 243 - if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) { 244 - dev_err(&pdev->dev, "MT7620 PPLL unlock\n"); 242 + if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) { 243 + dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n"); 245 244 reset_control_assert(rstpcie0); 246 245 rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1); 247 246 return -1;
+24 -26
arch/mips/pci/pci-rt2880.c
··· 41 41 #define RT2880_PCI_REG_ARBCTL 0x80 42 42 43 43 static void __iomem *rt2880_pci_base; 44 - static DEFINE_SPINLOCK(rt2880_pci_lock); 45 44 46 45 static u32 rt2880_pci_reg_read(u32 reg) 47 46 { ··· 62 63 static int rt2880_pci_config_read(struct pci_bus *bus, unsigned int devfn, 63 64 int where, int size, u32 *val) 64 65 { 65 - unsigned long flags; 66 66 u32 address; 67 67 u32 data; 68 68 69 69 address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), 70 70 PCI_FUNC(devfn), where); 71 71 72 - spin_lock_irqsave(&rt2880_pci_lock, flags); 73 72 rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); 74 73 data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); 75 - spin_unlock_irqrestore(&rt2880_pci_lock, flags); 76 74 77 75 switch (size) { 78 76 case 1: ··· 89 93 static int rt2880_pci_config_write(struct pci_bus *bus, unsigned int devfn, 90 94 int where, int size, u32 val) 91 95 { 92 - unsigned long flags; 93 96 u32 address; 94 97 u32 data; 95 98 96 99 address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), 97 100 PCI_FUNC(devfn), where); 98 101 99 - spin_lock_irqsave(&rt2880_pci_lock, flags); 100 102 rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); 101 103 data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); 102 104 ··· 113 119 } 114 120 115 121 rt2880_pci_reg_write(data, RT2880_PCI_REG_CONFIG_DATA); 116 - spin_unlock_irqrestore(&rt2880_pci_lock, flags); 117 122 118 123 return PCIBIOS_SUCCESSFUL; 119 124 } ··· 144 151 145 152 static inline u32 rt2880_pci_read_u32(unsigned long reg) 146 153 { 147 - unsigned long flags; 148 154 u32 address; 149 155 u32 ret; 150 156 151 157 address = rt2880_pci_get_cfgaddr(0, 0, 0, reg); 152 158 153 - spin_lock_irqsave(&rt2880_pci_lock, flags); 154 159 rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); 155 160 ret = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); 156 - spin_unlock_irqrestore(&rt2880_pci_lock, flags); 157 161 158 162 return ret; 159 163 } 160 164 161 165 static inline void 
rt2880_pci_write_u32(unsigned long reg, u32 val) 162 166 { 163 - unsigned long flags; 164 167 u32 address; 165 168 166 169 address = rt2880_pci_get_cfgaddr(0, 0, 0, reg); 167 170 168 - spin_lock_irqsave(&rt2880_pci_lock, flags); 169 171 rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); 170 172 rt2880_pci_reg_write(val, RT2880_PCI_REG_CONFIG_DATA); 171 - spin_unlock_irqrestore(&rt2880_pci_lock, flags); 172 173 } 173 174 174 175 int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 175 176 { 176 - u16 cmd; 177 177 int irq = -1; 178 178 179 179 if (dev->bus->number != 0) ··· 174 188 175 189 switch (PCI_SLOT(dev->devfn)) { 176 190 case 0x00: 177 - rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000); 178 - (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0); 179 191 break; 180 192 case 0x11: 181 193 irq = RT288X_CPU_IRQ_PCI; ··· 185 201 break; 186 202 } 187 203 188 - pci_write_config_byte((struct pci_dev *) dev, 189 - PCI_CACHE_LINE_SIZE, 0x14); 190 - pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF); 191 - pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd); 192 - cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | 193 - PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK | 194 - PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY; 195 - pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd); 196 - pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE, 197 - dev->irq); 198 204 return irq; 199 205 } 200 206 ··· 225 251 226 252 int pcibios_plat_dev_init(struct pci_dev *dev) 227 253 { 254 + static bool slot0_init; 255 + 256 + /* 257 + * Nobody seems to initialize slot 0, but this platform requires it, so 258 + * do it once when some other slot is being enabled. The PCI subsystem 259 + * should configure other slots properly, so no need to do anything 260 + * special for those. 
261 + */ 262 + if (!slot0_init && dev->bus->number == 0) { 263 + u16 cmd; 264 + u32 bar0; 265 + 266 + slot0_init = true; 267 + 268 + pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0, 269 + 0x08000000); 270 + pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0, 271 + &bar0); 272 + 273 + pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd); 274 + cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY; 275 + pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd); 276 + } 277 + 228 278 return 0; 229 279 } 230 280
+2 -8
arch/mips/pci/pci-rt3883.c
··· 100 100 unsigned bus, unsigned slot, 101 101 unsigned func, unsigned reg) 102 102 { 103 - unsigned long flags; 104 103 u32 address; 105 104 u32 ret; 106 105 ··· 115 116 unsigned bus, unsigned slot, 116 117 unsigned func, unsigned reg, u32 val) 117 118 { 118 - unsigned long flags; 119 119 u32 address; 120 120 121 121 address = rt3883_pci_get_cfgaddr(bus, slot, func, reg); ··· 227 229 int where, int size, u32 *val) 228 230 { 229 231 struct rt3883_pci_controller *rpc; 230 - unsigned long flags; 231 232 u32 address; 232 233 u32 data; 233 234 ··· 260 263 int where, int size, u32 val) 261 264 { 262 265 struct rt3883_pci_controller *rpc; 263 - unsigned long flags; 264 266 u32 address; 265 267 u32 data; 266 268 ··· 431 435 432 436 if (!rpc->intc_of_node) { 433 437 dev_err(dev, "%pOF has no %s child node", 434 - rpc->intc_of_node, 435 - "interrupt controller"); 438 + np, "interrupt controller"); 436 439 return -EINVAL; 437 440 } 438 441 ··· 445 450 446 451 if (!rpc->pci_controller.of_node) { 447 452 dev_err(dev, "%pOF has no %s child node", 448 - rpc->intc_of_node, 449 - "PCI host bridge"); 453 + np, "PCI host bridge"); 450 454 err = -EINVAL; 451 455 goto err_put_intc_node; 452 456 }
+1 -1
arch/mips/pci/pci-xtalk-bridge.c
··· 385 385 bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */ 386 386 387 387 /* 388 - * Enable sending of an interrupt clear packt to the hub on a high to 388 + * Enable sending of an interrupt clear packet to the hub on a high to 389 389 * low transition of the interrupt pin. 390 390 * 391 391 * IRIX sets additional bits in the address which are documented as
+1
arch/mips/ralink/Kconfig
··· 26 26 27 27 config SOC_RT288X 28 28 bool "RT288x" 29 + select MIPS_AUTO_PFN_OFFSET 29 30 select MIPS_L1_CACHE_SHIFT_4 30 31 select HAVE_LEGACY_CLK 31 32 select HAVE_PCI
+14
arch/mips/ralink/clk.c
··· 70 70 } 71 71 EXPORT_SYMBOL_GPL(clk_round_rate); 72 72 73 + int clk_set_parent(struct clk *clk, struct clk *parent) 74 + { 75 + WARN_ON(clk); 76 + return -1; 77 + } 78 + EXPORT_SYMBOL_GPL(clk_set_parent); 79 + 80 + struct clk *clk_get_parent(struct clk *clk) 81 + { 82 + WARN_ON(clk); 83 + return NULL; 84 + } 85 + EXPORT_SYMBOL_GPL(clk_get_parent); 86 + 73 87 void __init plat_time_init(void) 74 88 { 75 89 struct clk *clk;
+2 -1
arch/mips/ralink/common.h
··· 17 17 unsigned long mem_size; 18 18 unsigned long mem_size_min; 19 19 unsigned long mem_size_max; 20 + void (*mem_detect)(void); 20 21 }; 21 22 extern struct ralink_soc_info soc_info; 22 23 ··· 28 27 29 28 extern void ralink_rst_init(void); 30 29 31 - extern void prom_soc_init(struct ralink_soc_info *soc_info); 30 + extern void __init prom_soc_init(struct ralink_soc_info *soc_info); 32 31 33 32 __iomem void *plat_of_remap_node(const char *node); 34 33
+1 -1
arch/mips/ralink/mt7620.c
··· 639 639 } 640 640 } 641 641 642 - void prom_soc_init(struct ralink_soc_info *soc_info) 642 + void __init prom_soc_init(struct ralink_soc_info *soc_info) 643 643 { 644 644 void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE); 645 645 unsigned char *name = NULL;
+26 -5
arch/mips/ralink/mt7621.c
··· 9 9 #include <linux/init.h> 10 10 #include <linux/slab.h> 11 11 #include <linux/sys_soc.h> 12 + #include <linux/memblock.h> 12 13 14 + #include <asm/bootinfo.h> 13 15 #include <asm/mipsregs.h> 14 16 #include <asm/smp-ops.h> 15 17 #include <asm/mips-cps.h> ··· 50 48 #define MT7621_GPIO_MODE_SDHCI_MASK 0x3 51 49 #define MT7621_GPIO_MODE_SDHCI_SHIFT 18 52 50 #define MT7621_GPIO_MODE_SDHCI_GPIO 1 51 + 52 + static void *detect_magic __initdata = detect_memory_region; 53 53 54 54 static struct rt2880_pmx_func uart1_grp[] = { FUNC("uart1", 0, 1, 2) }; 55 55 static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 3, 2) }; ··· 114 110 panic("Cannot detect cpc address"); 115 111 } 116 112 113 + static void __init mt7621_memory_detect(void) 114 + { 115 + void *dm = &detect_magic; 116 + phys_addr_t size; 117 + 118 + for (size = 32 * SZ_1M; size < 256 * SZ_1M; size <<= 1) { 119 + if (!__builtin_memcmp(dm, dm + size, sizeof(detect_magic))) 120 + break; 121 + } 122 + 123 + if ((size == 256 * SZ_1M) && 124 + (CPHYSADDR(dm + size) < MT7621_LOWMEM_MAX_SIZE) && 125 + __builtin_memcmp(dm, dm + size, sizeof(detect_magic))) { 126 + memblock_add(MT7621_LOWMEM_BASE, MT7621_LOWMEM_MAX_SIZE); 127 + memblock_add(MT7621_HIGHMEM_BASE, MT7621_HIGHMEM_SIZE); 128 + } else { 129 + memblock_add(MT7621_LOWMEM_BASE, size); 130 + } 131 + } 132 + 117 133 void __init ralink_of_remap(void) 118 134 { 119 135 rt_sysc_membase = plat_of_remap_node("mediatek,mt7621-sysc"); ··· 170 146 } 171 147 } 172 148 173 - void prom_soc_init(struct ralink_soc_info *soc_info) 149 + void __init prom_soc_init(struct ralink_soc_info *soc_info) 174 150 { 175 151 void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE); 176 152 unsigned char *name = NULL; ··· 218 194 (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK, 219 195 (rev & CHIP_REV_ECO_MASK)); 220 196 221 - soc_info->mem_size_min = MT7621_DDR2_SIZE_MIN; 222 - soc_info->mem_size_max = MT7621_DDR2_SIZE_MAX; 223 - soc_info->mem_base = MT7621_DRAM_BASE; 224 - 
197 + soc_info->mem_detect = mt7621_memory_detect; 225 198 rt2880_pinmux_data = mt7621_pinmux_data; 226 199 227 200 soc_dev_init(soc_info, rev);
+2
arch/mips/ralink/of.c
··· 78 78 of_scan_flat_dt(early_init_dt_find_memory, NULL); 79 79 if (memory_dtb) 80 80 of_scan_flat_dt(early_init_dt_scan_memory, NULL); 81 + else if (soc_info.mem_detect) 82 + soc_info.mem_detect(); 81 83 else if (soc_info.mem_size) 82 84 memblock_add(soc_info.mem_base, soc_info.mem_size * SZ_1M); 83 85 else
+1 -1
arch/mips/ralink/rt288x.c
··· 77 77 panic("Failed to remap core resources"); 78 78 } 79 79 80 - void prom_soc_init(struct ralink_soc_info *soc_info) 80 + void __init prom_soc_init(struct ralink_soc_info *soc_info) 81 81 { 82 82 void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT2880_SYSC_BASE); 83 83 const char *name;
+1 -1
arch/mips/ralink/rt305x.c
··· 214 214 panic("Failed to remap core resources"); 215 215 } 216 216 217 - void prom_soc_init(struct ralink_soc_info *soc_info) 217 + void __init prom_soc_init(struct ralink_soc_info *soc_info) 218 218 { 219 219 void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE); 220 220 unsigned char *name;
+1 -1
arch/mips/ralink/rt3883.c
··· 113 113 panic("Failed to remap core resources"); 114 114 } 115 115 116 - void prom_soc_init(struct ralink_soc_info *soc_info) 116 + void __init prom_soc_init(struct ralink_soc_info *soc_info) 117 117 { 118 118 void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT3883_SYSC_BASE); 119 119 const char *name;
-19
arch/mips/sgi-ip27/TODO
··· 1 - 1. Need to figure out why PCI writes to the IOC3 hang, and if it is okay 2 - not to write to the IOC3 ever. 3 - 2. Need to figure out RRB allocation in bridge_startup(). 4 - 3. Need to figure out why address swaizzling is needed in inw/outw for 5 - Qlogic scsi controllers. 6 - 4. Need to integrate ip27-klconfig.c:find_lboard and 7 - ip27-init.c:find_lbaord_real. DONE 8 - 5. Is it okay to set calias space on all nodes as 0, instead of 8k as 9 - in irix? 10 - 6. Investigate why things do not work without the setup_test() call 11 - being invoked on all nodes in ip27-memory.c. 12 - 8. Too many do_page_faults invoked - investigate. 13 - 9. start_thread must turn off UX64 ... and define tlb_refill_debug. 14 - 10. Need a bad pmd table, bad pte table. __bad_pmd_table/__bad_pagetable 15 - does not agree with pgd_bad/pmd_bad. 16 - 11. All intrs (ip27_do_irq handlers) are targeted at cpu A on the node. 17 - This might need to change later. Only the timer intr is set up to be 18 - received on both Cpu A and B. (ip27_do_irq()/bridge_startup()) 19 - 13. Cache flushing (specially the SMP version) has to be investigated.
+2 -2
arch/mips/sgi-ip27/ip27-timer.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * Copytight (C) 1999, 2000, 05, 06 Ralf Baechle (ralf@linux-mips.org) 4 - * Copytight (C) 1999, 2000 Silicon Graphics, Inc. 3 + * Copyright (C) 1999, 2000, 05, 06 Ralf Baechle (ralf@linux-mips.org) 4 + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 5 5 */ 6 6 #include <linux/bcd.h> 7 7 #include <linux/clockchips.h>
+2 -2
arch/mips/vdso/Makefile
··· 46 46 CFLAGS_vgettimeofday-n32.o = -include $(srctree)/$(src)/config-n32-o32-env.c -include $(c-gettimeofday-y) 47 47 endif 48 48 49 - CFLAGS_REMOVE_vgettimeofday.o = -pg 49 + CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) 50 50 51 51 ifdef CONFIG_MIPS_DISABLE_VDSO 52 52 ifndef CONFIG_MIPS_LD_CAN_LINK_VDSO ··· 60 60 $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \ 61 61 -G 0 --eh-frame-hdr --hash-style=sysv --build-id=sha1 -T 62 62 63 - CFLAGS_REMOVE_vdso.o = -pg 63 + CFLAGS_REMOVE_vdso.o = $(CC_FLAGS_FTRACE) 64 64 65 65 GCOV_PROFILE := n 66 66 UBSAN_SANITIZE := n
+51 -49
drivers/firmware/broadcom/bcm47xx_nvram.c
··· 34 34 static size_t nvram_len; 35 35 static const u32 nvram_sizes[] = {0x6000, 0x8000, 0xF000, 0x10000}; 36 36 37 - static u32 find_nvram_size(void __iomem *end) 37 + /** 38 + * bcm47xx_nvram_is_valid - check for a valid NVRAM at specified memory 39 + */ 40 + static bool bcm47xx_nvram_is_valid(void __iomem *nvram) 38 41 { 39 - struct nvram_header __iomem *header; 40 - int i; 41 - 42 - for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) { 43 - header = (struct nvram_header *)(end - nvram_sizes[i]); 44 - if (header->magic == NVRAM_MAGIC) 45 - return nvram_sizes[i]; 46 - } 47 - 48 - return 0; 42 + return ((struct nvram_header *)nvram)->magic == NVRAM_MAGIC; 49 43 } 50 44 51 - /* Probe for NVRAM header */ 52 - static int nvram_find_and_copy(void __iomem *iobase, u32 lim) 45 + /** 46 + * bcm47xx_nvram_copy - copy NVRAM to internal buffer 47 + */ 48 + static void bcm47xx_nvram_copy(void __iomem *nvram_start, size_t res_size) 53 49 { 54 - struct nvram_header __iomem *header; 55 - u32 off; 56 - u32 size; 50 + struct nvram_header __iomem *header = nvram_start; 51 + size_t copy_size; 52 + 53 + copy_size = header->len; 54 + if (copy_size > res_size) { 55 + pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n"); 56 + copy_size = res_size; 57 + } 58 + if (copy_size >= NVRAM_SPACE) { 59 + pr_err("nvram on flash (%zu bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n", 60 + copy_size, NVRAM_SPACE - 1); 61 + copy_size = NVRAM_SPACE - 1; 62 + } 63 + 64 + __ioread32_copy(nvram_buf, nvram_start, DIV_ROUND_UP(copy_size, 4)); 65 + nvram_buf[NVRAM_SPACE - 1] = '\0'; 66 + nvram_len = copy_size; 67 + } 68 + 69 + /** 70 + * bcm47xx_nvram_find_and_copy - find NVRAM on flash mapping & copy it 71 + */ 72 + static int bcm47xx_nvram_find_and_copy(void __iomem *flash_start, size_t res_size) 73 + { 74 + size_t flash_size; 75 + size_t offset; 76 + int i; 57 77 58 78 if (nvram_len) { 59 79 pr_warn("nvram already 
initialized\n"); ··· 81 61 } 82 62 83 63 /* TODO: when nvram is on nand flash check for bad blocks first. */ 84 - off = FLASH_MIN; 85 - while (off <= lim) { 86 - /* Windowed flash access */ 87 - size = find_nvram_size(iobase + off); 88 - if (size) { 89 - header = (struct nvram_header *)(iobase + off - size); 90 - goto found; 64 + 65 + /* Try every possible flash size and check for NVRAM at its end */ 66 + for (flash_size = FLASH_MIN; flash_size <= res_size; flash_size <<= 1) { 67 + for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) { 68 + offset = flash_size - nvram_sizes[i]; 69 + if (bcm47xx_nvram_is_valid(flash_start + offset)) 70 + goto found; 91 71 } 92 - off <<= 1; 93 72 } 94 73 95 74 /* Try embedded NVRAM at 4 KB and 1 KB as last resorts */ 96 - header = (struct nvram_header *)(iobase + 4096); 97 - if (header->magic == NVRAM_MAGIC) { 98 - size = NVRAM_SPACE; 99 - goto found; 100 - } 101 75 102 - header = (struct nvram_header *)(iobase + 1024); 103 - if (header->magic == NVRAM_MAGIC) { 104 - size = NVRAM_SPACE; 76 + offset = 4096; 77 + if (bcm47xx_nvram_is_valid(flash_start + offset)) 105 78 goto found; 106 - } 79 + 80 + offset = 1024; 81 + if (bcm47xx_nvram_is_valid(flash_start + offset)) 82 + goto found; 107 83 108 84 pr_err("no nvram found\n"); 109 85 return -ENXIO; 110 86 111 87 found: 112 - __ioread32_copy(nvram_buf, header, sizeof(*header) / 4); 113 - nvram_len = ((struct nvram_header *)(nvram_buf))->len; 114 - if (nvram_len > size) { 115 - pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n"); 116 - nvram_len = size; 117 - } 118 - if (nvram_len >= NVRAM_SPACE) { 119 - pr_err("nvram on flash (%zu bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n", 120 - nvram_len, NVRAM_SPACE - 1); 121 - nvram_len = NVRAM_SPACE - 1; 122 - } 123 - /* proceed reading data after header */ 124 - __ioread32_copy(nvram_buf + sizeof(*header), header + 1, 125 - DIV_ROUND_UP(nvram_len, 4)); 126 - 
nvram_buf[NVRAM_SPACE - 1] = '\0'; 88 + bcm47xx_nvram_copy(flash_start + offset, res_size - offset); 127 89 128 90 return 0; 129 91 } ··· 126 124 if (!iobase) 127 125 return -ENOMEM; 128 126 129 - err = nvram_find_and_copy(iobase, lim); 127 + err = bcm47xx_nvram_find_and_copy(iobase, lim); 130 128 131 129 iounmap(iobase); 132 130
+50 -10
drivers/irqchip/irq-loongson-liointc.c
··· 16 16 #include <linux/smp.h> 17 17 #include <linux/irqchip/chained_irq.h> 18 18 19 - #include <boot_param.h> 19 + #include <loongson.h> 20 20 21 21 #define LIOINTC_CHIP_IRQ 32 22 22 #define LIOINTC_NUM_PARENT 4 23 + #define LIOINTC_NUM_CORES 4 23 24 24 25 #define LIOINTC_INTC_CHIP_START 0x20 25 26 ··· 43 42 struct liointc_priv { 44 43 struct irq_chip_generic *gc; 45 44 struct liointc_handler_data handler[LIOINTC_NUM_PARENT]; 45 + void __iomem *core_isr[LIOINTC_NUM_CORES]; 46 46 u8 map_cache[LIOINTC_CHIP_IRQ]; 47 47 bool has_lpc_irq_errata; 48 48 }; ··· 53 51 struct liointc_handler_data *handler = irq_desc_get_handler_data(desc); 54 52 struct irq_chip *chip = irq_desc_get_chip(desc); 55 53 struct irq_chip_generic *gc = handler->priv->gc; 54 + int core = get_ebase_cpunum() % LIOINTC_NUM_CORES; 56 55 u32 pending; 57 56 58 57 chained_irq_enter(chip, desc); 59 58 60 - pending = readl(gc->reg_base + LIOINTC_REG_INTC_STATUS); 59 + pending = readl(handler->priv->core_isr[core]); 61 60 62 61 if (!pending) { 63 62 /* Always blame LPC IRQ if we have that bug */ ··· 144 141 } 145 142 146 143 static const char * const parent_names[] = {"int0", "int1", "int2", "int3"}; 144 + static const char * const core_reg_names[] = {"isr0", "isr1", "isr2", "isr3"}; 145 + 146 + static void __iomem *liointc_get_reg_byname(struct device_node *node, 147 + const char *name) 148 + { 149 + int index = of_property_match_string(node, "reg-names", name); 150 + 151 + if (index < 0) 152 + return NULL; 153 + 154 + return of_iomap(node, index); 155 + } 147 156 148 157 static int __init liointc_of_init(struct device_node *node, 149 158 struct device_node *parent) ··· 174 159 if (!priv) 175 160 return -ENOMEM; 176 161 177 - base = of_iomap(node, 0); 178 - if (!base) { 179 - err = -ENODEV; 180 - goto out_free_priv; 162 + if (of_device_is_compatible(node, "loongson,liointc-2.0")) { 163 + base = liointc_get_reg_byname(node, "main"); 164 + if (!base) { 165 + err = -ENODEV; 166 + goto out_free_priv; 167 + } 
168 + 169 + for (i = 0; i < LIOINTC_NUM_CORES; i++) 170 + priv->core_isr[i] = liointc_get_reg_byname(node, core_reg_names[i]); 171 + if (!priv->core_isr[0]) { 172 + err = -ENODEV; 173 + goto out_iounmap_base; 174 + } 175 + } else { 176 + base = of_iomap(node, 0); 177 + if (!base) { 178 + err = -ENODEV; 179 + goto out_free_priv; 180 + } 181 + 182 + for (i = 0; i < LIOINTC_NUM_CORES; i++) 183 + priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS; 181 184 } 182 185 183 186 for (i = 0; i < LIOINTC_NUM_PARENT; i++) { ··· 205 172 } 206 173 if (!have_parent) { 207 174 err = -ENODEV; 208 - goto out_iounmap; 175 + goto out_iounmap_isr; 209 176 } 210 177 211 178 sz = of_property_read_variable_u32_array(node, ··· 216 183 if (sz < 4) { 217 184 pr_err("loongson-liointc: No parent_int_map\n"); 218 185 err = -ENODEV; 219 - goto out_iounmap; 186 + goto out_iounmap_isr; 220 187 } 221 188 222 189 for (i = 0; i < LIOINTC_NUM_PARENT; i++) ··· 228 195 if (!domain) { 229 196 pr_err("loongson-liointc: cannot add IRQ domain\n"); 230 197 err = -EINVAL; 231 - goto out_iounmap; 198 + goto out_iounmap_isr; 232 199 } 233 200 234 201 err = irq_alloc_domain_generic_chips(domain, 32, 1, ··· 293 260 294 261 out_free_domain: 295 262 irq_domain_remove(domain); 296 - out_iounmap: 263 + out_iounmap_isr: 264 + for (i = 0; i < LIOINTC_NUM_CORES; i++) { 265 + if (!priv->core_isr[i]) 266 + continue; 267 + iounmap(priv->core_isr[i]); 268 + } 269 + out_iounmap_base: 297 270 iounmap(base); 298 271 out_free_priv: 299 272 kfree(priv); ··· 309 270 310 271 IRQCHIP_DECLARE(loongson_liointc_1_0, "loongson,liointc-1.0", liointc_of_init); 311 272 IRQCHIP_DECLARE(loongson_liointc_1_0a, "loongson,liointc-1.0a", liointc_of_init); 273 + IRQCHIP_DECLARE(loongson_liointc_2_0, "loongson,liointc-2.0", liointc_of_init);
+6 -4
include/asm-generic/div64.h
··· 8 8 * Optimization for constant divisors on 32-bit machines: 9 9 * Copyright (C) 2006-2015 Nicolas Pitre 10 10 * 11 - * The semantics of do_div() are: 11 + * The semantics of do_div() is, in C++ notation, observing that the name 12 + * is a function-like macro and the n parameter has the semantics of a C++ 13 + * reference: 12 14 * 13 - * uint32_t do_div(uint64_t *n, uint32_t base) 15 + * uint32_t do_div(uint64_t &n, uint32_t base) 14 16 * { 15 - * uint32_t remainder = *n % base; 16 - * *n = *n / base; 17 + * uint32_t remainder = n % base; 18 + * n = n / base; 17 19 * return remainder; 18 20 * } 19 21 *
+10
lib/Kconfig.debug
··· 2066 2066 2067 2067 If unsure, say N. 2068 2068 2069 + config TEST_DIV64 2070 + tristate "64bit/32bit division and modulo test" 2071 + depends on DEBUG_KERNEL || m 2072 + help 2073 + Enable this to turn on 'do_div()' function test. This test is 2074 + executed only once during system boot (so affects only boot time), 2075 + or at module load time. 2076 + 2077 + If unsure, say N. 2078 + 2069 2079 config KPROBES_SANITY_TEST 2070 2080 bool "Kprobes sanity tests" 2071 2081 depends on DEBUG_KERNEL
+2
lib/math/Makefile
··· 4 4 obj-$(CONFIG_CORDIC) += cordic.o 5 5 obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o 6 6 obj-$(CONFIG_RATIONAL) += rational.o 7 + 8 + obj-$(CONFIG_TEST_DIV64) += test_div64.o
+249
lib/math/test_div64.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2021 Maciej W. Rozycki 4 + */ 5 + 6 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 + 8 + #include <linux/init.h> 9 + #include <linux/ktime.h> 10 + #include <linux/module.h> 11 + #include <linux/printk.h> 12 + #include <linux/time64.h> 13 + #include <linux/types.h> 14 + 15 + #include <asm/div64.h> 16 + 17 + #define TEST_DIV64_N_ITER 1024 18 + 19 + static const u64 test_div64_dividends[] = { 20 + 0x00000000ab275080, 21 + 0x0000000fe73c1959, 22 + 0x000000e54c0a74b1, 23 + 0x00000d4398ff1ef9, 24 + 0x0000a18c2ee1c097, 25 + 0x00079fb80b072e4a, 26 + 0x0072db27380dd689, 27 + 0x0842f488162e2284, 28 + 0xf66745411d8ab063, 29 + }; 30 + #define SIZE_DIV64_DIVIDENDS ARRAY_SIZE(test_div64_dividends) 31 + 32 + #define TEST_DIV64_DIVISOR_0 0x00000009 33 + #define TEST_DIV64_DIVISOR_1 0x0000007c 34 + #define TEST_DIV64_DIVISOR_2 0x00000204 35 + #define TEST_DIV64_DIVISOR_3 0x0000cb5b 36 + #define TEST_DIV64_DIVISOR_4 0x00010000 37 + #define TEST_DIV64_DIVISOR_5 0x0008a880 38 + #define TEST_DIV64_DIVISOR_6 0x003fd3ae 39 + #define TEST_DIV64_DIVISOR_7 0x0b658fac 40 + #define TEST_DIV64_DIVISOR_8 0xdc08b349 41 + 42 + static const u32 test_div64_divisors[] = { 43 + TEST_DIV64_DIVISOR_0, 44 + TEST_DIV64_DIVISOR_1, 45 + TEST_DIV64_DIVISOR_2, 46 + TEST_DIV64_DIVISOR_3, 47 + TEST_DIV64_DIVISOR_4, 48 + TEST_DIV64_DIVISOR_5, 49 + TEST_DIV64_DIVISOR_6, 50 + TEST_DIV64_DIVISOR_7, 51 + TEST_DIV64_DIVISOR_8, 52 + }; 53 + #define SIZE_DIV64_DIVISORS ARRAY_SIZE(test_div64_divisors) 54 + 55 + static const struct { 56 + u64 quotient; 57 + u32 remainder; 58 + } test_div64_results[SIZE_DIV64_DIVISORS][SIZE_DIV64_DIVIDENDS] = { 59 + { 60 + { 0x0000000013045e47, 0x00000001 }, 61 + { 0x000000000161596c, 0x00000030 }, 62 + { 0x000000000054e9d4, 0x00000130 }, 63 + { 0x000000000000d776, 0x0000278e }, 64 + { 0x000000000000ab27, 0x00005080 }, 65 + { 0x00000000000013c4, 0x0004ce80 }, 66 + { 0x00000000000002ae, 0x001e143c }, 67 + { 
0x000000000000000f, 0x0033e56c }, 68 + { 0x0000000000000000, 0xab275080 }, 69 + }, { 70 + { 0x00000001c45c02d1, 0x00000000 }, 71 + { 0x0000000020d5213c, 0x00000049 }, 72 + { 0x0000000007e3d65f, 0x000001dd }, 73 + { 0x0000000000140531, 0x000065ee }, 74 + { 0x00000000000fe73c, 0x00001959 }, 75 + { 0x000000000001d637, 0x0004e5d9 }, 76 + { 0x0000000000003fc9, 0x000713bb }, 77 + { 0x0000000000000165, 0x029abe7d }, 78 + { 0x0000000000000012, 0x6e9f7e37 }, 79 + }, { 80 + { 0x000000197a3a0cf7, 0x00000002 }, 81 + { 0x00000001d9632e5c, 0x00000021 }, 82 + { 0x0000000071c28039, 0x000001cd }, 83 + { 0x000000000120a844, 0x0000b885 }, 84 + { 0x0000000000e54c0a, 0x000074b1 }, 85 + { 0x00000000001a7bb3, 0x00072331 }, 86 + { 0x00000000000397ad, 0x0002c61b }, 87 + { 0x000000000000141e, 0x06ea2e89 }, 88 + { 0x000000000000010a, 0xab002ad7 }, 89 + }, { 90 + { 0x0000017949e37538, 0x00000001 }, 91 + { 0x0000001b62441f37, 0x00000055 }, 92 + { 0x0000000694a3391d, 0x00000085 }, 93 + { 0x0000000010b2a5d2, 0x0000a753 }, 94 + { 0x000000000d4398ff, 0x00001ef9 }, 95 + { 0x0000000001882ec6, 0x0005cbf9 }, 96 + { 0x000000000035333b, 0x0017abdf }, 97 + { 0x00000000000129f1, 0x0ab4520d }, 98 + { 0x0000000000000f6e, 0x8ac0ce9b }, 99 + }, { 100 + { 0x000011f321a74e49, 0x00000006 }, 101 + { 0x0000014d8481d211, 0x0000005b }, 102 + { 0x0000005025cbd92d, 0x000001e3 }, 103 + { 0x00000000cb5e71e3, 0x000043e6 }, 104 + { 0x00000000a18c2ee1, 0x0000c097 }, 105 + { 0x0000000012a88828, 0x00036c97 }, 106 + { 0x000000000287f16f, 0x002c2a25 }, 107 + { 0x00000000000e2cc7, 0x02d581e3 }, 108 + { 0x000000000000bbf4, 0x1ba08c03 }, 109 + }, { 110 + { 0x0000d8db8f72935d, 0x00000005 }, 111 + { 0x00000fbd5aed7a2e, 0x00000002 }, 112 + { 0x000003c84b6ea64a, 0x00000122 }, 113 + { 0x0000000998fa8829, 0x000044b7 }, 114 + { 0x000000079fb80b07, 0x00002e4a }, 115 + { 0x00000000e16b20fa, 0x0002a14a }, 116 + { 0x000000001e940d22, 0x00353b2e }, 117 + { 0x0000000000ab40ac, 0x06fba6ba }, 118 + { 0x000000000008debd, 0x72d98365 }, 119 + }, { 
120 + { 0x000cc3045b8fc281, 0x00000000 }, 121 + { 0x0000ed1f48b5c9fc, 0x00000079 }, 122 + { 0x000038fb9c63406a, 0x000000e1 }, 123 + { 0x000000909705b825, 0x00000a62 }, 124 + { 0x00000072db27380d, 0x0000d689 }, 125 + { 0x0000000d43fce827, 0x00082b09 }, 126 + { 0x00000001ccaba11a, 0x0037e8dd }, 127 + { 0x000000000a13f729, 0x0566dffd }, 128 + { 0x000000000085a14b, 0x23d36726 }, 129 + }, { 130 + { 0x00eafeb9c993592b, 0x00000001 }, 131 + { 0x00110e5befa9a991, 0x00000048 }, 132 + { 0x00041947b4a1d36a, 0x000000dc }, 133 + { 0x00000a6679327311, 0x0000c079 }, 134 + { 0x00000842f488162e, 0x00002284 }, 135 + { 0x000000f4459740fc, 0x00084484 }, 136 + { 0x0000002122c47bf9, 0x002ca446 }, 137 + { 0x00000000b9936290, 0x004979c4 }, 138 + { 0x00000000099ca89d, 0x9db446bf }, 139 + }, { 140 + { 0x1b60cece589da1d2, 0x00000001 }, 141 + { 0x01fcb42be1453f5b, 0x0000004f }, 142 + { 0x007a3f2457df0749, 0x0000013f }, 143 + { 0x0001363130e3ec7b, 0x000017aa }, 144 + { 0x0000f66745411d8a, 0x0000b063 }, 145 + { 0x00001c757dfab350, 0x00048863 }, 146 + { 0x000003dc4979c652, 0x00224ea7 }, 147 + { 0x000000159edc3144, 0x06409ab3 }, 148 + { 0x000000011eadfee3, 0xa99c48a8 }, 149 + }, 150 + }; 151 + 152 + static inline bool test_div64_verify(u64 quotient, u32 remainder, int i, int j) 153 + { 154 + return (quotient == test_div64_results[i][j].quotient && 155 + remainder == test_div64_results[i][j].remainder); 156 + } 157 + 158 + /* 159 + * This needs to be a macro, because we don't want to rely on the compiler 160 + * to do constant propagation, and `do_div' may take a different path for 161 + * constants, so we do want to verify that as well. 
162 + */ 163 + #define test_div64_one(dividend, divisor, i, j) ({ \ 164 + bool result = true; \ 165 + u64 quotient; \ 166 + u32 remainder; \ 167 + \ 168 + quotient = dividend; \ 169 + remainder = do_div(quotient, divisor); \ 170 + if (!test_div64_verify(quotient, remainder, i, j)) { \ 171 + pr_err("ERROR: %016llx / %08x => %016llx,%08x\n", \ 172 + dividend, divisor, quotient, remainder); \ 173 + pr_err("ERROR: expected value => %016llx,%08x\n",\ 174 + test_div64_results[i][j].quotient, \ 175 + test_div64_results[i][j].remainder); \ 176 + result = false; \ 177 + } \ 178 + result; \ 179 + }) 180 + 181 + /* 182 + * Run calculation for the same divisor value expressed as a constant 183 + * and as a variable, so as to verify the implementation for both cases 184 + * should they be handled by different code execution paths. 185 + */ 186 + static bool __init test_div64(void) 187 + { 188 + u64 dividend; 189 + int i, j; 190 + 191 + for (i = 0; i < SIZE_DIV64_DIVIDENDS; i++) { 192 + dividend = test_div64_dividends[i]; 193 + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_0, i, 0)) 194 + return false; 195 + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_1, i, 1)) 196 + return false; 197 + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_2, i, 2)) 198 + return false; 199 + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_3, i, 3)) 200 + return false; 201 + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_4, i, 4)) 202 + return false; 203 + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_5, i, 5)) 204 + return false; 205 + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_6, i, 6)) 206 + return false; 207 + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_7, i, 7)) 208 + return false; 209 + if (!test_div64_one(dividend, TEST_DIV64_DIVISOR_8, i, 8)) 210 + return false; 211 + for (j = 0; j < SIZE_DIV64_DIVISORS; j++) { 212 + if (!test_div64_one(dividend, test_div64_divisors[j], 213 + i, j)) 214 + return false; 215 + } 216 + } 217 + return true; 218 + } 219 + 220 + static int 
__init test_div64_init(void) 221 + { 222 + struct timespec64 ts, ts0, ts1; 223 + int i; 224 + 225 + pr_info("Starting 64bit/32bit division and modulo test\n"); 226 + ktime_get_ts64(&ts0); 227 + 228 + for (i = 0; i < TEST_DIV64_N_ITER; i++) 229 + if (!test_div64()) 230 + break; 231 + 232 + ktime_get_ts64(&ts1); 233 + ts = timespec64_sub(ts1, ts0); 234 + pr_info("Completed 64bit/32bit division and modulo test, " 235 + "%llu.%09lus elapsed\n", ts.tv_sec, ts.tv_nsec); 236 + 237 + return 0; 238 + } 239 + 240 + static void __exit test_div64_exit(void) 241 + { 242 + } 243 + 244 + module_init(test_div64_init); 245 + module_exit(test_div64_exit); 246 + 247 + MODULE_AUTHOR("Maciej W. Rozycki <macro@orcam.me.uk>"); 248 + MODULE_LICENSE("GPL"); 249 + MODULE_DESCRIPTION("64bit/32bit division and modulo test module");