Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS updates from Ralf Baechle:
"This is the main pull request for MIPS for 4.8. Also included is a
minor SSB cleanup as SSB code traditionally is merged through the MIPS
tree:

ATH25:
- MIPS: Add default configuration for ath25

Boot:
- For zboot, copy appended dtb to the end of the kernel
- store the appended dtb address in a variable

BPF:
- Fix off by one error in offset allocation

Cobalt code:
- Fix typos

Core code:
- debugfs_create_file returns NULL on error, so don't use IS_ERR for
testing for errors.
- Fix double locking issue in RM7000 S-cache code. This would only
affect RM7000 ARC systems on reboot.
- Fix page table corruption on THP permission changes.
- Use compat_sys_keyctl for 32 bit userspace on 64 bit kernels.
David says, there are no compatibility issues raised by this fix.
- Move some signal code around.
- Rewrite r4k count/compare clockevent device registration such that
min_delta_ticks/max_delta_ticks files are guaranteed to be
initialized.
- Only register r4k count/compare as clockevent device if we can
assume the clock to be constant.
- Fix MSA asm warnings in control reg accessors
- uasm and tlbex fixes and tweaking.
- Print segment physical address when EU=1.
- Define AT_VECTOR_SIZE_ARCH for ARCH_DLINFO.
- CP: Allow booting by VP other than VP 0
- Cache handling fixes and optimizations for r4k class caches
- Add hotplug support for R6 processors
- Cleanup hotplug bits in kconfig
- traps: return correct si code for accessing nonmapped addresses
- Remove cpu_has_safe_index_cacheops

Lantiq:
- Register IRQ handler for virtual IRQ number
- Fix EIU interrupt loading code
- Use the real EXIN count
- Fix build error.

Loongson 3:
- Increase HPET_MIN_PROG_DELTA and decrease HPET_MIN_CYCLES

Octeon:
- Delete built-in DTB pruning code for D-Link DSR-1000N.
- Clean up GPIO definitions in dlink_dsr-1000n.dts.
- Add more LEDs to the DSR-1000N DTS
- Fix off by one in octeon_irq_gpio_map()
- Typo fixes
- Enable SATA by default in cavium_octeon_defconfig
- Support readq/writeq()
- Remove forced mappings of USB interrupts.
- Ensure DMA descriptors are always in the low 4GB
- Improve USB reset code for OCTEON II.

Pistachio:
- Add maintainers entry for pistachio SoC Support
- Remove plat_setup_iocoherency

Ralink:
- Fix pwm UART in spis group pinmux.

SSB:
- Change bare unsigned to unsigned int to suit coding style

Tools:
- Fix reloc tool compiler warnings.

Other:
- Delete use of ARCH_WANT_OPTIONAL_GPIOLIB"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (61 commits)
MIPS: mm: Fix definition of R6 cache instruction
MIPS: tools: Fix relocs tool compiler warnings
MIPS: Cobalt: Fix typo
MIPS: Octeon: Fix typo
MIPS: Lantiq: Fix build failure
MIPS: Use CPHYSADDR to implement mips32 __pa
MIPS: Octeon: Dlink_dsr-1000n.dts: add more leds.
MIPS: Octeon: Clean up GPIO definitions in dlink_dsr-1000n.dts.
MIPS: Octeon: Delete built-in DTB pruning code for D-Link DSR-1000N.
MIPS: store the appended dtb address in a variable
MIPS: ZBOOT: copy appended dtb to the end of the kernel
MIPS: ralink: fix spis group pinmux
MIPS: Factor o32 specific code into signal_o32.c
MIPS: non-exec stack & heap when non-exec PT_GNU_STACK is present
MIPS: Use per-mm page to execute branch delay slot instructions
MIPS: Modify error handling
MIPS: c-r4k: Use SMP calls for CM indexed cache ops
MIPS: c-r4k: Avoid small flush_icache_range SMP calls
MIPS: c-r4k: Local flush_icache_range cache op override
MIPS: c-r4k: Split r4k_flush_kernel_vmap_range()
...

+1418 -806
+10
MAINTAINERS
··· 9252 9252 S: Maintained 9253 9253 F: drivers/pinctrl/spear/ 9254 9254 9255 + PISTACHIO SOC SUPPORT 9256 + M: James Hartley <james.hartley@imgtec.com> 9257 + M: Ionela Voinescu <ionela.voinescu@imgtec.com> 9258 + L: linux-mips@linux-mips.org 9259 + S: Maintained 9260 + F: arch/mips/pistachio/ 9261 + F: arch/mips/include/asm/mach-pistachio/ 9262 + F: arch/mips/boot/dts/pistachio/ 9263 + F: arch/mips/configs/pistachio*_defconfig 9264 + 9255 9265 PKTCDVD DRIVER 9256 9266 M: Jiri Kosina <jikos@kernel.org> 9257 9267 S: Maintained
+14 -33
arch/mips/Kconfig
··· 64 64 select GENERIC_TIME_VSYSCALL 65 65 select ARCH_CLOCKSOURCE_DATA 66 66 select HANDLE_DOMAIN_IRQ 67 + select HAVE_EXIT_THREAD 67 68 68 69 menu "Machine selection" 69 70 ··· 385 384 select CLKSRC_MIPS_GIC 386 385 select COMMON_CLK 387 386 select CSRC_R4K 388 - select DMA_MAYBE_COHERENT 387 + select DMA_NONCOHERENT 389 388 select GPIOLIB 390 389 select IRQ_MIPS_CPU 391 390 select LIBFDT ··· 881 880 select SYS_SUPPORTS_HOTPLUG_CPU if CPU_BIG_ENDIAN 882 881 select SYS_HAS_EARLY_PRINTK 883 882 select SYS_HAS_CPU_CAVIUM_OCTEON 884 - select SWAP_IO_SPACE 885 883 select HW_HAS_PCI 886 884 select ZONE_DMA32 887 885 select HOLES_IN_ZONE ··· 1110 1110 1111 1111 config SYS_HAS_EARLY_PRINTK 1112 1112 bool 1113 - 1114 - config HOTPLUG_CPU 1115 - bool "Support for hot-pluggable CPUs" 1116 - depends on SMP && SYS_SUPPORTS_HOTPLUG_CPU 1117 - help 1118 - Say Y here to allow turning CPUs off and on. CPUs can be 1119 - controlled through /sys/devices/system/cpu. 1120 - (Note: power management support will enable this option 1121 - automatically on SMP systems. ) 1122 - Say N if you want to disable CPU hotplug. 1123 1113 1124 1114 config SYS_SUPPORTS_HOTPLUG_CPU 1125 1115 bool ··· 1396 1406 bool "Loongson 1B" 1397 1407 depends on SYS_HAS_CPU_LOONGSON1B 1398 1408 select CPU_LOONGSON1 1399 - select ARCH_WANT_OPTIONAL_GPIOLIB 1400 1409 select LEDS_GPIO_REGISTER 1401 1410 help 1402 1411 The Loongson 1B is a 32-bit SoC, which implements the MIPS32 ··· 2625 2636 2626 2637 If you don't know what to do here, say N. 2627 2638 2639 + config HOTPLUG_CPU 2640 + bool "Support for hot-pluggable CPUs" 2641 + depends on SMP && SYS_SUPPORTS_HOTPLUG_CPU 2642 + help 2643 + Say Y here to allow turning CPUs off and on. CPUs can be 2644 + controlled through /sys/devices/system/cpu. 2645 + (Note: power management support will enable this option 2646 + automatically on SMP systems. ) 2647 + Say N if you want to disable CPU hotplug. 
2648 + 2628 2649 config SMP_UP 2629 2650 bool 2630 2651 ··· 2886 2887 the documented boot protocol using a device tree. 2887 2888 2888 2889 config MIPS_RAW_APPENDED_DTB 2889 - bool "vmlinux.bin" 2890 + bool "vmlinux.bin or vmlinuz.bin" 2890 2891 help 2891 2892 With this option, the boot code will look for a device tree binary 2892 - DTB) appended to raw vmlinux.bin (without decompressor). 2893 + DTB) appended to raw vmlinux.bin or vmlinuz.bin. 2893 2894 (e.g. cat vmlinux.bin <filename>.dtb > vmlinux_w_dtb). 2894 2895 2895 2896 This is meant as a backward compatibility convenience for those ··· 2900 2901 this option being confused by leftover garbage in memory that might 2901 2902 look like a DTB header after a reboot if no actual DTB is appended 2902 2903 to vmlinux.bin. Do not leave this option active in a production kernel 2903 - if you don't intend to always append a DTB. 2904 - 2905 - config MIPS_ZBOOT_APPENDED_DTB 2906 - bool "vmlinuz.bin" 2907 - depends on SYS_SUPPORTS_ZBOOT 2908 - help 2909 - With this option, the boot code will look for a device tree binary 2910 - DTB) appended to raw vmlinuz.bin (with decompressor). 2911 - (e.g. cat vmlinuz.bin <filename>.dtb > vmlinuz_w_dtb). 2912 - 2913 - This is meant as a backward compatibility convenience for those 2914 - systems with a bootloader that can't be upgraded to accommodate 2915 - the documented boot protocol using a device tree. 2916 - 2917 - Beware that there is very little in terms of protection against 2918 - this option being confused by leftover garbage in memory that might 2919 - look like a DTB header after a reboot if no actual DTB is appended 2920 - to vmlinuz.bin. Do not leave this option active in a production kernel 2921 2904 if you don't intend to always append a DTB. 2922 2905 endchoice 2923 2906
+2 -2
arch/mips/ath79/setup.c
··· 203 203 fdt_start = fw_getenvl("fdt_start"); 204 204 if (fdt_start) 205 205 __dt_setup_arch((void *)KSEG0ADDR(fdt_start)); 206 - else if (fw_arg0 == -2) 207 - __dt_setup_arch((void *)KSEG0ADDR(fw_arg1)); 206 + else if (fw_passed_dtb) 207 + __dt_setup_arch((void *)KSEG0ADDR(fw_passed_dtb)); 208 208 209 209 if (mips_machtype != ATH79_MACH_GENERIC_OF) { 210 210 ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE,
+2 -2
arch/mips/bmips/setup.c
··· 162 162 /* intended to somewhat resemble ARM; see Documentation/arm/Booting */ 163 163 if (fw_arg0 == 0 && fw_arg1 == 0xffffffff) 164 164 dtb = phys_to_virt(fw_arg2); 165 - else if (fw_arg0 == -2) /* UHI interface */ 166 - dtb = (void *)fw_arg1; 165 + else if (fw_passed_dtb) /* UHI interface */ 166 + dtb = (void *)fw_passed_dtb; 167 167 else if (__dtb_start != __dtb_end) 168 168 dtb = (void *)__dtb_start; 169 169 else
+17
arch/mips/boot/compressed/decompress.c
··· 14 14 #include <linux/types.h> 15 15 #include <linux/kernel.h> 16 16 #include <linux/string.h> 17 + #include <linux/libfdt.h> 17 18 18 19 #include <asm/addrspace.h> 19 20 ··· 36 35 #define puts(s) do {} while (0) 37 36 #define puthex(val) do {} while (0) 38 37 #endif 38 + 39 + extern char __appended_dtb[]; 39 40 40 41 void error(char *x) 41 42 { ··· 116 113 /* Decompress the kernel with according algorithm */ 117 114 __decompress((char *)zimage_start, zimage_size, 0, 0, 118 115 (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error); 116 + 117 + if (IS_ENABLED(CONFIG_MIPS_RAW_APPENDED_DTB) && 118 + fdt_magic((void *)&__appended_dtb) == FDT_MAGIC) { 119 + unsigned int image_size, dtb_size; 120 + 121 + dtb_size = fdt_totalsize((void *)&__appended_dtb); 122 + 123 + /* last four bytes is always image size in little endian */ 124 + image_size = le32_to_cpup((void *)&__image_end - 4); 125 + 126 + /* copy dtb to where the booted kernel will expect it */ 127 + memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size, 128 + __appended_dtb, dtb_size); 129 + } 119 130 120 131 /* FIXME: should we flush cache here? */ 121 132 puts("Now, booting the kernel...\n");
-16
arch/mips/boot/compressed/head.S
··· 25 25 move s2, a2 26 26 move s3, a3 27 27 28 - #ifdef CONFIG_MIPS_ZBOOT_APPENDED_DTB 29 - PTR_LA t0, __appended_dtb 30 - #ifdef CONFIG_CPU_BIG_ENDIAN 31 - li t1, 0xd00dfeed 32 - #else 33 - li t1, 0xedfe0dd0 34 - #endif 35 - lw t2, (t0) 36 - bne t1, t2, not_found 37 - nop 38 - 39 - move s1, t0 40 - PTR_LI s0, -2 41 - not_found: 42 - #endif 43 - 44 28 /* Clear BSS */ 45 29 PTR_LA a0, _edata 46 30 PTR_LA a2, _end
-12
arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
··· 388 388 usbn = &usbn; 389 389 led0 = &led0; 390 390 }; 391 - 392 - dsr1000n-leds { 393 - compatible = "gpio-leds"; 394 - usb1 { 395 - label = "usb1"; 396 - gpios = <&gpio 9 1>; /* Active low */ 397 - }; 398 - usb2 { 399 - label = "usb2"; 400 - gpios = <&gpio 10 1>; /* Active low */ 401 - }; 402 - }; 403 391 };
+11 -8
arch/mips/boot/tools/relocs_64.c
··· 9 9 10 10 typedef uint8_t Elf64_Byte; 11 11 12 - typedef struct { 13 - Elf64_Word r_sym; /* Symbol index. */ 14 - Elf64_Byte r_ssym; /* Special symbol. */ 15 - Elf64_Byte r_type3; /* Third relocation. */ 16 - Elf64_Byte r_type2; /* Second relocation. */ 17 - Elf64_Byte r_type; /* First relocation. */ 12 + typedef union { 13 + struct { 14 + Elf64_Word r_sym; /* Symbol index. */ 15 + Elf64_Byte r_ssym; /* Special symbol. */ 16 + Elf64_Byte r_type3; /* Third relocation. */ 17 + Elf64_Byte r_type2; /* Second relocation. */ 18 + Elf64_Byte r_type; /* First relocation. */ 19 + } fields; 20 + Elf64_Xword unused; 18 21 } Elf64_Mips_Rela; 19 22 20 23 #define ELF_CLASS ELFCLASS64 21 - #define ELF_R_SYM(val) (((Elf64_Mips_Rela *)(&val))->r_sym) 22 - #define ELF_R_TYPE(val) (((Elf64_Mips_Rela *)(&val))->r_type) 24 + #define ELF_R_SYM(val) (((Elf64_Mips_Rela *)(&val))->fields.r_sym) 25 + #define ELF_R_TYPE(val) (((Elf64_Mips_Rela *)(&val))->fields.r_type) 23 26 #define ELF_ST_TYPE(o) ELF64_ST_TYPE(o) 24 27 #define ELF_ST_BIND(o) ELF64_ST_BIND(o) 25 28 #define ELF_ST_VISIBILITY(o) ELF64_ST_VISIBILITY(o)
+1 -1
arch/mips/cavium-octeon/executive/cvmx-bootmem.c
··· 668 668 /* 669 669 * Round size up to mult of minimum alignment bytes We need 670 670 * the actual size allocated to allow for blocks to be 671 - * coallesced when they are freed. The alloc routine does the 671 + * coalesced when they are freed. The alloc routine does the 672 672 * same rounding up on all allocations. 673 673 */ 674 674 size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE);
-22
arch/mips/cavium-octeon/executive/cvmx-helper-board.c
··· 186 186 return 7 - ipd_port; 187 187 else 188 188 return -1; 189 - case CVMX_BOARD_TYPE_CUST_DSR1000N: 190 - /* 191 - * Port 2 connects to Broadcom PHY (B5081). Other ports (0-1) 192 - * connect to a switch (BCM53115). 193 - */ 194 - if (ipd_port == 2) 195 - return 8; 196 - else 197 - return -1; 198 189 case CVMX_BOARD_TYPE_KONTRON_S1901: 199 190 if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT) 200 191 return 1; ··· 278 287 result.s.full_duplex = 1; 279 288 result.s.speed = 1000; 280 289 return result; 281 - } 282 - break; 283 - case CVMX_BOARD_TYPE_CUST_DSR1000N: 284 - if (ipd_port == 0 || ipd_port == 1) { 285 - /* Ports 0 and 1 connect to a switch (BCM53115). */ 286 - result.s.link_up = 1; 287 - result.s.full_duplex = 1; 288 - result.s.speed = 1000; 289 - return result; 290 - } else { 291 - /* Port 2 uses a Broadcom PHY (B5081). */ 292 - is_broadcom_phy = 1; 293 290 } 294 291 break; 295 292 } ··· 744 765 case CVMX_BOARD_TYPE_LANAI2_G: 745 766 case CVMX_BOARD_TYPE_NIC10E_66: 746 767 case CVMX_BOARD_TYPE_UBNT_E100: 747 - case CVMX_BOARD_TYPE_CUST_DSR1000N: 748 768 return USB_CLOCK_TYPE_CRYSTAL_12; 749 769 case CVMX_BOARD_TYPE_NIC10E: 750 770 return USB_CLOCK_TYPE_REF_12;
+1 -13
arch/mips/cavium-octeon/octeon-irq.c
··· 1260 1260 1261 1261 line = (hw + gpiod->base_hwirq) >> 6; 1262 1262 bit = (hw + gpiod->base_hwirq) & 63; 1263 - if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) || 1263 + if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) || 1264 1264 octeon_irq_ciu_to_irq[line][bit] != 0) 1265 1265 return -EINVAL; 1266 1266 ··· 1542 1542 goto err; 1543 1543 } 1544 1544 1545 - r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); 1546 - if (r) 1547 - goto err; 1548 - 1549 1545 r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); 1550 1546 if (r) 1551 1547 goto err; ··· 1554 1558 if (r) 1555 1559 goto err; 1556 1560 } 1557 - 1558 - r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); 1559 - if (r) 1560 - goto err; 1561 1561 1562 1562 /* Enable the CIU lines */ 1563 1563 set_c0_status(STATUSF_IP3 | STATUSF_IP2); ··· 2068 2076 if (r) 2069 2077 goto err; 2070 2078 } 2071 - 2072 - r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); 2073 - if (r) 2074 - goto err; 2075 2079 2076 2080 for (i = 0; i < 4; i++) { 2077 2081 r = octeon_irq_force_ciu_mapping(
+69 -56
arch/mips/cavium-octeon/octeon-platform.c
··· 3 3 * License. See the file "COPYING" in the main directory of this archive 4 4 * for more details. 5 5 * 6 - * Copyright (C) 2004-2011 Cavium Networks 6 + * Copyright (C) 2004-2016 Cavium Networks 7 7 * Copyright (C) 2008 Wind River Systems 8 8 */ 9 9 10 - #include <linux/delay.h> 11 10 #include <linux/init.h> 12 - #include <linux/irq.h> 13 - #include <linux/i2c.h> 14 - #include <linux/usb.h> 15 - #include <linux/dma-mapping.h> 11 + #include <linux/delay.h> 16 12 #include <linux/etherdevice.h> 17 - #include <linux/module.h> 18 - #include <linux/mutex.h> 19 - #include <linux/slab.h> 20 - #include <linux/platform_device.h> 21 13 #include <linux/of_platform.h> 22 14 #include <linux/of_fdt.h> 23 15 #include <linux/libfdt.h> 16 + #include <linux/usb/ehci_def.h> 24 17 #include <linux/usb/ehci_pdriver.h> 25 18 #include <linux/usb/ohci_pdriver.h> 26 19 27 20 #include <asm/octeon/octeon.h> 28 - #include <asm/octeon/cvmx-rnm-defs.h> 29 - #include <asm/octeon/cvmx-helper.h> 30 21 #include <asm/octeon/cvmx-helper-board.h> 31 22 #include <asm/octeon/cvmx-uctlx-defs.h> 23 + 24 + #define CVMX_UAHCX_EHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000010ull)) 25 + #define CVMX_UAHCX_OHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000408ull)) 32 26 33 27 /* Octeon Random Number Generator. 
*/ 34 28 static int __init octeon_rng_device_init(void) ··· 72 78 73 79 static int octeon2_usb_clock_start_cnt; 74 80 81 + static int __init octeon2_usb_reset(void) 82 + { 83 + union cvmx_uctlx_clk_rst_ctl clk_rst_ctl; 84 + u32 ucmd; 85 + 86 + if (!OCTEON_IS_OCTEON2()) 87 + return 0; 88 + 89 + clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0)); 90 + if (clk_rst_ctl.s.hrst) { 91 + ucmd = cvmx_read64_uint32(CVMX_UAHCX_EHCI_USBCMD); 92 + ucmd &= ~CMD_RUN; 93 + cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd); 94 + mdelay(2); 95 + ucmd |= CMD_RESET; 96 + cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd); 97 + ucmd = cvmx_read64_uint32(CVMX_UAHCX_OHCI_USBCMD); 98 + ucmd |= CMD_RUN; 99 + cvmx_write64_uint32(CVMX_UAHCX_OHCI_USBCMD, ucmd); 100 + } 101 + 102 + return 0; 103 + } 104 + arch_initcall(octeon2_usb_reset); 105 + 75 106 static void octeon2_usb_clocks_start(struct device *dev) 76 107 { 77 108 u64 div; 78 109 union cvmx_uctlx_if_ena if_ena; 79 110 union cvmx_uctlx_clk_rst_ctl clk_rst_ctl; 80 - union cvmx_uctlx_uphy_ctl_status uphy_ctl_status; 81 111 union cvmx_uctlx_uphy_portx_ctl_status port_ctl_status; 82 112 int i; 83 113 unsigned long io_clk_64_to_ns; ··· 148 130 if_ena.u64 = 0; 149 131 if_ena.s.en = 1; 150 132 cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64); 133 + 134 + for (i = 0; i <= 1; i++) { 135 + port_ctl_status.u64 = 136 + cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0)); 137 + /* Set txvreftune to 15 to obtain compliant 'eye' diagram. 
*/ 138 + port_ctl_status.s.txvreftune = 15; 139 + port_ctl_status.s.txrisetune = 1; 140 + port_ctl_status.s.txpreemphasistune = 1; 141 + cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0), 142 + port_ctl_status.u64); 143 + } 151 144 152 145 /* Step 3: Configure the reference clock, PHY, and HCLK */ 153 146 clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0)); ··· 247 218 clk_rst_ctl.s.p_por = 0; 248 219 cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); 249 220 250 - /* Step 5: Wait 1 ms for the PHY clock to start. */ 251 - mdelay(1); 221 + /* Step 5: Wait 3 ms for the PHY clock to start. */ 222 + mdelay(3); 252 223 253 - /* 254 - * Step 6: Program the reset input from automatic test 255 - * equipment field in the UPHY CSR 256 - */ 257 - uphy_ctl_status.u64 = cvmx_read_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0)); 258 - uphy_ctl_status.s.ate_reset = 1; 259 - cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64); 260 - 261 - /* Step 7: Wait for at least 10ns. */ 262 - ndelay(10); 263 - 264 - /* Step 8: Clear the ATE_RESET field in the UPHY CSR. */ 265 - uphy_ctl_status.s.ate_reset = 0; 266 - cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64); 267 - 268 - /* 269 - * Step 9: Wait for at least 20ns for UPHY to output PHY clock 270 - * signals and OHCI_CLK48 271 - */ 272 - ndelay(20); 224 + /* Steps 6..9 for ATE only, are skipped. */ 273 225 274 226 /* Step 10: Configure the OHCI_CLK48 and OHCI_CLK12 clocks. */ 275 227 /* 10a */ ··· 271 261 clk_rst_ctl.s.p_prst = 1; 272 262 cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); 273 263 264 + /* Step 11b */ 265 + udelay(1); 266 + 267 + /* Step 11c */ 268 + clk_rst_ctl.s.p_prst = 0; 269 + cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); 270 + 271 + /* Step 11d */ 272 + mdelay(1); 273 + 274 + /* Step 11e */ 275 + clk_rst_ctl.s.p_prst = 1; 276 + cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); 277 + 274 278 /* Step 12: Wait 1 uS. 
*/ 275 279 udelay(1); 276 280 ··· 293 269 cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64); 294 270 295 271 end_clock: 296 - /* Now we can set some other registers. */ 297 - 298 - for (i = 0; i <= 1; i++) { 299 - port_ctl_status.u64 = 300 - cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0)); 301 - /* Set txvreftune to 15 to obtain compliant 'eye' diagram. */ 302 - port_ctl_status.s.txvreftune = 15; 303 - port_ctl_status.s.txrisetune = 1; 304 - port_ctl_status.s.txpreemphasistune = 1; 305 - cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0), 306 - port_ctl_status.u64); 307 - } 308 - 309 272 /* Set uSOF cycle period to 60,000 bits. */ 310 273 cvmx_write_csr(CVMX_UCTLX_EHCI_FLA(0), 0x20ull); 274 + 311 275 exit: 312 276 mutex_unlock(&octeon2_usb_clocks_mutex); 313 277 } ··· 323 311 #ifdef __BIG_ENDIAN 324 312 .big_endian_mmio = 1, 325 313 #endif 326 - .dma_mask_64 = 1, 314 + /* 315 + * We can DMA from anywhere. But the descriptors must be in 316 + * the lower 4GB. 317 + */ 318 + .dma_mask_64 = 0, 327 319 .power_on = octeon_ehci_power_on, 328 320 .power_off = octeon_ehci_power_off, 329 321 }; ··· 705 689 if (fdt_check_header(initial_boot_params)) 706 690 panic("Corrupt Device Tree."); 707 691 692 + WARN(octeon_bootinfo->board_type == CVMX_BOARD_TYPE_CUST_DSR1000N, 693 + "Built-in DTB booting is deprecated on %s. Please switch to use appended DTB.", 694 + cvmx_board_type_to_string(octeon_bootinfo->board_type)); 695 + 708 696 aliases = fdt_path_offset(initial_boot_params, "/aliases"); 709 697 if (aliases < 0) { 710 698 pr_err("Error: No /aliases node in device tree."); ··· 1050 1030 break; 1051 1031 } 1052 1032 } 1053 - } 1054 - 1055 - if (octeon_bootinfo->board_type != CVMX_BOARD_TYPE_CUST_DSR1000N) { 1056 - int dsr1000n_leds = fdt_path_offset(initial_boot_params, 1057 - "/dsr1000n-leds"); 1058 - if (dsr1000n_leds >= 0) 1059 - fdt_nop_node(initial_boot_params, dsr1000n_leds); 1060 1033 } 1061 1034 1062 1035 return 0;
+19 -1
arch/mips/cavium-octeon/setup.c
··· 40 40 41 41 #include <asm/octeon/octeon.h> 42 42 #include <asm/octeon/pci-octeon.h> 43 - #include <asm/octeon/cvmx-mio-defs.h> 44 43 #include <asm/octeon/cvmx-rst-defs.h> 44 + 45 + /* 46 + * TRUE for devices having registers with little-endian byte 47 + * order, FALSE for registers with native-endian byte order. 48 + * PCI mandates little-endian, USB and SATA are configuraable, 49 + * but we chose little-endian for these. 50 + */ 51 + const bool octeon_should_swizzle_table[256] = { 52 + [0x00] = true, /* bootbus/CF */ 53 + [0x1b] = true, /* PCI mmio window */ 54 + [0x1c] = true, /* PCI mmio window */ 55 + [0x1d] = true, /* PCI mmio window */ 56 + [0x1e] = true, /* PCI mmio window */ 57 + [0x68] = true, /* OCTEON III USB */ 58 + [0x69] = true, /* OCTEON III USB */ 59 + [0x6c] = true, /* OCTEON III SATA */ 60 + [0x6f] = true, /* OCTEON II USB */ 61 + }; 62 + EXPORT_SYMBOL(octeon_should_swizzle_table); 45 63 46 64 #ifdef CONFIG_PCI 47 65 extern void pci_console_init(const char *arg);
+1
arch/mips/cavium-octeon/smp.c
··· 271 271 return -ENOTSUPP; 272 272 273 273 set_cpu_online(cpu, false); 274 + calculate_cpu_foreign_map(); 274 275 cpumask_clear_cpu(cpu, &cpu_callin_map); 275 276 octeon_fixup_irqs(); 276 277
+2 -2
arch/mips/cobalt/setup.c
··· 42 42 43 43 /* 44 44 * Cobalt doesn't have PS/2 keyboard/mouse interfaces, 45 - * keyboard conntroller is never used. 46 - * Also PCI-ISA bridge DMA contoroller is never used. 45 + * keyboard controller is never used. 46 + * Also PCI-ISA bridge DMA controller is never used. 47 47 */ 48 48 static struct resource cobalt_reserved_resources[] = { 49 49 { /* dma1 */
+119
arch/mips/configs/ath25_defconfig
··· 1 + CONFIG_ATH25=y 2 + # CONFIG_COMPACTION is not set 3 + CONFIG_HZ_100=y 4 + # CONFIG_SECCOMP is not set 5 + # CONFIG_LOCALVERSION_AUTO is not set 6 + CONFIG_SYSVIPC=y 7 + # CONFIG_CROSS_MEMORY_ATTACH is not set 8 + # CONFIG_FHANDLE is not set 9 + CONFIG_HIGH_RES_TIMERS=y 10 + CONFIG_BLK_DEV_INITRD=y 11 + # CONFIG_RD_GZIP is not set 12 + # CONFIG_RD_BZIP2 is not set 13 + # CONFIG_RD_XZ is not set 14 + # CONFIG_RD_LZO is not set 15 + # CONFIG_RD_LZ4 is not set 16 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 17 + # CONFIG_AIO is not set 18 + CONFIG_EMBEDDED=y 19 + # CONFIG_VM_EVENT_COUNTERS is not set 20 + # CONFIG_SLUB_DEBUG is not set 21 + # CONFIG_COMPAT_BRK is not set 22 + CONFIG_MODULES=y 23 + CONFIG_MODULE_UNLOAD=y 24 + # CONFIG_BLK_DEV_BSG is not set 25 + # CONFIG_IOSCHED_CFQ is not set 26 + # CONFIG_SUSPEND is not set 27 + CONFIG_NET=y 28 + CONFIG_PACKET=y 29 + CONFIG_UNIX=y 30 + CONFIG_INET=y 31 + CONFIG_IP_MULTICAST=y 32 + CONFIG_IP_ADVANCED_ROUTER=y 33 + # CONFIG_INET_XFRM_MODE_TRANSPORT is not set 34 + # CONFIG_INET_XFRM_MODE_TUNNEL is not set 35 + # CONFIG_INET_XFRM_MODE_BEET is not set 36 + # CONFIG_IPV6 is not set 37 + CONFIG_CFG80211=m 38 + CONFIG_MAC80211=m 39 + CONFIG_MAC80211_DEBUGFS=y 40 + CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 41 + # CONFIG_FIRMWARE_IN_KERNEL is not set 42 + CONFIG_MTD=y 43 + CONFIG_MTD_REDBOOT_PARTS=y 44 + CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2 45 + CONFIG_MTD_CMDLINE_PARTS=y 46 + CONFIG_MTD_BLOCK=y 47 + CONFIG_MTD_CFI=y 48 + CONFIG_MTD_CFI_ADV_OPTIONS=y 49 + CONFIG_MTD_CFI_GEOMETRY=y 50 + # CONFIG_MTD_MAP_BANK_WIDTH_1 is not set 51 + # CONFIG_MTD_MAP_BANK_WIDTH_4 is not set 52 + # CONFIG_MTD_CFI_I2 is not set 53 + CONFIG_MTD_CFI_AMDSTD=y 54 + CONFIG_MTD_COMPLEX_MAPPINGS=y 55 + CONFIG_MTD_PHYSMAP=y 56 + CONFIG_NETDEVICES=y 57 + # CONFIG_ETHERNET is not set 58 + # CONFIG_WLAN_VENDOR_ADMTEK is not set 59 + CONFIG_ATH5K=m 60 + # CONFIG_WLAN_VENDOR_ATMEL is not set 61 + # CONFIG_WLAN_VENDOR_BROADCOM is not set 62 + # 
CONFIG_WLAN_VENDOR_CISCO is not set 63 + # CONFIG_WLAN_VENDOR_INTEL is not set 64 + # CONFIG_WLAN_VENDOR_INTERSIL is not set 65 + # CONFIG_WLAN_VENDOR_MARVELL is not set 66 + # CONFIG_WLAN_VENDOR_MEDIATEK is not set 67 + # CONFIG_WLAN_VENDOR_RALINK is not set 68 + # CONFIG_WLAN_VENDOR_REALTEK is not set 69 + # CONFIG_WLAN_VENDOR_RSI is not set 70 + # CONFIG_WLAN_VENDOR_ST is not set 71 + # CONFIG_WLAN_VENDOR_TI is not set 72 + # CONFIG_WLAN_VENDOR_ZYDAS is not set 73 + CONFIG_INPUT=m 74 + # CONFIG_INPUT_KEYBOARD is not set 75 + # CONFIG_INPUT_MOUSE is not set 76 + # CONFIG_SERIO is not set 77 + # CONFIG_VT is not set 78 + # CONFIG_LEGACY_PTYS is not set 79 + # CONFIG_DEVKMEM is not set 80 + CONFIG_SERIAL_8250=y 81 + CONFIG_SERIAL_8250_CONSOLE=y 82 + # CONFIG_SERIAL_8250_PCI is not set 83 + CONFIG_SERIAL_8250_NR_UARTS=1 84 + CONFIG_SERIAL_8250_RUNTIME_UARTS=1 85 + # CONFIG_HW_RANDOM is not set 86 + # CONFIG_HWMON is not set 87 + # CONFIG_VGA_ARB is not set 88 + CONFIG_USB=m 89 + CONFIG_USB_EHCI_HCD=m 90 + CONFIG_LEDS_CLASS=y 91 + # CONFIG_IOMMU_SUPPORT is not set 92 + # CONFIG_DNOTIFY is not set 93 + # CONFIG_PROC_PAGE_MONITOR is not set 94 + CONFIG_TMPFS=y 95 + CONFIG_TMPFS_XATTR=y 96 + CONFIG_JFFS2_FS=y 97 + CONFIG_JFFS2_SUMMARY=y 98 + CONFIG_JFFS2_FS_XATTR=y 99 + # CONFIG_JFFS2_FS_POSIX_ACL is not set 100 + # CONFIG_JFFS2_FS_SECURITY is not set 101 + CONFIG_JFFS2_COMPRESSION_OPTIONS=y 102 + # CONFIG_JFFS2_ZLIB is not set 103 + CONFIG_SQUASHFS=y 104 + CONFIG_SQUASHFS_FILE_DIRECT=y 105 + CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y 106 + # CONFIG_SQUASHFS_ZLIB is not set 107 + CONFIG_SQUASHFS_XZ=y 108 + CONFIG_PRINTK_TIME=y 109 + # CONFIG_ENABLE_MUST_CHECK is not set 110 + CONFIG_STRIP_ASM_SYMS=y 111 + CONFIG_DEBUG_FS=y 112 + # CONFIG_SCHED_DEBUG is not set 113 + # CONFIG_FTRACE is not set 114 + # CONFIG_XZ_DEC_X86 is not set 115 + # CONFIG_XZ_DEC_POWERPC is not set 116 + # CONFIG_XZ_DEC_IA64 is not set 117 + # CONFIG_XZ_DEC_ARM is not set 118 + # CONFIG_XZ_DEC_ARMTHUMB 
is not set 119 + # CONFIG_XZ_DEC_SPARC is not set
+2
arch/mips/configs/cavium_octeon_defconfig
··· 59 59 CONFIG_BLK_DEV_SD=y 60 60 CONFIG_ATA=y 61 61 CONFIG_SATA_AHCI=y 62 + CONFIG_SATA_AHCI_PLATFORM=y 63 + CONFIG_AHCI_OCTEON=y 62 64 CONFIG_PATA_OCTEON_CF=y 63 65 CONFIG_SATA_SIL=y 64 66 CONFIG_NETDEVICES=y
+4
arch/mips/include/asm/bootinfo.h
··· 127 127 */ 128 128 extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; 129 129 130 + #ifdef CONFIG_USE_OF 131 + extern unsigned long fw_passed_dtb; 132 + #endif 133 + 130 134 /* 131 135 * Platform memory detection hook called by setup_arch 132 136 */
+92
arch/mips/include/asm/dsemul.h
··· 1 + /* 2 + * Copyright (C) 2016 Imagination Technologies 3 + * Author: Paul Burton <paul.burton@imgtec.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms of the GNU General Public License as published by the 7 + * Free Software Foundation; either version 2 of the License, or (at your 8 + * option) any later version. 9 + */ 10 + 11 + #ifndef __MIPS_ASM_DSEMUL_H__ 12 + #define __MIPS_ASM_DSEMUL_H__ 13 + 14 + #include <asm/break.h> 15 + #include <asm/inst.h> 16 + 17 + /* Break instruction with special math emu break code set */ 18 + #define BREAK_MATH(micromips) (((micromips) ? 0x7 : 0xd) | (BRK_MEMU << 16)) 19 + 20 + /* When used as a frame index, indicates the lack of a frame */ 21 + #define BD_EMUFRAME_NONE ((int)BIT(31)) 22 + 23 + struct mm_struct; 24 + struct pt_regs; 25 + struct task_struct; 26 + 27 + /** 28 + * mips_dsemul() - 'Emulate' an instruction from a branch delay slot 29 + * @regs: User thread register context. 30 + * @ir: The instruction to be 'emulated'. 31 + * @branch_pc: The PC of the branch instruction. 32 + * @cont_pc: The PC to continue at following 'emulation'. 33 + * 34 + * Emulate or execute an arbitrary MIPS instruction within the context of 35 + * the current user thread. This is used primarily to handle instructions 36 + * in the delay slots of emulated branch instructions, for example FP 37 + * branch instructions on systems without an FPU. 38 + * 39 + * Return: Zero on success, negative if ir is a NOP, signal number on failure. 40 + */ 41 + extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir, 42 + unsigned long branch_pc, unsigned long cont_pc); 43 + 44 + /** 45 + * do_dsemulret() - Return from a delay slot 'emulation' frame 46 + * @xcp: User thread register context. 47 + * 48 + * Call in response to the BRK_MEMU break instruction used to return to 49 + * the kernel from branch delay slot 'emulation' frames following a call 50 + * to mips_dsemul(). 
Restores the user thread PC to the value that was 51 + * passed as the cpc parameter to mips_dsemul(). 52 + * 53 + * Return: True if an emulation frame was returned from, else false. 54 + */ 55 + extern bool do_dsemulret(struct pt_regs *xcp); 56 + 57 + /** 58 + * dsemul_thread_cleanup() - Cleanup thread 'emulation' frame 59 + * @tsk: The task structure associated with the thread 60 + * 61 + * If the thread @tsk has a branch delay slot 'emulation' frame 62 + * allocated to it then free that frame. 63 + * 64 + * Return: True if a frame was freed, else false. 65 + */ 66 + extern bool dsemul_thread_cleanup(struct task_struct *tsk); 67 + 68 + /** 69 + * dsemul_thread_rollback() - Rollback from an 'emulation' frame 70 + * @regs: User thread register context. 71 + * 72 + * If the current thread, whose register context is represented by @regs, 73 + * is executing within a delay slot 'emulation' frame then exit that 74 + * frame. The PC will be rolled back to the branch if the instruction 75 + * that was being 'emulated' has not yet executed, or advanced to the 76 + * continuation PC if it has. 77 + * 78 + * Return: True if a frame was exited, else false. 79 + */ 80 + extern bool dsemul_thread_rollback(struct pt_regs *regs); 81 + 82 + /** 83 + * dsemul_mm_cleanup() - Cleanup per-mm delay slot 'emulation' state 84 + * @mm: The struct mm_struct to cleanup state for. 85 + * 86 + * Cleanup state for the given @mm, ensuring that any memory allocated 87 + * for delay slot 'emulation' book-keeping is freed. This is to be called 88 + * before @mm is freed in order to avoid memory leaks. 89 + */ 90 + extern void dsemul_mm_cleanup(struct mm_struct *mm); 91 + 92 + #endif /* __MIPS_ASM_DSEMUL_H__ */
+4
arch/mips/include/asm/elf.h
··· 458 458 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) 459 459 #endif 460 460 461 + /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ 461 462 #define ARCH_DLINFO \ 462 463 do { \ 463 464 NEW_AUX_ENT(AT_SYSINFO_EHDR, \ ··· 498 497 499 498 extern void mips_set_personality_nan(struct arch_elf_state *state); 500 499 extern void mips_set_personality_fp(struct arch_elf_state *state); 500 + 501 + #define elf_read_implies_exec(ex, stk) mips_elf_read_implies_exec(&(ex), stk) 502 + extern int mips_elf_read_implies_exec(void *elf_ex, int exstack); 501 503 502 504 #endif /* _ASM_ELF_H */
+3 -14
arch/mips/include/asm/fpu_emulator.h
··· 24 24 #define _ASM_FPU_EMULATOR_H 25 25 26 26 #include <linux/sched.h> 27 - #include <asm/break.h> 27 + #include <asm/dsemul.h> 28 28 #include <asm/thread_info.h> 29 29 #include <asm/inst.h> 30 30 #include <asm/local.h> ··· 60 60 #define MIPS_FPU_EMU_INC_STATS(M) do { } while (0) 61 61 #endif /* CONFIG_DEBUG_FS */ 62 62 63 - extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir, 64 - unsigned long cpc); 65 - extern int do_dsemulret(struct pt_regs *xcp); 66 63 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, 67 64 struct mips_fpu_struct *ctx, int has_fpu, 68 65 void *__user *fault_addr); 69 66 int process_fpemu_return(int sig, void __user *fault_addr, 70 67 unsigned long fcr31); 68 + int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, 69 + unsigned long *contpc); 71 70 int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, 72 71 unsigned long *contpc); 73 - 74 - /* 75 - * Instruction inserted following the badinst to further tag the sequence 76 - */ 77 - #define BD_COOKIE 0x0000bd36 /* tne $0, $0 with baggage */ 78 - 79 - /* 80 - * Break instruction with special math emu break code set 81 - */ 82 - #define BREAK_MATH(micromips) (((micromips) ? 0x7 : 0xd) | (BRK_MEMU << 16)) 83 72 84 73 #define SIGNALLING_NAN 0x7ff800007ff80000LL 85 74
-2
arch/mips/include/asm/mach-cavium-octeon/irq.h
··· 42 42 OCTEON_IRQ_TIMER1, 43 43 OCTEON_IRQ_TIMER2, 44 44 OCTEON_IRQ_TIMER3, 45 - OCTEON_IRQ_USB0, 46 - OCTEON_IRQ_USB1, 47 45 #ifndef CONFIG_PCI_MSI 48 46 OCTEON_IRQ_LAST = 127 49 47 #endif
+13 -29
arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
··· 12 12 13 13 #ifdef __BIG_ENDIAN 14 14 15 + static inline bool __should_swizzle_bits(volatile void *a) 16 + { 17 + extern const bool octeon_should_swizzle_table[]; 18 + 19 + unsigned long did = ((unsigned long)a >> 40) & 0xff; 20 + return octeon_should_swizzle_table[did]; 21 + } 22 + 15 23 # define __swizzle_addr_b(port) (port) 16 24 # define __swizzle_addr_w(port) (port) 17 25 # define __swizzle_addr_l(port) (port) 18 26 # define __swizzle_addr_q(port) (port) 19 27 20 28 #else /* __LITTLE_ENDIAN */ 29 + 30 + #define __should_swizzle_bits(a) false 21 31 22 32 static inline bool __should_swizzle_addr(unsigned long p) 23 33 { ··· 45 35 46 36 #endif /* __BIG_ENDIAN */ 47 37 48 - /* 49 - * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware; 50 - * less sane hardware forces software to fiddle with this... 51 - * 52 - * Regardless, if the host bus endianness mismatches that of PCI/ISA, then 53 - * you can't have the numerical value of data and byte addresses within 54 - * multibyte quantities both preserved at the same time. Hence two 55 - * variations of functions: non-prefixed ones that preserve the value 56 - * and prefixed ones that preserve byte addresses. The latters are 57 - * typically used for moving raw data between a peripheral and memory (cf. 58 - * string I/O functions), hence the "__mem_" prefix. 59 - */ 60 - #if defined(CONFIG_SWAP_IO_SPACE) 61 38 62 39 # define ioswabb(a, x) (x) 63 40 # define __mem_ioswabb(a, x) (x) 64 - # define ioswabw(a, x) le16_to_cpu(x) 41 + # define ioswabw(a, x) (__should_swizzle_bits(a) ? le16_to_cpu(x) : x) 65 42 # define __mem_ioswabw(a, x) (x) 66 - # define ioswabl(a, x) le32_to_cpu(x) 43 + # define ioswabl(a, x) (__should_swizzle_bits(a) ? le32_to_cpu(x) : x) 67 44 # define __mem_ioswabl(a, x) (x) 68 - # define ioswabq(a, x) le64_to_cpu(x) 45 + # define ioswabq(a, x) (__should_swizzle_bits(a) ? 
le64_to_cpu(x) : x) 69 46 # define __mem_ioswabq(a, x) (x) 70 - 71 - #else 72 - 73 - # define ioswabb(a, x) (x) 74 - # define __mem_ioswabb(a, x) (x) 75 - # define ioswabw(a, x) (x) 76 - # define __mem_ioswabw(a, x) cpu_to_le16(x) 77 - # define ioswabl(a, x) (x) 78 - # define __mem_ioswabl(a, x) cpu_to_le32(x) 79 - # define ioswabq(a, x) (x) 80 - # define __mem_ioswabq(a, x) cpu_to_le32(x) 81 - 82 - #endif 83 47 84 48 #endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */
+9
arch/mips/include/asm/mmu.h
··· 2 2 #define __ASM_MMU_H 3 3 4 4 #include <linux/atomic.h> 5 + #include <linux/spinlock.h> 6 + #include <linux/wait.h> 5 7 6 8 typedef struct { 7 9 unsigned long asid[NR_CPUS]; 8 10 void *vdso; 9 11 atomic_t fp_mode_switching; 12 + 13 + /* lock to be held whilst modifying fp_bd_emupage_allocmap */ 14 + spinlock_t bd_emupage_lock; 15 + /* bitmap tracking allocation of fp_bd_emupage */ 16 + unsigned long *bd_emupage_allocmap; 17 + /* wait queue for threads requiring an emuframe */ 18 + wait_queue_head_t bd_emupage_queue; 10 19 } mm_context_t; 11 20 12 21 #endif /* __ASM_MMU_H */
+6
arch/mips/include/asm/mmu_context.h
··· 16 16 #include <linux/smp.h> 17 17 #include <linux/slab.h> 18 18 #include <asm/cacheflush.h> 19 + #include <asm/dsemul.h> 19 20 #include <asm/hazards.h> 20 21 #include <asm/tlbflush.h> 21 22 #include <asm-generic/mm_hooks.h> ··· 129 128 130 129 atomic_set(&mm->context.fp_mode_switching, 0); 131 130 131 + mm->context.bd_emupage_allocmap = NULL; 132 + spin_lock_init(&mm->context.bd_emupage_lock); 133 + init_waitqueue_head(&mm->context.bd_emupage_queue); 134 + 132 135 return 0; 133 136 } 134 137 ··· 167 162 */ 168 163 static inline void destroy_context(struct mm_struct *mm) 169 164 { 165 + dsemul_mm_cleanup(mm); 170 166 } 171 167 172 168 #define deactivate_mm(tsk, mm) do { } while (0)
+2
arch/mips/include/asm/msa.h
··· 168 168 unsigned int reg; \ 169 169 __asm__ __volatile__( \ 170 170 " .set push\n" \ 171 + " .set fp=64\n" \ 171 172 " .set msa\n" \ 172 173 " cfcmsa %0, $" #cs "\n" \ 173 174 " .set pop\n" \ ··· 180 179 { \ 181 180 __asm__ __volatile__( \ 182 181 " .set push\n" \ 182 + " .set fp=64\n" \ 183 183 " .set msa\n" \ 184 184 " ctcmsa $" #cs ", %0\n" \ 185 185 " .set pop\n" \
+32 -12
arch/mips/include/asm/page.h
··· 162 162 /* 163 163 * __pa()/__va() should be used only during mem init. 164 164 */ 165 - #ifdef CONFIG_64BIT 166 - #define __pa(x) \ 167 - ({ \ 168 - unsigned long __x = (unsigned long)(x); \ 169 - __x < CKSEG0 ? XPHYSADDR(__x) : CPHYSADDR(__x); \ 170 - }) 171 - #else 172 - #define __pa(x) \ 173 - ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) 174 - #endif 165 + static inline unsigned long ___pa(unsigned long x) 166 + { 167 + if (config_enabled(CONFIG_64BIT)) { 168 + /* 169 + * For MIPS64 the virtual address may either be in one of 170 + * the compatibility segements ckseg0 or ckseg1, or it may 171 + * be in xkphys. 172 + */ 173 + return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x); 174 + } 175 + 176 + if (!config_enabled(CONFIG_EVA)) { 177 + /* 178 + * We're using the standard MIPS32 legacy memory map, ie. 179 + * the address x is going to be in kseg0 or kseg1. We can 180 + * handle either case by masking out the desired bits using 181 + * CPHYSADDR. 182 + */ 183 + return CPHYSADDR(x); 184 + } 185 + 186 + /* 187 + * EVA is in use so the memory map could be anything, making it not 188 + * safe to just mask out bits. 189 + */ 190 + return x - PAGE_OFFSET + PHYS_OFFSET; 191 + } 192 + #define __pa(x) ___pa((unsigned long)(x)) 175 193 #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) 176 194 #include <asm/io.h> 177 195 ··· 247 229 #define virt_addr_valid(kaddr) \ 248 230 __virt_addr_valid((const volatile void *) (kaddr)) 249 231 250 - #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 251 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 232 + #define VM_DATA_DEFAULT_FLAGS \ 233 + (VM_READ | VM_WRITE | \ 234 + ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ 235 + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 252 236 253 237 #define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE) 254 238 #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET)
+17 -1
arch/mips/include/asm/processor.h
··· 11 11 #ifndef _ASM_PROCESSOR_H 12 12 #define _ASM_PROCESSOR_H 13 13 14 + #include <linux/atomic.h> 14 15 #include <linux/cpumask.h> 15 16 #include <linux/threads.h> 16 17 17 18 #include <asm/cachectl.h> 18 19 #include <asm/cpu.h> 19 20 #include <asm/cpu-info.h> 21 + #include <asm/dsemul.h> 20 22 #include <asm/mipsregs.h> 21 23 #include <asm/prefetch.h> 22 24 ··· 80 78 81 79 #endif 82 80 83 - #define STACK_TOP (TASK_SIZE & PAGE_MASK) 81 + /* 82 + * One page above the stack is used for branch delay slot "emulation". 83 + * See dsemul.c for details. 84 + */ 85 + #define STACK_TOP ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE) 84 86 85 87 /* 86 88 * This decides where the kernel will search for a free chunk of vm ··· 262 256 263 257 /* Saved fpu/fpu emulator stuff. */ 264 258 struct mips_fpu_struct fpu FPU_ALIGN; 259 + /* Assigned branch delay slot 'emulation' frame */ 260 + atomic_t bd_emu_frame; 261 + /* PC of the branch from a branch delay slot 'emulation' */ 262 + unsigned long bd_emu_branch_pc; 263 + /* PC to continue from following a branch delay slot 'emulation' */ 264 + unsigned long bd_emu_cont_pc; 265 265 #ifdef CONFIG_MIPS_MT_FPAFF 266 266 /* Emulated instruction count */ 267 267 unsigned long emulated_fp; ··· 335 323 * FPU affinity state (null if not FPAFF) \ 336 324 */ \ 337 325 FPAFF_INIT \ 326 + /* Delay slot emulation */ \ 327 + .bd_emu_frame = ATOMIC_INIT(BD_EMUFRAME_NONE), \ 328 + .bd_emu_branch_pc = 0, \ 329 + .bd_emu_cont_pc = 0, \ 338 330 /* \ 339 331 * Saved DSP stuff \ 340 332 */ \
+4
arch/mips/include/asm/r4kcache.h
··· 210 210 211 211 static inline void protected_writeback_scache_line(unsigned long addr) 212 212 { 213 + #ifdef CONFIG_EVA 214 + protected_cachee_op(Hit_Writeback_Inv_SD, addr); 215 + #else 213 216 protected_cache_op(Hit_Writeback_Inv_SD, addr); 217 + #endif 214 218 } 215 219 216 220 /*
+1 -1
arch/mips/include/asm/signal.h
··· 11 11 12 12 #include <uapi/asm/signal.h> 13 13 14 - #ifdef CONFIG_MIPS32_COMPAT 14 + #ifdef CONFIG_MIPS32_O32 15 15 extern struct mips_abi mips_abi_32; 16 16 17 17 #define sig_uses_siginfo(ka, abi) \
+3 -1
arch/mips/include/asm/smp.h
··· 23 23 extern int smp_num_siblings; 24 24 extern cpumask_t cpu_sibling_map[]; 25 25 extern cpumask_t cpu_core_map[]; 26 - extern cpumask_t cpu_foreign_map; 26 + extern cpumask_t cpu_foreign_map[]; 27 27 28 28 #define raw_smp_processor_id() (current_thread_info()->cpu) 29 29 ··· 52 52 extern cpumask_t cpu_coherent_mask; 53 53 54 54 extern void asmlinkage smp_bootstrap(void); 55 + 56 + extern void calculate_cpu_foreign_map(void); 55 57 56 58 /* 57 59 * this function sends a 'reschedule' IPI to another CPU.
+2
arch/mips/include/uapi/asm/auxvec.h
··· 14 14 /* Location of VDSO image. */ 15 15 #define AT_SYSINFO_EHDR 33 16 16 17 + #define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */ 18 + 17 19 #endif /* __ASM_AUXVEC_H */
+1 -1
arch/mips/kernel/Makefile
··· 71 71 obj-$(CONFIG_64BIT) += scall64-64.o 72 72 obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o 73 73 obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o 74 - obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o 74 + obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o signal_o32.o 75 75 76 76 obj-$(CONFIG_KGDB) += kgdb.o 77 77 obj-$(CONFIG_PROC_FS) += proc.o
+1 -6
arch/mips/kernel/cevt-r4k.c
··· 276 276 CLOCK_EVT_FEAT_C3STOP | 277 277 CLOCK_EVT_FEAT_PERCPU; 278 278 279 - clockevent_set_clock(cd, mips_hpt_frequency); 280 - 281 - /* Calculate the min / max delta */ 282 - cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); 283 279 min_delta = calculate_min_delta(); 284 - cd->min_delta_ns = clockevent_delta2ns(min_delta, cd); 285 280 286 281 cd->rating = 300; 287 282 cd->irq = irq; ··· 284 289 cd->set_next_event = mips_next_event; 285 290 cd->event_handler = mips_event_handler; 286 291 287 - clockevents_register_device(cd); 292 + clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff); 288 293 289 294 if (cp0_timer_irq_installed) 290 295 return 0;
+3 -1
arch/mips/kernel/csrc-r4k.c
··· 23 23 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 24 24 }; 25 25 26 - static u64 notrace r4k_read_sched_clock(void) 26 + static u64 __maybe_unused notrace r4k_read_sched_clock(void) 27 27 { 28 28 return read_c0_count(); 29 29 } ··· 82 82 83 83 clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); 84 84 85 + #ifndef CONFIG_CPU_FREQ 85 86 sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency); 87 + #endif 86 88 87 89 return 0; 88 90 }
+19
arch/mips/kernel/elf.c
··· 8 8 * option) any later version. 9 9 */ 10 10 11 + #include <linux/binfmts.h> 11 12 #include <linux/elf.h> 13 + #include <linux/export.h> 12 14 #include <linux/sched.h> 13 15 16 + #include <asm/cpu-features.h> 14 17 #include <asm/cpu-info.h> 15 18 16 19 /* Whether to accept legacy-NaN and 2008-NaN user binaries. */ ··· 329 326 BUG(); 330 327 } 331 328 } 329 + 330 + int mips_elf_read_implies_exec(void *elf_ex, int exstack) 331 + { 332 + if (exstack != EXSTACK_DISABLE_X) { 333 + /* The binary doesn't request a non-executable stack */ 334 + return 1; 335 + } 336 + 337 + if (!cpu_has_rixi) { 338 + /* The CPU doesn't support non-executable memory */ 339 + return 1; 340 + } 341 + 342 + return 0; 343 + } 344 + EXPORT_SYMBOL(mips_elf_read_implies_exec);
+14 -7
arch/mips/kernel/head.S
··· 93 93 jr t0 94 94 0: 95 95 96 + #ifdef CONFIG_USE_OF 96 97 #ifdef CONFIG_MIPS_RAW_APPENDED_DTB 97 - PTR_LA t0, __appended_dtb 98 + PTR_LA t2, __appended_dtb 98 99 99 100 #ifdef CONFIG_CPU_BIG_ENDIAN 100 101 li t1, 0xd00dfeed 101 102 #else 102 103 li t1, 0xedfe0dd0 103 104 #endif 104 - lw t2, (t0) 105 - bne t1, t2, not_found 106 - nop 105 + lw t0, (t2) 106 + beq t0, t1, dtb_found 107 + #endif 108 + li t1, -2 109 + beq a0, t1, dtb_found 110 + move t2, a1 107 111 108 - move a1, t0 109 - PTR_LI a0, -2 110 - not_found: 112 + li t2, 0 113 + dtb_found: 111 114 #endif 112 115 PTR_LA t0, __bss_start # clear .bss 113 116 LONG_S zero, (t0) ··· 124 121 LONG_S a1, fw_arg1 125 122 LONG_S a2, fw_arg2 126 123 LONG_S a3, fw_arg3 124 + 125 + #ifdef CONFIG_USE_OF 126 + LONG_S t2, fw_passed_dtb 127 + #endif 127 128 128 129 MTC0 zero, CP0_CONTEXT # clear context register 129 130 PTR_LA $28, init_thread_union
+4 -4
arch/mips/kernel/mips-r2-to-r6-emul.c
··· 283 283 err = mipsr6_emul(regs, nir); 284 284 if (err > 0) { 285 285 regs->cp0_epc = nepc; 286 - err = mips_dsemul(regs, nir, cepc); 286 + err = mips_dsemul(regs, nir, epc, cepc); 287 287 if (err == SIGILL) 288 288 err = SIGEMT; 289 289 MIPS_R2_STATS(dsemul); ··· 1033 1033 if (nir) { 1034 1034 err = mipsr6_emul(regs, nir); 1035 1035 if (err > 0) { 1036 - err = mips_dsemul(regs, nir, cpc); 1036 + err = mips_dsemul(regs, nir, epc, cpc); 1037 1037 if (err == SIGILL) 1038 1038 err = SIGEMT; 1039 1039 MIPS_R2_STATS(dsemul); ··· 1082 1082 if (nir) { 1083 1083 err = mipsr6_emul(regs, nir); 1084 1084 if (err > 0) { 1085 - err = mips_dsemul(regs, nir, cpc); 1085 + err = mips_dsemul(regs, nir, epc, cpc); 1086 1086 if (err == SIGILL) 1087 1087 err = SIGEMT; 1088 1088 MIPS_R2_STATS(dsemul); ··· 1149 1149 if (nir) { 1150 1150 err = mipsr6_emul(regs, nir); 1151 1151 if (err > 0) { 1152 - err = mips_dsemul(regs, nir, cpc); 1152 + err = mips_dsemul(regs, nir, epc, cpc); 1153 1153 if (err == SIGILL) 1154 1154 err = SIGEMT; 1155 1155 MIPS_R2_STATS(dsemul);
+14
arch/mips/kernel/process.c
··· 30 30 #include <asm/asm.h> 31 31 #include <asm/bootinfo.h> 32 32 #include <asm/cpu.h> 33 + #include <asm/dsemul.h> 33 34 #include <asm/dsp.h> 34 35 #include <asm/fpu.h> 35 36 #include <asm/msa.h> ··· 69 68 lose_fpu(0); 70 69 clear_thread_flag(TIF_MSA_CTX_LIVE); 71 70 clear_used_math(); 71 + atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE); 72 72 init_dsp(); 73 73 regs->cp0_epc = pc; 74 74 regs->regs[29] = sp; 75 + } 76 + 77 + void exit_thread(struct task_struct *tsk) 78 + { 79 + /* 80 + * User threads may have allocated a delay slot emulation frame. 81 + * If so, clean up that allocation. 82 + */ 83 + if (!(current->flags & PF_KTHREAD)) 84 + dsemul_thread_cleanup(tsk); 75 85 } 76 86 77 87 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) ··· 170 158 #ifdef CONFIG_MIPS_MT_FPAFF 171 159 clear_tsk_thread_flag(p, TIF_FPUBOUND); 172 160 #endif /* CONFIG_MIPS_MT_FPAFF */ 161 + 162 + atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE); 173 163 174 164 if (clone_flags & CLONE_SETTLS) 175 165 ti->tp_value = regs->regs[7];
+1 -1
arch/mips/kernel/scall64-n32.S
··· 348 348 PTR sys_ni_syscall /* available, was setaltroot */ 349 349 PTR sys_add_key 350 350 PTR sys_request_key 351 - PTR sys_keyctl /* 6245 */ 351 + PTR compat_sys_keyctl /* 6245 */ 352 352 PTR sys_set_thread_area 353 353 PTR sys_inotify_init 354 354 PTR sys_inotify_add_watch
+1 -1
arch/mips/kernel/scall64-o32.S
··· 504 504 PTR sys_ni_syscall /* available, was setaltroot */ 505 505 PTR sys_add_key /* 4280 */ 506 506 PTR sys_request_key 507 - PTR sys_keyctl 507 + PTR compat_sys_keyctl 508 508 PTR sys_set_thread_area 509 509 PTR sys_inotify_init 510 510 PTR sys_inotify_add_watch /* 4285 */
+8 -5
arch/mips/kernel/segment.c
··· 26 26 27 27 /* 28 28 * Access modes MK, MSK and MUSK are mapped segments. Therefore 29 - * there is no direct physical address mapping. 29 + * there is no direct physical address mapping unless it becomes 30 + * unmapped uncached at error level due to EU. 30 31 */ 31 - if ((am == 0) || (am > 3)) { 32 + if ((am == 0) || (am > 3) || (cfg & MIPS_SEGCFG_EU)) 32 33 str += sprintf(str, " %03lx", 33 34 ((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT)); 35 + else 36 + str += sprintf(str, " UND"); 37 + 38 + if ((am == 0) || (am > 3)) 34 39 str += sprintf(str, " %01ld", 35 40 ((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT)); 36 - } else { 37 - str += sprintf(str, " UND"); 41 + else 38 42 str += sprintf(str, " U"); 39 - } 40 43 41 44 /* Exception configuration. */ 42 45 str += sprintf(str, " %01ld\n",
+4
arch/mips/kernel/setup.c
··· 875 875 unsigned long kernelsp[NR_CPUS]; 876 876 unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; 877 877 878 + #ifdef CONFIG_USE_OF 879 + unsigned long fw_passed_dtb; 880 + #endif 881 + 878 882 #ifdef CONFIG_DEBUG_FS 879 883 struct dentry *mips_debugfs_dir; 880 884 static int __init debugfs_mips(void)
+8
arch/mips/kernel/signal.c
··· 772 772 struct mips_abi *abi = current->thread.abi; 773 773 void *vdso = current->mm->context.vdso; 774 774 775 + /* 776 + * If we were emulating a delay slot instruction, exit that frame such 777 + * that addresses in the sigframe are as expected for userland and we 778 + * don't have a problem if we reuse the thread's frame for an 779 + * instruction within the signal handler. 780 + */ 781 + dsemul_thread_rollback(regs); 782 + 775 783 if (regs->regs[0]) { 776 784 switch(regs->regs[2]) { 777 785 case ERESTART_RESTARTBLOCK:
+6 -282
arch/mips/kernel/signal32.c
··· 6 6 * Copyright (C) 1991, 1992 Linus Torvalds 7 7 * Copyright (C) 1994 - 2000, 2006 Ralf Baechle 8 8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 9 + * Copyright (C) 2016, Imagination Technologies Ltd. 9 10 */ 10 - #include <linux/cache.h> 11 - #include <linux/compat.h> 12 - #include <linux/sched.h> 13 - #include <linux/mm.h> 14 - #include <linux/smp.h> 11 + #include <linux/compiler.h> 12 + #include <linux/errno.h> 15 13 #include <linux/kernel.h> 16 14 #include <linux/signal.h> 17 15 #include <linux/syscalls.h> 18 - #include <linux/errno.h> 19 - #include <linux/wait.h> 20 - #include <linux/ptrace.h> 21 - #include <linux/suspend.h> 22 - #include <linux/compiler.h> 23 - #include <linux/uaccess.h> 24 16 25 - #include <asm/abi.h> 26 - #include <asm/asm.h> 17 + #include <asm/compat.h> 27 18 #include <asm/compat-signal.h> 28 - #include <linux/bitops.h> 29 - #include <asm/cacheflush.h> 30 - #include <asm/sim.h> 31 - #include <asm/ucontext.h> 32 - #include <asm/fpu.h> 33 - #include <asm/war.h> 34 - #include <asm/dsp.h> 19 + #include <asm/uaccess.h> 20 + #include <asm/unistd.h> 35 21 36 22 #include "signal-common.h" 37 - 38 - /* 39 - * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... 
40 - */ 41 - #define __NR_O32_restart_syscall 4253 42 23 43 24 /* 32-bit compatibility types */ 44 25 45 26 typedef unsigned int __sighandler32_t; 46 27 typedef void (*vfptr_t)(void); 47 - 48 - struct ucontext32 { 49 - u32 uc_flags; 50 - s32 uc_link; 51 - compat_stack_t uc_stack; 52 - struct sigcontext32 uc_mcontext; 53 - compat_sigset_t uc_sigmask; /* mask last for extensibility */ 54 - }; 55 - 56 - struct sigframe32 { 57 - u32 sf_ass[4]; /* argument save space for o32 */ 58 - u32 sf_pad[2]; /* Was: signal trampoline */ 59 - struct sigcontext32 sf_sc; 60 - compat_sigset_t sf_mask; 61 - }; 62 - 63 - struct rt_sigframe32 { 64 - u32 rs_ass[4]; /* argument save space for o32 */ 65 - u32 rs_pad[2]; /* Was: signal trampoline */ 66 - compat_siginfo_t rs_info; 67 - struct ucontext32 rs_uc; 68 - }; 69 - 70 - static int setup_sigcontext32(struct pt_regs *regs, 71 - struct sigcontext32 __user *sc) 72 - { 73 - int err = 0; 74 - int i; 75 - 76 - err |= __put_user(regs->cp0_epc, &sc->sc_pc); 77 - 78 - err |= __put_user(0, &sc->sc_regs[0]); 79 - for (i = 1; i < 32; i++) 80 - err |= __put_user(regs->regs[i], &sc->sc_regs[i]); 81 - 82 - err |= __put_user(regs->hi, &sc->sc_mdhi); 83 - err |= __put_user(regs->lo, &sc->sc_mdlo); 84 - if (cpu_has_dsp) { 85 - err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); 86 - err |= __put_user(mfhi1(), &sc->sc_hi1); 87 - err |= __put_user(mflo1(), &sc->sc_lo1); 88 - err |= __put_user(mfhi2(), &sc->sc_hi2); 89 - err |= __put_user(mflo2(), &sc->sc_lo2); 90 - err |= __put_user(mfhi3(), &sc->sc_hi3); 91 - err |= __put_user(mflo3(), &sc->sc_lo3); 92 - } 93 - 94 - /* 95 - * Save FPU state to signal context. Signal handler 96 - * will "inherit" current FPU state. 
97 - */ 98 - err |= protected_save_fp_context(sc); 99 - 100 - return err; 101 - } 102 - 103 - static int restore_sigcontext32(struct pt_regs *regs, 104 - struct sigcontext32 __user *sc) 105 - { 106 - int err = 0; 107 - s32 treg; 108 - int i; 109 - 110 - /* Always make any pending restarted system calls return -EINTR */ 111 - current->restart_block.fn = do_no_restart_syscall; 112 - 113 - err |= __get_user(regs->cp0_epc, &sc->sc_pc); 114 - err |= __get_user(regs->hi, &sc->sc_mdhi); 115 - err |= __get_user(regs->lo, &sc->sc_mdlo); 116 - if (cpu_has_dsp) { 117 - err |= __get_user(treg, &sc->sc_hi1); mthi1(treg); 118 - err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg); 119 - err |= __get_user(treg, &sc->sc_hi2); mthi2(treg); 120 - err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg); 121 - err |= __get_user(treg, &sc->sc_hi3); mthi3(treg); 122 - err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); 123 - err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); 124 - } 125 - 126 - for (i = 1; i < 32; i++) 127 - err |= __get_user(regs->regs[i], &sc->sc_regs[i]); 128 - 129 - return err ?: protected_restore_fp_context(sc); 130 - } 131 28 132 29 /* 133 30 * Atomically swap in the new signal mask, and wait for a signal. ··· 144 247 145 248 return 0; 146 249 } 147 - 148 - asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) 149 - { 150 - struct sigframe32 __user *frame; 151 - sigset_t blocked; 152 - int sig; 153 - 154 - frame = (struct sigframe32 __user *) regs.regs[29]; 155 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 156 - goto badframe; 157 - if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask)) 158 - goto badframe; 159 - 160 - set_current_blocked(&blocked); 161 - 162 - sig = restore_sigcontext32(&regs, &frame->sf_sc); 163 - if (sig < 0) 164 - goto badframe; 165 - else if (sig) 166 - force_sig(sig, current); 167 - 168 - /* 169 - * Don't let your children do this ... 
170 - */ 171 - __asm__ __volatile__( 172 - "move\t$29, %0\n\t" 173 - "j\tsyscall_exit" 174 - :/* no outputs */ 175 - :"r" (&regs)); 176 - /* Unreached */ 177 - 178 - badframe: 179 - force_sig(SIGSEGV, current); 180 - } 181 - 182 - asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) 183 - { 184 - struct rt_sigframe32 __user *frame; 185 - sigset_t set; 186 - int sig; 187 - 188 - frame = (struct rt_sigframe32 __user *) regs.regs[29]; 189 - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 190 - goto badframe; 191 - if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) 192 - goto badframe; 193 - 194 - set_current_blocked(&set); 195 - 196 - sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext); 197 - if (sig < 0) 198 - goto badframe; 199 - else if (sig) 200 - force_sig(sig, current); 201 - 202 - if (compat_restore_altstack(&frame->rs_uc.uc_stack)) 203 - goto badframe; 204 - 205 - /* 206 - * Don't let your children do this ... 207 - */ 208 - __asm__ __volatile__( 209 - "move\t$29, %0\n\t" 210 - "j\tsyscall_exit" 211 - :/* no outputs */ 212 - :"r" (&regs)); 213 - /* Unreached */ 214 - 215 - badframe: 216 - force_sig(SIGSEGV, current); 217 - } 218 - 219 - static int setup_frame_32(void *sig_return, struct ksignal *ksig, 220 - struct pt_regs *regs, sigset_t *set) 221 - { 222 - struct sigframe32 __user *frame; 223 - int err = 0; 224 - 225 - frame = get_sigframe(ksig, regs, sizeof(*frame)); 226 - if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 227 - return -EFAULT; 228 - 229 - err |= setup_sigcontext32(regs, &frame->sf_sc); 230 - err |= __copy_conv_sigset_to_user(&frame->sf_mask, set); 231 - 232 - if (err) 233 - return -EFAULT; 234 - 235 - /* 236 - * Arguments to signal handler: 237 - * 238 - * a0 = signal number 239 - * a1 = 0 (should be cause) 240 - * a2 = pointer to struct sigcontext 241 - * 242 - * $25 and c0_epc point to the signal handler, $29 points to the 243 - * struct sigframe. 
244 - */ 245 - regs->regs[ 4] = ksig->sig; 246 - regs->regs[ 5] = 0; 247 - regs->regs[ 6] = (unsigned long) &frame->sf_sc; 248 - regs->regs[29] = (unsigned long) frame; 249 - regs->regs[31] = (unsigned long) sig_return; 250 - regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; 251 - 252 - DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", 253 - current->comm, current->pid, 254 - frame, regs->cp0_epc, regs->regs[31]); 255 - 256 - return 0; 257 - } 258 - 259 - static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig, 260 - struct pt_regs *regs, sigset_t *set) 261 - { 262 - struct rt_sigframe32 __user *frame; 263 - int err = 0; 264 - 265 - frame = get_sigframe(ksig, regs, sizeof(*frame)); 266 - if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 267 - return -EFAULT; 268 - 269 - /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ 270 - err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info); 271 - 272 - /* Create the ucontext. */ 273 - err |= __put_user(0, &frame->rs_uc.uc_flags); 274 - err |= __put_user(0, &frame->rs_uc.uc_link); 275 - err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]); 276 - err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext); 277 - err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set); 278 - 279 - if (err) 280 - return -EFAULT; 281 - 282 - /* 283 - * Arguments to signal handler: 284 - * 285 - * a0 = signal number 286 - * a1 = 0 (should be cause) 287 - * a2 = pointer to ucontext 288 - * 289 - * $25 and c0_epc point to the signal handler, $29 points to 290 - * the struct rt_sigframe32. 
291 - */ 292 - regs->regs[ 4] = ksig->sig; 293 - regs->regs[ 5] = (unsigned long) &frame->rs_info; 294 - regs->regs[ 6] = (unsigned long) &frame->rs_uc; 295 - regs->regs[29] = (unsigned long) frame; 296 - regs->regs[31] = (unsigned long) sig_return; 297 - regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; 298 - 299 - DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", 300 - current->comm, current->pid, 301 - frame, regs->cp0_epc, regs->regs[31]); 302 - 303 - return 0; 304 - } 305 - 306 - /* 307 - * o32 compatibility on 64-bit kernels, without DSP ASE 308 - */ 309 - struct mips_abi mips_abi_32 = { 310 - .setup_frame = setup_frame_32, 311 - .setup_rt_frame = setup_rt_frame_32, 312 - .restart = __NR_O32_restart_syscall, 313 - 314 - .off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs), 315 - .off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr), 316 - .off_sc_used_math = offsetof(struct sigcontext32, sc_used_math), 317 - 318 - .vdso = &vdso_image_o32, 319 - };
+285
arch/mips/kernel/signal_o32.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 1991, 1992 Linus Torvalds 7 + * Copyright (C) 1994 - 2000, 2006 Ralf Baechle 8 + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 9 + * Copyright (C) 2016, Imagination Technologies Ltd. 10 + */ 11 + #include <linux/compiler.h> 12 + #include <linux/errno.h> 13 + #include <linux/signal.h> 14 + #include <linux/uaccess.h> 15 + 16 + #include <asm/abi.h> 17 + #include <asm/compat-signal.h> 18 + #include <asm/dsp.h> 19 + #include <asm/sim.h> 20 + #include <asm/unistd.h> 21 + 22 + #include "signal-common.h" 23 + 24 + /* 25 + * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... 26 + */ 27 + #define __NR_O32_restart_syscall 4253 28 + 29 + struct sigframe32 { 30 + u32 sf_ass[4]; /* argument save space for o32 */ 31 + u32 sf_pad[2]; /* Was: signal trampoline */ 32 + struct sigcontext32 sf_sc; 33 + compat_sigset_t sf_mask; 34 + }; 35 + 36 + struct ucontext32 { 37 + u32 uc_flags; 38 + s32 uc_link; 39 + compat_stack_t uc_stack; 40 + struct sigcontext32 uc_mcontext; 41 + compat_sigset_t uc_sigmask; /* mask last for extensibility */ 42 + }; 43 + 44 + struct rt_sigframe32 { 45 + u32 rs_ass[4]; /* argument save space for o32 */ 46 + u32 rs_pad[2]; /* Was: signal trampoline */ 47 + compat_siginfo_t rs_info; 48 + struct ucontext32 rs_uc; 49 + }; 50 + 51 + static int setup_sigcontext32(struct pt_regs *regs, 52 + struct sigcontext32 __user *sc) 53 + { 54 + int err = 0; 55 + int i; 56 + 57 + err |= __put_user(regs->cp0_epc, &sc->sc_pc); 58 + 59 + err |= __put_user(0, &sc->sc_regs[0]); 60 + for (i = 1; i < 32; i++) 61 + err |= __put_user(regs->regs[i], &sc->sc_regs[i]); 62 + 63 + err |= __put_user(regs->hi, &sc->sc_mdhi); 64 + err |= __put_user(regs->lo, &sc->sc_mdlo); 65 + if (cpu_has_dsp) { 66 + err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); 67 + err 
|= __put_user(mfhi1(), &sc->sc_hi1); 68 + err |= __put_user(mflo1(), &sc->sc_lo1); 69 + err |= __put_user(mfhi2(), &sc->sc_hi2); 70 + err |= __put_user(mflo2(), &sc->sc_lo2); 71 + err |= __put_user(mfhi3(), &sc->sc_hi3); 72 + err |= __put_user(mflo3(), &sc->sc_lo3); 73 + } 74 + 75 + /* 76 + * Save FPU state to signal context. Signal handler 77 + * will "inherit" current FPU state. 78 + */ 79 + err |= protected_save_fp_context(sc); 80 + 81 + return err; 82 + } 83 + 84 + static int restore_sigcontext32(struct pt_regs *regs, 85 + struct sigcontext32 __user *sc) 86 + { 87 + int err = 0; 88 + s32 treg; 89 + int i; 90 + 91 + /* Always make any pending restarted system calls return -EINTR */ 92 + current->restart_block.fn = do_no_restart_syscall; 93 + 94 + err |= __get_user(regs->cp0_epc, &sc->sc_pc); 95 + err |= __get_user(regs->hi, &sc->sc_mdhi); 96 + err |= __get_user(regs->lo, &sc->sc_mdlo); 97 + if (cpu_has_dsp) { 98 + err |= __get_user(treg, &sc->sc_hi1); mthi1(treg); 99 + err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg); 100 + err |= __get_user(treg, &sc->sc_hi2); mthi2(treg); 101 + err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg); 102 + err |= __get_user(treg, &sc->sc_hi3); mthi3(treg); 103 + err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); 104 + err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); 105 + } 106 + 107 + for (i = 1; i < 32; i++) 108 + err |= __get_user(regs->regs[i], &sc->sc_regs[i]); 109 + 110 + return err ?: protected_restore_fp_context(sc); 111 + } 112 + 113 + static int setup_frame_32(void *sig_return, struct ksignal *ksig, 114 + struct pt_regs *regs, sigset_t *set) 115 + { 116 + struct sigframe32 __user *frame; 117 + int err = 0; 118 + 119 + frame = get_sigframe(ksig, regs, sizeof(*frame)); 120 + if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 121 + return -EFAULT; 122 + 123 + err |= setup_sigcontext32(regs, &frame->sf_sc); 124 + err |= __copy_conv_sigset_to_user(&frame->sf_mask, set); 125 + 126 + if (err) 127 + return -EFAULT; 
128 + 129 + /* 130 + * Arguments to signal handler: 131 + * 132 + * a0 = signal number 133 + * a1 = 0 (should be cause) 134 + * a2 = pointer to struct sigcontext 135 + * 136 + * $25 and c0_epc point to the signal handler, $29 points to the 137 + * struct sigframe. 138 + */ 139 + regs->regs[ 4] = ksig->sig; 140 + regs->regs[ 5] = 0; 141 + regs->regs[ 6] = (unsigned long) &frame->sf_sc; 142 + regs->regs[29] = (unsigned long) frame; 143 + regs->regs[31] = (unsigned long) sig_return; 144 + regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; 145 + 146 + DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", 147 + current->comm, current->pid, 148 + frame, regs->cp0_epc, regs->regs[31]); 149 + 150 + return 0; 151 + } 152 + 153 + asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) 154 + { 155 + struct rt_sigframe32 __user *frame; 156 + sigset_t set; 157 + int sig; 158 + 159 + frame = (struct rt_sigframe32 __user *) regs.regs[29]; 160 + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 161 + goto badframe; 162 + if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) 163 + goto badframe; 164 + 165 + set_current_blocked(&set); 166 + 167 + sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext); 168 + if (sig < 0) 169 + goto badframe; 170 + else if (sig) 171 + force_sig(sig, current); 172 + 173 + if (compat_restore_altstack(&frame->rs_uc.uc_stack)) 174 + goto badframe; 175 + 176 + /* 177 + * Don't let your children do this ... 
178 + */ 179 + __asm__ __volatile__( 180 + "move\t$29, %0\n\t" 181 + "j\tsyscall_exit" 182 + :/* no outputs */ 183 + :"r" (&regs)); 184 + /* Unreached */ 185 + 186 + badframe: 187 + force_sig(SIGSEGV, current); 188 + } 189 + 190 + static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig, 191 + struct pt_regs *regs, sigset_t *set) 192 + { 193 + struct rt_sigframe32 __user *frame; 194 + int err = 0; 195 + 196 + frame = get_sigframe(ksig, regs, sizeof(*frame)); 197 + if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame))) 198 + return -EFAULT; 199 + 200 + /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ 201 + err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info); 202 + 203 + /* Create the ucontext. */ 204 + err |= __put_user(0, &frame->rs_uc.uc_flags); 205 + err |= __put_user(0, &frame->rs_uc.uc_link); 206 + err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]); 207 + err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext); 208 + err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set); 209 + 210 + if (err) 211 + return -EFAULT; 212 + 213 + /* 214 + * Arguments to signal handler: 215 + * 216 + * a0 = signal number 217 + * a1 = 0 (should be cause) 218 + * a2 = pointer to ucontext 219 + * 220 + * $25 and c0_epc point to the signal handler, $29 points to 221 + * the struct rt_sigframe32. 
222 + */ 223 + regs->regs[ 4] = ksig->sig; 224 + regs->regs[ 5] = (unsigned long) &frame->rs_info; 225 + regs->regs[ 6] = (unsigned long) &frame->rs_uc; 226 + regs->regs[29] = (unsigned long) frame; 227 + regs->regs[31] = (unsigned long) sig_return; 228 + regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; 229 + 230 + DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", 231 + current->comm, current->pid, 232 + frame, regs->cp0_epc, regs->regs[31]); 233 + 234 + return 0; 235 + } 236 + 237 + /* 238 + * o32 compatibility on 64-bit kernels, without DSP ASE 239 + */ 240 + struct mips_abi mips_abi_32 = { 241 + .setup_frame = setup_frame_32, 242 + .setup_rt_frame = setup_rt_frame_32, 243 + .restart = __NR_O32_restart_syscall, 244 + 245 + .off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs), 246 + .off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr), 247 + .off_sc_used_math = offsetof(struct sigcontext32, sc_used_math), 248 + 249 + .vdso = &vdso_image_o32, 250 + }; 251 + 252 + 253 + asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) 254 + { 255 + struct sigframe32 __user *frame; 256 + sigset_t blocked; 257 + int sig; 258 + 259 + frame = (struct sigframe32 __user *) regs.regs[29]; 260 + if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) 261 + goto badframe; 262 + if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask)) 263 + goto badframe; 264 + 265 + set_current_blocked(&blocked); 266 + 267 + sig = restore_sigcontext32(&regs, &frame->sf_sc); 268 + if (sig < 0) 269 + goto badframe; 270 + else if (sig) 271 + force_sig(sig, current); 272 + 273 + /* 274 + * Don't let your children do this ... 275 + */ 276 + __asm__ __volatile__( 277 + "move\t$29, %0\n\t" 278 + "j\tsyscall_exit" 279 + :/* no outputs */ 280 + :"r" (&regs)); 281 + /* Unreached */ 282 + 283 + badframe: 284 + force_sig(SIGSEGV, current); 285 + }
+1
arch/mips/kernel/smp-bmips.c
··· 363 363 pr_info("SMP: CPU%d is offline\n", cpu); 364 364 365 365 set_cpu_online(cpu, false); 366 + calculate_cpu_foreign_map(); 366 367 cpumask_clear_cpu(cpu, &cpu_callin_map); 367 368 clear_c0_status(IE_IRQ5); 368 369
+33 -9
arch/mips/kernel/smp-cps.c
··· 206 206 } 207 207 } 208 208 209 - static void boot_core(unsigned core) 209 + static void boot_core(unsigned int core, unsigned int vpe_id) 210 210 { 211 211 u32 access, stat, seq_state; 212 212 unsigned timeout; ··· 233 233 mips_cpc_lock_other(core); 234 234 235 235 if (mips_cm_revision() >= CM_REV_CM3) { 236 - /* Run VP0 following the reset */ 237 - write_cpc_co_vp_run(0x1); 236 + /* Run only the requested VP following the reset */ 237 + write_cpc_co_vp_stop(0xf); 238 + write_cpc_co_vp_run(1 << vpe_id); 238 239 239 240 /* 240 241 * Ensure that the VP_RUN register is written before the ··· 307 306 308 307 if (!test_bit(core, core_power)) { 309 308 /* Boot a VPE on a powered down core */ 310 - boot_core(core); 309 + boot_core(core, vpe_id); 311 310 goto out; 312 311 } 313 312 ··· 398 397 atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask); 399 398 smp_mb__after_atomic(); 400 399 set_cpu_online(cpu, false); 400 + calculate_cpu_foreign_map(); 401 401 cpumask_clear_cpu(cpu, &cpu_callin_map); 402 402 403 403 return 0; ··· 413 411 414 412 void play_dead(void) 415 413 { 416 - unsigned cpu, core; 414 + unsigned int cpu, core, vpe_id; 417 415 418 416 local_irq_disable(); 419 417 idle_task_exit(); 420 418 cpu = smp_processor_id(); 421 419 cpu_death = CPU_DEATH_POWER; 422 420 423 - if (cpu_has_mipsmt) { 421 + pr_debug("CPU%d going offline\n", cpu); 422 + 423 + if (cpu_has_mipsmt || cpu_has_vp) { 424 424 core = cpu_data[cpu].core; 425 425 426 426 /* Look for another online VPE within the core */ ··· 443 439 complete(&cpu_death_chosen); 444 440 445 441 if (cpu_death == CPU_DEATH_HALT) { 446 - /* Halt this TC */ 447 - write_c0_tchalt(TCHALT_H); 448 - instruction_hazard(); 442 + vpe_id = cpu_vpe_id(&cpu_data[cpu]); 443 + 444 + pr_debug("Halting core %d VP%d\n", core, vpe_id); 445 + if (cpu_has_mipsmt) { 446 + /* Halt this TC */ 447 + write_c0_tchalt(TCHALT_H); 448 + instruction_hazard(); 449 + } else if (cpu_has_vp) { 450 + write_cpc_cl_vp_stop(1 << vpe_id); 
451 + 452 + /* Ensure that the VP_STOP register is written */ 453 + wmb(); 454 + } 449 455 } else { 456 + pr_debug("Gating power to core %d\n", core); 450 457 /* Power down the core */ 451 458 cps_pm_enter_state(CPS_PM_POWER_GATED); 452 459 } ··· 484 469 static void cps_cpu_die(unsigned int cpu) 485 470 { 486 471 unsigned core = cpu_data[cpu].core; 472 + unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]); 487 473 unsigned stat; 488 474 int err; 489 475 ··· 513 497 * in which case the CPC will refuse to power down the core. 514 498 */ 515 499 do { 500 + mips_cm_lock_other(core, vpe_id); 516 501 mips_cpc_lock_other(core); 517 502 stat = read_cpc_co_stat_conf(); 518 503 stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK; 519 504 mips_cpc_unlock_other(); 505 + mips_cm_unlock_other(); 520 506 } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 && 521 507 stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 && 522 508 stat != CPC_Cx_STAT_CONF_SEQSTATE_U2); ··· 535 517 (void *)(unsigned long)cpu, 1); 536 518 if (err) 537 519 panic("Failed to call remote sibling CPU\n"); 520 + } else if (cpu_has_vp) { 521 + do { 522 + mips_cm_lock_other(core, vpe_id); 523 + stat = read_cpc_co_vp_running(); 524 + mips_cm_unlock_other(); 525 + } while (stat & (1 << vpe_id)); 538 526 } 539 527 } 540 528
+21 -13
arch/mips/kernel/smp.c
··· 72 72 * A logcal cpu mask containing only one VPE per core to 73 73 * reduce the number of IPIs on large MT systems. 74 74 */ 75 - cpumask_t cpu_foreign_map __read_mostly; 75 + cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly; 76 76 EXPORT_SYMBOL(cpu_foreign_map); 77 77 78 78 /* representing cpus for which sibling maps can be computed */ ··· 124 124 * Calculate a new cpu_foreign_map mask whenever a 125 125 * new cpu appears or disappears. 126 126 */ 127 - static inline void calculate_cpu_foreign_map(void) 127 + void calculate_cpu_foreign_map(void) 128 128 { 129 129 int i, k, core_present; 130 130 cpumask_t temp_foreign_map; ··· 141 141 cpumask_set_cpu(i, &temp_foreign_map); 142 142 } 143 143 144 - cpumask_copy(&cpu_foreign_map, &temp_foreign_map); 144 + for_each_online_cpu(i) 145 + cpumask_andnot(&cpu_foreign_map[i], 146 + &temp_foreign_map, &cpu_sibling_map[i]); 145 147 } 146 148 147 149 struct plat_smp_ops *mp_ops; ··· 346 344 static void stop_this_cpu(void *dummy) 347 345 { 348 346 /* 349 - * Remove this CPU. Be a bit slow here and 350 - * set the bits for every online CPU so we don't miss 351 - * any IPI whilst taking this VPE down. 347 + * Remove this CPU: 352 348 */ 353 - 354 - cpumask_copy(&cpu_foreign_map, cpu_online_mask); 355 - 356 - /* Make it visible to every other CPU */ 357 - smp_mb(); 358 349 359 350 set_cpu_online(smp_processor_id(), false); 360 351 calculate_cpu_foreign_map(); ··· 507 512 smp_on_other_tlbs(flush_tlb_range_ipi, &fd); 508 513 } else { 509 514 unsigned int cpu; 515 + int exec = vma->vm_flags & VM_EXEC; 510 516 511 517 for_each_online_cpu(cpu) { 518 + /* 519 + * flush_cache_range() will only fully flush icache if 520 + * the VMA is executable, otherwise we must invalidate 521 + * ASID without it appearing to has_valid_asid() as if 522 + * mm has been completely unused by that CPU. 
523 + */ 512 524 if (cpu != smp_processor_id() && cpu_context(cpu, mm)) 513 - cpu_context(cpu, mm) = 0; 525 + cpu_context(cpu, mm) = !exec; 514 526 } 515 527 } 516 528 local_flush_tlb_range(vma, start, end); ··· 562 560 unsigned int cpu; 563 561 564 562 for_each_online_cpu(cpu) { 563 + /* 564 + * flush_cache_page() only does partial flushes, so 565 + * invalidate ASID without it appearing to 566 + * has_valid_asid() as if mm has been completely unused 567 + * by that CPU. 568 + */ 565 569 if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) 566 - cpu_context(cpu, vma->vm_mm) = 0; 570 + cpu_context(cpu, vma->vm_mm) = 1; 567 571 } 568 572 } 569 573 local_flush_tlb_page(vma, page);
+3 -1
arch/mips/kernel/traps.c
··· 704 704 int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) 705 705 { 706 706 struct siginfo si = { 0 }; 707 + struct vm_area_struct *vma; 707 708 708 709 switch (sig) { 709 710 case 0: ··· 745 744 si.si_addr = fault_addr; 746 745 si.si_signo = sig; 747 746 down_read(&current->mm->mmap_sem); 748 - if (find_vma(current->mm, (unsigned long)fault_addr)) 747 + vma = find_vma(current->mm, (unsigned long)fault_addr); 748 + if (vma && (vma->vm_start <= (unsigned long)fault_addr)) 749 749 si.si_code = SEGV_ACCERR; 750 750 else 751 751 si.si_code = SEGV_MAPERR;
+10
arch/mips/kernel/vdso.c
··· 107 107 if (down_write_killable(&mm->mmap_sem)) 108 108 return -EINTR; 109 109 110 + /* Map delay slot emulation page */ 111 + base = mmap_region(NULL, STACK_TOP, PAGE_SIZE, 112 + VM_READ|VM_WRITE|VM_EXEC| 113 + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 114 + 0); 115 + if (IS_ERR_VALUE(base)) { 116 + ret = base; 117 + goto out; 118 + } 119 + 110 120 /* 111 121 * Determine total area size. This includes the VDSO data itself, the 112 122 * data page, and the GIC user page if present. Always create a mapping
+16 -15
arch/mips/lantiq/irq.c
··· 66 66 #endif 67 67 68 68 static int exin_avail; 69 - static struct resource ltq_eiu_irq[MAX_EIU]; 69 + static u32 ltq_eiu_irq[MAX_EIU]; 70 70 static void __iomem *ltq_icu_membase[MAX_IM]; 71 71 static void __iomem *ltq_eiu_membase; 72 72 static struct irq_domain *ltq_domain; ··· 75 75 int ltq_eiu_get_irq(int exin) 76 76 { 77 77 if (exin < exin_avail) 78 - return ltq_eiu_irq[exin].start; 78 + return ltq_eiu_irq[exin]; 79 79 return -1; 80 80 } 81 81 ··· 125 125 { 126 126 int i; 127 127 128 - for (i = 0; i < MAX_EIU; i++) { 129 - if (d->hwirq == ltq_eiu_irq[i].start) { 128 + for (i = 0; i < exin_avail; i++) { 129 + if (d->hwirq == ltq_eiu_irq[i]) { 130 130 int val = 0; 131 131 int edge = 0; 132 132 ··· 173 173 int i; 174 174 175 175 ltq_enable_irq(d); 176 - for (i = 0; i < MAX_EIU; i++) { 177 - if (d->hwirq == ltq_eiu_irq[i].start) { 176 + for (i = 0; i < exin_avail; i++) { 177 + if (d->hwirq == ltq_eiu_irq[i]) { 178 178 /* by default we are low level triggered */ 179 179 ltq_eiu_settype(d, IRQF_TRIGGER_LOW); 180 180 /* clear all pending */ ··· 195 195 int i; 196 196 197 197 ltq_disable_irq(d); 198 - for (i = 0; i < MAX_EIU; i++) { 199 - if (d->hwirq == ltq_eiu_irq[i].start) { 198 + for (i = 0; i < exin_avail; i++) { 199 + if (d->hwirq == ltq_eiu_irq[i]) { 200 200 /* disable */ 201 201 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i), 202 202 LTQ_EIU_EXIN_INEN); ··· 206 206 } 207 207 208 208 static struct irq_chip ltq_irq_type = { 209 - "icu", 209 + .name = "icu", 210 210 .irq_enable = ltq_enable_irq, 211 211 .irq_disable = ltq_disable_irq, 212 212 .irq_unmask = ltq_enable_irq, ··· 216 216 }; 217 217 218 218 static struct irq_chip ltq_eiu_type = { 219 - "eiu", 219 + .name = "eiu", 220 220 .irq_startup = ltq_startup_eiu_irq, 221 221 .irq_shutdown = ltq_shutdown_eiu_irq, 222 222 .irq_enable = ltq_enable_irq, ··· 341 341 return 0; 342 342 343 343 for (i = 0; i < exin_avail; i++) 344 - if (hw == ltq_eiu_irq[i].start) 344 + if (hw == ltq_eiu_irq[i]) 345 345 chip = 
&ltq_eiu_type; 346 346 347 - irq_set_chip_and_handler(hw, chip, handle_level_irq); 347 + irq_set_chip_and_handler(irq, chip, handle_level_irq); 348 348 349 349 return 0; 350 350 } ··· 439 439 eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); 440 440 if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) { 441 441 /* find out how many external irq sources we have */ 442 - exin_avail = of_irq_count(eiu_node); 442 + exin_avail = of_property_count_u32_elems(eiu_node, 443 + "lantiq,eiu-irqs"); 443 444 444 445 if (exin_avail > MAX_EIU) 445 446 exin_avail = MAX_EIU; 446 447 447 - ret = of_irq_to_resource_table(eiu_node, 448 + ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs", 448 449 ltq_eiu_irq, exin_avail); 449 - if (ret != exin_avail) 450 + if (ret) 450 451 panic("failed to load external irq resources"); 451 452 452 453 if (!request_mem_region(res.start, resource_size(&res),
+2 -2
arch/mips/lantiq/prom.c
··· 74 74 75 75 set_io_port_base((unsigned long) KSEG1); 76 76 77 - if (fw_arg0 == -2) /* UHI interface */ 78 - dtb = (void *)fw_arg1; 77 + if (fw_passed_dtb) /* UHI interface */ 78 + dtb = (void *)fw_passed_dtb; 79 79 else if (__dtb_start != __dtb_end) 80 80 dtb = (void *)__dtb_start; 81 81 else
+7 -7
arch/mips/loongson64/loongson-3/hpet.c
··· 13 13 #define SMBUS_PCI_REG64 0x64 14 14 #define SMBUS_PCI_REGB4 0xb4 15 15 16 - #define HPET_MIN_CYCLES 64 17 - #define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1)) 16 + #define HPET_MIN_CYCLES 16 17 + #define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES * 12) 18 18 19 19 static DEFINE_SPINLOCK(hpet_lock); 20 20 DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device); ··· 157 157 static int hpet_next_event(unsigned long delta, 158 158 struct clock_event_device *evt) 159 159 { 160 - unsigned int cnt; 161 - int res; 160 + u32 cnt; 161 + s32 res; 162 162 163 163 cnt = hpet_read(HPET_COUNTER); 164 - cnt += delta; 164 + cnt += (u32) delta; 165 165 hpet_write(HPET_T0_CMP, cnt); 166 166 167 - res = (int)(cnt - hpet_read(HPET_COUNTER)); 167 + res = (s32)(cnt - hpet_read(HPET_COUNTER)); 168 168 169 169 return res < HPET_MIN_CYCLES ? -ETIME : 0; 170 170 } ··· 230 230 231 231 cd = &per_cpu(hpet_clockevent_device, cpu); 232 232 cd->name = "hpet"; 233 - cd->rating = 320; 233 + cd->rating = 100; 234 234 cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; 235 235 cd->set_state_shutdown = hpet_set_state_shutdown; 236 236 cd->set_state_periodic = hpet_set_state_periodic;
+1
arch/mips/loongson64/loongson-3/smp.c
··· 417 417 return -EBUSY; 418 418 419 419 set_cpu_online(cpu, false); 420 + calculate_cpu_foreign_map(); 420 421 cpumask_clear_cpu(cpu, &cpu_callin_map); 421 422 local_irq_save(flags); 422 423 fixup_irqs();
+4 -4
arch/mips/math-emu/cp1emu.c
··· 434 434 * a single subroutine should be used across both 435 435 * modules. 436 436 */ 437 - static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, 438 - unsigned long *contpc) 437 + int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, 438 + unsigned long *contpc) 439 439 { 440 440 union mips_instruction insn = (union mips_instruction)dec_insn.insn; 441 441 unsigned int fcr31; ··· 1268 1268 * instruction in the dslot. 1269 1269 */ 1270 1270 sig = mips_dsemul(xcp, ir, 1271 - contpc); 1271 + bcpc, contpc); 1272 1272 if (sig < 0) 1273 1273 break; 1274 1274 if (sig) ··· 1323 1323 * Single step the non-cp1 1324 1324 * instruction in the dslot 1325 1325 */ 1326 - sig = mips_dsemul(xcp, ir, contpc); 1326 + sig = mips_dsemul(xcp, ir, bcpc, contpc); 1327 1327 if (sig < 0) 1328 1328 break; 1329 1329 if (sig)
+223 -110
arch/mips/math-emu/dsemul.c
··· 1 + #include <linux/err.h> 2 + #include <linux/slab.h> 3 + 1 4 #include <asm/branch.h> 2 5 #include <asm/cacheflush.h> 3 6 #include <asm/fpu_emulator.h> ··· 8 5 #include <asm/mipsregs.h> 9 6 #include <asm/uaccess.h> 10 7 11 - #include "ieee754.h" 12 - 13 - /* 14 - * Emulate the arbitrary instruction ir at xcp->cp0_epc. Required when 15 - * we have to emulate the instruction in a COP1 branch delay slot. Do 16 - * not change cp0_epc due to the instruction 8 + /** 9 + * struct emuframe - The 'emulation' frame structure 10 + * @emul: The instruction to 'emulate'. 11 + * @badinst: A break instruction to cause a return to the kernel. 17 12 * 18 - * According to the spec: 19 - * 1) it shouldn't be a branch :-) 20 - * 2) it can be a COP instruction :-( 21 - * 3) if we are tring to run a protected memory space we must take 22 - * special care on memory access instructions :-( 13 + * This structure defines the frames placed within the delay slot emulation 14 + * page in response to a call to mips_dsemul(). Each thread may be allocated 15 + * only one frame at any given time. The kernel stores within it the 16 + * instruction to be 'emulated' followed by a break instruction, then 17 + * executes the frame in user mode. The break causes a trap to the kernel 18 + * which leads to do_dsemulret() being called unless the instruction in 19 + * @emul causes a trap itself, is a branch, or a signal is delivered to 20 + * the thread. In these cases the allocated frame will either be reused by 21 + * a subsequent delay slot 'emulation', or be freed during signal delivery or 22 + * upon thread exit. 23 + * 24 + * This approach is used because: 25 + * 26 + * - Actually emulating all instructions isn't feasible. We would need to 27 + * be able to handle instructions from all revisions of the MIPS ISA, 28 + * all ASEs & all vendor instruction set extensions. 
This would be a 29 + * whole lot of work & continual maintenance burden as new instructions 30 + * are introduced, and in the case of some vendor extensions may not 31 + * even be possible. Thus we need to take the approach of actually 32 + * executing the instruction. 33 + * 34 + * - We must execute the instruction within user context. If we were to 35 + * execute the instruction in kernel mode then it would have access to 36 + * kernel resources without very careful checks, leaving us with a 37 + * high potential for security or stability issues to arise. 38 + * 39 + * - We used to place the frame on the users stack, but this requires 40 + * that the stack be executable. This is bad for security so the 41 + * per-process page is now used instead. 42 + * 43 + * - The instruction in @emul may be something entirely invalid for a 44 + * delay slot. The user may (intentionally or otherwise) place a branch 45 + * in a delay slot, or a kernel mode instruction, or something else 46 + * which generates an exception. Thus we can't rely upon the break in 47 + * @badinst always being hit. For this reason we track the index of the 48 + * frame allocated to each thread, allowing us to clean it up at later 49 + * points such as signal delivery or thread exit. 50 + * 51 + * - The user may generate a fake struct emuframe if they wish, invoking 52 + * the BRK_MEMU break instruction themselves. We must therefore not 53 + * trust that BRK_MEMU means there's actually a valid frame allocated 54 + * to the thread, and must not allow the user to do anything they 55 + * couldn't already. 23 56 */ 24 - 25 - /* 26 - * "Trampoline" return routine to catch exception following 27 - * execution of delay-slot instruction execution. 
28 - */ 29 - 30 57 struct emuframe { 31 58 mips_instruction emul; 32 59 mips_instruction badinst; 33 - mips_instruction cookie; 34 - unsigned long epc; 35 60 }; 36 61 37 - /* 38 - * Set up an emulation frame for instruction IR, from a delay slot of 39 - * a branch jumping to CPC. Return 0 if successful, -1 if no emulation 40 - * required, otherwise a signal number causing a frame setup failure. 41 - */ 42 - int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc) 62 + static const int emupage_frame_count = PAGE_SIZE / sizeof(struct emuframe); 63 + 64 + static inline __user struct emuframe *dsemul_page(void) 65 + { 66 + return (__user struct emuframe *)STACK_TOP; 67 + } 68 + 69 + static int alloc_emuframe(void) 70 + { 71 + mm_context_t *mm_ctx = &current->mm->context; 72 + int idx; 73 + 74 + retry: 75 + spin_lock(&mm_ctx->bd_emupage_lock); 76 + 77 + /* Ensure we have an allocation bitmap */ 78 + if (!mm_ctx->bd_emupage_allocmap) { 79 + mm_ctx->bd_emupage_allocmap = 80 + kcalloc(BITS_TO_LONGS(emupage_frame_count), 81 + sizeof(unsigned long), 82 + GFP_ATOMIC); 83 + 84 + if (!mm_ctx->bd_emupage_allocmap) { 85 + idx = BD_EMUFRAME_NONE; 86 + goto out_unlock; 87 + } 88 + } 89 + 90 + /* Attempt to allocate a single bit/frame */ 91 + idx = bitmap_find_free_region(mm_ctx->bd_emupage_allocmap, 92 + emupage_frame_count, 0); 93 + if (idx < 0) { 94 + /* 95 + * Failed to allocate a frame. We'll wait until one becomes 96 + * available. We unlock the page so that other threads actually 97 + * get the opportunity to free their frames, which means 98 + * technically the result of bitmap_full may be incorrect. 99 + * However the worst case is that we repeat all this and end up 100 + * back here again. 
101 + */ 102 + spin_unlock(&mm_ctx->bd_emupage_lock); 103 + if (!wait_event_killable(mm_ctx->bd_emupage_queue, 104 + !bitmap_full(mm_ctx->bd_emupage_allocmap, 105 + emupage_frame_count))) 106 + goto retry; 107 + 108 + /* Received a fatal signal - just give in */ 109 + return BD_EMUFRAME_NONE; 110 + } 111 + 112 + /* Success! */ 113 + pr_debug("allocate emuframe %d to %d\n", idx, current->pid); 114 + out_unlock: 115 + spin_unlock(&mm_ctx->bd_emupage_lock); 116 + return idx; 117 + } 118 + 119 + static void free_emuframe(int idx, struct mm_struct *mm) 120 + { 121 + mm_context_t *mm_ctx = &mm->context; 122 + 123 + spin_lock(&mm_ctx->bd_emupage_lock); 124 + 125 + pr_debug("free emuframe %d from %d\n", idx, current->pid); 126 + bitmap_clear(mm_ctx->bd_emupage_allocmap, idx, 1); 127 + 128 + /* If some thread is waiting for a frame, now's its chance */ 129 + wake_up(&mm_ctx->bd_emupage_queue); 130 + 131 + spin_unlock(&mm_ctx->bd_emupage_lock); 132 + } 133 + 134 + static bool within_emuframe(struct pt_regs *regs) 135 + { 136 + unsigned long base = (unsigned long)dsemul_page(); 137 + 138 + if (regs->cp0_epc < base) 139 + return false; 140 + if (regs->cp0_epc >= (base + PAGE_SIZE)) 141 + return false; 142 + 143 + return true; 144 + } 145 + 146 + bool dsemul_thread_cleanup(struct task_struct *tsk) 147 + { 148 + int fr_idx; 149 + 150 + /* Clear any allocated frame, retrieving its index */ 151 + fr_idx = atomic_xchg(&tsk->thread.bd_emu_frame, BD_EMUFRAME_NONE); 152 + 153 + /* If no frame was allocated, we're done */ 154 + if (fr_idx == BD_EMUFRAME_NONE) 155 + return false; 156 + 157 + task_lock(tsk); 158 + 159 + /* Free the frame that this thread had allocated */ 160 + if (tsk->mm) 161 + free_emuframe(fr_idx, tsk->mm); 162 + 163 + task_unlock(tsk); 164 + return true; 165 + } 166 + 167 + bool dsemul_thread_rollback(struct pt_regs *regs) 168 + { 169 + struct emuframe __user *fr; 170 + int fr_idx; 171 + 172 + /* Do nothing if we're not executing from a frame */ 173 + if 
(!within_emuframe(regs)) 174 + return false; 175 + 176 + /* Find the frame being executed */ 177 + fr_idx = atomic_read(&current->thread.bd_emu_frame); 178 + if (fr_idx == BD_EMUFRAME_NONE) 179 + return false; 180 + fr = &dsemul_page()[fr_idx]; 181 + 182 + /* 183 + * If the PC is at the emul instruction, roll back to the branch. If 184 + * PC is at the badinst (break) instruction, we've already emulated the 185 + * instruction so progress to the continue PC. If it's anything else 186 + * then something is amiss & the user has branched into some other area 187 + * of the emupage - we'll free the allocated frame anyway. 188 + */ 189 + if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->emul) 190 + regs->cp0_epc = current->thread.bd_emu_branch_pc; 191 + else if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->badinst) 192 + regs->cp0_epc = current->thread.bd_emu_cont_pc; 193 + 194 + atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE); 195 + free_emuframe(fr_idx, current->mm); 196 + return true; 197 + } 198 + 199 + void dsemul_mm_cleanup(struct mm_struct *mm) 200 + { 201 + mm_context_t *mm_ctx = &mm->context; 202 + 203 + kfree(mm_ctx->bd_emupage_allocmap); 204 + } 205 + 206 + int mips_dsemul(struct pt_regs *regs, mips_instruction ir, 207 + unsigned long branch_pc, unsigned long cont_pc) 43 208 { 44 209 int isa16 = get_isa16_mode(regs->cp0_epc); 45 210 mips_instruction break_math; 46 211 struct emuframe __user *fr; 47 - int err; 212 + int err, fr_idx; 48 213 49 214 /* NOP is easy */ 50 215 if (ir == 0) ··· 239 68 } 240 69 } 241 70 242 - pr_debug("dsemul %lx %lx\n", regs->cp0_epc, cpc); 71 + pr_debug("dsemul 0x%08lx cont at 0x%08lx\n", regs->cp0_epc, cont_pc); 243 72 244 - /* 245 - * The strategy is to push the instruction onto the user stack 246 - * and put a trap after it which we can catch and jump to 247 - * the required address any alternative apart from full 248 - * instruction emulation!!. 
249 - * 250 - * Algorithmics used a system call instruction, and 251 - * borrowed that vector. MIPS/Linux version is a bit 252 - * more heavyweight in the interests of portability and 253 - * multiprocessor support. For Linux we use a BREAK 514 254 - * instruction causing a breakpoint exception. 255 - */ 73 + /* Allocate a frame if we don't already have one */ 74 + fr_idx = atomic_read(&current->thread.bd_emu_frame); 75 + if (fr_idx == BD_EMUFRAME_NONE) 76 + fr_idx = alloc_emuframe(); 77 + if (fr_idx == BD_EMUFRAME_NONE) 78 + return SIGBUS; 79 + fr = &dsemul_page()[fr_idx]; 80 + 81 + /* Retrieve the appropriately encoded break instruction */ 256 82 break_math = BREAK_MATH(isa16); 257 83 258 - /* Ensure that the two instructions are in the same cache line */ 259 - fr = (struct emuframe __user *) 260 - ((regs->regs[29] - sizeof(struct emuframe)) & ~0x7); 261 - 262 - /* Verify that the stack pointer is not completely insane */ 263 - if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe)))) 264 - return SIGBUS; 265 - 84 + /* Write the instructions to the frame */ 266 85 if (isa16) { 267 86 err = __put_user(ir >> 16, 268 87 (u16 __user *)(&fr->emul)); ··· 267 106 err |= __put_user(break_math, &fr->badinst); 268 107 } 269 108 270 - err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie); 271 - err |= __put_user(cpc, &fr->epc); 272 - 273 109 if (unlikely(err)) { 274 110 MIPS_FPU_EMU_INC_STATS(errors); 111 + free_emuframe(fr_idx, current->mm); 275 112 return SIGBUS; 276 113 } 277 114 115 + /* Record the PC of the branch, PC to continue from & frame index */ 116 + current->thread.bd_emu_branch_pc = branch_pc; 117 + current->thread.bd_emu_cont_pc = cont_pc; 118 + atomic_set(&current->thread.bd_emu_frame, fr_idx); 119 + 120 + /* Change user register context to execute the frame */ 278 121 regs->cp0_epc = (unsigned long)&fr->emul | isa16; 279 122 123 + /* Ensure the icache observes our newly written frame */ 280 124 flush_cache_sigtramp((unsigned 
long)&fr->emul); 281 125 282 126 return 0; 283 127 } 284 128 285 - int do_dsemulret(struct pt_regs *xcp) 129 + bool do_dsemulret(struct pt_regs *xcp) 286 130 { 287 - int isa16 = get_isa16_mode(xcp->cp0_epc); 288 - struct emuframe __user *fr; 289 - unsigned long epc; 290 - u32 insn, cookie; 291 - int err = 0; 292 - u16 instr[2]; 293 - 294 - fr = (struct emuframe __user *) 295 - (msk_isa16_mode(xcp->cp0_epc) - sizeof(mips_instruction)); 296 - 297 - /* 298 - * If we can't even access the area, something is very wrong, but we'll 299 - * leave that to the default handling 300 - */ 301 - if (!access_ok(VERIFY_READ, fr, sizeof(struct emuframe))) 302 - return 0; 303 - 304 - /* 305 - * Do some sanity checking on the stackframe: 306 - * 307 - * - Is the instruction pointed to by the EPC an BREAK_MATH? 308 - * - Is the following memory word the BD_COOKIE? 309 - */ 310 - if (isa16) { 311 - err = __get_user(instr[0], 312 - (u16 __user *)(&fr->badinst)); 313 - err |= __get_user(instr[1], 314 - (u16 __user *)((long)(&fr->badinst) + 2)); 315 - insn = (instr[0] << 16) | instr[1]; 316 - } else { 317 - err = __get_user(insn, &fr->badinst); 318 - } 319 - err |= __get_user(cookie, &fr->cookie); 320 - 321 - if (unlikely(err || 322 - insn != BREAK_MATH(isa16) || cookie != BD_COOKIE)) { 131 + /* Cleanup the allocated frame, returning if there wasn't one */ 132 + if (!dsemul_thread_cleanup(current)) { 323 133 MIPS_FPU_EMU_INC_STATS(errors); 324 - return 0; 325 - } 326 - 327 - /* 328 - * At this point, we are satisfied that it's a BD emulation trap. Yes, 329 - * a user might have deliberately put two malformed and useless 330 - * instructions in a row in his program, in which case he's in for a 331 - * nasty surprise - the next instruction will be treated as a 332 - * continuation address! Alas, this seems to be the only way that we 333 - * can handle signals, recursion, and longjmps() in the context of 334 - * emulating the branch delay instruction. 
335 - */ 336 - 337 - pr_debug("dsemulret\n"); 338 - 339 - if (__get_user(epc, &fr->epc)) { /* Saved EPC */ 340 - /* This is not a good situation to be in */ 341 - force_sig(SIGBUS, current); 342 - 343 - return 0; 134 + return false; 344 135 } 345 136 346 137 /* Set EPC to return to post-branch instruction */ 347 - xcp->cp0_epc = epc; 348 - MIPS_FPU_EMU_INC_STATS(ds_emul); 349 - return 1; 138 + xcp->cp0_epc = current->thread.bd_emu_cont_pc; 139 + pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc); 140 + return true; 350 141 }
+227 -57
arch/mips/mm/c-r4k.c
··· 40 40 #include <asm/mips-cm.h> 41 41 42 42 /* 43 + * Bits describing what cache ops an SMP callback function may perform. 44 + * 45 + * R4K_HIT - Virtual user or kernel address based cache operations. The 46 + * active_mm must be checked before using user addresses, falling 47 + * back to kmap. 48 + * R4K_INDEX - Index based cache operations. 49 + */ 50 + 51 + #define R4K_HIT BIT(0) 52 + #define R4K_INDEX BIT(1) 53 + 54 + /** 55 + * r4k_op_needs_ipi() - Decide if a cache op needs to be done on every core. 56 + * @type: Type of cache operations (R4K_HIT or R4K_INDEX). 57 + * 58 + * Decides whether a cache op needs to be performed on every core in the system. 59 + * This may change depending on the @type of cache operation, as well as the set 60 + * of online CPUs, so preemption should be disabled by the caller to prevent CPU 61 + * hotplug from changing the result. 62 + * 63 + * Returns: 1 if the cache operation @type should be done on every core in 64 + * the system. 65 + * 0 if the cache operation @type is globalized and only needs to 66 + * be performed on a simple CPU. 67 + */ 68 + static inline bool r4k_op_needs_ipi(unsigned int type) 69 + { 70 + /* The MIPS Coherence Manager (CM) globalizes address-based cache ops */ 71 + if (type == R4K_HIT && mips_cm_present()) 72 + return false; 73 + 74 + /* 75 + * Hardware doesn't globalize the required cache ops, so SMP calls may 76 + * be needed, but only if there are foreign CPUs (non-siblings with 77 + * separate caches). 78 + */ 79 + /* cpu_foreign_map[] undeclared when !CONFIG_SMP */ 80 + #ifdef CONFIG_SMP 81 + return !cpumask_empty(&cpu_foreign_map[0]); 82 + #else 83 + return false; 84 + #endif 85 + } 86 + 87 + /* 43 88 * Special Variant of smp_call_function for use by cache functions: 44 89 * 45 90 * o No return value ··· 93 48 * primary cache. 
94 49 * o doesn't disable interrupts on the local CPU 95 50 */ 96 - static inline void r4k_on_each_cpu(void (*func) (void *info), void *info) 51 + static inline void r4k_on_each_cpu(unsigned int type, 52 + void (*func)(void *info), void *info) 97 53 { 98 54 preempt_disable(); 99 - 100 - /* 101 - * The Coherent Manager propagates address-based cache ops to other 102 - * cores but not index-based ops. However, r4k_on_each_cpu is used 103 - * in both cases so there is no easy way to tell what kind of op is 104 - * executed to the other cores. The best we can probably do is 105 - * to restrict that call when a CM is not present because both 106 - * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops. 107 - */ 108 - if (!mips_cm_present()) 109 - smp_call_function_many(&cpu_foreign_map, func, info, 1); 55 + if (r4k_op_needs_ipi(type)) 56 + smp_call_function_many(&cpu_foreign_map[smp_processor_id()], 57 + func, info, 1); 110 58 func(info); 111 59 preempt_enable(); 112 60 } 113 - 114 - #if defined(CONFIG_MIPS_CMP) || defined(CONFIG_MIPS_CPS) 115 - #define cpu_has_safe_index_cacheops 0 116 - #else 117 - #define cpu_has_safe_index_cacheops 1 118 - #endif 119 61 120 62 /* 121 63 * Must die. ··· 494 462 495 463 static void r4k___flush_cache_all(void) 496 464 { 497 - r4k_on_each_cpu(local_r4k___flush_cache_all, NULL); 465 + r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL); 498 466 } 499 467 500 - static inline int has_valid_asid(const struct mm_struct *mm) 468 + /** 469 + * has_valid_asid() - Determine if an mm already has an ASID. 470 + * @mm: Memory map. 471 + * @type: R4K_HIT or R4K_INDEX, type of cache op. 472 + * 473 + * Determines whether @mm already has an ASID on any of the CPUs which cache ops 474 + * of type @type within an r4k_on_each_cpu() call will affect. 
If 475 + * r4k_on_each_cpu() does an SMP call to a single VPE in each core, then the 476 + * scope of the operation is confined to sibling CPUs, otherwise all online CPUs 477 + * will need to be checked. 478 + * 479 + * Must be called in non-preemptive context. 480 + * 481 + * Returns: 1 if the CPUs affected by @type cache ops have an ASID for @mm. 482 + * 0 otherwise. 483 + */ 484 + static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type) 501 485 { 502 - #ifdef CONFIG_MIPS_MT_SMP 503 - int i; 486 + unsigned int i; 487 + const cpumask_t *mask = cpu_present_mask; 504 488 505 - for_each_online_cpu(i) 489 + /* cpu_sibling_map[] undeclared when !CONFIG_SMP */ 490 + #ifdef CONFIG_SMP 491 + /* 492 + * If r4k_on_each_cpu does SMP calls, it does them to a single VPE in 493 + * each foreign core, so we only need to worry about siblings. 494 + * Otherwise we need to worry about all present CPUs. 495 + */ 496 + if (r4k_op_needs_ipi(type)) 497 + mask = &cpu_sibling_map[smp_processor_id()]; 498 + #endif 499 + for_each_cpu(i, mask) 506 500 if (cpu_context(i, mm)) 507 501 return 1; 508 - 509 502 return 0; 510 - #else 511 - return cpu_context(smp_processor_id(), mm); 512 - #endif 513 503 } 514 504 515 505 static void r4k__flush_cache_vmap(void) ··· 544 490 r4k_blast_dcache(); 545 491 } 546 492 493 + /* 494 + * Note: flush_tlb_range() assumes flush_cache_range() sufficiently flushes 495 + * whole caches when vma is executable. 
496 + */ 547 497 static inline void local_r4k_flush_cache_range(void * args) 548 498 { 549 499 struct vm_area_struct *vma = args; 550 500 int exec = vma->vm_flags & VM_EXEC; 551 501 552 - if (!(has_valid_asid(vma->vm_mm))) 502 + if (!has_valid_asid(vma->vm_mm, R4K_INDEX)) 553 503 return; 554 504 555 505 /* ··· 574 516 int exec = vma->vm_flags & VM_EXEC; 575 517 576 518 if (cpu_has_dc_aliases || exec) 577 - r4k_on_each_cpu(local_r4k_flush_cache_range, vma); 519 + r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma); 578 520 } 579 521 580 522 static inline void local_r4k_flush_cache_mm(void * args) 581 523 { 582 524 struct mm_struct *mm = args; 583 525 584 - if (!has_valid_asid(mm)) 526 + if (!has_valid_asid(mm, R4K_INDEX)) 585 527 return; 586 528 587 529 /* ··· 606 548 if (!cpu_has_dc_aliases) 607 549 return; 608 550 609 - r4k_on_each_cpu(local_r4k_flush_cache_mm, mm); 551 + r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm); 610 552 } 611 553 612 554 struct flush_cache_page_args { ··· 631 573 void *vaddr; 632 574 633 575 /* 634 - * If ownes no valid ASID yet, cannot possibly have gotten 576 + * If owns no valid ASID yet, cannot possibly have gotten 635 577 * this page into the cache. 
636 578 */ 637 - if (!has_valid_asid(mm)) 579 + if (!has_valid_asid(mm, R4K_HIT)) 638 580 return; 639 581 640 582 addr &= PAGE_MASK; ··· 701 643 args.addr = addr; 702 644 args.pfn = pfn; 703 645 704 - r4k_on_each_cpu(local_r4k_flush_cache_page, &args); 646 + r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_page, &args); 705 647 } 706 648 707 649 static inline void local_r4k_flush_data_cache_page(void * addr) ··· 714 656 if (in_atomic()) 715 657 local_r4k_flush_data_cache_page((void *)addr); 716 658 else 717 - r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr); 659 + r4k_on_each_cpu(R4K_HIT, local_r4k_flush_data_cache_page, 660 + (void *) addr); 718 661 } 719 662 720 663 struct flush_icache_range_args { 721 664 unsigned long start; 722 665 unsigned long end; 666 + unsigned int type; 723 667 }; 724 668 725 - static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end) 669 + static inline void __local_r4k_flush_icache_range(unsigned long start, 670 + unsigned long end, 671 + unsigned int type) 726 672 { 727 673 if (!cpu_has_ic_fills_f_dc) { 728 - if (end - start >= dcache_size) { 674 + if (type == R4K_INDEX || 675 + (type & R4K_INDEX && end - start >= dcache_size)) { 729 676 r4k_blast_dcache(); 730 677 } else { 731 678 R4600_HIT_CACHEOP_WAR_IMPL; ··· 738 675 } 739 676 } 740 677 741 - if (end - start > icache_size) 678 + if (type == R4K_INDEX || 679 + (type & R4K_INDEX && end - start > icache_size)) 742 680 r4k_blast_icache(); 743 681 else { 744 682 switch (boot_cpu_type()) { ··· 765 701 #endif 766 702 } 767 703 704 + static inline void local_r4k_flush_icache_range(unsigned long start, 705 + unsigned long end) 706 + { 707 + __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX); 708 + } 709 + 768 710 static inline void local_r4k_flush_icache_range_ipi(void *args) 769 711 { 770 712 struct flush_icache_range_args *fir_args = args; 771 713 unsigned long start = fir_args->start; 772 714 unsigned long end = fir_args->end; 715 + 
unsigned int type = fir_args->type; 773 716 774 - local_r4k_flush_icache_range(start, end); 717 + __local_r4k_flush_icache_range(start, end, type); 775 718 } 776 719 777 720 static void r4k_flush_icache_range(unsigned long start, unsigned long end) 778 721 { 779 722 struct flush_icache_range_args args; 723 + unsigned long size, cache_size; 780 724 781 725 args.start = start; 782 726 args.end = end; 727 + args.type = R4K_HIT | R4K_INDEX; 783 728 784 - r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args); 729 + /* 730 + * Indexed cache ops require an SMP call. 731 + * Consider if that can or should be avoided. 732 + */ 733 + preempt_disable(); 734 + if (r4k_op_needs_ipi(R4K_INDEX) && !r4k_op_needs_ipi(R4K_HIT)) { 735 + /* 736 + * If address-based cache ops don't require an SMP call, then 737 + * use them exclusively for small flushes. 738 + */ 739 + size = start - end; 740 + cache_size = icache_size; 741 + if (!cpu_has_ic_fills_f_dc) { 742 + size *= 2; 743 + cache_size += dcache_size; 744 + } 745 + if (size <= cache_size) 746 + args.type &= ~R4K_INDEX; 747 + } 748 + r4k_on_each_cpu(args.type, local_r4k_flush_icache_range_ipi, &args); 749 + preempt_enable(); 785 750 instruction_hazard(); 786 751 } 787 752 ··· 837 744 * subset property so we have to flush the primary caches 838 745 * explicitly 839 746 */ 840 - if (cpu_has_safe_index_cacheops && size >= dcache_size) { 747 + if (size >= dcache_size) { 841 748 r4k_blast_dcache(); 842 749 } else { 843 750 R4600_HIT_CACHEOP_WAR_IMPL; ··· 874 781 return; 875 782 } 876 783 877 - if (cpu_has_safe_index_cacheops && size >= dcache_size) { 784 + if (size >= dcache_size) { 878 785 r4k_blast_dcache(); 879 786 } else { 880 787 R4600_HIT_CACHEOP_WAR_IMPL; ··· 887 794 } 888 795 #endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */ 889 796 797 + struct flush_cache_sigtramp_args { 798 + struct mm_struct *mm; 799 + struct page *page; 800 + unsigned long addr; 801 + }; 802 + 890 803 /* 891 804 * While we're protected 
against bad userland addresses we don't care 892 805 * very much about what happens in that case. Usually a segmentation 893 806 * fault will dump the process later on anyway ... 894 807 */ 895 - static void local_r4k_flush_cache_sigtramp(void * arg) 808 + static void local_r4k_flush_cache_sigtramp(void *args) 896 809 { 810 + struct flush_cache_sigtramp_args *fcs_args = args; 811 + unsigned long addr = fcs_args->addr; 812 + struct page *page = fcs_args->page; 813 + struct mm_struct *mm = fcs_args->mm; 814 + int map_coherent = 0; 815 + void *vaddr; 816 + 897 817 unsigned long ic_lsize = cpu_icache_line_size(); 898 818 unsigned long dc_lsize = cpu_dcache_line_size(); 899 819 unsigned long sc_lsize = cpu_scache_line_size(); 900 - unsigned long addr = (unsigned long) arg; 820 + 821 + /* 822 + * If owns no valid ASID yet, cannot possibly have gotten 823 + * this page into the cache. 824 + */ 825 + if (!has_valid_asid(mm, R4K_HIT)) 826 + return; 827 + 828 + if (mm == current->active_mm) { 829 + vaddr = NULL; 830 + } else { 831 + /* 832 + * Use kmap_coherent or kmap_atomic to do flushes for 833 + * another ASID than the current one. 834 + */ 835 + map_coherent = (cpu_has_dc_aliases && 836 + page_mapcount(page) && 837 + !Page_dcache_dirty(page)); 838 + if (map_coherent) 839 + vaddr = kmap_coherent(page, addr); 840 + else 841 + vaddr = kmap_atomic(page); 842 + addr = (unsigned long)vaddr + (addr & ~PAGE_MASK); 843 + } 901 844 902 845 R4600_HIT_CACHEOP_WAR_IMPL; 903 - if (dc_lsize) 904 - protected_writeback_dcache_line(addr & ~(dc_lsize - 1)); 905 - if (!cpu_icache_snoops_remote_store && scache_size) 906 - protected_writeback_scache_line(addr & ~(sc_lsize - 1)); 846 + if (!cpu_has_ic_fills_f_dc) { 847 + if (dc_lsize) 848 + vaddr ? flush_dcache_line(addr & ~(dc_lsize - 1)) 849 + : protected_writeback_dcache_line( 850 + addr & ~(dc_lsize - 1)); 851 + if (!cpu_icache_snoops_remote_store && scache_size) 852 + vaddr ? 
flush_scache_line(addr & ~(sc_lsize - 1)) 853 + : protected_writeback_scache_line( 854 + addr & ~(sc_lsize - 1)); 855 + } 907 856 if (ic_lsize) 908 - protected_flush_icache_line(addr & ~(ic_lsize - 1)); 857 + vaddr ? flush_icache_line(addr & ~(ic_lsize - 1)) 858 + : protected_flush_icache_line(addr & ~(ic_lsize - 1)); 859 + 860 + if (vaddr) { 861 + if (map_coherent) 862 + kunmap_coherent(); 863 + else 864 + kunmap_atomic(vaddr); 865 + } 866 + 909 867 if (MIPS4K_ICACHE_REFILL_WAR) { 910 868 __asm__ __volatile__ ( 911 869 ".set push\n\t" ··· 981 837 982 838 static void r4k_flush_cache_sigtramp(unsigned long addr) 983 839 { 984 - r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr); 840 + struct flush_cache_sigtramp_args args; 841 + int npages; 842 + 843 + down_read(&current->mm->mmap_sem); 844 + 845 + npages = get_user_pages_fast(addr, 1, 0, &args.page); 846 + if (npages < 1) 847 + goto out; 848 + 849 + args.mm = current->mm; 850 + args.addr = addr; 851 + 852 + r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_sigtramp, &args); 853 + 854 + put_page(args.page); 855 + out: 856 + up_read(&current->mm->mmap_sem); 985 857 } 986 858 987 859 static void r4k_flush_icache_all(void) ··· 1011 851 int size; 1012 852 }; 1013 853 854 + static inline void local_r4k_flush_kernel_vmap_range_index(void *args) 855 + { 856 + /* 857 + * Aliases only affect the primary caches so don't bother with 858 + * S-caches or T-caches. 859 + */ 860 + r4k_blast_dcache(); 861 + } 862 + 1014 863 static inline void local_r4k_flush_kernel_vmap_range(void *args) 1015 864 { 1016 865 struct flush_kernel_vmap_range_args *vmra = args; ··· 1030 861 * Aliases only affect the primary caches so don't bother with 1031 862 * S-caches or T-caches. 
1032 863 */ 1033 - if (cpu_has_safe_index_cacheops && size >= dcache_size) 1034 - r4k_blast_dcache(); 1035 - else { 1036 - R4600_HIT_CACHEOP_WAR_IMPL; 1037 - blast_dcache_range(vaddr, vaddr + size); 1038 - } 864 + R4600_HIT_CACHEOP_WAR_IMPL; 865 + blast_dcache_range(vaddr, vaddr + size); 1039 866 } 1040 867 1041 868 static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size) ··· 1041 876 args.vaddr = (unsigned long) vaddr; 1042 877 args.size = size; 1043 878 1044 - r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args); 879 + if (size >= dcache_size) 880 + r4k_on_each_cpu(R4K_INDEX, 881 + local_r4k_flush_kernel_vmap_range_index, NULL); 882 + else 883 + r4k_on_each_cpu(R4K_HIT, local_r4k_flush_kernel_vmap_range, 884 + &args); 1045 885 } 1046 886 1047 887 static inline void rm7k_erratum31(void)
+2 -2
arch/mips/mm/sc-debugfs.c
··· 73 73 74 74 file = debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir, 75 75 NULL, &sc_prefetch_fops); 76 - if (IS_ERR(file)) 77 - return PTR_ERR(file); 76 + if (!file) 77 + return -ENOMEM; 78 78 79 79 return 0; 80 80 }
+1 -1
arch/mips/mm/sc-rm7k.c
··· 161 161 local_irq_save(flags); 162 162 blast_rm7k_tcache(); 163 163 clear_c0_config(RM7K_CONF_TE); 164 - local_irq_save(flags); 164 + local_irq_restore(flags); 165 165 } 166 166 167 167 static void rm7k_sc_disable(void)
+1 -1
arch/mips/mm/tlbex.c
··· 888 888 } 889 889 } 890 890 if (!did_vmalloc_branch) { 891 - if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) { 891 + if (single_insn_swpd) { 892 892 uasm_il_b(p, r, label_vmalloc_done); 893 893 uasm_i_lui(p, ptr, uasm_rel_hi(swpd)); 894 894 } else {
+1 -1
arch/mips/mm/uasm-mips.c
··· 65 65 #ifndef CONFIG_CPU_MIPSR6 66 66 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 67 67 #else 68 - { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 }, 68 + { insn_cache, M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 }, 69 69 #endif 70 70 { insn_cfc1, M(cop1_op, cfc_op, 0, 0, 0, 0), RT | RD }, 71 71 { insn_cfcmsa, M(msa_op, 0, msa_cfc_op, 0, 0, msa_elm_op), RD | RE },
+1 -5
arch/mips/mm/uasm.c
··· 378 378 int ISAFUNC(uasm_in_compat_space_p)(long addr) 379 379 { 380 380 /* Is this address in 32bit compat space? */ 381 - #ifdef CONFIG_64BIT 382 - return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L); 383 - #else 384 - return 1; 385 - #endif 381 + return addr == (int)addr; 386 382 } 387 383 UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p)); 388 384
+1 -1
arch/mips/net/bpf_jit.c
··· 1199 1199 1200 1200 memset(&ctx, 0, sizeof(ctx)); 1201 1201 1202 - ctx.offsets = kcalloc(fp->len, sizeof(*ctx.offsets), GFP_KERNEL); 1202 + ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL); 1203 1203 if (ctx.offsets == NULL) 1204 1204 return; 1205 1205
+2 -2
arch/mips/pic32/pic32mzda/init.c
··· 33 33 { 34 34 ulong ftaddr = 0; 35 35 36 - if ((fw_arg0 == -2) && fw_arg1 && !fw_arg2 && !fw_arg3) 37 - return (ulong)fw_arg1; 36 + if (fw_passed_dtb && !fw_arg2 && !fw_arg3) 37 + return (ulong)fw_passed_dtb; 38 38 39 39 if (__dtb_start < __dtb_end) 40 40 ftaddr = (ulong)__dtb_start;
-25
arch/mips/pistachio/init.c
··· 59 59 return sys_type; 60 60 } 61 61 62 - static void __init plat_setup_iocoherency(void) 63 - { 64 - /* 65 - * Kernel has been configured with software coherency 66 - * but we might choose to turn it off and use hardware 67 - * coherency instead. 68 - */ 69 - if (mips_cm_numiocu() != 0) { 70 - /* Nothing special needs to be done to enable coherency */ 71 - pr_info("CMP IOCU detected\n"); 72 - hw_coherentio = 1; 73 - if (coherentio == 0) 74 - pr_info("Hardware DMA cache coherency disabled\n"); 75 - else 76 - pr_info("Hardware DMA cache coherency enabled\n"); 77 - } else { 78 - if (coherentio == 1) 79 - pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n"); 80 - else 81 - pr_info("Software DMA cache coherency enabled\n"); 82 - } 83 - } 84 - 85 62 void __init *plat_get_fdt(void) 86 63 { 87 64 if (fw_arg0 != -2) ··· 69 92 void __init plat_mem_setup(void) 70 93 { 71 94 __dt_setup_arch(plat_get_fdt()); 72 - 73 - plat_setup_iocoherency(); 74 95 } 75 96 76 97 #define DEFAULT_CPC_BASE_ADDR 0x1bde0000
+1 -1
arch/mips/ralink/mt7620.c
··· 175 175 }; 176 176 177 177 static struct rt2880_pmx_func spis_grp_mt7628[] = { 178 - FUNC("pwm", 3, 14, 4), 178 + FUNC("pwm_uart2", 3, 14, 4), 179 179 FUNC("util", 2, 14, 4), 180 180 FUNC("gpio", 1, 14, 4), 181 181 FUNC("spis", 0, 14, 4),
+11 -11
drivers/ssb/driver_gpio.c
··· 23 23 **************************************************/ 24 24 25 25 #if IS_ENABLED(CONFIG_SSB_EMBEDDED) 26 - static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) 26 + static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned int gpio) 27 27 { 28 28 struct ssb_bus *bus = gpiochip_get_data(chip); 29 29 ··· 38 38 * ChipCommon 39 39 **************************************************/ 40 40 41 - static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned gpio) 41 + static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned int gpio) 42 42 { 43 43 struct ssb_bus *bus = gpiochip_get_data(chip); 44 44 45 45 return !!ssb_chipco_gpio_in(&bus->chipco, 1 << gpio); 46 46 } 47 47 48 - static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned gpio, 48 + static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned int gpio, 49 49 int value) 50 50 { 51 51 struct ssb_bus *bus = gpiochip_get_data(chip); ··· 54 54 } 55 55 56 56 static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip, 57 - unsigned gpio) 57 + unsigned int gpio) 58 58 { 59 59 struct ssb_bus *bus = gpiochip_get_data(chip); 60 60 ··· 63 63 } 64 64 65 65 static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip, 66 - unsigned gpio, int value) 66 + unsigned int gpio, int value) 67 67 { 68 68 struct ssb_bus *bus = gpiochip_get_data(chip); 69 69 ··· 72 72 return 0; 73 73 } 74 74 75 - static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned gpio) 75 + static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned int gpio) 76 76 { 77 77 struct ssb_bus *bus = gpiochip_get_data(chip); 78 78 ··· 85 85 return 0; 86 86 } 87 87 88 - static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned gpio) 88 + static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned int gpio) 89 89 { 90 90 struct ssb_bus *bus = gpiochip_get_data(chip); 91 91 ··· 256 256 257 257 #ifdef CONFIG_SSB_DRIVER_EXTIF 258 258 259 - static int 
ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned gpio) 259 + static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned int gpio) 260 260 { 261 261 struct ssb_bus *bus = gpiochip_get_data(chip); 262 262 263 263 return !!ssb_extif_gpio_in(&bus->extif, 1 << gpio); 264 264 } 265 265 266 - static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned gpio, 266 + static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned int gpio, 267 267 int value) 268 268 { 269 269 struct ssb_bus *bus = gpiochip_get_data(chip); ··· 272 272 } 273 273 274 274 static int ssb_gpio_extif_direction_input(struct gpio_chip *chip, 275 - unsigned gpio) 275 + unsigned int gpio) 276 276 { 277 277 struct ssb_bus *bus = gpiochip_get_data(chip); 278 278 ··· 281 281 } 282 282 283 283 static int ssb_gpio_extif_direction_output(struct gpio_chip *chip, 284 - unsigned gpio, int value) 284 + unsigned int gpio, int value) 285 285 { 286 286 struct ssb_bus *bus = gpiochip_get_data(chip); 287 287