Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'mips_fixes_5.0_3' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS fixes from Paul Burton:
"A batch of MIPS fixes for 5.0, nothing too scary.

- A workaround for a Loongson 3 CPU bug is the biggest change, but
still fairly straightforward. It adds extra memory barriers (sync
instructions) around atomics to avoid a CPU bug that can break
atomicity.

- Loongson64 also sees a fix for powering off some systems which
would incorrectly reboot rather than waiting for the power down
sequence to complete.

- We have DT fixes for the Ingenic JZ4740 SoC & the JZ4780-based Ci20
board, and a DT warning fix for the Nexys4/MIPSfpga board.

- The Cavium Octeon platform sees a further fix to the behaviour of
the pcie_disable command line argument that was introduced in v3.3.

- The VDSO, introduced in v4.4, sees build fixes for configurations
of GCC that were built using the --with-fp-32= flag to specify a
default 32-bit floating point ABI.

- get_frame_info() sees a fix for configurations with
CONFIG_KALLSYMS=n, for which it previously always returned an
error.

- If the MIPS Coherence Manager (CM) reports an error then we'll now
clear that error correctly so that the GCR_ERROR_CAUSE register
will be updated with information about any future errors"

* tag 'mips_fixes_5.0_3' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux:
mips: cm: reprime error cause
mips: loongson64: remove unreachable(), fix loongson_poweroff().
MIPS: Remove function size check in get_frame_info()
MIPS: Use lower case for addresses in nexys4ddr.dts
MIPS: Loongson: Introduce and use loongson_llsc_mb()
MIPS: VDSO: Include $(ccflags-vdso) in o32,n32 .lds builds
MIPS: VDSO: Use same -m%-float cflag as the kernel proper
MIPS: OCTEON: don't set octeon_dma_bar_type if PCI is disabled
DTS: CI20: Fix bugs in ci20's device tree.
MIPS: DTS: jz4740: Correct interrupt number of DMA core

+127 -22
+15
arch/mips/Kconfig
··· 1403 1403 please say 'N' here. If you want a high-performance kernel to run on 1404 1404 new Loongson 3 machines only, please say 'Y' here. 1405 1405 1406 + config CPU_LOONGSON3_WORKAROUNDS 1407 + bool "Old Loongson 3 LLSC Workarounds" 1408 + default y if SMP 1409 + depends on CPU_LOONGSON3 1410 + help 1411 + Loongson 3 processors have the llsc issues which require workarounds. 1412 + Without workarounds the system may hang unexpectedly. 1413 + 1414 + Newer Loongson 3 will fix these issues and no workarounds are needed. 1415 + The workarounds have no significant side effect on them but may 1416 + decrease the performance of the system so this option should be 1417 + disabled unless the kernel is intended to be run on old systems. 1418 + 1419 + If unsure, please say Y. 1420 + 1406 1421 config CPU_LOONGSON2E 1407 1422 bool "Loongson 2E" 1408 1423 depends on SYS_HAS_CPU_LOONGSON2E
+4 -4
arch/mips/boot/dts/ingenic/ci20.dts
··· 76 76 status = "okay"; 77 77 78 78 pinctrl-names = "default"; 79 - pinctrl-0 = <&pins_uart2>; 79 + pinctrl-0 = <&pins_uart3>; 80 80 }; 81 81 82 82 &uart4 { ··· 196 196 bias-disable; 197 197 }; 198 198 199 - pins_uart2: uart2 { 200 - function = "uart2"; 201 - groups = "uart2-data", "uart2-hwflow"; 199 + pins_uart3: uart3 { 200 + function = "uart3"; 201 + groups = "uart3-data", "uart3-hwflow"; 202 202 bias-disable; 203 203 }; 204 204
+1 -1
arch/mips/boot/dts/ingenic/jz4740.dtsi
··· 161 161 #dma-cells = <2>; 162 162 163 163 interrupt-parent = <&intc>; 164 - interrupts = <29>; 164 + interrupts = <20>; 165 165 166 166 clocks = <&cgu JZ4740_CLK_DMA>; 167 167
+4 -4
arch/mips/boot/dts/xilfpga/nexys4ddr.dts
··· 90 90 interrupts = <0>; 91 91 }; 92 92 93 - axi_i2c: i2c@10A00000 { 93 + axi_i2c: i2c@10a00000 { 94 94 compatible = "xlnx,xps-iic-2.00.a"; 95 95 interrupt-parent = <&axi_intc>; 96 96 interrupts = <4>; 97 - reg = < 0x10A00000 0x10000 >; 97 + reg = < 0x10a00000 0x10000 >; 98 98 clocks = <&ext>; 99 99 xlnx,clk-freq = <0x5f5e100>; 100 100 xlnx,family = "Artix7"; ··· 106 106 #address-cells = <1>; 107 107 #size-cells = <0>; 108 108 109 - ad7420@4B { 109 + ad7420@4b { 110 110 compatible = "adi,adt7420"; 111 - reg = <0x4B>; 111 + reg = <0x4b>; 112 112 }; 113 113 } ; 114 114 };
+6
arch/mips/include/asm/atomic.h
··· 58 58 if (kernel_uses_llsc) { \ 59 59 int temp; \ 60 60 \ 61 + loongson_llsc_mb(); \ 61 62 __asm__ __volatile__( \ 62 63 " .set push \n" \ 63 64 " .set "MIPS_ISA_LEVEL" \n" \ ··· 86 85 if (kernel_uses_llsc) { \ 87 86 int temp; \ 88 87 \ 88 + loongson_llsc_mb(); \ 89 89 __asm__ __volatile__( \ 90 90 " .set push \n" \ 91 91 " .set "MIPS_ISA_LEVEL" \n" \ ··· 120 118 if (kernel_uses_llsc) { \ 121 119 int temp; \ 122 120 \ 121 + loongson_llsc_mb(); \ 123 122 __asm__ __volatile__( \ 124 123 " .set push \n" \ 125 124 " .set "MIPS_ISA_LEVEL" \n" \ ··· 259 256 if (kernel_uses_llsc) { \ 260 257 long temp; \ 261 258 \ 259 + loongson_llsc_mb(); \ 262 260 __asm__ __volatile__( \ 263 261 " .set push \n" \ 264 262 " .set "MIPS_ISA_LEVEL" \n" \ ··· 287 283 if (kernel_uses_llsc) { \ 288 284 long temp; \ 289 285 \ 286 + loongson_llsc_mb(); \ 290 287 __asm__ __volatile__( \ 291 288 " .set push \n" \ 292 289 " .set "MIPS_ISA_LEVEL" \n" \ ··· 321 316 if (kernel_uses_llsc) { \ 322 317 long temp; \ 323 318 \ 319 + loongson_llsc_mb(); \ 324 320 __asm__ __volatile__( \ 325 321 " .set push \n" \ 326 322 " .set "MIPS_ISA_LEVEL" \n" \
+36
arch/mips/include/asm/barrier.h
··· 222 222 #define __smp_mb__before_atomic() __smp_mb__before_llsc() 223 223 #define __smp_mb__after_atomic() smp_llsc_mb() 224 224 225 + /* 226 + * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load, 227 + * store or pref) in between an ll & sc can cause the sc instruction to 228 + * erroneously succeed, breaking atomicity. Whilst it's unusual to write code 229 + * containing such sequences, this bug bites harder than we might otherwise 230 + * expect due to reordering & speculation: 231 + * 232 + * 1) A memory access appearing prior to the ll in program order may actually 233 + * be executed after the ll - this is the reordering case. 234 + * 235 + * In order to avoid this we need to place a memory barrier (ie. a sync 236 + * instruction) prior to every ll instruction, in between it & any earlier 237 + * memory access instructions. Many of these cases are already covered by 238 + * smp_mb__before_llsc() but for the remaining cases, typically ones in 239 + * which multiple CPUs may operate on a memory location but ordering is not 240 + * usually guaranteed, we use loongson_llsc_mb() below. 241 + * 242 + * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later. 243 + * 244 + * 2) If a conditional branch exists between an ll & sc with a target outside 245 + * of the ll-sc loop, for example an exit upon value mismatch in cmpxchg() 246 + * or similar, then misprediction of the branch may allow speculative 247 + * execution of memory accesses from outside of the ll-sc loop. 248 + * 249 + * In order to avoid this we need a memory barrier (ie. a sync instruction) 250 + * at each affected branch target, for which we also use loongson_llsc_mb() 251 + * defined below. 252 + * 253 + * This case affects all current Loongson 3 CPUs. 
254 + */ 255 + #ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */ 256 + #define loongson_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory") 257 + #else 258 + #define loongson_llsc_mb() do { } while (0) 259 + #endif 260 + 225 261 #include <asm-generic/barrier.h> 226 262 227 263 #endif /* __ASM_BARRIER_H */
+5
arch/mips/include/asm/bitops.h
··· 69 69 : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); 70 70 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 71 71 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 72 + loongson_llsc_mb(); 72 73 do { 73 74 __asm__ __volatile__( 74 75 " " __LL "%0, %1 # set_bit \n" ··· 80 79 } while (unlikely(!temp)); 81 80 #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ 82 81 } else if (kernel_uses_llsc) { 82 + loongson_llsc_mb(); 83 83 do { 84 84 __asm__ __volatile__( 85 85 " .set push \n" ··· 125 123 : "ir" (~(1UL << bit))); 126 124 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 127 125 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 126 + loongson_llsc_mb(); 128 127 do { 129 128 __asm__ __volatile__( 130 129 " " __LL "%0, %1 # clear_bit \n" ··· 136 133 } while (unlikely(!temp)); 137 134 #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ 138 135 } else if (kernel_uses_llsc) { 136 + loongson_llsc_mb(); 139 137 do { 140 138 __asm__ __volatile__( 141 139 " .set push \n" ··· 197 193 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); 198 194 unsigned long temp; 199 195 196 + loongson_llsc_mb(); 200 197 do { 201 198 __asm__ __volatile__( 202 199 " .set push \n"
+3
arch/mips/include/asm/futex.h
··· 50 50 "i" (-EFAULT) \ 51 51 : "memory"); \ 52 52 } else if (cpu_has_llsc) { \ 53 + loongson_llsc_mb(); \ 53 54 __asm__ __volatile__( \ 54 55 " .set push \n" \ 55 56 " .set noat \n" \ ··· 164 163 "i" (-EFAULT) 165 164 : "memory"); 166 165 } else if (cpu_has_llsc) { 166 + loongson_llsc_mb(); 167 167 __asm__ __volatile__( 168 168 "# futex_atomic_cmpxchg_inatomic \n" 169 169 " .set push \n" ··· 194 192 : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 195 193 "i" (-EFAULT) 196 194 : "memory"); 195 + loongson_llsc_mb(); 197 196 } else 198 197 return -ENOSYS; 199 198
+2
arch/mips/include/asm/pgtable.h
··· 228 228 : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) 229 229 : [global] "r" (page_global)); 230 230 } else if (kernel_uses_llsc) { 231 + loongson_llsc_mb(); 231 232 __asm__ __volatile__ ( 232 233 " .set push \n" 233 234 " .set "MIPS_ISA_ARCH_LEVEL" \n" ··· 243 242 " .set pop \n" 244 243 : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) 245 244 : [global] "r" (page_global)); 245 + loongson_llsc_mb(); 246 246 } 247 247 #else /* !CONFIG_SMP */ 248 248 if (pte_none(*buddy))
+1 -1
arch/mips/kernel/mips-cm.c
··· 457 457 } 458 458 459 459 /* reprime cause register */ 460 - write_gcr_error_cause(0); 460 + write_gcr_error_cause(cm_error); 461 461 }
+3 -4
arch/mips/kernel/process.c
··· 371 371 static int get_frame_info(struct mips_frame_info *info) 372 372 { 373 373 bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); 374 - union mips_instruction insn, *ip, *ip_end; 374 + union mips_instruction insn, *ip; 375 375 const unsigned int max_insns = 128; 376 376 unsigned int last_insn_size = 0; 377 377 unsigned int i; ··· 384 384 if (!ip) 385 385 goto err; 386 386 387 - ip_end = (void *)ip + info->func_size; 388 - 389 - for (i = 0; i < max_insns && ip < ip_end; i++) { 387 + for (i = 0; i < max_insns; i++) { 390 388 ip = (void *)ip + last_insn_size; 389 + 391 390 if (is_mmips && mm_insn_16bit(ip->halfword[0])) { 392 391 insn.word = ip->halfword[0] << 16; 393 392 last_insn_size = 2;
+23
arch/mips/loongson64/Platform
··· 23 23 endif 24 24 25 25 cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap 26 + 27 + # 28 + # Some versions of binutils, not currently mainline as of 2019/02/04, support 29 + # an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction 30 + # to work around a CPU bug (see loongson_llsc_mb() in asm/barrier.h for a 31 + # description). 32 + # 33 + # We disable this in order to prevent the assembler meddling with the 34 + # instruction that labels refer to, ie. if we label an ll instruction: 35 + # 36 + # 1: ll v0, 0(a0) 37 + # 38 + # ...then with the assembler fix applied the label may actually point at a sync 39 + # instruction inserted by the assembler, and if we were using the label in an 40 + # exception table the table would no longer contain the address of the ll 41 + # instruction. 42 + # 43 + # Avoid this by explicitly disabling that assembler behaviour. If upstream 44 + # binutils does not merge support for the flag then we can revisit & remove 45 + # this later - for now it ensures vendor toolchains don't cause problems. 46 + # 47 + cflags-$(CONFIG_CPU_LOONGSON3) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,) 48 + 26 49 # 27 50 # binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a 28 51 # as MIPS64 R2; older versions as just R1. This leaves the possibility open
+6 -1
arch/mips/loongson64/common/reset.c
··· 59 59 { 60 60 #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE 61 61 mach_prepare_shutdown(); 62 - unreachable(); 62 + 63 + /* 64 + * It needs a wait loop here, but mips/kernel/reset.c already calls 65 + * a generic delay loop, machine_hang(), so simply return. 66 + */ 67 + return; 63 68 #else 64 69 void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr; 65 70
+10
arch/mips/mm/tlbex.c
··· 932 932 * to mimic that here by taking a load/istream page 933 933 * fault. 934 934 */ 935 + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 936 + uasm_i_sync(p, 0); 935 937 UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); 936 938 uasm_i_jr(p, ptr); 937 939 ··· 1648 1646 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) 1649 1647 { 1650 1648 #ifdef CONFIG_SMP 1649 + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 1650 + uasm_i_sync(p, 0); 1651 1651 # ifdef CONFIG_PHYS_ADDR_T_64BIT 1652 1652 if (cpu_has_64bits) 1653 1653 uasm_i_lld(p, pte, 0, ptr); ··· 2263 2259 #endif 2264 2260 2265 2261 uasm_l_nopage_tlbl(&l, p); 2262 + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 2263 + uasm_i_sync(&p, 0); 2266 2264 build_restore_work_registers(&p); 2267 2265 #ifdef CONFIG_CPU_MICROMIPS 2268 2266 if ((unsigned long)tlb_do_page_fault_0 & 1) { ··· 2319 2313 #endif 2320 2314 2321 2315 uasm_l_nopage_tlbs(&l, p); 2316 + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 2317 + uasm_i_sync(&p, 0); 2322 2318 build_restore_work_registers(&p); 2323 2319 #ifdef CONFIG_CPU_MICROMIPS 2324 2320 if ((unsigned long)tlb_do_page_fault_1 & 1) { ··· 2376 2368 #endif 2377 2369 2378 2370 uasm_l_nopage_tlbm(&l, p); 2371 + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) 2372 + uasm_i_sync(&p, 0); 2379 2373 build_restore_work_registers(&p); 2380 2374 #ifdef CONFIG_CPU_MICROMIPS 2381 2375 if ((unsigned long)tlb_do_page_fault_1 & 1) {
+5 -5
arch/mips/pci/pci-octeon.c
··· 568 568 if (octeon_has_feature(OCTEON_FEATURE_PCIE)) 569 569 return 0; 570 570 571 + if (!octeon_is_pci_host()) { 572 + pr_notice("Not in host mode, PCI Controller not initialized\n"); 573 + return 0; 574 + } 575 + 571 576 /* Point pcibios_map_irq() to the PCI version of it */ 572 577 octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq; 573 578 ··· 583 578 octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_SMALL; 584 579 else 585 580 octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG; 586 - 587 - if (!octeon_is_pci_host()) { 588 - pr_notice("Not in host mode, PCI Controller not initialized\n"); 589 - return 0; 590 - } 591 581 592 582 /* PCI I/O and PCI MEM values */ 593 583 set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
+3 -2
arch/mips/vdso/Makefile
··· 8 8 $(filter -E%,$(KBUILD_CFLAGS)) \ 9 9 $(filter -mmicromips,$(KBUILD_CFLAGS)) \ 10 10 $(filter -march=%,$(KBUILD_CFLAGS)) \ 11 + $(filter -m%-float,$(KBUILD_CFLAGS)) \ 11 12 -D__VDSO__ 12 13 13 14 ifdef CONFIG_CC_IS_CLANG ··· 130 129 $(call cmd,force_checksrc) 131 130 $(call if_changed_rule,cc_o_c) 132 131 133 - $(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32 132 + $(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32 134 133 $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE 135 134 $(call if_changed_dep,cpp_lds_S) 136 135 ··· 170 169 $(call cmd,force_checksrc) 171 170 $(call if_changed_rule,cc_o_c) 172 171 173 - $(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32 172 + $(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32 174 173 $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE 175 174 $(call if_changed_dep,cpp_lds_S) 176 175