Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

hexagon: parenthesize registers in asm predicates

Hexagon requires that register predicates in assembly be parenthesized.

Link: https://github.com/ClangBuiltLinux/linux/issues/754
Link: http://lkml.kernel.org/r/20191209222956.239798-3-ndesaulniers@google.com
Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
Suggested-by: Sid Manning <sidneym@codeaurora.org>
Acked-by: Brian Cain <bcain@codeaurora.org>
Cc: Lee Jones <lee.jones@linaro.org>
Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Cc: Tuowen Zhao <ztuowen@gmail.com>
Cc: Mika Westerberg <mika.westerberg@linux.intel.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Alexios Zavras <alexios.zavras@intel.com>
Cc: Allison Randal <allison@lohutok.net>
Cc: Will Deacon <will@kernel.org>
Cc: Richard Fontana <rfontana@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Nick Desaulniers and committed by Linus Torvalds
(commit 780a0cfd, parent 213921f9)

+23 -23
+4 -4
arch/hexagon/include/asm/atomic.h
··· 91 91 "1: %0 = memw_locked(%1);\n" \ 92 92 " %0 = "#op "(%0,%2);\n" \ 93 93 " memw_locked(%1,P3)=%0;\n" \ 94 - " if !P3 jump 1b;\n" \ 94 + " if (!P3) jump 1b;\n" \ 95 95 : "=&r" (output) \ 96 96 : "r" (&v->counter), "r" (i) \ 97 97 : "memory", "p3" \ ··· 107 107 "1: %0 = memw_locked(%1);\n" \ 108 108 " %0 = "#op "(%0,%2);\n" \ 109 109 " memw_locked(%1,P3)=%0;\n" \ 110 - " if !P3 jump 1b;\n" \ 110 + " if (!P3) jump 1b;\n" \ 111 111 : "=&r" (output) \ 112 112 : "r" (&v->counter), "r" (i) \ 113 113 : "memory", "p3" \ ··· 124 124 "1: %0 = memw_locked(%2);\n" \ 125 125 " %1 = "#op "(%0,%3);\n" \ 126 126 " memw_locked(%2,P3)=%1;\n" \ 127 - " if !P3 jump 1b;\n" \ 127 + " if (!P3) jump 1b;\n" \ 128 128 : "=&r" (output), "=&r" (val) \ 129 129 : "r" (&v->counter), "r" (i) \ 130 130 : "memory", "p3" \ ··· 173 173 " }" 174 174 " memw_locked(%2, p3) = %1;" 175 175 " {" 176 - " if !p3 jump 1b;" 176 + " if (!p3) jump 1b;" 177 177 " }" 178 178 "2:" 179 179 : "=&r" (__oldval), "=&r" (tmp)
+4 -4
arch/hexagon/include/asm/bitops.h
··· 38 38 "1: R12 = memw_locked(R10);\n" 39 39 " { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n" 40 40 " memw_locked(R10,P1) = R12;\n" 41 - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" 41 + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" 42 42 : "=&r" (oldval) 43 43 : "r" (addr), "r" (nr) 44 44 : "r10", "r11", "r12", "p0", "p1", "memory" ··· 62 62 "1: R12 = memw_locked(R10);\n" 63 63 " { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n" 64 64 " memw_locked(R10,P1) = R12;\n" 65 - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" 65 + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" 66 66 : "=&r" (oldval) 67 67 : "r" (addr), "r" (nr) 68 68 : "r10", "r11", "r12", "p0", "p1", "memory" ··· 88 88 "1: R12 = memw_locked(R10);\n" 89 89 " { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n" 90 90 " memw_locked(R10,P1) = R12;\n" 91 - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" 91 + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" 92 92 : "=&r" (oldval) 93 93 : "r" (addr), "r" (nr) 94 94 : "r10", "r11", "r12", "p0", "p1", "memory" ··· 223 223 int r; 224 224 225 225 asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n" 226 - "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n" 226 + "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n" 227 227 : "=&r" (r) 228 228 : "r" (x) 229 229 : "p0");
+1 -1
arch/hexagon/include/asm/cmpxchg.h
··· 30 30 __asm__ __volatile__ ( 31 31 "1: %0 = memw_locked(%1);\n" /* load into retval */ 32 32 " memw_locked(%1,P0) = %2;\n" /* store into memory */ 33 - " if !P0 jump 1b;\n" 33 + " if (!P0) jump 1b;\n" 34 34 : "=&r" (retval) 35 35 : "r" (ptr), "r" (x) 36 36 : "memory", "p0"
+3 -3
arch/hexagon/include/asm/futex.h
··· 16 16 /* For example: %1 = %4 */ \ 17 17 insn \ 18 18 "2: memw_locked(%3,p2) = %1;\n" \ 19 - " if !p2 jump 1b;\n" \ 19 + " if (!p2) jump 1b;\n" \ 20 20 " %1 = #0;\n" \ 21 21 "3:\n" \ 22 22 ".section .fixup,\"ax\"\n" \ ··· 84 84 "1: %1 = memw_locked(%3)\n" 85 85 " {\n" 86 86 " p2 = cmp.eq(%1,%4)\n" 87 - " if !p2.new jump:NT 3f\n" 87 + " if (!p2.new) jump:NT 3f\n" 88 88 " }\n" 89 89 "2: memw_locked(%3,p2) = %5\n" 90 - " if !p2 jump 1b\n" 90 + " if (!p2) jump 1b\n" 91 91 "3:\n" 92 92 ".section .fixup,\"ax\"\n" 93 93 "4: %0 = #%6\n"
+10 -10
arch/hexagon/include/asm/spinlock.h
··· 30 30 __asm__ __volatile__( 31 31 "1: R6 = memw_locked(%0);\n" 32 32 " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n" 33 - " { if !P3 jump 1b; }\n" 33 + " { if (!P3) jump 1b; }\n" 34 34 " memw_locked(%0,P3) = R6;\n" 35 - " { if !P3 jump 1b; }\n" 35 + " { if (!P3) jump 1b; }\n" 36 36 : 37 37 : "r" (&lock->lock) 38 38 : "memory", "r6", "p3" ··· 46 46 "1: R6 = memw_locked(%0);\n" 47 47 " R6 = add(R6,#-1);\n" 48 48 " memw_locked(%0,P3) = R6\n" 49 - " if !P3 jump 1b;\n" 49 + " if (!P3) jump 1b;\n" 50 50 : 51 51 : "r" (&lock->lock) 52 52 : "memory", "r6", "p3" ··· 61 61 __asm__ __volatile__( 62 62 " R6 = memw_locked(%1);\n" 63 63 " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n" 64 - " { if !P3 jump 1f; }\n" 64 + " { if (!P3) jump 1f; }\n" 65 65 " memw_locked(%1,P3) = R6;\n" 66 66 " { %0 = P3 }\n" 67 67 "1:\n" ··· 78 78 __asm__ __volatile__( 79 79 "1: R6 = memw_locked(%0)\n" 80 80 " { P3 = cmp.eq(R6,#0); R6 = #-1;}\n" 81 - " { if !P3 jump 1b; }\n" 81 + " { if (!P3) jump 1b; }\n" 82 82 " memw_locked(%0,P3) = R6;\n" 83 - " { if !P3 jump 1b; }\n" 83 + " { if (!P3) jump 1b; }\n" 84 84 : 85 85 : "r" (&lock->lock) 86 86 : "memory", "r6", "p3" ··· 94 94 __asm__ __volatile__( 95 95 " R6 = memw_locked(%1)\n" 96 96 " { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n" 97 - " { if !P3 jump 1f; }\n" 97 + " { if (!P3) jump 1f; }\n" 98 98 " memw_locked(%1,P3) = R6;\n" 99 99 " %0 = P3;\n" 100 100 "1:\n" ··· 117 117 __asm__ __volatile__( 118 118 "1: R6 = memw_locked(%0);\n" 119 119 " P3 = cmp.eq(R6,#0);\n" 120 - " { if !P3 jump 1b; R6 = #1; }\n" 120 + " { if (!P3) jump 1b; R6 = #1; }\n" 121 121 " memw_locked(%0,P3) = R6;\n" 122 - " { if !P3 jump 1b; }\n" 122 + " { if (!P3) jump 1b; }\n" 123 123 : 124 124 : "r" (&lock->lock) 125 125 : "memory", "r6", "p3" ··· 139 139 __asm__ __volatile__( 140 140 " R6 = memw_locked(%1);\n" 141 141 " P3 = cmp.eq(R6,#0);\n" 142 - " { if !P3 jump 1f; R6 = #1; %0 = #0; }\n" 142 + " { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n" 143 143 " memw_locked(%1,P3) = R6;\n" 
144 144 " %0 = P3;\n" 145 145 "1:\n"
+1 -1
arch/hexagon/kernel/vm_entry.S
··· 369 369 R26.L = #LO(do_work_pending); 370 370 R0 = #VM_INT_DISABLE; 371 371 } 372 - if P0 jump check_work_pending 372 + if (P0) jump check_work_pending 373 373 { 374 374 R0 = R25; 375 375 callr R24