Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390: Remove superfluous newlines from inline assemblies

Remove superfluous newlines from inline assemblies. Compilers use the
number of lines of inline assemblies as heuristic for the complexity
and inline decisions. Therefore inline assemblies should only contain
as many lines as required.

A lot of inline assemblies contain a superfluous newline for the last
line. Remove such newlines to improve compiler inlining decisions.

Suggested-by: Juergen Christ <jchrist@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Juergen Christ <jchrist@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>

Authored by Heiko Carstens; committed by Alexander Gordeev.
4335edb7 f0edc8f1

+110 -110
+1 -1
arch/s390/hypfs/hypfs_sprp.c
··· 27 27 { 28 28 union register_pair r1 = { .even = virt_to_phys(data), }; 29 29 30 - asm volatile("diag %[r1],%[r3],0x304\n" 30 + asm volatile("diag %[r1],%[r3],0x304" 31 31 : [r1] "+&d" (r1.pair) 32 32 : [r3] "d" (cmd) 33 33 : "memory");
+9 -9
arch/s390/include/asm/ap.h
··· 143 143 " lghi 2,0\n" /* 0 into gr2 */ 144 144 " .insn rre,0xb2af0000,0,0\n" /* PQAP(TAPQ) */ 145 145 " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ 146 - " lgr %[reg2],2\n" /* gr2 into reg2 */ 146 + " lgr %[reg2],2" /* gr2 into reg2 */ 147 147 : [reg1] "=&d" (reg1.value), [reg2] "=&d" (reg2) 148 148 : [qid] "d" (qid) 149 149 : "cc", "0", "1", "2"); ··· 186 186 asm volatile( 187 187 " lgr 0,%[reg0]\n" /* qid arg into gr0 */ 188 188 " .insn rre,0xb2af0000,0,0\n" /* PQAP(RAPQ) */ 189 - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ 189 + " lgr %[reg1],1" /* gr1 (status) into reg1 */ 190 190 : [reg1] "=&d" (reg1.value) 191 191 : [reg0] "d" (reg0) 192 192 : "cc", "0", "1"); ··· 211 211 asm volatile( 212 212 " lgr 0,%[reg0]\n" /* qid arg into gr0 */ 213 213 " .insn rre,0xb2af0000,0,0\n" /* PQAP(ZAPQ) */ 214 - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ 214 + " lgr %[reg1],1" /* gr1 (status) into reg1 */ 215 215 : [reg1] "=&d" (reg1.value) 216 216 : [reg0] "d" (reg0) 217 217 : "cc", "0", "1"); ··· 315 315 " lgr 1,%[reg1]\n" /* irq ctrl into gr1 */ 316 316 " lgr 2,%[reg2]\n" /* ni addr into gr2 */ 317 317 " .insn rre,0xb2af0000,0,0\n" /* PQAP(AQIC) */ 318 - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ 318 + " lgr %[reg1],1" /* gr1 (status) into reg1 */ 319 319 : [reg1] "+&d" (reg1.value) 320 320 : [reg0] "d" (reg0), [reg2] "d" (reg2) 321 321 : "cc", "memory", "0", "1", "2"); ··· 363 363 " lgr 1,%[reg1]\n" /* qact in info into gr1 */ 364 364 " .insn rre,0xb2af0000,0,0\n" /* PQAP(QACT) */ 365 365 " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ 366 - " lgr %[reg2],2\n" /* qact out info into reg2 */ 366 + " lgr %[reg2],2" /* qact out info into reg2 */ 367 367 : [reg1] "+&d" (reg1.value), [reg2] "=&d" (reg2) 368 368 : [reg0] "d" (reg0) 369 369 : "cc", "0", "1", "2"); ··· 388 388 asm volatile( 389 389 " lgr 0,%[reg0]\n" /* qid arg into gr0 */ 390 390 " .insn rre,0xb2af0000,0,0\n" /* PQAP(BAPQ) */ 391 - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ 391 + " lgr 
%[reg1],1" /* gr1 (status) into reg1 */ 392 392 : [reg1] "=&d" (reg1.value) 393 393 : [reg0] "d" (reg0) 394 394 : "cc", "0", "1"); ··· 416 416 " lgr 0,%[reg0]\n" /* qid arg into gr0 */ 417 417 " lgr 2,%[reg2]\n" /* secret index into gr2 */ 418 418 " .insn rre,0xb2af0000,0,0\n" /* PQAP(AAPQ) */ 419 - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ 419 + " lgr %[reg1],1" /* gr1 (status) into reg1 */ 420 420 : [reg1] "=&d" (reg1.value) 421 421 : [reg0] "d" (reg0), [reg2] "d" (reg2) 422 422 : "cc", "0", "1", "2"); ··· 453 453 " lgr 0,%[reg0]\n" /* qid param in gr0 */ 454 454 "0: .insn rre,0xb2ad0000,%[nqap_r1],%[nqap_r2]\n" 455 455 " brc 2,0b\n" /* handle partial completion */ 456 - " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ 456 + " lgr %[reg1],1" /* gr1 (status) into reg1 */ 457 457 : [reg0] "+&d" (reg0), [reg1] "=&d" (reg1.value), 458 458 [nqap_r2] "+&d" (nqap_r2.pair) 459 459 : [nqap_r1] "d" (nqap_r1.pair) ··· 518 518 " brc 6,0b\n" /* handle partial complete */ 519 519 "2: lgr %[reg0],0\n" /* gr0 (qid + info) into reg0 */ 520 520 " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ 521 - " lgr %[reg2],2\n" /* gr2 (res length) into reg2 */ 521 + " lgr %[reg2],2" /* gr2 (res length) into reg2 */ 522 522 : [reg0] "+&d" (reg0), [reg1] "=&d" (reg1.value), 523 523 [reg2] "=&d" (reg2), [rp1] "+&d" (rp1.pair), 524 524 [rp2] "+&d" (rp2.pair)
+14 -14
arch/s390/include/asm/atomic_ops.h
··· 17 17 int val; 18 18 19 19 asm volatile( 20 - " l %[val],%[ptr]\n" 20 + " l %[val],%[ptr]" 21 21 : [val] "=d" (val) : [ptr] "R" (*ptr)); 22 22 return val; 23 23 } ··· 26 26 { 27 27 if (__builtin_constant_p(val) && val >= S16_MIN && val <= S16_MAX) { 28 28 asm volatile( 29 - " mvhi %[ptr],%[val]\n" 29 + " mvhi %[ptr],%[val]" 30 30 : [ptr] "=Q" (*ptr) : [val] "K" (val)); 31 31 } else { 32 32 asm volatile( 33 - " st %[val],%[ptr]\n" 33 + " st %[val],%[ptr]" 34 34 : [ptr] "=R" (*ptr) : [val] "d" (val)); 35 35 } 36 36 } ··· 40 40 long val; 41 41 42 42 asm volatile( 43 - " lg %[val],%[ptr]\n" 43 + " lg %[val],%[ptr]" 44 44 : [val] "=d" (val) : [ptr] "RT" (*ptr)); 45 45 return val; 46 46 } ··· 49 49 { 50 50 if (__builtin_constant_p(val) && val >= S16_MIN && val <= S16_MAX) { 51 51 asm volatile( 52 - " mvghi %[ptr],%[val]\n" 52 + " mvghi %[ptr],%[val]" 53 53 : [ptr] "=Q" (*ptr) : [val] "K" (val)); 54 54 } else { 55 55 asm volatile( 56 - " stg %[val],%[ptr]\n" 56 + " stg %[val],%[ptr]" 57 57 : [ptr] "=RT" (*ptr) : [val] "d" (val)); 58 58 } 59 59 } ··· 66 66 op_type old; \ 67 67 \ 68 68 asm volatile( \ 69 - op_string " %[old],%[val],%[ptr]\n" \ 69 + op_string " %[old],%[val],%[ptr]" \ 70 70 op_barrier \ 71 71 : [old] "=d" (old), [ptr] "+QS" (*ptr) \ 72 72 : [val] "d" (val) : "cc", "memory"); \ ··· 75 75 76 76 #define __ATOMIC_OPS(op_name, op_type, op_string) \ 77 77 __ATOMIC_OP(op_name, op_type, op_string, "") \ 78 - __ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n") 78 + __ATOMIC_OP(op_name##_barrier, op_type, op_string, "\nbcr 14,0") 79 79 80 80 __ATOMIC_OPS(__atomic_add, int, "laa") 81 81 __ATOMIC_OPS(__atomic_and, int, "lan") ··· 94 94 static __always_inline void op_name(op_type val, op_type *ptr) \ 95 95 { \ 96 96 asm volatile( \ 97 - op_string " %[ptr],%[val]\n" \ 97 + op_string " %[ptr],%[val]" \ 98 98 op_barrier \ 99 99 : [ptr] "+QS" (*ptr) : [val] "i" (val) : "cc", "memory");\ 100 100 } 101 101 102 102 #define __ATOMIC_CONST_OPS(op_name, op_type, 
op_string) \ 103 103 __ATOMIC_CONST_OP(op_name, op_type, op_string, "") \ 104 - __ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n") 104 + __ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "\nbcr 14,0") 105 105 106 106 __ATOMIC_CONST_OPS(__atomic_add_const, int, "asi") 107 107 __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi") ··· 179 179 int cc; \ 180 180 \ 181 181 asm volatile( \ 182 - op_string " %[tmp],%[val],%[ptr]\n" \ 182 + op_string " %[tmp],%[val],%[ptr]" \ 183 183 op_barrier \ 184 184 : "=@cc" (cc), [tmp] "=d" (tmp), [ptr] "+QS" (*ptr) \ 185 185 : [val] "d" (val) \ ··· 189 189 190 190 #define __ATOMIC_TEST_OPS(op_name, op_type, op_string) \ 191 191 __ATOMIC_TEST_OP(op_name, op_type, op_string, "") \ 192 - __ATOMIC_TEST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n") 192 + __ATOMIC_TEST_OP(op_name##_barrier, op_type, op_string, "\nbcr 14,0") 193 193 194 194 __ATOMIC_TEST_OPS(__atomic_add_and_test, int, "laal") 195 195 __ATOMIC_TEST_OPS(__atomic64_add_and_test, long, "laalg") ··· 203 203 int cc; \ 204 204 \ 205 205 asm volatile( \ 206 - op_string " %[ptr],%[val]\n" \ 206 + op_string " %[ptr],%[val]" \ 207 207 op_barrier \ 208 208 : "=@cc" (cc), [ptr] "+QS" (*ptr) \ 209 209 : [val] "i" (val) \ ··· 213 213 214 214 #define __ATOMIC_CONST_TEST_OPS(op_name, op_type, op_string) \ 215 215 __ATOMIC_CONST_TEST_OP(op_name, op_type, op_string, "") \ 216 - __ATOMIC_CONST_TEST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n") 216 + __ATOMIC_CONST_TEST_OP(op_name##_barrier, op_type, op_string, "\nbcr 14,0") 217 217 218 218 __ATOMIC_CONST_TEST_OPS(__atomic_add_const_and_test, int, "alsi") 219 219 __ATOMIC_CONST_TEST_OPS(__atomic64_add_const_and_test, long, "algsi")
+4 -4
arch/s390/include/asm/barrier.h
··· 18 18 19 19 #ifdef MARCH_HAS_Z196_FEATURES 20 20 /* Fast-BCR without checkpoint synchronization */ 21 - #define __ASM_BCR_SERIALIZE "bcr 14,0\n" 21 + #define __ASM_BCR_SERIALIZE "bcr 14,0" 22 22 #else 23 - #define __ASM_BCR_SERIALIZE "bcr 15,0\n" 23 + #define __ASM_BCR_SERIALIZE "bcr 15,0" 24 24 #endif 25 25 26 26 static __always_inline void bcr_serialize(void) ··· 69 69 70 70 if (__builtin_constant_p(size) && size > 0) { 71 71 asm(" clgr %2,%1\n" 72 - " slbgr %0,%0\n" 72 + " slbgr %0,%0" 73 73 :"=d" (mask) : "d" (size-1), "d" (index) :"cc"); 74 74 return mask; 75 75 } 76 76 asm(" clgr %1,%2\n" 77 - " slbgr %0,%0\n" 77 + " slbgr %0,%0" 78 78 :"=d" (mask) : "d" (size), "d" (index) :"cc"); 79 79 return ~mask; 80 80 }
+1 -1
arch/s390/include/asm/bitops.h
··· 62 62 addr += (nr ^ (BITS_PER_LONG - BITS_PER_BYTE)) / BITS_PER_BYTE; 63 63 mask = 1UL << (nr & (BITS_PER_BYTE - 1)); 64 64 asm volatile( 65 - " tm %[addr],%[mask]\n" 65 + " tm %[addr],%[mask]" 66 66 : "=@cc" (cc) 67 67 : [addr] "Q" (*addr), [mask] "I" (mask) 68 68 );
+1 -1
arch/s390/include/asm/checksum.h
··· 27 27 kmsan_check_memory(buff, len); 28 28 asm volatile( 29 29 "0: cksm %[sum],%[rp]\n" 30 - " jo 0b\n" 30 + " jo 0b" 31 31 : [sum] "+&d" (sum), [rp] "+&d" (rp.pair) : : "cc", "memory"); 32 32 return sum; 33 33 }
+6 -6
arch/s390/include/asm/cmpxchg.h
··· 18 18 static __always_inline u32 __cs_asm(u64 ptr, u32 old, u32 new) 19 19 { 20 20 asm volatile( 21 - " cs %[old],%[new],%[ptr]\n" 21 + " cs %[old],%[new],%[ptr]" 22 22 : [old] "+d" (old), [ptr] "+Q" (*(u32 *)ptr) 23 23 : [new] "d" (new) 24 24 : "memory", "cc"); ··· 28 28 static __always_inline u64 __csg_asm(u64 ptr, u64 old, u64 new) 29 29 { 30 30 asm volatile( 31 - " csg %[old],%[new],%[ptr]\n" 31 + " csg %[old],%[new],%[ptr]" 32 32 : [old] "+d" (old), [ptr] "+QS" (*(u64 *)ptr) 33 33 : [new] "d" (new) 34 34 : "memory", "cc"); ··· 126 126 } \ 127 127 case 4: { \ 128 128 asm volatile( \ 129 - " cs %[__old],%[__new],%[__ptr]\n" \ 129 + " cs %[__old],%[__new],%[__ptr]" \ 130 130 : [__old] "+d" (*__oldp), \ 131 131 [__ptr] "+Q" (*(ptr)), \ 132 132 "=@cc" (__cc) \ ··· 136 136 } \ 137 137 case 8: { \ 138 138 asm volatile( \ 139 - " csg %[__old],%[__new],%[__ptr]\n" \ 139 + " csg %[__old],%[__new],%[__ptr]" \ 140 140 : [__old] "+d" (*__oldp), \ 141 141 [__ptr] "+QS" (*(ptr)), \ 142 142 "=@cc" (__cc) \ ··· 241 241 static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new) 242 242 { 243 243 asm volatile( 244 - " cdsg %[old],%[new],%[ptr]\n" 244 + " cdsg %[old],%[new],%[ptr]" 245 245 : [old] "+d" (old), [ptr] "+QS" (*ptr) 246 246 : [new] "d" (new) 247 247 : "memory", "cc"); ··· 258 258 int cc; 259 259 260 260 asm volatile( 261 - " cdsg %[old],%[new],%[ptr]\n" 261 + " cdsg %[old],%[new],%[ptr]" 262 262 : [old] "+d" (*oldp), [ptr] "+QS" (*ptr), "=@cc" (cc) 263 263 : [new] "d" (new) 264 264 : "memory");
+12 -12
arch/s390/include/asm/cpacf.h
··· 229 229 asm volatile( 230 230 " la %%r1,%[pb]\n" 231 231 " lghi %%r0,%[fc]\n" 232 - " .insn rre,%[opc] << 16,%[r1],%[r2]\n" 232 + " .insn rre,%[opc] << 16,%[r1],%[r2]" 233 233 : [pb] "=R" (*pb) 234 234 : [opc] "i" (opc), [fc] "i" (fc), 235 235 [r1] "i" (r1), [r2] "i" (r2) ··· 242 242 asm volatile( 243 243 " la %%r1,%[pb]\n" 244 244 " lghi %%r0,%[fc]\n" 245 - " .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n" 245 + " .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]" 246 246 : [pb] "=R" (*pb) 247 247 : [opc] "i" (opc), [fc] "i" (fc), [r1] "i" (r1), 248 248 [r2] "i" (r2), [r3] "i" (r3), [m4] "i" (m4) ··· 416 416 " lgr 0,%[fc]\n" 417 417 " lgr 1,%[pba]\n" 418 418 "0: .insn rre,%[opc] << 16,%[dst],%[src]\n" 419 - " brc 1,0b\n" /* handle partial completion */ 419 + " brc 1,0b" /* handle partial completion */ 420 420 : [src] "+&d" (s.pair), [dst] "+&d" (d.pair) 421 421 : [fc] "d" (func), [pba] "d" ((unsigned long)param), 422 422 [opc] "i" (CPACF_KM) ··· 448 448 " lgr 0,%[fc]\n" 449 449 " lgr 1,%[pba]\n" 450 450 "0: .insn rre,%[opc] << 16,%[dst],%[src]\n" 451 - " brc 1,0b\n" /* handle partial completion */ 451 + " brc 1,0b" /* handle partial completion */ 452 452 : [src] "+&d" (s.pair), [dst] "+&d" (d.pair) 453 453 : [fc] "d" (func), [pba] "d" ((unsigned long)param), 454 454 [opc] "i" (CPACF_KMC) ··· 476 476 " lgr 0,%[fc]\n" 477 477 " lgr 1,%[pba]\n" 478 478 "0: .insn rrf,%[opc] << 16,0,%[src],8,0\n" 479 - " brc 1,0b\n" /* handle partial completion */ 479 + " brc 1,0b" /* handle partial completion */ 480 480 : [src] "+&d" (s.pair) 481 481 : [fc] "d" (func), [pba] "d" ((unsigned long)(param)), 482 482 [opc] "i" (CPACF_KIMD) ··· 501 501 " lgr 0,%[fc]\n" 502 502 " lgr 1,%[pba]\n" 503 503 "0: .insn rrf,%[opc] << 16,0,%[src],8,0\n" 504 - " brc 1,0b\n" /* handle partial completion */ 504 + " brc 1,0b" /* handle partial completion */ 505 505 : [src] "+&d" (s.pair) 506 506 : [fc] "d" (func), [pba] "d" ((unsigned long)param), 507 507 [opc] "i" (CPACF_KLMD) ··· 530 530 " lgr 
1,%[pba]\n" 531 531 "0: .insn rre,%[opc] << 16,0,%[src]\n" 532 532 " brc 1,0b\n" /* handle partial completion */ 533 - " lgr %[r0],0\n" 533 + " lgr %[r0],0" 534 534 : [r0] "+d" (*gr0), [src] "+&d" (s.pair) 535 535 : [pba] "d" ((unsigned long)param), 536 536 [opc] "i" (CPACF_KMAC) ··· 580 580 " lgr 0,%[fc]\n" 581 581 " lgr 1,%[pba]\n" 582 582 "0: .insn rrf,%[opc] << 16,%[dst],%[src],%[ctr],0\n" 583 - " brc 1,0b\n" /* handle partial completion */ 583 + " brc 1,0b" /* handle partial completion */ 584 584 : [src] "+&d" (s.pair), [dst] "+&d" (d.pair), 585 585 [ctr] "+&d" (c.pair) 586 586 : [fc] "d" (func), [pba] "d" ((unsigned long)param), ··· 614 614 " lgr 0,%[fc]\n" 615 615 " lgr 1,%[pba]\n" 616 616 "0: .insn rre,%[opc] << 16,%[dst],%[seed]\n" 617 - " brc 1,0b\n" /* handle partial completion */ 617 + " brc 1,0b" /* handle partial completion */ 618 618 : [dst] "+&d" (d.pair) 619 619 : [fc] "d" (func), [pba] "d" ((unsigned long)param), 620 620 [seed] "d" (s.pair), [opc] "i" (CPACF_PRNO) ··· 640 640 asm volatile ( 641 641 " lghi 0,%[fc]\n" 642 642 "0: .insn rre,%[opc] << 16,%[ucbuf],%[cbuf]\n" 643 - " brc 1,0b\n" /* handle partial completion */ 643 + " brc 1,0b" /* handle partial completion */ 644 644 : [ucbuf] "+&d" (u.pair), [cbuf] "+&d" (c.pair) 645 645 : [fc] "K" (CPACF_PRNO_TRNG), [opc] "i" (CPACF_PRNO) 646 646 : "cc", "memory", "0"); ··· 692 692 asm volatile( 693 693 " lgr 0,%[fc]\n" 694 694 " lgr 1,%[pba]\n" 695 - " .insn rre,%[opc] << 16,0,0\n" /* PCKMO opcode */ 695 + " .insn rre,%[opc] << 16,0,0" /* PCKMO opcode */ 696 696 : 697 697 : [fc] "d" (func), [pba] "d" ((unsigned long)param), 698 698 [opc] "i" (CPACF_PCKMO) ··· 725 725 " lgr 0,%[fc]\n" 726 726 " lgr 1,%[pba]\n" 727 727 "0: .insn rrf,%[opc] << 16,%[dst],%[src],%[aad],0\n" 728 - " brc 1,0b\n" /* handle partial completion */ 728 + " brc 1,0b" /* handle partial completion */ 729 729 : [dst] "+&d" (d.pair), [src] "+&d" (s.pair), 730 730 [aad] "+&d" (a.pair) 731 731 : [fc] "d" (func), [pba] "d" ((unsigned 
long)param),
+4 -4
arch/s390/include/asm/ctlreg.h
··· 100 100 BUILD_BUG_ON(sizeof(struct addrtype) != _esize); \ 101 101 typecheck(struct ctlreg, array[0]); \ 102 102 asm volatile( \ 103 - " lctlg %[_low],%[_high],%[_arr]\n" \ 103 + " lctlg %[_low],%[_high],%[_arr]" \ 104 104 : \ 105 105 : [_arr] "Q" (*(struct addrtype *)(&array)), \ 106 106 [_low] "i" (low), [_high] "i" (high) \ ··· 119 119 BUILD_BUG_ON(sizeof(struct addrtype) != _esize); \ 120 120 typecheck(struct ctlreg, array[0]); \ 121 121 asm volatile( \ 122 - " stctg %[_low],%[_high],%[_arr]\n" \ 122 + " stctg %[_low],%[_high],%[_arr]" \ 123 123 : [_arr] "=Q" (*(struct addrtype *)(&array)) \ 124 124 : [_low] "i" (low), [_high] "i" (high)); \ 125 125 } while (0) ··· 127 127 static __always_inline void local_ctl_load(unsigned int cr, struct ctlreg *reg) 128 128 { 129 129 asm volatile( 130 - " lctlg %[cr],%[cr],%[reg]\n" 130 + " lctlg %[cr],%[cr],%[reg]" 131 131 : 132 132 : [reg] "Q" (*reg), [cr] "i" (cr) 133 133 : "memory"); ··· 136 136 static __always_inline void local_ctl_store(unsigned int cr, struct ctlreg *reg) 137 137 { 138 138 asm volatile( 139 - " stctg %[cr],%[cr],%[reg]\n" 139 + " stctg %[cr],%[cr],%[reg]" 140 140 : [reg] "=Q" (*reg) 141 141 : [cr] "i" (cr)); 142 142 }
+18 -18
arch/s390/include/asm/fpu-insn.h
··· 38 38 39 39 static __always_inline void fpu_cefbr(u8 f1, s32 val) 40 40 { 41 - asm volatile("cefbr %[f1],%[val]\n" 41 + asm volatile("cefbr %[f1],%[val]" 42 42 : 43 43 : [f1] "I" (f1), [val] "d" (val) 44 44 : "memory"); ··· 48 48 { 49 49 unsigned long val; 50 50 51 - asm volatile("cgebr %[val],%[mode],%[f2]\n" 51 + asm volatile("cgebr %[val],%[mode],%[f2]" 52 52 : [val] "=d" (val) 53 53 : [f2] "I" (f2), [mode] "I" (mode) 54 54 : "memory"); ··· 57 57 58 58 static __always_inline void fpu_debr(u8 f1, u8 f2) 59 59 { 60 - asm volatile("debr %[f1],%[f2]\n" 60 + asm volatile("debr %[f1],%[f2]" 61 61 : 62 62 : [f1] "I" (f1), [f2] "I" (f2) 63 63 : "memory"); ··· 66 66 static __always_inline void fpu_ld(unsigned short fpr, freg_t *reg) 67 67 { 68 68 instrument_read(reg, sizeof(*reg)); 69 - asm volatile("ld %[fpr],%[reg]\n" 69 + asm volatile("ld %[fpr],%[reg]" 70 70 : 71 71 : [fpr] "I" (fpr), [reg] "Q" (reg->ui) 72 72 : "memory"); ··· 74 74 75 75 static __always_inline void fpu_ldgr(u8 f1, u32 val) 76 76 { 77 - asm volatile("ldgr %[f1],%[val]\n" 77 + asm volatile("ldgr %[f1],%[val]" 78 78 : 79 79 : [f1] "I" (f1), [val] "d" (val) 80 80 : "memory"); ··· 113 113 static __always_inline void fpu_std(unsigned short fpr, freg_t *reg) 114 114 { 115 115 instrument_write(reg, sizeof(*reg)); 116 - asm volatile("std %[fpr],%[reg]\n" 116 + asm volatile("std %[fpr],%[reg]" 117 117 : [reg] "=Q" (reg->ui) 118 118 : [fpr] "I" (fpr) 119 119 : "memory"); ··· 181 181 static __always_inline void fpu_vl(u8 v1, const void *vxr) 182 182 { 183 183 instrument_read(vxr, sizeof(__vector128)); 184 - asm volatile("VL %[v1],%O[vxr],,%R[vxr]\n" 184 + asm volatile("VL %[v1],%O[vxr],,%R[vxr]" 185 185 : 186 186 : [vxr] "Q" (*(__vector128 *)vxr), 187 187 [v1] "I" (v1) ··· 195 195 instrument_read(vxr, sizeof(__vector128)); 196 196 asm volatile( 197 197 " la 1,%[vxr]\n" 198 - " VL %[v1],0,,1\n" 198 + " VL %[v1],0,,1" 199 199 : 200 200 : [vxr] "R" (*(__vector128 *)vxr), 201 201 [v1] "I" (v1) ··· 239 239 240 
240 size = min(index + 1, sizeof(__vector128)); 241 241 instrument_read(vxr, size); 242 - asm volatile("VLL %[v1],%[index],%O[vxr],%R[vxr]\n" 242 + asm volatile("VLL %[v1],%[index],%O[vxr],%R[vxr]" 243 243 : 244 244 : [vxr] "Q" (*(u8 *)vxr), 245 245 [index] "d" (index), ··· 257 257 instrument_read(vxr, size); 258 258 asm volatile( 259 259 " la 1,%[vxr]\n" 260 - " VLL %[v1],%[index],0,1\n" 260 + " VLL %[v1],%[index],0,1" 261 261 : 262 262 : [vxr] "R" (*(u8 *)vxr), 263 263 [index] "d" (index), ··· 277 277 } *_v = (void *)(_vxrs); \ 278 278 \ 279 279 instrument_read(_v, size); \ 280 - asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \ 280 + asm volatile("VLM %[v1],%[v3],%O[vxrs],%R[vxrs]" \ 281 281 : \ 282 282 : [vxrs] "Q" (*_v), \ 283 283 [v1] "I" (_v1), [v3] "I" (_v3) \ ··· 297 297 instrument_read(_v, size); \ 298 298 asm volatile( \ 299 299 " la 1,%[vxrs]\n" \ 300 - " VLM %[v1],%[v3],0,1\n" \ 300 + " VLM %[v1],%[v3],0,1" \ 301 301 : \ 302 302 : [vxrs] "R" (*_v), \ 303 303 [v1] "I" (_v1), [v3] "I" (_v3) \ ··· 360 360 static __always_inline void fpu_vst(u8 v1, const void *vxr) 361 361 { 362 362 instrument_write(vxr, sizeof(__vector128)); 363 - asm volatile("VST %[v1],%O[vxr],,%R[vxr]\n" 363 + asm volatile("VST %[v1],%O[vxr],,%R[vxr]" 364 364 : [vxr] "=Q" (*(__vector128 *)vxr) 365 365 : [v1] "I" (v1) 366 366 : "memory"); ··· 373 373 instrument_write(vxr, sizeof(__vector128)); 374 374 asm volatile( 375 375 " la 1,%[vxr]\n" 376 - " VST %[v1],0,,1\n" 376 + " VST %[v1],0,,1" 377 377 : [vxr] "=R" (*(__vector128 *)vxr) 378 378 : [v1] "I" (v1) 379 379 : "memory", "1"); ··· 389 389 390 390 size = min(index + 1, sizeof(__vector128)); 391 391 instrument_write(vxr, size); 392 - asm volatile("VSTL %[v1],%[index],%O[vxr],%R[vxr]\n" 392 + asm volatile("VSTL %[v1],%[index],%O[vxr],%R[vxr]" 393 393 : [vxr] "=Q" (*(u8 *)vxr) 394 394 : [index] "d" (index), [v1] "I" (v1) 395 395 : "memory"); ··· 405 405 instrument_write(vxr, size); 406 406 asm volatile( 407 407 " la 1,%[vxr]\n" 408 - 
" VSTL %[v1],%[index],0,1\n" 408 + " VSTL %[v1],%[index],0,1" 409 409 : [vxr] "=R" (*(u8 *)vxr) 410 410 : [index] "d" (index), [v1] "I" (v1) 411 411 : "memory", "1"); ··· 423 423 } *_v = (void *)(_vxrs); \ 424 424 \ 425 425 instrument_write(_v, size); \ 426 - asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]\n" \ 426 + asm volatile("VSTM %[v1],%[v3],%O[vxrs],%R[vxrs]" \ 427 427 : [vxrs] "=Q" (*_v) \ 428 428 : [v1] "I" (_v1), [v3] "I" (_v3) \ 429 429 : "memory"); \ ··· 442 442 instrument_write(_v, size); \ 443 443 asm volatile( \ 444 444 " la 1,%[vxrs]\n" \ 445 - " VSTM %[v1],%[v3],0,1\n" \ 445 + " VSTM %[v1],%[v3],0,1" \ 446 446 : [vxrs] "=R" (*_v) \ 447 447 : [v1] "I" (_v1), [v3] "I" (_v3) \ 448 448 : "memory", "1"); \
+1 -1
arch/s390/include/asm/kvm_para.h
··· 76 76 HYPERCALL_REGS_##args; \ 77 77 \ 78 78 asm volatile ( \ 79 - " diag 2,4,0x500\n" \ 79 + " diag 2,4,0x500" \ 80 80 : "=d" (__rc) \ 81 81 : "d" (__nr) HYPERCALL_FMT_##args \ 82 82 : "memory", "cc"); \
+4 -4
arch/s390/include/asm/percpu.h
··· 73 73 if (__builtin_constant_p(val__) && \ 74 74 ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \ 75 75 asm volatile( \ 76 - op2 " %[ptr__],%[val__]\n" \ 76 + op2 " %[ptr__],%[val__]" \ 77 77 : [ptr__] "+Q" (*ptr__) \ 78 78 : [val__] "i" ((szcast)val__) \ 79 79 : "cc"); \ 80 80 } else { \ 81 81 asm volatile( \ 82 - op1 " %[old__],%[val__],%[ptr__]\n" \ 82 + op1 " %[old__],%[val__],%[ptr__]" \ 83 83 : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ 84 84 : [val__] "d" (val__) \ 85 85 : "cc"); \ ··· 98 98 preempt_disable_notrace(); \ 99 99 ptr__ = raw_cpu_ptr(&(pcp)); \ 100 100 asm volatile( \ 101 - op " %[old__],%[val__],%[ptr__]\n" \ 101 + op " %[old__],%[val__],%[ptr__]" \ 102 102 : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ 103 103 : [val__] "d" (val__) \ 104 104 : "cc"); \ ··· 117 117 preempt_disable_notrace(); \ 118 118 ptr__ = raw_cpu_ptr(&(pcp)); \ 119 119 asm volatile( \ 120 - op " %[old__],%[val__],%[ptr__]\n" \ 120 + op " %[old__],%[val__],%[ptr__]" \ 121 121 : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ 122 122 : [val__] "d" (val__) \ 123 123 : "cc"); \
+1 -1
arch/s390/include/asm/processor.h
··· 163 163 "2: stg %[poison],0(%[addr])\n" 164 164 " j 4f\n" 165 165 "3: mvc 8(1,%[addr]),0(%[addr])\n" 166 - "4:\n" 166 + "4:" 167 167 : [addr] "+&a" (erase_low), [count] "+&d" (count), [tmp] "=&a" (tmp) 168 168 : [poison] "d" (poison) 169 169 : "memory", "cc"
+1 -1
arch/s390/include/asm/rwonce.h
··· 19 19 \ 20 20 BUILD_BUG_ON(sizeof(x) != 16); \ 21 21 asm volatile( \ 22 - " lpq %[val],%[_x]\n" \ 22 + " lpq %[val],%[_x]" \ 23 23 : [val] "=d" (__u.val) \ 24 24 : [_x] "QS" (x) \ 25 25 : "memory"); \
+1 -1
arch/s390/include/asm/spinlock.h
··· 98 98 kcsan_release(); 99 99 asm_inline volatile( 100 100 ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", ALT_FACILITY(49)) /* NIAI 7 */ 101 - " mvhhi %[lock],0\n" 101 + " mvhhi %[lock],0" 102 102 : [lock] "=Q" (((unsigned short *)&lp->lock)[1]) 103 103 : 104 104 : "memory");
+2 -2
arch/s390/include/asm/stacktrace.h
··· 199 199 " lg 15,%[_stack]\n" \ 200 200 " stg %[_frame],%[_bc](15)\n" \ 201 201 " brasl 14,%[_fn]\n" \ 202 - " lgr 15,%[_prev]\n" \ 202 + " lgr 15,%[_prev]" \ 203 203 : [_prev] "=&d" (prev), CALL_FMT_##nr \ 204 204 : [_stack] "R" (__stack), \ 205 205 [_bc] "i" (offsetof(struct stack_frame, back_chain)), \ ··· 250 250 " lra 14,0(1)\n" \ 251 251 " lpswe %[psw_enter]\n" \ 252 252 "0: lpswe 0(7)\n" \ 253 - "1:\n" \ 253 + "1:" \ 254 254 : CALL_FMT_##nr, [psw_leave] "=Q" (psw_leave) \ 255 255 : [psw_enter] "Q" (psw_enter) \ 256 256 : "7", CALL_CLOBBER_##nr); \
+1 -1
arch/s390/include/asm/string.h
··· 125 125 asm volatile( 126 126 " lgr 0,%[c]\n" 127 127 "0: srst %[ret],%[s]\n" 128 - " jo 0b\n" 128 + " jo 0b" 129 129 : [ret] "+&a" (ret), [s] "+&a" (s) 130 130 : [c] "d" (c) 131 131 : "cc", "memory", "0");
+1 -1
arch/s390/include/asm/syscall.h
··· 155 155 SYSCALL_REGS_##nr; \ 156 156 \ 157 157 asm volatile ( \ 158 - " svc 0\n" \ 158 + " svc 0" \ 159 159 : "=d" (rc) \ 160 160 : "d" (r1) SYSCALL_FMT_##nr \ 161 161 : "memory"); \
+1 -1
arch/s390/include/asm/timex.h
··· 81 81 { 82 82 asm volatile( 83 83 " lgr 0,%[val]\n" 84 - " sckpf\n" 84 + " sckpf" 85 85 : 86 86 : [val] "d" ((unsigned long)val) 87 87 : "0");
+1 -1
arch/s390/kernel/diag/diag310.c
··· 66 66 union register_pair rp = { .even = (unsigned long)addr, .odd = size }; 67 67 68 68 diag_stat_inc(DIAG_STAT_X310); 69 - asm volatile("diag %[rp],%[subcode],0x310\n" 69 + asm volatile("diag %[rp],%[subcode],0x310" 70 70 : [rp] "+d" (rp.pair) 71 71 : [subcode] "d" (subcode) 72 72 : "memory");
+1 -1
arch/s390/kernel/diag/diag324.c
··· 101 101 union register_pair rp = { .even = (unsigned long)addr }; 102 102 103 103 diag_stat_inc(DIAG_STAT_X324); 104 - asm volatile("diag %[rp],%[subcode],0x324\n" 104 + asm volatile("diag %[rp],%[subcode],0x324" 105 105 : [rp] "+d" (rp.pair) 106 106 : [subcode] "d" (subcode) 107 107 : "memory");
+1 -1
arch/s390/kernel/setup.c
··· 839 839 return; 840 840 841 841 diag_stat_inc(DIAG_STAT_X318); 842 - asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val)); 842 + asm volatile("diag %0,0,0x318" : : "d" (diag318_info.val)); 843 843 } 844 844 845 845 /*
+1 -1
arch/s390/kernel/skey.c
··· 11 11 unsigned long real; 12 12 13 13 asm volatile( 14 - " lra %[real],0(%[address])\n" 14 + " lra %[real],0(%[address])" 15 15 : [real] "=d" (real) 16 16 : [address] "a" (address) 17 17 : "cc");
+1 -1
arch/s390/kernel/smp.c
··· 340 340 "0: sigp 0,%0,%2 # sigp restart to target cpu\n" 341 341 " brc 2,0b # busy, try again\n" 342 342 "1: sigp 0,%1,%3 # sigp stop to current cpu\n" 343 - " brc 2,1b # busy, try again\n" 343 + " brc 2,1b # busy, try again" 344 344 : : "d" (pcpu->address), "d" (source_cpu), 345 345 "K" (SIGP_RESTART), "K" (SIGP_STOP) 346 346 : "0", "1", "cc");
+3 -3
arch/s390/kvm/kvm-s390.c
··· 356 356 { 357 357 asm volatile( 358 358 " lghi 0,0\n" 359 - " .insn rsy,0xeb0000000016,0,0,%[query]\n" 359 + " .insn rsy,0xeb0000000016,0,0,%[query]" 360 360 : [query] "=QS" (*query) 361 361 : 362 362 : "cc", "0"); ··· 368 368 " lghi 0,0\n" 369 369 " la 1,%[query]\n" 370 370 /* Parameter registers are ignored */ 371 - " .insn rre,0xb9380000,2,4\n" 371 + " .insn rre,0xb9380000,2,4" 372 372 : [query] "=R" (*query) 373 373 : 374 374 : "cc", "0", "1"); ··· 380 380 " lghi 0,0\n" 381 381 " la 1,%[query]\n" 382 382 /* Parameter registers are ignored */ 383 - " .insn rrf,0xb9390000,2,4,6,0\n" 383 + " .insn rrf,0xb9390000,2,4,6,0" 384 384 : [query] "=R" (*query) 385 385 : 386 386 : "cc", "0", "1");
+3 -3
arch/s390/lib/spinlock.c
··· 96 96 97 97 asm_inline volatile( 98 98 ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", ALT_FACILITY(49)) /* NIAI 4 */ 99 - " l %[owner],%[lock]\n" 99 + " l %[owner],%[lock]" 100 100 : [owner] "=d" (owner) : [lock] "R" (*lock) : "memory"); 101 101 return owner; 102 102 } ··· 109 109 110 110 asm_inline volatile( 111 111 ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */ 112 - " cs %[old],%[new],%[lock]\n" 112 + " cs %[old],%[new],%[lock]" 113 113 : [old] "+d" (old), [lock] "+Q" (*lock), "=@cc" (cc) 114 114 : [new] "d" (new) 115 115 : "memory"); ··· 124 124 125 125 asm_inline volatile( 126 126 ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */ 127 - " cs %[old],%[new],%[lock]\n" 127 + " cs %[old],%[new],%[lock]" 128 128 : [old] "+d" (old), [lock] "+Q" (*lock) 129 129 : [new] "d" (new) 130 130 : "cc", "memory");
+4 -4
arch/s390/lib/string.c
··· 27 27 asm volatile( 28 28 " lghi 0,0\n" 29 29 "0: srst %[e],%[s]\n" 30 - " jo 0b\n" 30 + " jo 0b" 31 31 : [e] "+&a" (e), [s] "+&a" (s) 32 32 : 33 33 : "cc", "memory", "0"); ··· 41 41 asm volatile( 42 42 " lghi 0,0\n" 43 43 "0: srst %[p],%[s]\n" 44 - " jo 0b\n" 44 + " jo 0b" 45 45 : [p] "+&d" (p), [s] "+&a" (s) 46 46 : 47 47 : "cc", "memory", "0"); ··· 95 95 "0: srst %[dummy],%[dest]\n" 96 96 " jo 0b\n" 97 97 "1: mvst %[dummy],%[src]\n" 98 - " jo 1b\n" 98 + " jo 1b" 99 99 : [dummy] "+&a" (dummy), [dest] "+&a" (dest), [src] "+&a" (src) 100 100 : 101 101 : "cc", "memory", "0"); ··· 291 291 asm volatile( 292 292 " lgr 0,%[c]\n" 293 293 "0: srst %[ret],%[s]\n" 294 - " jo 0b\n" 294 + " jo 0b" 295 295 : [ret] "+&a" (ret), [s] "+&a" (s) 296 296 : [c] "d" (c) 297 297 : "cc", "memory", "0");
+2 -2
arch/s390/lib/test_unwind.c
··· 150 150 regs.gprs[15] = current_stack_pointer; 151 151 152 152 asm volatile( 153 - "basr %[psw_addr],0\n" 153 + "basr %[psw_addr],0" 154 154 : [psw_addr] "=d" (regs.psw.addr)); 155 155 return regs; 156 156 } ··· 232 232 asm volatile( 233 233 " nopr %%r7\n" 234 234 "test_unwind_kprobed_insn:\n" 235 - " nopr %%r7\n" 235 + " nopr %%r7" 236 236 :); 237 237 } 238 238
+4 -4
arch/s390/lib/xor.c
··· 27 27 "1: exrl %0,2f\n" 28 28 " j 3f\n" 29 29 "2: xc 0(1,%1),0(%2)\n" 30 - "3:\n" 30 + "3:" 31 31 : : "d" (bytes), "a" (p1), "a" (p2) 32 32 : "0", "cc", "memory"); 33 33 } ··· 53 53 " j 4f\n" 54 54 "2: xc 0(1,%1),0(%2)\n" 55 55 "3: xc 0(1,%1),0(%3)\n" 56 - "4:\n" 56 + "4:" 57 57 : "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3) 58 58 : : "0", "cc", "memory"); 59 59 } ··· 84 84 "2: xc 0(1,%1),0(%2)\n" 85 85 "3: xc 0(1,%1),0(%3)\n" 86 86 "4: xc 0(1,%1),0(%4)\n" 87 - "5:\n" 87 + "5:" 88 88 : "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4) 89 89 : : "0", "cc", "memory"); 90 90 } ··· 121 121 "3: xc 0(1,%1),0(%3)\n" 122 122 "4: xc 0(1,%1),0(%4)\n" 123 123 "5: xc 0(1,%1),0(%5)\n" 124 - "6:\n" 124 + "6:" 125 125 : "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4), 126 126 "+a" (p5) 127 127 : : "0", "cc", "memory");
+1 -1
arch/s390/mm/maccess.c
··· 41 41 " ex %1,0(1)\n" 42 42 " lg %1,0(%3)\n" 43 43 " lra %0,0(%0)\n" 44 - " sturg %1,%0\n" 44 + " sturg %1,%0" 45 45 : "+&a" (aligned), "+&a" (count), "=m" (tmp) 46 46 : "a" (&tmp), "a" (&tmp[offset]), "a" (src) 47 47 : "cc", "memory", "1");
+1 -1
arch/s390/mm/pgalloc.c
··· 245 245 unsigned long real; 246 246 247 247 asm volatile( 248 - " lra %0,0(%1)\n" 248 + " lra %0,0(%1)" 249 249 : "=d" (real) : "a" (address) : "cc"); 250 250 return real; 251 251 }
+2 -2
arch/s390/pci/pci_insn.c
··· 145 145 return -EIO; 146 146 147 147 asm volatile( 148 - ".insn rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]\n" 148 + ".insn rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]" 149 149 : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [iib] "Q" (*iib)); 150 150 151 151 return 0; ··· 442 442 443 443 static inline void __pciwb_mio(void) 444 444 { 445 - asm volatile (".insn rre,0xb9d50000,0,0\n"); 445 + asm volatile (".insn rre,0xb9d50000,0,0"); 446 446 } 447 447 448 448 void zpci_barrier(void)
+1 -1
drivers/s390/char/sclp_early_core.c
··· 51 51 " stg %[addr],%[psw_wait_addr]\n" 52 52 " stg %[addr],%[psw_ext_addr]\n" 53 53 " lpswe %[psw_wait]\n" 54 - "0:\n" 54 + "0:" 55 55 : [addr] "=&d" (addr), 56 56 [psw_wait_addr] "=Q" (psw_wait.addr), 57 57 [psw_ext_addr] "=Q" (get_lowcore()->external_new_psw.addr)
+1 -1
drivers/s390/cio/cmf.c
··· 167 167 asm volatile( 168 168 " lgr 1,%[r1]\n" 169 169 " lgr 2,%[mbo]\n" 170 - " schm\n" 170 + " schm" 171 171 : 172 172 : [r1] "d" ((unsigned long)onoff), 173 173 [mbo] "d" (virt_to_phys(area))