Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390: Remove superfluous new lines from inline assemblies

GCC uses the number of lines of an inline assembly to calculate its length
(number of instructions). This has an impact on GCC's inlining decisions.

Therefore remove superfluous new lines from a couple of inline
assemblies, so that their real size is reflected.

Also use an "asm inline" statement for the fpu_lfpc_safe() inline assembly
to enforce that GCC assumes the minimum size for this inline assembly,
since it contains various statements which make it appear much larger than
the resulting code is.

Suggested-by: Juergen Christ <jchrist@linux.ibm.com>
Reviewed-by: Juergen Christ <jchrist@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>

authored by

Heiko Carstens and committed by
Alexander Gordeev
7c7f32c9 2ca248f5

+10 -10
+2 -2
arch/s390/include/asm/atomic_ops.h
··· 73 73 } \ 74 74 75 75 #define __ATOMIC_OPS(op_name, op_type, op_string) \ 76 - __ATOMIC_OP(op_name, op_type, op_string, "\n") \ 76 + __ATOMIC_OP(op_name, op_type, op_string, "") \ 77 77 __ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n") 78 78 79 79 __ATOMIC_OPS(__atomic_add, int, "laa") ··· 99 99 } 100 100 101 101 #define __ATOMIC_CONST_OPS(op_name, op_type, op_string) \ 102 - __ATOMIC_CONST_OP(op_name, op_type, op_string, "\n") \ 102 + __ATOMIC_CONST_OP(op_name, op_type, op_string, "") \ 103 103 __ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n") 104 104 105 105 __ATOMIC_CONST_OPS(__atomic_add_const, int, "asi")
+1 -1
arch/s390/include/asm/checksum.h
··· 25 25 26 26 instrument_read(buff, len); 27 27 kmsan_check_memory(buff, len); 28 - asm volatile("\n" 28 + asm volatile( 29 29 "0: cksm %[sum],%[rp]\n" 30 30 " jo 0b\n" 31 31 : [sum] "+&d" (sum), [rp] "+&d" (rp.pair) : : "cc", "memory");
+7 -7
arch/s390/include/asm/fpu-insn.h
··· 103 103 u32 tmp; 104 104 105 105 instrument_read(fpc, sizeof(*fpc)); 106 - asm volatile("\n" 106 + asm_inline volatile( 107 107 "0: lfpc %[fpc]\n" 108 108 "1: nopr %%r7\n" 109 109 ".pushsection .fixup, \"ax\"\n" ··· 188 188 static __always_inline void fpu_vl(u8 v1, const void *vxr) 189 189 { 190 190 instrument_read(vxr, sizeof(__vector128)); 191 - asm volatile("\n" 191 + asm volatile( 192 192 " la 1,%[vxr]\n" 193 193 " VL %[v1],0,,1\n" 194 194 : ··· 246 246 247 247 size = min(index + 1, sizeof(__vector128)); 248 248 instrument_read(vxr, size); 249 - asm volatile("\n" 249 + asm volatile( 250 250 " la 1,%[vxr]\n" 251 251 " VLL %[v1],%[index],0,1\n" 252 252 : ··· 284 284 } *_v = (void *)(_vxrs); \ 285 285 \ 286 286 instrument_read(_v, size); \ 287 - asm volatile("\n" \ 287 + asm volatile( \ 288 288 " la 1,%[vxrs]\n" \ 289 289 " VLM %[v1],%[v3],0,1\n" \ 290 290 : \ ··· 367 367 static __always_inline void fpu_vst(u8 v1, const void *vxr) 368 368 { 369 369 instrument_write(vxr, sizeof(__vector128)); 370 - asm volatile("\n" 370 + asm volatile( 371 371 " la 1,%[vxr]\n" 372 372 " VST %[v1],0,,1\n" 373 373 : [vxr] "=R" (*(__vector128 *)vxr) ··· 396 396 397 397 size = min(index + 1, sizeof(__vector128)); 398 398 instrument_write(vxr, size); 399 - asm volatile("\n" 399 + asm volatile( 400 400 " la 1,%[vxr]\n" 401 401 " VSTL %[v1],%[index],0,1\n" 402 402 : [vxr] "=R" (*(u8 *)vxr) ··· 430 430 } *_v = (void *)(_vxrs); \ 431 431 \ 432 432 instrument_write(_v, size); \ 433 - asm volatile("\n" \ 433 + asm volatile( \ 434 434 " la 1,%[vxrs]\n" \ 435 435 " VSTM %[v1],%[v3],0,1\n" \ 436 436 : [vxrs] "=R" (*_v) \