Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390: Use inline qualifier for all EX_TABLE and ALTERNATIVE inline assemblies

Use asm_inline for all inline assemblies which make use of the EX_TABLE or
ALTERNATIVE macros.

These macros expand to many lines and the compiler assumes the number of
lines within an inline assembly is the same as the number of instructions
within an inline assembly. This has an effect on inlining and loop
unrolling decisions.

In order to avoid incorrect assumptions, use asm_inline, which tells the
compiler that an inline assembly has the smallest possible size.

In order to avoid confusion about whether or not asm_inline should be
used — since a couple of the inline assemblies are quite large — the rule
is to always use asm_inline whenever the EX_TABLE or ALTERNATIVE macro is
used. In specific cases there may be reasons to not follow this guideline,
but that should be documented with the corresponding code.

Using the inline qualifier everywhere has only a small effect on the kernel
image size:

add/remove: 0/10 grow/shrink: 19/8 up/down: 1492/-1858 (-366)

The only location where this seems to matter is load_unaligned_zeropad()
from word-at-a-time.h, where the compiler inlines more functions within the
dcache code, which is indeed code where performance matters.

Suggested-by: Juergen Christ <jchrist@linux.ibm.com>
Reviewed-by: Juergen Christ <jchrist@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>

authored by

Heiko Carstens and committed by
Vasily Gorbik
0dafe996 caa3cd5c

+48 -44
+1 -1
arch/s390/boot/ipl_parm.c
··· 37 37 { 38 38 union register_pair r1 = { .even = (unsigned long)addr, .odd = 0 }; 39 39 40 - asm volatile( 40 + asm_inline volatile( 41 41 " diag %[r1],%[subcode],0x308\n" 42 42 "0:\n" 43 43 EX_TABLE(0b, 0b)
+3 -3
arch/s390/boot/physmem_info.c
··· 67 67 rx.odd = rx2; 68 68 ry = 0x10; /* storage configuration */ 69 69 exception = 1; 70 - asm volatile( 70 + asm_inline volatile( 71 71 " diag %[rx],%[ry],0x260\n" 72 72 "0: lhi %[exc],0\n" 73 73 "1:\n" ··· 105 105 { 106 106 unsigned long storage_limit; 107 107 108 - asm volatile( 108 + asm_inline volatile( 109 109 " lghi %%r1,%[subcode]\n" 110 110 " lghi %%r2,0\n" 111 111 " diag %%r2,%%r4,0x500\n" ··· 126 126 int cc, exception; 127 127 128 128 exception = 1; 129 - asm volatile( 129 + asm_inline volatile( 130 130 " tprot 0(%[addr]),0\n" 131 131 "0: lhi %[exc],0\n" 132 132 "1:\n"
+2 -2
arch/s390/boot/startup.c
··· 77 77 int rc = 1; 78 78 79 79 cpu = stap(); 80 - asm volatile( 80 + asm_inline volatile( 81 81 " diag %[cpu],%%r0,0x9c\n" 82 82 "0: lhi %[rc],0\n" 83 83 "1:\n" ··· 138 138 int rc = 1; 139 139 140 140 /* Test ESSA_GET_STATE */ 141 - asm volatile( 141 + asm_inline volatile( 142 142 " .insn rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n" 143 143 "0: lhi %[rc],0\n" 144 144 "1:\n"
+3 -3
arch/s390/include/asm/cpu_mf.h
··· 171 171 { 172 172 int rc = -EINVAL; 173 173 174 - asm volatile ( 174 + asm_inline volatile ( 175 175 "0: qctri %1\n" 176 176 "1: lhi %0,0\n" 177 177 "2:\n" ··· 185 185 { 186 186 int cc; 187 187 188 - asm volatile ( 188 + asm_inline volatile ( 189 189 " lcctl %[ctl]\n" 190 190 CC_IPM(cc) 191 191 : CC_OUT(cc, cc) ··· 200 200 u64 _content; 201 201 int cc; 202 202 203 - asm volatile ( 203 + asm_inline volatile ( 204 204 " ecctr %[_content],%[ctr]\n" 205 205 CC_IPM(cc) 206 206 : CC_OUT(cc, cc), [_content] "=d" (_content)
+1 -1
arch/s390/include/asm/diag.h
··· 66 66 end_addr = pfn_to_phys(start_pfn + num_pfn - 1); 67 67 68 68 diag_stat_inc(DIAG_STAT_X010); 69 - asm volatile( 69 + asm_inline volatile( 70 70 "0: diag %0,%1,0x10\n" 71 71 "1: nopr %%r7\n" 72 72 EX_TABLE(0b, 1b)
+5 -1
arch/s390/include/asm/processor.h
··· 416 416 417 417 static __always_inline void bpon(void) 418 418 { 419 - asm volatile(ALTERNATIVE("nop", ".insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82))); 419 + asm_inline volatile( 420 + ALTERNATIVE(" nop\n", 421 + " .insn rrf,0xb2e80000,0,0,13,0\n", 422 + ALT_SPEC(82)) 423 + ); 420 424 } 421 425 422 426 #endif /* __ASSEMBLY__ */
+7 -7
arch/s390/include/asm/uaccess.h
··· 147 147 { \ 148 148 int rc; \ 149 149 \ 150 - asm volatile( \ 150 + asm_inline volatile( \ 151 151 " llilh %%r0,%[spec]\n" \ 152 152 "0: mvcos %[to],%[from],%[size]\n" \ 153 153 "1: lhi %[rc],0\n" \ ··· 263 263 { \ 264 264 int rc; \ 265 265 \ 266 - asm volatile( \ 266 + asm_inline volatile( \ 267 267 " lhi %%r0,%[spec]\n" \ 268 268 "0: mvcos %[to],%[from],%[size]\n" \ 269 269 "1: lhi %[rc],0\n" \ ··· 490 490 _old = ((unsigned int)old & 0xff) << shift; 491 491 _new = ((unsigned int)new & 0xff) << shift; 492 492 mask = ~(0xff << shift); 493 - asm volatile( 493 + asm_inline volatile( 494 494 " spka 0(%[key])\n" 495 495 " sacf 256\n" 496 496 " llill %[count],%[max_loops]\n" ··· 538 538 _old = ((unsigned int)old & 0xffff) << shift; 539 539 _new = ((unsigned int)new & 0xffff) << shift; 540 540 mask = ~(0xffff << shift); 541 - asm volatile( 541 + asm_inline volatile( 542 542 " spka 0(%[key])\n" 543 543 " sacf 256\n" 544 544 " llill %[count],%[max_loops]\n" ··· 580 580 case 4: { 581 581 unsigned int prev = old; 582 582 583 - asm volatile( 583 + asm_inline volatile( 584 584 " spka 0(%[key])\n" 585 585 " sacf 256\n" 586 586 "0: cs %[prev],%[new],%[address]\n" ··· 601 601 case 8: { 602 602 unsigned long prev = old; 603 603 604 - asm volatile( 604 + asm_inline volatile( 605 605 " spka 0(%[key])\n" 606 606 " sacf 256\n" 607 607 "0: csg %[prev],%[new],%[address]\n" ··· 622 622 case 16: { 623 623 __uint128_t prev = old; 624 624 625 - asm volatile( 625 + asm_inline volatile( 626 626 " spka 0(%[key])\n" 627 627 " sacf 256\n" 628 628 "0: cdsg %[prev],%[new],%[address]\n"
+1 -1
arch/s390/include/asm/word-at-a-time.h
··· 52 52 { 53 53 unsigned long data; 54 54 55 - asm volatile( 55 + asm_inline volatile( 56 56 "0: lg %[data],0(%[addr])\n" 57 57 "1: nopr %%r7\n" 58 58 EX_TABLE_ZEROPAD(0b, 1b, %[data], %[addr])
+1 -1
arch/s390/kernel/cert_store.c
··· 235 235 { 236 236 union register_pair rp = { .even = (unsigned long)addr, }; 237 237 238 - asm volatile( 238 + asm_inline volatile( 239 239 " diag %[rp],%[subcode],0x320\n" 240 240 "0: nopr %%r7\n" 241 241 EX_TABLE(0b, 0b)
+2 -2
arch/s390/kernel/diag/diag.c
··· 195 195 { 196 196 union register_pair rp = { .even = *subcode, .odd = size }; 197 197 198 - asm volatile( 198 + asm_inline volatile( 199 199 " diag %[addr],%[rp],0x204\n" 200 200 "0: nopr %%r7\n" 201 201 EX_TABLE(0b,0b) ··· 286 286 int rc = -EOPNOTSUPP; 287 287 288 288 diag_stat_inc(DIAG_STAT_X224); 289 - asm volatile("\n" 289 + asm_inline volatile("\n" 290 290 " diag %[type],%[addr],0x224\n" 291 291 "0: lhi %[rc],0\n" 292 292 "1:\n"
+1 -1
arch/s390/kernel/ipl.c
··· 186 186 187 187 r1.even = addr; 188 188 r1.odd = 0; 189 - asm volatile( 189 + asm_inline volatile( 190 190 " diag %[r1],%[subcode],0x308\n" 191 191 "0: nopr %%r7\n" 192 192 EX_TABLE(0b,0b)
+1 -1
arch/s390/kernel/traps.c
··· 257 257 258 258 if (!IS_ENABLED(CONFIG_BUG)) 259 259 return; 260 - asm volatile( 260 + asm_inline volatile( 261 261 " mc 0,0\n" 262 262 "0: xgr %0,%0\n" 263 263 "1:\n"
+2 -2
arch/s390/mm/pfault.c
··· 56 56 if (pfault_disable) 57 57 return rc; 58 58 diag_stat_inc(DIAG_STAT_X258); 59 - asm volatile( 59 + asm_inline volatile( 60 60 " diag %[refbk],%[rc],0x258\n" 61 61 "0: nopr %%r7\n" 62 62 EX_TABLE(0b, 0b) ··· 78 78 if (pfault_disable) 79 79 return; 80 80 diag_stat_inc(DIAG_STAT_X258); 81 - asm volatile( 81 + asm_inline volatile( 82 82 " diag %[refbk],0,0x258\n" 83 83 "0: nopr %%r7\n" 84 84 EX_TABLE(0b, 0b)
+2 -2
arch/s390/pci/pci_clp.c
··· 56 56 int cc, exception; 57 57 58 58 exception = 1; 59 - asm volatile ( 59 + asm_inline volatile ( 60 60 " .insn rrf,0xb9a00000,%[mask],%[cmd],8,0\n" 61 61 "0: lhi %[exc],0\n" 62 62 "1:\n" ··· 79 79 u64 ignored; 80 80 81 81 exception = 1; 82 - asm volatile ( 82 + asm_inline volatile ( 83 83 " .insn rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n" 84 84 "0: lhi %[exc],0\n" 85 85 "1:\n"
+6 -6
arch/s390/pci/pci_insn.c
··· 160 160 u64 __data; 161 161 162 162 exception = 1; 163 - asm volatile ( 163 + asm_inline volatile ( 164 164 " .insn rre,0xb9d20000,%[data],%[req_off]\n" 165 165 "0: lhi %[exc],0\n" 166 166 "1:\n" ··· 229 229 u64 __data; 230 230 231 231 exception = 1; 232 - asm volatile ( 232 + asm_inline volatile ( 233 233 " .insn rre,0xb9d60000,%[data],%[ioaddr_len]\n" 234 234 "0: lhi %[exc],0\n" 235 235 "1:\n" ··· 267 267 int cc, exception; 268 268 269 269 exception = 1; 270 - asm volatile ( 270 + asm_inline volatile ( 271 271 " .insn rre,0xb9d00000,%[data],%[req_off]\n" 272 272 "0: lhi %[exc],0\n" 273 273 "1:\n" ··· 321 321 int cc, exception; 322 322 323 323 exception = 1; 324 - asm volatile ( 324 + asm_inline volatile ( 325 325 " .insn rre,0xb9d40000,%[data],%[ioaddr_len]\n" 326 326 "0: lhi %[exc],0\n" 327 327 "1:\n" ··· 356 356 int cc, exception; 357 357 358 358 exception = 1; 359 - asm volatile ( 359 + asm_inline volatile ( 360 360 " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n" 361 361 "0: lhi %[exc],0\n" 362 362 "1:\n" ··· 410 410 int cc, exception; 411 411 412 412 exception = 1; 413 - asm volatile ( 413 + asm_inline volatile ( 414 414 " .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[data]\n" 415 415 "0: lhi %[exc],0\n" 416 416 "1:\n"
+3 -3
arch/s390/pci/pci_mmio.c
··· 34 34 int cc, exception; 35 35 36 36 exception = 1; 37 - asm volatile ( 37 + asm_inline volatile ( 38 38 " sacf 256\n" 39 39 "0: .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n" 40 40 "1: lhi %[exc],0\n" ··· 64 64 * address space. pcistg then uses the user mappings. 65 65 */ 66 66 exception = 1; 67 - asm volatile ( 67 + asm_inline volatile ( 68 68 " sacf 256\n" 69 69 "0: llgc %[tmp],0(%[src])\n" 70 70 "4: sllg %[val],%[val],8\n" ··· 211 211 * user address @dst 212 212 */ 213 213 exception = 1; 214 - asm volatile ( 214 + asm_inline volatile ( 215 215 " sacf 256\n" 216 216 "0: .insn rre,0xb9d60000,%[val],%[ioaddr_len]\n" 217 217 "1: lhi %[exc],0\n"
+1 -1
drivers/s390/block/dasd_diag.c
··· 76 76 } addr_type; 77 77 78 78 exception = 1; 79 - asm volatile( 79 + asm_inline volatile( 80 80 " diag %[rx],%[cmd],0x250\n" 81 81 "0: lhi %[exc],0\n" 82 82 "1:\n"
+1 -1
drivers/s390/char/diag_ftp.c
··· 106 106 int rc; 107 107 108 108 diag_stat_inc(DIAG_STAT_X2C4); 109 - asm volatile( 109 + asm_inline volatile( 110 110 " diag %[addr],%[cmd],0x2c4\n" 111 111 "0: j 2f\n" 112 112 "1: la %[rc],%[err]\n"
+1 -1
drivers/s390/char/sclp.h
··· 318 318 int cc, exception; 319 319 320 320 exception = 1; 321 - asm volatile( 321 + asm_inline volatile( 322 322 "0: .insn rre,0xb2200000,%[cmd],%[sccb]\n" /* servc */ 323 323 "1: lhi %[exc],0\n" 324 324 "2:\n"
+4 -4
drivers/s390/cio/ioasm.c
··· 22 22 int ccode, exception; 23 23 24 24 exception = 1; 25 - asm volatile( 25 + asm_inline volatile( 26 26 " lgr 1,%[r1]\n" 27 27 " stsch %[addr]\n" 28 28 "0: lhi %[exc],0\n" ··· 52 52 int ccode, exception; 53 53 54 54 exception = 1; 55 - asm volatile( 55 + asm_inline volatile( 56 56 " lgr 1,%[r1]\n" 57 57 " msch %[addr]\n" 58 58 "0: lhi %[exc],0\n" ··· 106 106 int ccode, exception; 107 107 108 108 exception = 1; 109 - asm volatile( 109 + asm_inline volatile( 110 110 " lgr 1,%[r1]\n" 111 111 " ssch %[addr]\n" 112 112 "0: lhi %[exc],0\n" ··· 178 178 int cc, exception; 179 179 180 180 exception = 1; 181 - asm volatile( 181 + asm_inline volatile( 182 182 " .insn rre,0xb25f0000,%[chsc_area],0\n" 183 183 "0: lhi %[exc],0\n" 184 184 "1:\n"