Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390: raise minimum supported machine generation to z10

Machine generations up to z9 (released in May 2006) have been officially
out of service for several years now (z9 end of service - January 31, 2019).
No distributions build kernels supporting those old machine generations
anymore, except Debian, which seems to pick the oldest supported
generation. The team supporting Debian on s390 has been notified about
the change.

Raising the minimum supported machine generation to z10 helps to reduce
maintenance cost and effectively removes code which is not getting
enough testing coverage due to the lack of older hardware and
distribution support. Besides that, this unblocks some optimization
opportunities and allows the use of a wider instruction set in asm files
for future feature implementation. Due to this change, the Spectre
mitigation and usercopy implementations could be drastically simplified
and many newer instructions could be converted from ".insn" encoding to
instruction names.

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>

+38 -435
+2 -66
arch/s390/Kconfig
··· 120 120 select ARCH_WANT_IPC_PARSE_VERSION 121 121 select BUILDTIME_TABLE_SORT 122 122 select CLONE_BACKWARDS2 123 - select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES 124 123 select DMA_OPS if PCI 125 124 select DYNAMIC_FTRACE if FUNCTION_TRACER 126 125 select GENERIC_ALLOCATOR ··· 229 230 230 231 menu "Processor type and features" 231 232 232 - config HAVE_MARCH_Z900_FEATURES 233 - def_bool n 234 - 235 - config HAVE_MARCH_Z990_FEATURES 236 - def_bool n 237 - select HAVE_MARCH_Z900_FEATURES 238 - 239 - config HAVE_MARCH_Z9_109_FEATURES 240 - def_bool n 241 - select HAVE_MARCH_Z990_FEATURES 242 - 243 233 config HAVE_MARCH_Z10_FEATURES 244 234 def_bool n 245 - select HAVE_MARCH_Z9_109_FEATURES 246 235 247 236 config HAVE_MARCH_Z196_FEATURES 248 237 def_bool n ··· 256 269 prompt "Processor type" 257 270 default MARCH_Z196 258 271 259 - config MARCH_Z900 260 - bool "IBM zSeries model z800 and z900" 261 - select HAVE_MARCH_Z900_FEATURES 262 - depends on $(cc-option,-march=z900) 263 - help 264 - Select this to enable optimizations for model z800/z900 (2064 and 265 - 2066 series). This will enable some optimizations that are not 266 - available on older ESA/390 (31 Bit) only CPUs. 267 - 268 - config MARCH_Z990 269 - bool "IBM zSeries model z890 and z990" 270 - select HAVE_MARCH_Z990_FEATURES 271 - depends on $(cc-option,-march=z990) 272 - help 273 - Select this to enable optimizations for model z890/z990 (2084 and 274 - 2086 series). The kernel will be slightly faster but will not work 275 - on older machines. 276 - 277 - config MARCH_Z9_109 278 - bool "IBM System z9" 279 - select HAVE_MARCH_Z9_109_FEATURES 280 - depends on $(cc-option,-march=z9-109) 281 - help 282 - Select this to enable optimizations for IBM System z9 (2094 and 283 - 2096 series). The kernel will be slightly faster but will not work 284 - on older machines. 
285 - 286 272 config MARCH_Z10 287 273 bool "IBM System z10" 288 274 select HAVE_MARCH_Z10_FEATURES 289 275 depends on $(cc-option,-march=z10) 290 276 help 291 - Select this to enable optimizations for IBM System z10 (2097 and 292 - 2098 series). The kernel will be slightly faster but will not work 293 - on older machines. 277 + Select this to enable optimizations for IBM System z10 (2097 and 2098 278 + series). This is the oldest machine generation currently supported. 294 279 295 280 config MARCH_Z196 296 281 bool "IBM zEnterprise 114 and 196" ··· 311 352 312 353 endchoice 313 354 314 - config MARCH_Z900_TUNE 315 - def_bool TUNE_Z900 || MARCH_Z900 && TUNE_DEFAULT 316 - 317 - config MARCH_Z990_TUNE 318 - def_bool TUNE_Z990 || MARCH_Z990 && TUNE_DEFAULT 319 - 320 - config MARCH_Z9_109_TUNE 321 - def_bool TUNE_Z9_109 || MARCH_Z9_109 && TUNE_DEFAULT 322 - 323 355 config MARCH_Z10_TUNE 324 356 def_bool TUNE_Z10 || MARCH_Z10 && TUNE_DEFAULT 325 357 ··· 346 396 Tune the generated code for the target processor for which the kernel 347 397 will be compiled. 348 398 349 - config TUNE_Z900 350 - bool "IBM zSeries model z800 and z900" 351 - depends on $(cc-option,-mtune=z900) 352 - 353 - config TUNE_Z990 354 - bool "IBM zSeries model z890 and z990" 355 - depends on $(cc-option,-mtune=z990) 356 - 357 - config TUNE_Z9_109 358 - bool "IBM System z9" 359 - depends on $(cc-option,-mtune=z9-109) 360 - 361 399 config TUNE_Z10 362 400 bool "IBM System z10" 363 - depends on $(cc-option,-mtune=z10) 364 401 365 402 config TUNE_Z196 366 403 bool "IBM zEnterprise 114 and 196" ··· 536 599 config EXPOLINE_EXTERN 537 600 def_bool n 538 601 depends on EXPOLINE 539 - depends on HAVE_MARCH_Z10_FEATURES 540 602 depends on CC_IS_GCC && GCC_VERSION >= 110200 541 603 depends on $(success,$(srctree)/arch/s390/tools/gcc-thunk-extern.sh $(CC)) 542 604 prompt "Generate expolines as extern functions."
-6
arch/s390/Makefile
··· 36 36 37 37 export LD_BFD 38 38 39 - mflags-$(CONFIG_MARCH_Z900) := -march=z900 40 - mflags-$(CONFIG_MARCH_Z990) := -march=z990 41 - mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109 42 39 mflags-$(CONFIG_MARCH_Z10) := -march=z10 43 40 mflags-$(CONFIG_MARCH_Z196) := -march=z196 44 41 mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12 ··· 48 51 aflags-y += $(mflags-y) 49 52 cflags-y += $(mflags-y) 50 53 51 - cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900 52 - cflags-$(CONFIG_MARCH_Z990_TUNE) += -mtune=z990 53 - cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109 54 54 cflags-$(CONFIG_MARCH_Z10_TUNE) += -mtune=z10 55 55 cflags-$(CONFIG_MARCH_Z196_TUNE) += -mtune=z196 56 56 cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12
-12
arch/s390/include/asm/bitops.h
··· 256 256 return test_bit(nr ^ (BITS_PER_LONG - 1), ptr); 257 257 } 258 258 259 - #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES 260 - 261 259 /** 262 260 * __flogr - find leftmost one 263 261 * @word - The word to search ··· 373 375 { 374 376 return fls64(word); 375 377 } 376 - 377 - #else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */ 378 - 379 - #include <asm-generic/bitops/__ffs.h> 380 - #include <asm-generic/bitops/ffs.h> 381 - #include <asm-generic/bitops/__fls.h> 382 - #include <asm-generic/bitops/fls.h> 383 - #include <asm-generic/bitops/fls64.h> 384 - 385 - #endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */ 386 378 387 379 #include <asm-generic/bitops/ffz.h> 388 380 #include <asm-generic/bitops/hweight.h>
+3 -5
arch/s390/include/asm/lowcore.h
··· 163 163 __u64 gmap; /* 0x03d0 */ 164 164 __u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */ 165 165 166 - /* br %r1 trampoline */ 167 - __u16 br_r1_trampoline; /* 0x0400 */ 168 - __u32 return_lpswe; /* 0x0402 */ 169 - __u32 return_mcck_lpswe; /* 0x0406 */ 170 - __u8 pad_0x040a[0x0e00-0x040a]; /* 0x040a */ 166 + __u32 return_lpswe; /* 0x0400 */ 167 + __u32 return_mcck_lpswe; /* 0x0404 */ 168 + __u8 pad_0x0408[0x0e00-0x0408]; /* 0x0408 */ 171 169 172 170 /* 173 171 * 0xe00 contains the address of the IPL Parameter Information
-34
arch/s390/include/asm/nospec-insn.h
··· 10 10 11 11 #ifdef CC_USING_EXPOLINE 12 12 13 - _LC_BR_R1 = __LC_BR_R1 14 - 15 13 /* 16 14 * The expoline macros are used to create thunks in the same format 17 15 * as gcc generates them. The 'comdat' section flag makes sure that ··· 37 39 .popsection 38 40 .endm 39 41 40 - #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES 41 42 .macro __THUNK_PROLOG_BR r1,r2 42 43 __THUNK_PROLOG_NAME __s390_indirect_jump_r\r1 43 44 .endm ··· 52 55 .macro __THUNK_BRASL r1,r2,r3 53 56 brasl \r1,__s390_indirect_jump_r\r2 54 57 .endm 55 - #else 56 - .macro __THUNK_PROLOG_BR r1,r2 57 - __THUNK_PROLOG_NAME __s390_indirect_jump_r\r2\()use_r\r1 58 - .endm 59 - 60 - .macro __THUNK_EPILOG_BR r1,r2 61 - __THUNK_EPILOG_NAME __s390_indirect_jump_r\r2\()use_r\r1 62 - .endm 63 - 64 - .macro __THUNK_BR r1,r2 65 - jg __s390_indirect_jump_r\r2\()use_r\r1 66 - .endm 67 - 68 - .macro __THUNK_BRASL r1,r2,r3 69 - brasl \r1,__s390_indirect_jump_r\r3\()use_r\r2 70 - .endm 71 - #endif 72 58 73 59 .macro __DECODE_RR expand,reg,ruse 74 60 .set __decode_fail,1 ··· 92 112 .endm 93 113 94 114 .macro __THUNK_EX_BR reg,ruse 95 - # Be very careful when adding instructions to this macro! 96 - # The ALTERNATIVE replacement code has a .+10 which targets 97 - # the "br \reg" after the code has been patched. 98 - #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES 99 115 exrl 0,555f 100 116 j . 101 - #else 102 - .ifc \reg,%r1 103 - ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35 104 - j . 105 - .else 106 - larl \ruse,555f 107 - ex 0,0(\ruse) 108 - j . 109 - .endif 110 - #endif 111 117 555: br \reg 112 118 .endm 113 119
-4
arch/s390/include/asm/timex.h
··· 187 187 188 188 static inline unsigned long get_tod_clock_fast(void) 189 189 { 190 - #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES 191 190 unsigned long clk; 192 191 193 192 asm volatile("stckf %0" : "=Q" (clk) : : "cc"); 194 193 return clk; 195 - #else 196 - return get_tod_clock(); 197 - #endif 198 194 } 199 195 200 196 static inline cycles_t get_cycles(void)
-18
arch/s390/include/asm/uaccess.h
··· 92 92 }; 93 93 }; 94 94 95 - #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES 96 - 97 95 #define __put_get_user_asm(to, from, size, oac_spec) \ 98 96 ({ \ 99 97 int __rc; \ ··· 184 186 } 185 187 return rc; 186 188 } 187 - 188 - #else /* CONFIG_HAVE_MARCH_Z10_FEATURES */ 189 - 190 - static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) 191 - { 192 - size = raw_copy_to_user(ptr, x, size); 193 - return size ? -EFAULT : 0; 194 - } 195 - 196 - static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) 197 - { 198 - size = raw_copy_from_user(x, ptr, size); 199 - return size ? -EFAULT : 0; 200 - } 201 - 202 - #endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */ 203 189 204 190 /* 205 191 * These are the main single-value transfer routines. They automatically
-1
arch/s390/kernel/asm-offsets.c
··· 122 122 OFFSET(__LC_LPP, lowcore, lpp); 123 123 OFFSET(__LC_CURRENT_PID, lowcore, current_pid); 124 124 OFFSET(__LC_GMAP, lowcore, gmap); 125 - OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline); 126 125 OFFSET(__LC_LAST_BREAK, lowcore, last_break); 127 126 /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ 128 127 OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
-6
arch/s390/kernel/cache.c
··· 70 70 struct cacheinfo *cache; 71 71 int idx; 72 72 73 - if (!test_facility(34)) 74 - return; 75 73 this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask)); 76 74 for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) { 77 75 cache = this_cpu_ci->info_list + idx; ··· 129 131 union cache_topology ct; 130 132 enum cache_type ctype; 131 133 132 - if (!test_facility(34)) 133 - return -EOPNOTSUPP; 134 134 if (!this_cpu_ci) 135 135 return -EINVAL; 136 136 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); ··· 152 156 union cache_topology ct; 153 157 enum cache_type ctype; 154 158 155 - if (!test_facility(34)) 156 - return -EOPNOTSUPP; 157 159 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); 158 160 for (idx = 0, level = 0; level < this_cpu_ci->num_levels && 159 161 idx < this_cpu_ci->num_leaves; idx++, level++) {
+2 -15
arch/s390/kernel/ftrace.c
··· 60 60 #ifdef CONFIG_EXPOLINE 61 61 asm( 62 62 " .align 16\n" 63 - "ftrace_shared_hotpatch_trampoline_ex:\n" 64 - " lmg %r0,%r1,2(%r1)\n" 65 - " ex %r0," __stringify(__LC_BR_R1) "(%r0)\n" 66 - " j .\n" 67 - "ftrace_shared_hotpatch_trampoline_ex_end:\n" 68 - ); 69 - 70 - asm( 71 - " .align 16\n" 72 63 "ftrace_shared_hotpatch_trampoline_exrl:\n" 73 64 " lmg %r0,%r1,2(%r1)\n" 74 65 " .insn ril,0xc60000000000,%r0,0f\n" /* exrl */ ··· 81 90 tend = ftrace_shared_hotpatch_trampoline_br_end; 82 91 #ifdef CONFIG_EXPOLINE 83 92 if (!nospec_disable) { 84 - tstart = ftrace_shared_hotpatch_trampoline_ex; 85 - tend = ftrace_shared_hotpatch_trampoline_ex_end; 86 - if (test_facility(35)) { /* exrl */ 87 - tstart = ftrace_shared_hotpatch_trampoline_exrl; 88 - tend = ftrace_shared_hotpatch_trampoline_exrl_end; 89 - } 93 + tstart = ftrace_shared_hotpatch_trampoline_exrl; 94 + tend = ftrace_shared_hotpatch_trampoline_exrl_end; 90 95 } 91 96 #endif /* CONFIG_EXPOLINE */ 92 97 if (end)
-2
arch/s390/kernel/ftrace.h
··· 16 16 extern struct ftrace_hotpatch_trampoline __ftrace_hotpatch_trampolines_end[]; 17 17 extern const char ftrace_shared_hotpatch_trampoline_br[]; 18 18 extern const char ftrace_shared_hotpatch_trampoline_br_end[]; 19 - extern const char ftrace_shared_hotpatch_trampoline_ex[]; 20 - extern const char ftrace_shared_hotpatch_trampoline_ex_end[]; 21 19 extern const char ftrace_shared_hotpatch_trampoline_exrl[]; 22 20 extern const char ftrace_shared_hotpatch_trampoline_exrl_end[]; 23 21 extern const char ftrace_plt_template[];
-11
arch/s390/kernel/mcount.S
··· 35 35 .if \allregs == 1 36 36 # save psw mask 37 37 # don't put any instructions clobbering CC before this point 38 - #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES 39 38 epsw %r1,%r14 40 39 risbg %r14,%r1,0,31,32 41 - #else 42 - epsw %r14,%r1 43 - sllg %r14,%r14,32 44 - lr %r14,%r1 45 - #endif 46 40 .endif 47 41 48 42 lgr %r1,%r15 ··· 52 58 53 59 .if \allregs == 1 54 60 stg %r14,(STACK_PTREGS_PSW)(%r15) 55 - #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES 56 61 mvghi STACK_PTREGS_FLAGS(%r15),_PIF_FTRACE_FULL_REGS 57 - #else 58 - lghi %r14,_PIF_FTRACE_FULL_REGS 59 - stg %r14,STACK_PTREGS_FLAGS(%r15) 60 - #endif 61 62 .else 62 63 xc STACK_PTREGS_FLAGS(8,%r15),STACK_PTREGS_FLAGS(%r15) 63 64 .endif
+3 -9
arch/s390/kernel/module.c
··· 517 517 518 518 ij = me->core_layout.base + me->arch.plt_offset + 519 519 me->arch.plt_size - PLT_ENTRY_SIZE; 520 - if (test_facility(35)) { 521 - ij[0] = 0xc6000000; /* exrl %r0,.+10 */ 522 - ij[1] = 0x0005a7f4; /* j . */ 523 - ij[2] = 0x000007f1; /* br %r1 */ 524 - } else { 525 - ij[0] = 0x44000000 | (unsigned int) 526 - offsetof(struct lowcore, br_r1_trampoline); 527 - ij[1] = 0xa7f40000; /* j . */ 528 - } 520 + ij[0] = 0xc6000000; /* exrl %r0,.+10 */ 521 + ij[1] = 0x0005a7f4; /* j . */ 522 + ij[2] = 0x000007f1; /* br %r1 */ 529 523 } 530 524 531 525 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-6
arch/s390/kernel/nospec-branch.c
··· 118 118 if (thunk[0] == 0xc6 && thunk[1] == 0x00) 119 119 /* exrl %r0,<target-br> */ 120 120 br = thunk + (*(int *)(thunk + 2)) * 2; 121 - else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 && 122 - thunk[6] == 0x44 && thunk[7] == 0x00 && 123 - (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 && 124 - (thunk[1] & 0xf0) == (thunk[8] & 0xf0)) 125 - /* larl %rx,<target br> + ex %r0,0(%rx) */ 126 - br = thunk + (*(int *)(thunk + 2)) * 2; 127 121 else 128 122 continue; 129 123 if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
+5 -6
arch/s390/kernel/perf_cpum_cf.c
··· 1451 1451 /* Get the CPU speed, try sampling facility first and CPU attributes second. */ 1452 1452 static void cfdiag_get_cpu_speed(void) 1453 1453 { 1454 + unsigned long mhz; 1455 + 1454 1456 if (cpum_sf_avail()) { /* Sampling facility first */ 1455 1457 struct hws_qsi_info_block si; 1456 1458 ··· 1466 1464 /* Fallback: CPU speed extract static part. Used in case 1467 1465 * CPU Measurement Sampling Facility is turned off. 1468 1466 */ 1469 - if (test_facility(34)) { 1470 - unsigned long mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0); 1471 - 1472 - if (mhz != -1UL) 1473 - cfdiag_cpu_speed = mhz & 0xffffffff; 1474 - } 1467 + mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0); 1468 + if (mhz != -1UL) 1469 + cfdiag_cpu_speed = mhz & 0xffffffff; 1475 1470 } 1476 1471 1477 1472 static int cfset_init(void)
+3 -19
arch/s390/kernel/processor.c
··· 172 172 static int __init setup_hwcaps(void) 173 173 { 174 174 /* instructions named N3, "backported" to esa-mode */ 175 - if (test_facility(0)) 176 - elf_hwcap |= HWCAP_ESAN3; 175 + elf_hwcap |= HWCAP_ESAN3; 177 176 178 177 /* z/Architecture mode active */ 179 178 elf_hwcap |= HWCAP_ZARCH; ··· 190 191 elf_hwcap |= HWCAP_LDISP; 191 192 192 193 /* extended-immediate */ 193 - if (test_facility(21)) 194 - elf_hwcap |= HWCAP_EIMM; 194 + elf_hwcap |= HWCAP_EIMM; 195 195 196 196 /* extended-translation facility 3 enhancement */ 197 197 if (test_facility(22) && test_facility(30)) ··· 260 262 get_cpu_id(&cpu_id); 261 263 add_device_randomness(&cpu_id, sizeof(cpu_id)); 262 264 switch (cpu_id.machine) { 263 - case 0x2064: 264 - case 0x2066: 265 - default: /* Use "z900" as default for 64 bit kernels. */ 266 - strcpy(elf_platform, "z900"); 267 - break; 268 - case 0x2084: 269 - case 0x2086: 270 - strcpy(elf_platform, "z990"); 271 - break; 272 - case 0x2094: 273 - case 0x2096: 274 - strcpy(elf_platform, "z9-109"); 275 - break; 276 - case 0x2097: 277 - case 0x2098: 265 + default: /* Use "z10" as default. */ 278 266 strcpy(elf_platform, "z10"); 279 267 break; 280 268 case 0x2817:
-1
arch/s390/kernel/setup.c
··· 490 490 lc->spinlock_lockval = arch_spin_lockval(0); 491 491 lc->spinlock_index = 0; 492 492 arch_spin_lock_setup(0); 493 - lc->br_r1_trampoline = 0x07f1; /* br %r1 */ 494 493 lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW); 495 494 lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); 496 495 lc->preempt_count = PREEMPT_DISABLED;
-1
arch/s390/kernel/smp.c
··· 207 207 lc->cpu_nr = cpu; 208 208 lc->spinlock_lockval = arch_spin_lockval(cpu); 209 209 lc->spinlock_index = 0; 210 - lc->br_r1_trampoline = 0x07f1; /* br %r1 */ 211 210 lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW); 212 211 lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); 213 212 lc->preempt_count = PREEMPT_DISABLED;
+3 -13
arch/s390/kernel/uprobes.c
··· 177 177 __typeof__(*(ptr)) input; \ 178 178 int __rc = 0; \ 179 179 \ 180 - if (!test_facility(34)) \ 181 - __rc = EMU_ILLEGAL_OP; \ 182 - else if ((u64 __force)ptr & mask) \ 180 + if ((u64 __force)ptr & mask) \ 183 181 __rc = EMU_SPECIFICATION; \ 184 182 else if (get_user(input, ptr)) \ 185 183 __rc = EMU_ADDRESSING; \ ··· 192 194 __typeof__(ptr) __ptr = (ptr); \ 193 195 int __rc = 0; \ 194 196 \ 195 - if (!test_facility(34)) \ 196 - __rc = EMU_ILLEGAL_OP; \ 197 - else if ((u64 __force)__ptr & mask) \ 197 + if ((u64 __force)__ptr & mask) \ 198 198 __rc = EMU_SPECIFICATION; \ 199 199 else if (put_user(*(input), __ptr)) \ 200 200 __rc = EMU_ADDRESSING; \ ··· 209 213 __typeof__(*(ptr)) input; \ 210 214 int __rc = 0; \ 211 215 \ 212 - if (!test_facility(34)) \ 213 - __rc = EMU_ILLEGAL_OP; \ 214 - else if ((u64 __force)ptr & mask) \ 216 + if ((u64 __force)ptr & mask) \ 215 217 __rc = EMU_SPECIFICATION; \ 216 218 else if (get_user(input, ptr)) \ 217 219 __rc = EMU_ADDRESSING; \ ··· 321 327 break; 322 328 case 0xc6: 323 329 switch (insn->opc1) { 324 - case 0x02: /* pfdrl */ 325 - if (!test_facility(34)) 326 - rc = EMU_ILLEGAL_OP; 327 - break; 328 330 case 0x04: /* cghrl */ 329 331 rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64); 330 332 break;
+7 -163
arch/s390/lib/uaccess.c
··· 8 8 * Gerald Schaefer (gerald.schaefer@de.ibm.com) 9 9 */ 10 10 11 - #include <linux/jump_label.h> 12 11 #include <linux/uaccess.h> 13 12 #include <linux/export.h> 14 - #include <linux/errno.h> 15 13 #include <linux/mm.h> 16 14 #include <asm/asm-extable.h> 17 - #include <asm/mmu_context.h> 18 - #include <asm/facility.h> 19 15 20 16 #ifdef CONFIG_DEBUG_ENTRY 21 17 void debug_user_asce(int exit) ··· 31 35 } 32 36 #endif /*CONFIG_DEBUG_ENTRY */ 33 37 34 - #ifndef CONFIG_HAVE_MARCH_Z10_FEATURES 35 - static DEFINE_STATIC_KEY_FALSE(have_mvcos); 36 - 37 - static int __init uaccess_init(void) 38 - { 39 - if (test_facility(27)) 40 - static_branch_enable(&have_mvcos); 41 - return 0; 42 - } 43 - early_initcall(uaccess_init); 44 - 45 - static inline int copy_with_mvcos(void) 46 - { 47 - if (static_branch_likely(&have_mvcos)) 48 - return 1; 49 - return 0; 50 - } 51 - #else 52 - static inline int copy_with_mvcos(void) 53 - { 54 - return 1; 55 - } 56 - #endif 57 - 58 - static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr, 59 - unsigned long size, unsigned long key) 38 + static unsigned long raw_copy_from_user_key(void *to, const void __user *from, 39 + unsigned long size, unsigned long key) 60 40 { 61 41 unsigned long tmp1, tmp2; 62 42 union oac spec = { ··· 62 90 "4: slgr %0,%0\n" 63 91 "5:\n" 64 92 EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) 65 - : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) 93 + : "+a" (size), "+a" (from), "+a" (to), "+a" (tmp1), "=a" (tmp2) 66 94 : [spec] "d" (spec.val) 67 95 : "cc", "memory", "0"); 68 96 return size; 69 - } 70 - 71 - static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr, 72 - unsigned long size, unsigned long key) 73 - { 74 - unsigned long tmp1, tmp2; 75 - 76 - tmp1 = -256UL; 77 - asm volatile( 78 - " sacf 0\n" 79 - "0: mvcp 0(%0,%2),0(%1),%[key]\n" 80 - "7: jz 5f\n" 81 - "1: algr %0,%3\n" 82 - " la %1,256(%1)\n" 83 - " la %2,256(%2)\n" 84 - "2: 
mvcp 0(%0,%2),0(%1),%[key]\n" 85 - "8: jnz 1b\n" 86 - " j 5f\n" 87 - "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ 88 - " lghi %3,-4096\n" 89 - " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ 90 - " slgr %4,%1\n" 91 - " clgr %0,%4\n" /* copy crosses next page boundary? */ 92 - " jnh 6f\n" 93 - "4: mvcp 0(%4,%2),0(%1),%[key]\n" 94 - "9: slgr %0,%4\n" 95 - " j 6f\n" 96 - "5: slgr %0,%0\n" 97 - "6: sacf 768\n" 98 - EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) 99 - EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b) 100 - : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) 101 - : [key] "d" (key << 4) 102 - : "cc", "memory"); 103 - return size; 104 - } 105 - 106 - static unsigned long raw_copy_from_user_key(void *to, const void __user *from, 107 - unsigned long n, unsigned long key) 108 - { 109 - if (copy_with_mvcos()) 110 - return copy_from_user_mvcos(to, from, n, key); 111 - return copy_from_user_mvcp(to, from, n, key); 112 97 } 113 98 114 99 unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) ··· 90 161 } 91 162 EXPORT_SYMBOL(_copy_from_user_key); 92 163 93 - static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x, 94 - unsigned long size, unsigned long key) 164 + static unsigned long raw_copy_to_user_key(void __user *to, const void *from, 165 + unsigned long size, unsigned long key) 95 166 { 96 167 unsigned long tmp1, tmp2; 97 168 union oac spec = { ··· 121 192 "4: slgr %0,%0\n" 122 193 "5:\n" 123 194 EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) 124 - : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) 195 + : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2) 125 196 : [spec] "d" (spec.val) 126 197 : "cc", "memory", "0"); 127 198 return size; 128 - } 129 - 130 - static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x, 131 - unsigned long size, unsigned long key) 132 - { 133 - unsigned long tmp1, tmp2; 134 - 135 - tmp1 = -256UL; 136 - asm 
volatile( 137 - " sacf 0\n" 138 - "0: mvcs 0(%0,%1),0(%2),%[key]\n" 139 - "7: jz 5f\n" 140 - "1: algr %0,%3\n" 141 - " la %1,256(%1)\n" 142 - " la %2,256(%2)\n" 143 - "2: mvcs 0(%0,%1),0(%2),%[key]\n" 144 - "8: jnz 1b\n" 145 - " j 5f\n" 146 - "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ 147 - " lghi %3,-4096\n" 148 - " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ 149 - " slgr %4,%1\n" 150 - " clgr %0,%4\n" /* copy crosses next page boundary? */ 151 - " jnh 6f\n" 152 - "4: mvcs 0(%4,%1),0(%2),%[key]\n" 153 - "9: slgr %0,%4\n" 154 - " j 6f\n" 155 - "5: slgr %0,%0\n" 156 - "6: sacf 768\n" 157 - EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) 158 - EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b) 159 - : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) 160 - : [key] "d" (key << 4) 161 - : "cc", "memory"); 162 - return size; 163 - } 164 - 165 - static unsigned long raw_copy_to_user_key(void __user *to, const void *from, 166 - unsigned long n, unsigned long key) 167 - { 168 - if (copy_with_mvcos()) 169 - return copy_to_user_mvcos(to, from, n, key); 170 - return copy_to_user_mvcs(to, from, n, key); 171 199 } 172 200 173 201 unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) ··· 144 258 } 145 259 EXPORT_SYMBOL(_copy_to_user_key); 146 260 147 - static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size) 261 + unsigned long __clear_user(void __user *to, unsigned long size) 148 262 { 149 263 unsigned long tmp1, tmp2; 150 264 union oac spec = { ··· 175 289 : "a" (empty_zero_page), [spec] "d" (spec.val) 176 290 : "cc", "memory", "0"); 177 291 return size; 178 - } 179 - 180 - static inline unsigned long clear_user_xc(void __user *to, unsigned long size) 181 - { 182 - unsigned long tmp1, tmp2; 183 - 184 - asm volatile( 185 - " sacf 256\n" 186 - " aghi %0,-1\n" 187 - " jo 5f\n" 188 - " bras %3,3f\n" 189 - " xc 0(1,%1),0(%1)\n" 190 - "0: aghi %0,257\n" 191 - " la %2,255(%1)\n" /* %2 = ptr + 255 */ 192 - " srl %2,12\n" 193 - 
" sll %2,12\n" /* %2 = (ptr + 255) & -4096 */ 194 - " slgr %2,%1\n" 195 - " clgr %0,%2\n" /* clear crosses next page boundary? */ 196 - " jnh 5f\n" 197 - " aghi %2,-1\n" 198 - "1: ex %2,0(%3)\n" 199 - " aghi %2,1\n" 200 - " slgr %0,%2\n" 201 - " j 5f\n" 202 - "2: xc 0(256,%1),0(%1)\n" 203 - " la %1,256(%1)\n" 204 - "3: aghi %0,-256\n" 205 - " jnm 2b\n" 206 - "4: ex %0,0(%3)\n" 207 - "5: slgr %0,%0\n" 208 - "6: sacf 768\n" 209 - EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) 210 - : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2) 211 - : : "cc", "memory"); 212 - return size; 213 - } 214 - 215 - unsigned long __clear_user(void __user *to, unsigned long size) 216 - { 217 - if (copy_with_mvcos()) 218 - return clear_user_mvcos(to, size); 219 - return clear_user_xc(to, size); 220 292 } 221 293 EXPORT_SYMBOL(__clear_user);
+2 -6
arch/s390/mm/vmem.c
··· 584 584 __set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT, 585 585 SET_MEMORY_RO | SET_MEMORY_X); 586 586 587 - if (nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)) { 588 - /* 589 - * Lowcore must be executable for LPSWE 590 - * and expoline trampoline branch instructions. 591 - */ 587 + /* lowcore must be executable for LPSWE */ 588 + if (!static_key_enabled(&cpu_has_bear)) 592 589 set_memory_x(0, 1); 593 - } 594 590 595 591 pr_info("Write protected kernel read-only data: %luk\n", 596 592 (unsigned long)(__end_rodata - _stext) >> 10);
+8 -23
arch/s390/net/bpf_jit_comp.c
··· 570 570 if (nospec_uses_trampoline()) { 571 571 jit->r14_thunk_ip = jit->prg; 572 572 /* Generate __s390_indirect_jump_r14 thunk */ 573 - if (test_facility(35)) { 574 - /* exrl %r0,.+10 */ 575 - EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); 576 - } else { 577 - /* larl %r1,.+14 */ 578 - EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14); 579 - /* ex 0,0(%r1) */ 580 - EMIT4_DISP(0x44000000, REG_0, REG_1, 0); 581 - } 573 + /* exrl %r0,.+10 */ 574 + EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); 582 575 /* j . */ 583 576 EMIT4_PCREL(0xa7f40000, 0); 584 577 } ··· 582 589 (is_first_pass(jit) || (jit->seen & SEEN_FUNC))) { 583 590 jit->r1_thunk_ip = jit->prg; 584 591 /* Generate __s390_indirect_jump_r1 thunk */ 585 - if (test_facility(35)) { 586 - /* exrl %r0,.+10 */ 587 - EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); 588 - /* j . */ 589 - EMIT4_PCREL(0xa7f40000, 0); 590 - /* br %r1 */ 591 - _EMIT2(0x07f1); 592 - } else { 593 - /* ex 0,S390_lowcore.br_r1_tampoline */ 594 - EMIT4_DISP(0x44000000, REG_0, REG_0, 595 - offsetof(struct lowcore, br_r1_trampoline)); 596 - /* j . */ 597 - EMIT4_PCREL(0xa7f40000, 0); 598 - } 592 + /* exrl %r0,.+10 */ 593 + EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); 594 + /* j . */ 595 + EMIT4_PCREL(0xa7f40000, 0); 596 + /* br %r1 */ 597 + _EMIT2(0x07f1); 599 598 } 600 599 } 601 600
-8
arch/s390/tools/gen_facilities.c
··· 27 27 */ 28 28 .name = "FACILITIES_ALS", 29 29 .bits = (int[]){ 30 - #ifdef CONFIG_HAVE_MARCH_Z900_FEATURES 31 30 0, /* N3 instructions */ 32 31 1, /* z/Arch mode installed */ 33 - #endif 34 - #ifdef CONFIG_HAVE_MARCH_Z990_FEATURES 35 32 18, /* long displacement facility */ 36 - #endif 37 - #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES 38 33 21, /* extended-immediate facility */ 39 34 25, /* store clock fast */ 40 - #endif 41 - #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES 42 35 27, /* mvcos */ 43 36 32, /* compare and swap and store */ 44 37 33, /* compare and swap and store 2 */ 45 38 34, /* general instructions extension */ 46 39 35, /* execute extensions */ 47 - #endif 48 40 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 49 41 45, /* fast-BCR, etc. */ 50 42 #endif