Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-6.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Vasily Gorbik:

- Add sorting of mcount locations at build time

- Rework uaccess functions with C exception handling to shorten inline
assembly size and enable full inlining. This yields near-optimal code
for small constant copies with a ~40kb kernel size increase

- Add support for a configurable STRICT_MM_TYPECHECKS which allows
  generating better code, but also enables type checking for debug
  builds

- Optimize get_lowcore() for common callers with alternatives that
nearly revert to the pre-relocated lowcore code, while also slightly
reducing syscall entry and exit time

- Convert MACHINE_HAS_* checks for single facility tests into cpu_has_*
style macros that call test_facility(), and for features with
additional conditions, add a new ALT_TYPE_FEATURE alternative to
provide a static branch via alternative patching. Also, move machine
feature detection to the decompressor for early patching and add
debugging functionality to easily show which alternatives are patched

- Add exception table support to early boot / startup code to get rid
of the open coded exception handling

- Use asm_inline for all inline assemblies with EX_TABLE or ALTERNATIVE
to ensure correct inlining and unrolling decisions

- Remove 2k page table leftovers now that s390 has been switched to
always allocate 4k page tables

- Split kfence pool into 4k mappings in arch_kfence_init_pool() and
remove the architecture-specific kfence_split_mapping()

- Use READ_ONCE_NOCHECK() in regs_get_kernel_stack_nth() to silence
spurious KASAN warnings from opportunistic ftrace argument tracing

- Force __atomic_add_const() variants on s390 to always return void,
ensuring compile errors for improper usage

- Remove s390's ioremap_wt() and pgprot_writethrough() due to
mismatched semantics and lack of known users, relying on asm-generic
fallbacks

- Signal eventfd in vfio-ap to notify userspace when the guest AP
configuration changes, including during mdev removal

- Convert mdev_types from an array to a pointer in vfio-ccw and vfio-ap
drivers to avoid fake flex array confusion

- Cleanup trap code

- Remove references to the outdated linux390@de.ibm.com address

- Other various small fixes and improvements all over the code

* tag 's390-6.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (78 commits)
s390: Use inline qualifier for all EX_TABLE and ALTERNATIVE inline assemblies
s390/kfence: Split kfence pool into 4k mappings in arch_kfence_init_pool()
s390/ptrace: Avoid KASAN false positives in regs_get_kernel_stack_nth()
s390/boot: Ignore vmlinux.map
s390/sysctl: Remove "vm/allocate_pgste" sysctl
s390: Remove 2k vs 4k page table leftovers
s390/tlb: Use mm_has_pgste() instead of mm_alloc_pgste()
s390/lowcore: Use lghi instead llilh to clear register
s390/syscall: Merge __do_syscall() and do_syscall()
s390/spinlock: Implement SPINLOCK_LOCKVAL with inline assembly
s390/smp: Implement raw_smp_processor_id() with inline assembly
s390/current: Implement current with inline assembly
s390/lowcore: Use inline qualifier for get_lowcore() inline assembly
s390: Move s390 sysctls into their own file under arch/s390
s390/syscall: Simplify syscall_get_arguments()
s390/vfio-ap: Notify userspace that guest's AP config changed when mdev removed
s390: Remove ioremap_wt() and pgprot_writethrough()
s390/mm: Add configurable STRICT_MM_TYPECHECKS
s390/mm: Convert pgste_val() into function
s390/mm: Convert pgprot_val() into function
...

+1446 -1096
+1 -1
arch/s390/Kconfig
··· 70 70 imply IMA_SECURE_AND_OR_TRUSTED_BOOT 71 71 select ALTERNATE_USER_ADDRESS_SPACE 72 72 select ARCH_32BIT_USTAT_F_TINODE 73 - select ARCH_BINFMT_ELF_STATE 74 73 select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE 75 74 select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM 76 75 select ARCH_ENABLE_MEMORY_HOTREMOVE ··· 182 183 select HAVE_ARCH_TRANSPARENT_HUGEPAGE 183 184 select HAVE_ARCH_VMAP_STACK 184 185 select HAVE_ASM_MODVERSIONS 186 + select HAVE_BUILDTIME_MCOUNT_SORT 185 187 select HAVE_CMPXCHG_DOUBLE 186 188 select HAVE_CMPXCHG_LOCAL 187 189 select HAVE_DEBUG_KMEMLEAK
+10
arch/s390/Kconfig.debug
··· 13 13 14 14 If unsure, say N. 15 15 16 + config STRICT_MM_TYPECHECKS 17 + bool "Strict Memory Management Type Checks" 18 + depends on DEBUG_KERNEL 19 + help 20 + Enable strict type checking for memory management types like pte_t 21 + and pmd_t. This generates slightly worse code and should be used 22 + for debug builds. 23 + 24 + If unsure, say N. 25 + 16 26 config CIO_INJECT 17 27 bool "CIO Inject interfaces" 18 28 depends on DEBUG_KERNEL && DEBUG_FS
+1
arch/s390/boot/.gitignore
··· 5 5 section_cmp.* 6 6 vmlinux 7 7 vmlinux.lds 8 + vmlinux.map 8 9 vmlinux.syms
+1 -1
arch/s390/boot/Makefile
··· 26 26 27 27 obj-y := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o 28 28 obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o 29 - obj-y += version.o pgm_check_info.o ctype.o ipl_data.o relocs.o alternative.o 29 + obj-y += version.o pgm_check.o ctype.o ipl_data.o relocs.o alternative.o 30 30 obj-y += uv.o printk.o 31 31 obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o 32 32 obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
+135
arch/s390/boot/alternative.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #define boot_fmt(fmt) "alt: " fmt 3 + #include "boot.h" 4 + 5 + #define a_debug boot_debug 2 6 3 7 #include "../kernel/alternative.c" 8 + 9 + static void alt_debug_all(int type) 10 + { 11 + int i; 12 + 13 + switch (type) { 14 + case ALT_TYPE_FACILITY: 15 + for (i = 0; i < ARRAY_SIZE(alt_debug.facilities); i++) 16 + alt_debug.facilities[i] = -1UL; 17 + break; 18 + case ALT_TYPE_FEATURE: 19 + for (i = 0; i < ARRAY_SIZE(alt_debug.mfeatures); i++) 20 + alt_debug.mfeatures[i] = -1UL; 21 + break; 22 + case ALT_TYPE_SPEC: 23 + alt_debug.spec = 1; 24 + break; 25 + } 26 + } 27 + 28 + static void alt_debug_modify(int type, unsigned int nr, bool clear) 29 + { 30 + switch (type) { 31 + case ALT_TYPE_FACILITY: 32 + if (clear) 33 + __clear_facility(nr, alt_debug.facilities); 34 + else 35 + __set_facility(nr, alt_debug.facilities); 36 + break; 37 + case ALT_TYPE_FEATURE: 38 + if (clear) 39 + __clear_machine_feature(nr, alt_debug.mfeatures); 40 + else 41 + __set_machine_feature(nr, alt_debug.mfeatures); 42 + break; 43 + } 44 + } 45 + 46 + static char *alt_debug_parse(int type, char *str) 47 + { 48 + unsigned long val, endval; 49 + char *endp; 50 + bool clear; 51 + int i; 52 + 53 + if (*str == ':') { 54 + str++; 55 + } else { 56 + alt_debug_all(type); 57 + return str; 58 + } 59 + clear = false; 60 + if (*str == '!') { 61 + alt_debug_all(type); 62 + clear = true; 63 + str++; 64 + } 65 + while (*str) { 66 + val = simple_strtoull(str, &endp, 0); 67 + if (str == endp) 68 + break; 69 + str = endp; 70 + if (*str == '-') { 71 + str++; 72 + endval = simple_strtoull(str, &endp, 0); 73 + if (str == endp) 74 + break; 75 + str = endp; 76 + while (val <= endval) { 77 + alt_debug_modify(type, val, clear); 78 + val++; 79 + } 80 + } else { 81 + alt_debug_modify(type, val, clear); 82 + } 83 + if (*str != ',') 84 + break; 85 + str++; 86 + } 87 + return str; 88 + } 89 + 90 + /* 91 + * Use debug-alternative command line parameter for debugging: 92 + * 
"debug-alternative" 93 + * -> print debug message for every single alternative 94 + * 95 + * "debug-alternative=0;2" 96 + * -> print debug message for all alternatives with type 0 and 2 97 + * 98 + * "debug-alternative=0:0-7" 99 + * -> print debug message for all alternatives with type 0 and with 100 + * facility numbers within the range of 0-7 101 + * (if type 0 is ALT_TYPE_FACILITY) 102 + * 103 + * "debug-alternative=0:!8;1" 104 + * -> print debug message for all alternatives with type 0, for all 105 + * facility number, except facility 8, and in addition print all 106 + * alternatives with type 1 107 + */ 108 + void alt_debug_setup(char *str) 109 + { 110 + unsigned long type; 111 + char *endp; 112 + int i; 113 + 114 + if (!str) { 115 + alt_debug_all(ALT_TYPE_FACILITY); 116 + alt_debug_all(ALT_TYPE_FEATURE); 117 + alt_debug_all(ALT_TYPE_SPEC); 118 + return; 119 + } 120 + while (*str) { 121 + type = simple_strtoull(str, &endp, 0); 122 + if (str == endp) 123 + break; 124 + str = endp; 125 + switch (type) { 126 + case ALT_TYPE_FACILITY: 127 + case ALT_TYPE_FEATURE: 128 + str = alt_debug_parse(type, str); 129 + break; 130 + case ALT_TYPE_SPEC: 131 + alt_debug_all(ALT_TYPE_SPEC); 132 + break; 133 + } 134 + if (*str != ';') 135 + break; 136 + str++; 137 + } 138 + }
+2 -6
arch/s390/boot/boot.h
··· 11 11 #include <linux/printk.h> 12 12 #include <asm/physmem_info.h> 13 13 14 - struct machine_info { 15 - unsigned char has_edat1 : 1; 16 - unsigned char has_edat2 : 1; 17 - }; 18 - 19 14 struct vmlinux_info { 20 15 unsigned long entry; 21 16 unsigned long image_size; /* does not include .bss */ ··· 64 69 void verify_facilities(void); 65 70 void print_missing_facilities(void); 66 71 void sclp_early_setup_buffer(void); 67 - void print_pgm_check_info(void); 72 + void alt_debug_setup(char *str); 73 + void do_pgm_check(struct pt_regs *regs); 68 74 unsigned long randomize_within_range(unsigned long size, unsigned long align, 69 75 unsigned long min, unsigned long max); 70 76 void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned long asce_limit);
+15 -10
arch/s390/boot/head.S
··· 254 254 xc 0xf00(256),0xf00 255 255 larl %r13,.Lctl 256 256 lctlg %c0,%c15,0(%r13) # load control registers 257 - stcke __LC_BOOT_CLOCK 258 - mvc __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1 257 + larl %r13,tod_clock_base 258 + stcke 0(%r13) 259 + mvc __LC_LAST_UPDATE_CLOCK(8),1(%r13) 259 260 larl %r13,6f 260 261 spt 0(%r13) 261 262 mvc __LC_LAST_UPDATE_TIMER(8),0(%r13) ··· 293 292 294 293 #include "head_kdump.S" 295 294 296 - # 297 - # This program check is active immediately after kernel start 298 - # and until early_pgm_check_handler is set in kernel/early.c 299 - # It simply saves general/control registers and psw in 300 - # the save area and does disabled wait with a faulty address. 301 - # 302 295 SYM_CODE_START_LOCAL(startup_pgm_check_handler) 303 296 stmg %r8,%r15,__LC_SAVE_AREA 304 297 la %r8,4095 ··· 306 311 oi __LC_RETURN_PSW+1,0x2 # set wait state bit 307 312 larl %r9,.Lold_psw_disabled_wait 308 313 stg %r9,__LC_PGM_NEW_PSW+8 309 - larl %r15,_dump_info_stack_end-STACK_FRAME_OVERHEAD 310 - brasl %r14,print_pgm_check_info 314 + larl %r15,_dump_info_stack_end-(STACK_FRAME_OVERHEAD+__PT_SIZE) 315 + la %r2,STACK_FRAME_OVERHEAD(%r15) 316 + mvc __PT_PSW(16,%r2),__LC_PSW_SAVE_AREA-4095(%r8) 317 + mvc __PT_R0(128,%r2),__LC_GPREGS_SAVE_AREA-4095(%r8) 318 + mvc __PT_LAST_BREAK(8,%r2),__LC_PGM_LAST_BREAK 319 + mvc __PT_INT_CODE(4,%r2),__LC_PGM_INT_CODE 320 + brasl %r14,do_pgm_check 321 + larl %r9,startup_pgm_check_handler 322 + stg %r9,__LC_PGM_NEW_PSW+8 323 + mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) 324 + lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 325 + lpswe __LC_RETURN_PSW 311 326 .Lold_psw_disabled_wait: 312 327 la %r8,4095 313 328 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8)
+11 -22
arch/s390/boot/ipl_parm.c
··· 5 5 #include <linux/pgtable.h> 6 6 #include <asm/abs_lowcore.h> 7 7 #include <asm/page-states.h> 8 + #include <asm/machine.h> 8 9 #include <asm/ebcdic.h> 9 10 #include <asm/sclp.h> 10 11 #include <asm/sections.h> ··· 35 34 36 35 static inline int __diag308(unsigned long subcode, void *addr) 37 36 { 38 - unsigned long reg1, reg2; 39 - union register_pair r1; 40 - psw_t old; 37 + union register_pair r1 = { .even = (unsigned long)addr, .odd = 0 }; 41 38 42 - r1.even = (unsigned long) addr; 43 - r1.odd = 0; 44 - asm volatile( 45 - " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n" 46 - " epsw %[reg1],%[reg2]\n" 47 - " st %[reg1],0(%[psw_pgm])\n" 48 - " st %[reg2],4(%[psw_pgm])\n" 49 - " larl %[reg1],1f\n" 50 - " stg %[reg1],8(%[psw_pgm])\n" 39 + asm_inline volatile( 51 40 " diag %[r1],%[subcode],0x308\n" 52 - "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n" 53 - : [r1] "+&d" (r1.pair), 54 - [reg1] "=&d" (reg1), 55 - [reg2] "=&a" (reg2), 56 - "+Q" (get_lowcore()->program_new_psw), 57 - "=Q" (old) 58 - : [subcode] "d" (subcode), 59 - [psw_old] "a" (&old), 60 - [psw_pgm] "a" (&get_lowcore()->program_new_psw) 41 + "0:\n" 42 + EX_TABLE(0b, 0b) 43 + : [r1] "+d" (r1.pair) 44 + : [subcode] "d" (subcode) 61 45 : "cc", "memory"); 62 46 return r1.odd; 63 47 } ··· 281 295 if (!strcmp(param, "facilities") && val) 282 296 modify_fac_list(val); 283 297 298 + if (!strcmp(param, "debug-alternative")) 299 + alt_debug_setup(val); 300 + 284 301 if (!strcmp(param, "nokaslr")) 285 302 __kaslr_enabled = 0; 286 303 ··· 301 312 } 302 313 #endif 303 314 if (!strcmp(param, "relocate_lowcore") && test_facility(193)) 304 - relocate_lowcore = 1; 315 + set_machine_feature(MFEATURE_LOWCORE); 305 316 if (!strcmp(param, "earlyprintk")) 306 317 boot_earlyprintk = true; 307 318 if (!strcmp(param, "debug"))
+37 -11
arch/s390/boot/pgm_check_info.c arch/s390/boot/pgm_check.c
··· 32 32 } 33 33 } 34 34 35 - void print_pgm_check_info(void) 36 - { 37 - unsigned long *gpregs = (unsigned long *)get_lowcore()->gpregs_save_area; 38 - struct psw_bits *psw = &psw_bits(get_lowcore()->psw_save_area); 35 + extern struct exception_table_entry __start___ex_table[]; 36 + extern struct exception_table_entry __stop___ex_table[]; 39 37 38 + static inline unsigned long extable_insn(const struct exception_table_entry *x) 39 + { 40 + return (unsigned long)&x->insn + x->insn; 41 + } 42 + 43 + static bool ex_handler(struct pt_regs *regs) 44 + { 45 + const struct exception_table_entry *ex; 46 + 47 + for (ex = __start___ex_table; ex < __stop___ex_table; ex++) { 48 + if (extable_insn(ex) != regs->psw.addr) 49 + continue; 50 + if (ex->type != EX_TYPE_FIXUP) 51 + return false; 52 + regs->psw.addr = extable_fixup(ex); 53 + return true; 54 + } 55 + return false; 56 + } 57 + 58 + void do_pgm_check(struct pt_regs *regs) 59 + { 60 + struct psw_bits *psw = &psw_bits(regs->psw); 61 + unsigned long *gpregs = regs->gprs; 62 + 63 + if (ex_handler(regs)) 64 + return; 40 65 if (bootdebug) 41 66 boot_rb_dump(); 42 67 boot_emerg("Linux version %s\n", kernel_version); 43 68 if (!is_prot_virt_guest() && early_command_line[0]) 44 69 boot_emerg("Kernel command line: %s\n", early_command_line); 45 70 boot_emerg("Kernel fault: interruption code %04x ilc:%d\n", 46 - get_lowcore()->pgm_code, get_lowcore()->pgm_ilc >> 1); 71 + regs->int_code & 0xffff, regs->int_code >> 17); 47 72 if (kaslr_enabled()) { 48 73 boot_emerg("Kernel random base: %lx\n", __kaslr_offset); 49 74 boot_emerg("Kernel random base phys: %lx\n", __kaslr_offset_phys); 50 75 } 51 76 boot_emerg("PSW : %016lx %016lx (%pS)\n", 52 - get_lowcore()->psw_save_area.mask, 53 - get_lowcore()->psw_save_area.addr, 54 - (void *)get_lowcore()->psw_save_area.addr); 77 + regs->psw.mask, regs->psw.addr, (void *)regs->psw.addr); 55 78 boot_emerg(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x P:%x AS:%x CC:%x PM:%x RI:%x EA:%x\n", 56 79 
psw->per, psw->dat, psw->io, psw->ext, psw->key, psw->mcheck, 57 80 psw->wait, psw->pstate, psw->as, psw->cc, psw->pm, psw->ri, psw->eaba); ··· 82 59 boot_emerg(" %016lx %016lx %016lx %016lx\n", gpregs[4], gpregs[5], gpregs[6], gpregs[7]); 83 60 boot_emerg(" %016lx %016lx %016lx %016lx\n", gpregs[8], gpregs[9], gpregs[10], gpregs[11]); 84 61 boot_emerg(" %016lx %016lx %016lx %016lx\n", gpregs[12], gpregs[13], gpregs[14], gpregs[15]); 85 - print_stacktrace(get_lowcore()->gpregs_save_area[15]); 62 + print_stacktrace(gpregs[15]); 86 63 boot_emerg("Last Breaking-Event-Address:\n"); 87 - boot_emerg(" [<%016lx>] %pS\n", (unsigned long)get_lowcore()->pgm_last_break, 88 - (void *)get_lowcore()->pgm_last_break); 64 + boot_emerg(" [<%016lx>] %pS\n", regs->last_break, (void *)regs->last_break); 65 + /* Convert to disabled wait PSW */ 66 + psw->io = 0; 67 + psw->ext = 0; 68 + psw->wait = 1; 89 69 }
+21 -63
arch/s390/boot/physmem_info.c
··· 59 59 60 60 static int __diag260(unsigned long rx1, unsigned long rx2) 61 61 { 62 - unsigned long reg1, reg2, ry; 63 62 union register_pair rx; 64 63 int cc, exception; 65 - psw_t old; 64 + unsigned long ry; 66 65 67 66 rx.even = rx1; 68 67 rx.odd = rx2; 69 68 ry = 0x10; /* storage configuration */ 70 69 exception = 1; 71 - asm volatile( 72 - " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n" 73 - " epsw %[reg1],%[reg2]\n" 74 - " st %[reg1],0(%[psw_pgm])\n" 75 - " st %[reg2],4(%[psw_pgm])\n" 76 - " larl %[reg1],1f\n" 77 - " stg %[reg1],8(%[psw_pgm])\n" 70 + asm_inline volatile( 78 71 " diag %[rx],%[ry],0x260\n" 79 - " lhi %[exc],0\n" 80 - "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n" 72 + "0: lhi %[exc],0\n" 73 + "1:\n" 81 74 CC_IPM(cc) 82 - : CC_OUT(cc, cc), 83 - [exc] "+d" (exception), 84 - [reg1] "=&d" (reg1), 85 - [reg2] "=&a" (reg2), 86 - [ry] "+&d" (ry), 87 - "+Q" (get_lowcore()->program_new_psw), 88 - "=Q" (old) 89 - : [rx] "d" (rx.pair), 90 - [psw_old] "a" (&old), 91 - [psw_pgm] "a" (&get_lowcore()->program_new_psw) 75 + EX_TABLE(0b, 1b) 76 + : CC_OUT(cc, cc), [exc] "+d" (exception), [ry] "+d" (ry) 77 + : [rx] "d" (rx.pair) 92 78 : CC_CLOBBER_LIST("memory")); 93 79 cc = exception ? -1 : CC_TRANSFORM(cc); 94 80 return cc == 0 ? 
ry : -1; ··· 104 118 static int diag500_storage_limit(unsigned long *max_physmem_end) 105 119 { 106 120 unsigned long storage_limit; 107 - unsigned long reg1, reg2; 108 - psw_t old; 109 121 110 - asm volatile( 111 - " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n" 112 - " epsw %[reg1],%[reg2]\n" 113 - " st %[reg1],0(%[psw_pgm])\n" 114 - " st %[reg2],4(%[psw_pgm])\n" 115 - " larl %[reg1],1f\n" 116 - " stg %[reg1],8(%[psw_pgm])\n" 117 - " lghi 1,%[subcode]\n" 118 - " lghi 2,0\n" 119 - " diag 2,4,0x500\n" 120 - "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n" 121 - " lgr %[slimit],2\n" 122 - : [reg1] "=&d" (reg1), 123 - [reg2] "=&a" (reg2), 124 - [slimit] "=d" (storage_limit), 125 - "=Q" (get_lowcore()->program_new_psw), 126 - "=Q" (old) 127 - : [psw_old] "a" (&old), 128 - [psw_pgm] "a" (&get_lowcore()->program_new_psw), 129 - [subcode] "i" (DIAG500_SC_STOR_LIMIT) 122 + asm_inline volatile( 123 + " lghi %%r1,%[subcode]\n" 124 + " lghi %%r2,0\n" 125 + " diag %%r2,%%r4,0x500\n" 126 + "0: lgr %[slimit],%%r2\n" 127 + EX_TABLE(0b, 0b) 128 + : [slimit] "=d" (storage_limit) 129 + : [subcode] "i" (DIAG500_SC_STOR_LIMIT) 130 130 : "memory", "1", "2"); 131 131 if (!storage_limit) 132 132 return -EINVAL; ··· 123 151 124 152 static int tprot(unsigned long addr) 125 153 { 126 - unsigned long reg1, reg2; 127 154 int cc, exception; 128 - psw_t old; 129 155 130 156 exception = 1; 131 - asm volatile( 132 - " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n" 133 - " epsw %[reg1],%[reg2]\n" 134 - " st %[reg1],0(%[psw_pgm])\n" 135 - " st %[reg2],4(%[psw_pgm])\n" 136 - " larl %[reg1],1f\n" 137 - " stg %[reg1],8(%[psw_pgm])\n" 157 + asm_inline volatile( 138 158 " tprot 0(%[addr]),0\n" 139 - " lhi %[exc],0\n" 140 - "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n" 159 + "0: lhi %[exc],0\n" 160 + "1:\n" 141 161 CC_IPM(cc) 142 - : CC_OUT(cc, cc), 143 - [exc] "+d" (exception), 144 - [reg1] "=&d" (reg1), 145 - [reg2] "=&a" (reg2), 146 - "=Q" (get_lowcore()->program_new_psw.addr), 147 - "=Q" (old) 148 - : [psw_old] "a" (&old), 149 
- [psw_pgm] "a" (&get_lowcore()->program_new_psw), 150 - [addr] "a" (addr) 162 + EX_TABLE(0b, 1b) 163 + : CC_OUT(cc, cc), [exc] "+d" (exception) 164 + : [addr] "a" (addr) 151 165 : CC_CLOBBER_LIST("memory")); 152 166 cc = exception ? -EFAULT : CC_TRANSFORM(cc); 153 167 return cc;
+2 -2
arch/s390/boot/printk.c
··· 8 8 #include <asm/sections.h> 9 9 #include <asm/lowcore.h> 10 10 #include <asm/setup.h> 11 + #include <asm/timex.h> 11 12 #include <asm/sclp.h> 12 13 #include <asm/uv.h> 13 14 #include "boot.h" ··· 200 199 static char *add_timestamp(char *buf) 201 200 { 202 201 #ifdef CONFIG_PRINTK_TIME 203 - union tod_clock *boot_clock = (union tod_clock *)&get_lowcore()->boot_clock; 204 - unsigned long ns = tod_to_ns(get_tod_clock() - boot_clock->tod); 202 + unsigned long ns = tod_to_ns(__get_tod_clock_monotonic()); 205 203 char ts[MAX_NUMLEN]; 206 204 207 205 *buf++ = '[';
+85 -29
arch/s390/boot/startup.c
··· 7 7 #include <asm/extmem.h> 8 8 #include <asm/sections.h> 9 9 #include <asm/maccess.h> 10 + #include <asm/machine.h> 11 + #include <asm/sysinfo.h> 10 12 #include <asm/cpu_mf.h> 11 13 #include <asm/setup.h> 14 + #include <asm/timex.h> 12 15 #include <asm/kasan.h> 13 16 #include <asm/kexec.h> 14 17 #include <asm/sclp.h> ··· 37 34 unsigned long __bootdata_preserved(page_noexec_mask); 38 35 unsigned long __bootdata_preserved(segment_noexec_mask); 39 36 unsigned long __bootdata_preserved(region_noexec_mask); 40 - int __bootdata_preserved(relocate_lowcore); 37 + union tod_clock __bootdata_preserved(tod_clock_base); 38 + u64 __bootdata_preserved(clock_comparator_max) = -1UL; 41 39 42 40 u64 __bootdata_preserved(stfle_fac_list[16]); 43 41 struct oldmem_data __bootdata_preserved(oldmem_data); 44 - 45 - struct machine_info machine; 46 42 47 43 void error(char *x) 48 44 { ··· 50 48 disabled_wait(); 51 49 } 52 50 51 + static char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE); 52 + 53 + static void detect_machine_type(void) 54 + { 55 + struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page; 56 + 57 + /* Check current-configuration-level */ 58 + if (stsi(NULL, 0, 0, 0) <= 2) { 59 + set_machine_feature(MFEATURE_LPAR); 60 + return; 61 + } 62 + /* Get virtual-machine cpu information. 
*/ 63 + if (stsi(vmms, 3, 2, 2) || !vmms->count) 64 + return; 65 + /* Detect known hypervisors */ 66 + if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3)) 67 + set_machine_feature(MFEATURE_KVM); 68 + else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4)) 69 + set_machine_feature(MFEATURE_VM); 70 + } 71 + 72 + static void detect_diag9c(void) 73 + { 74 + unsigned int cpu; 75 + int rc = 1; 76 + 77 + cpu = stap(); 78 + asm_inline volatile( 79 + " diag %[cpu],%%r0,0x9c\n" 80 + "0: lhi %[rc],0\n" 81 + "1:\n" 82 + EX_TABLE(0b, 1b) 83 + : [rc] "+d" (rc) 84 + : [cpu] "d" (cpu) 85 + : "cc", "memory"); 86 + if (!rc) 87 + set_machine_feature(MFEATURE_DIAG9C); 88 + } 89 + 90 + static void reset_tod_clock(void) 91 + { 92 + union tod_clock clk; 93 + 94 + if (store_tod_clock_ext_cc(&clk) == 0) 95 + return; 96 + /* TOD clock not running. Set the clock to Unix Epoch. */ 97 + if (set_tod_clock(TOD_UNIX_EPOCH) || store_tod_clock_ext_cc(&clk)) 98 + disabled_wait(); 99 + memset(&tod_clock_base, 0, sizeof(tod_clock_base)); 100 + tod_clock_base.tod = TOD_UNIX_EPOCH; 101 + get_lowcore()->last_update_clock = TOD_UNIX_EPOCH; 102 + } 103 + 53 104 static void detect_facilities(void) 54 105 { 55 - if (test_facility(8)) { 56 - machine.has_edat1 = 1; 106 + if (cpu_has_edat1()) 57 107 local_ctl_set_bit(0, CR0_EDAT_BIT); 58 - } 59 - if (test_facility(78)) 60 - machine.has_edat2 = 1; 61 108 page_noexec_mask = -1UL; 62 109 segment_noexec_mask = -1UL; 63 110 region_noexec_mask = -1UL; 64 - if (!test_facility(130)) { 111 + if (!cpu_has_nx()) { 65 112 page_noexec_mask &= ~_PAGE_NOEXEC; 66 113 segment_noexec_mask &= ~_SEGMENT_ENTRY_NOEXEC; 67 114 region_noexec_mask &= ~_REGION_ENTRY_NOEXEC; 68 115 } 116 + if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) 117 + set_machine_feature(MFEATURE_PCI_MIO); 118 + reset_tod_clock(); 119 + if (test_facility(139) && (tod_clock_base.tod >> 63)) { 120 + /* Enable signed clock comparator comparisons */ 121 + set_machine_feature(MFEATURE_SCC); 122 + clock_comparator_max 
= -1UL >> 1; 123 + local_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SIGN_BIT); 124 + } 125 + if (test_facility(50) && test_facility(73)) { 126 + set_machine_feature(MFEATURE_TX); 127 + local_ctl_set_bit(0, CR0_TRANSACTIONAL_EXECUTION_BIT); 128 + } 129 + if (cpu_has_vx()) 130 + local_ctl_set_bit(0, CR0_VECTOR_BIT); 69 131 } 70 132 71 133 static int cmma_test_essa(void) 72 134 { 73 - unsigned long reg1, reg2, tmp = 0; 135 + unsigned long tmp = 0; 74 136 int rc = 1; 75 - psw_t old; 76 137 77 138 /* Test ESSA_GET_STATE */ 78 - asm volatile( 79 - " mvc 0(16,%[psw_old]),0(%[psw_pgm])\n" 80 - " epsw %[reg1],%[reg2]\n" 81 - " st %[reg1],0(%[psw_pgm])\n" 82 - " st %[reg2],4(%[psw_pgm])\n" 83 - " larl %[reg1],1f\n" 84 - " stg %[reg1],8(%[psw_pgm])\n" 139 + asm_inline volatile( 85 140 " .insn rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n" 86 - " la %[rc],0\n" 87 - "1: mvc 0(16,%[psw_pgm]),0(%[psw_old])\n" 88 - : [reg1] "=&d" (reg1), 89 - [reg2] "=&a" (reg2), 90 - [rc] "+&d" (rc), 91 - [tmp] "+&d" (tmp), 92 - "+Q" (get_lowcore()->program_new_psw), 93 - "=Q" (old) 94 - : [psw_old] "a" (&old), 95 - [psw_pgm] "a" (&get_lowcore()->program_new_psw), 96 - [cmd] "i" (ESSA_GET_STATE) 141 + "0: lhi %[rc],0\n" 142 + "1:\n" 143 + EX_TABLE(0b, 1b) 144 + : [rc] "+d" (rc), [tmp] "+d" (tmp) 145 + : [cmd] "i" (ESSA_GET_STATE) 97 146 : "cc", "memory"); 98 147 return rc; 99 148 } ··· 515 462 516 463 read_ipl_report(); 517 464 sclp_early_read_info(); 465 + sclp_early_detect_machine_features(); 518 466 detect_facilities(); 467 + detect_diag9c(); 468 + detect_machine_type(); 519 469 cmma_init(); 520 470 sanitize_prot_virt_host(); 521 471 max_physmem_end = detect_max_physmem_end();
+5 -3
arch/s390/boot/vmem.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #define boot_fmt(fmt) "vmem: " fmt 3 + #include <linux/cpufeature.h> 3 4 #include <linux/sched/task.h> 4 5 #include <linux/pgtable.h> 5 6 #include <linux/kasan.h> ··· 11 10 #include <asm/ctlreg.h> 12 11 #include <asm/physmem_info.h> 13 12 #include <asm/maccess.h> 13 + #include <asm/machine.h> 14 14 #include <asm/abs_lowcore.h> 15 15 #include "decompressor.h" 16 16 #include "boot.h" ··· 316 314 { 317 315 unsigned long pa, size = end - addr; 318 316 319 - if (!machine.has_edat2 || !large_page_mapping_allowed(mode) || 317 + if (!cpu_has_edat2() || !large_page_mapping_allowed(mode) || 320 318 !IS_ALIGNED(addr, PUD_SIZE) || (size < PUD_SIZE)) 321 319 return INVALID_PHYS_ADDR; 322 320 ··· 332 330 { 333 331 unsigned long pa, size = end - addr; 334 332 335 - if (!machine.has_edat1 || !large_page_mapping_allowed(mode) || 333 + if (!cpu_has_edat1() || !large_page_mapping_allowed(mode) || 336 334 !IS_ALIGNED(addr, PMD_SIZE) || (size < PMD_SIZE)) 337 335 return INVALID_PHYS_ADDR; 338 336 ··· 518 516 __arch_set_page_dat((void *)swapper_pg_dir, 1UL << CRST_ALLOC_ORDER); 519 517 __arch_set_page_dat((void *)invalid_pg_dir, 1UL << CRST_ALLOC_ORDER); 520 518 521 - if (relocate_lowcore) 519 + if (machine_has_relocated_lowcore()) 522 520 lowcore_address = LOWCORE_ALT_ADDRESS; 523 521 524 522 /*
+1 -1
arch/s390/boot/vmlinux.lds.S
··· 40 40 *(.rodata.*) 41 41 _erodata = . ; 42 42 } 43 + EXCEPTION_TABLE(16) 43 44 .got : { 44 45 *(.got) 45 46 } ··· 166 165 /DISCARD/ : { 167 166 COMMON_DISCARDS 168 167 *(.eh_frame) 169 - *(__ex_table) 170 168 *(*__ksymtab*) 171 169 *(___kcrctab*) 172 170 }
+2
arch/s390/configs/debug_defconfig
··· 885 885 CONFIG_HIST_TRIGGERS=y 886 886 CONFIG_FTRACE_STARTUP_TEST=y 887 887 # CONFIG_EVENT_TRACE_STARTUP_TEST is not set 888 + CONFIG_FTRACE_SORT_STARTUP_TEST=y 888 889 CONFIG_SAMPLES=y 889 890 CONFIG_SAMPLE_TRACE_PRINTK=m 890 891 CONFIG_SAMPLE_FTRACE_DIRECT=m 891 892 CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m 892 893 CONFIG_SAMPLE_FTRACE_OPS=m 893 894 CONFIG_DEBUG_ENTRY=y 895 + CONFIG_STRICT_MM_TYPECHECKS=y 894 896 CONFIG_CIO_INJECT=y 895 897 CONFIG_KUNIT=m 896 898 CONFIG_KUNIT_DEBUGFS=y
+2
arch/s390/configs/mmtypes.config
··· 1 + # Help: Enable strict memory management typechecks 2 + CONFIG_STRICT_MM_TYPECHECKS=y
+3 -2
arch/s390/hypfs/hypfs_diag0c.c
··· 9 9 10 10 #include <linux/slab.h> 11 11 #include <linux/cpu.h> 12 + #include <asm/machine.h> 12 13 #include <asm/diag.h> 13 14 #include <asm/hypfs.h> 14 15 #include "hypfs.h" ··· 108 107 */ 109 108 int __init hypfs_diag0c_init(void) 110 109 { 111 - if (!MACHINE_IS_VM) 110 + if (!machine_is_vm()) 112 111 return 0; 113 112 hypfs_dbfs_create_file(&dbfs_file_0c); 114 113 return 0; ··· 119 118 */ 120 119 void hypfs_diag0c_exit(void) 121 120 { 122 - if (!MACHINE_IS_VM) 121 + if (!machine_is_vm()) 123 122 return; 124 123 hypfs_dbfs_remove_file(&dbfs_file_0c); 125 124 }
+2 -1
arch/s390/hypfs/hypfs_diag_fs.c
··· 16 16 #include <linux/string.h> 17 17 #include <linux/vmalloc.h> 18 18 #include <linux/mm.h> 19 + #include <asm/machine.h> 19 20 #include <asm/diag.h> 20 21 #include <asm/ebcdic.h> 21 22 #include "hypfs_diag.h" ··· 383 382 384 383 int __init __hypfs_diag_fs_init(void) 385 384 { 386 - if (MACHINE_IS_LPAR) 385 + if (machine_is_lpar()) 387 386 return diag224_get_name_table(); 388 387 return 0; 389 388 }
+3 -2
arch/s390/hypfs/hypfs_vm.c
··· 11 11 #include <linux/string.h> 12 12 #include <linux/vmalloc.h> 13 13 #include <asm/extable.h> 14 + #include <asm/machine.h> 14 15 #include <asm/diag.h> 15 16 #include <asm/ebcdic.h> 16 17 #include <asm/timex.h> ··· 122 121 123 122 int hypfs_vm_init(void) 124 123 { 125 - if (!MACHINE_IS_VM) 124 + if (!machine_is_vm()) 126 125 return 0; 127 126 if (diag2fc(0, all_guests, NULL) > 0) 128 127 diag2fc_guest_query = all_guests; ··· 136 135 137 136 void hypfs_vm_exit(void) 138 137 { 139 - if (!MACHINE_IS_VM) 138 + if (!machine_is_vm()) 140 139 return; 141 140 hypfs_dbfs_remove_file(&dbfs_file_2fc); 142 141 }
+3 -2
arch/s390/hypfs/inode.c
··· 24 24 #include <linux/kobject.h> 25 25 #include <linux/seq_file.h> 26 26 #include <linux/uio.h> 27 + #include <asm/machine.h> 27 28 #include <asm/ebcdic.h> 28 29 #include "hypfs.h" 29 30 ··· 185 184 goto out; 186 185 } 187 186 hypfs_delete_tree(sb->s_root); 188 - if (MACHINE_IS_VM) 187 + if (machine_is_vm()) 189 188 rc = hypfs_vm_create_files(sb->s_root); 190 189 else 191 190 rc = hypfs_diag_create_files(sb->s_root); ··· 274 273 sb->s_root = root_dentry = d_make_root(root_inode); 275 274 if (!root_dentry) 276 275 return -ENOMEM; 277 - if (MACHINE_IS_VM) 276 + if (machine_is_vm()) 278 277 rc = hypfs_vm_create_files(root_dentry); 279 278 else 280 279 rc = hypfs_diag_create_files(root_dentry);
-7
arch/s390/include/asm/abs_lowcore.h
··· 25 25 put_cpu(); 26 26 } 27 27 28 - extern int relocate_lowcore; 29 - 30 - static inline int have_relocated_lowcore(void) 31 - { 32 - return relocate_lowcore; 33 - } 34 - 35 28 #endif /* _ASM_S390_ABS_LOWCORE_H */
+6 -5
arch/s390/include/asm/alternative.h
··· 32 32 #define ALT_CTX_ALL (ALT_CTX_EARLY | ALT_CTX_LATE) 33 33 34 34 #define ALT_TYPE_FACILITY 0 35 - #define ALT_TYPE_SPEC 1 36 - #define ALT_TYPE_LOWCORE 2 35 + #define ALT_TYPE_FEATURE 1 36 + #define ALT_TYPE_SPEC 2 37 37 38 38 #define ALT_DATA_SHIFT 0 39 39 #define ALT_TYPE_SHIFT 20 ··· 43 43 ALT_TYPE_FACILITY << ALT_TYPE_SHIFT | \ 44 44 (facility) << ALT_DATA_SHIFT) 45 45 46 + #define ALT_FEATURE(feature) (ALT_CTX_EARLY << ALT_CTX_SHIFT | \ 47 + ALT_TYPE_FEATURE << ALT_TYPE_SHIFT | \ 48 + (feature) << ALT_DATA_SHIFT) 49 + 46 50 #define ALT_SPEC(facility) (ALT_CTX_LATE << ALT_CTX_SHIFT | \ 47 51 ALT_TYPE_SPEC << ALT_TYPE_SHIFT | \ 48 52 (facility) << ALT_DATA_SHIFT) 49 - 50 - #define ALT_LOWCORE (ALT_CTX_EARLY << ALT_CTX_SHIFT | \ 51 - ALT_TYPE_LOWCORE << ALT_TYPE_SHIFT) 52 53 53 54 #ifndef __ASSEMBLY__ 54 55
+2 -1
arch/s390/include/asm/appldata.h
··· 9 9 #define _ASM_S390_APPLDATA_H 10 10 11 11 #include <linux/io.h> 12 + #include <asm/machine.h> 12 13 #include <asm/diag.h> 13 14 14 15 #define APPLDATA_START_INTERVAL_REC 0x80 ··· 49 48 { 50 49 int ry; 51 50 52 - if (!MACHINE_IS_VM) 51 + if (!machine_is_vm()) 53 52 return -EOPNOTSUPP; 54 53 parm_list->diag = 0xdc; 55 54 parm_list->function = fn;
+8
arch/s390/include/asm/asm-extable.h
··· 14 14 #define EX_TYPE_UA_LOAD_REGPAIR 6 15 15 #define EX_TYPE_ZEROPAD 7 16 16 #define EX_TYPE_FPC 8 17 + #define EX_TYPE_UA_MVCOS_TO 9 18 + #define EX_TYPE_UA_MVCOS_FROM 10 17 19 18 20 #define EX_DATA_REG_ERR_SHIFT 0 19 21 #define EX_DATA_REG_ERR GENMASK(3, 0) ··· 85 83 86 84 #define EX_TABLE_FPC(_fault, _target) \ 87 85 __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FPC, __stringify(%%r0), __stringify(%%r0), 0) 86 + 87 + #define EX_TABLE_UA_MVCOS_TO(_fault, _target) \ 88 + __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_MVCOS_TO, __stringify(%%r0), __stringify(%%r0), 0) 89 + 90 + #define EX_TABLE_UA_MVCOS_FROM(_fault, _target) \ 91 + __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_MVCOS_FROM, __stringify(%%r0), __stringify(%%r0), 0) 88 92 89 93 #endif /* __ASM_EXTABLE_H */
+4 -4
arch/s390/include/asm/atomic_ops.h
··· 163 163 164 164 #undef __ATOMIC64_OPS 165 165 166 - #define __atomic_add_const(val, ptr) __atomic_add(val, ptr) 167 - #define __atomic_add_const_barrier(val, ptr) __atomic_add(val, ptr) 168 - #define __atomic64_add_const(val, ptr) __atomic64_add(val, ptr) 169 - #define __atomic64_add_const_barrier(val, ptr) __atomic64_add(val, ptr) 166 + #define __atomic_add_const(val, ptr) ((void)__atomic_add(val, ptr)) 167 + #define __atomic_add_const_barrier(val, ptr) ((void)__atomic_add(val, ptr)) 168 + #define __atomic64_add_const(val, ptr) ((void)__atomic64_add(val, ptr)) 169 + #define __atomic64_add_const_barrier(val, ptr) ((void)__atomic64_add(val, ptr)) 170 170 171 171 #endif /* MARCH_HAS_Z196_FEATURES */ 172 172
+3 -3
arch/s390/include/asm/cpu_mf.h
··· 171 171 { 172 172 int rc = -EINVAL; 173 173 174 - asm volatile ( 174 + asm_inline volatile ( 175 175 "0: qctri %1\n" 176 176 "1: lhi %0,0\n" 177 177 "2:\n" ··· 185 185 { 186 186 int cc; 187 187 188 - asm volatile ( 188 + asm_inline volatile ( 189 189 " lcctl %[ctl]\n" 190 190 CC_IPM(cc) 191 191 : CC_OUT(cc, cc) ··· 200 200 u64 _content; 201 201 int cc; 202 202 203 - asm volatile ( 203 + asm_inline volatile ( 204 204 " ecctr %[_content],%[ctr]\n" 205 205 CC_IPM(cc) 206 206 : CC_OUT(cc, cc), [_content] "=d" (_content)
+14
arch/s390/include/asm/cpufeature.h
··· 9 9 #ifndef __ASM_S390_CPUFEATURE_H 10 10 #define __ASM_S390_CPUFEATURE_H 11 11 12 + #include <asm/facility.h> 13 + 12 14 enum { 13 15 S390_CPU_FEATURE_MSA, 14 16 S390_CPU_FEATURE_VXRS, ··· 21 19 #define cpu_feature(feature) (feature) 22 20 23 21 int cpu_have_feature(unsigned int nr); 22 + 23 + #define cpu_has_bear() test_facility(193) 24 + #define cpu_has_edat1() test_facility(8) 25 + #define cpu_has_edat2() test_facility(78) 26 + #define cpu_has_gs() test_facility(133) 27 + #define cpu_has_idte() test_facility(3) 28 + #define cpu_has_nx() test_facility(130) 29 + #define cpu_has_rdp() test_facility(194) 30 + #define cpu_has_seq_insn() test_facility(85) 31 + #define cpu_has_tlb_lc() test_facility(51) 32 + #define cpu_has_topology() test_facility(11) 33 + #define cpu_has_vx() test_facility(129) 24 34 25 35 #endif /* __ASM_S390_CPUFEATURE_H */
+17 -1
arch/s390/include/asm/current.h
··· 11 11 #define _S390_CURRENT_H 12 12 13 13 #include <asm/lowcore.h> 14 + #include <asm/machine.h> 14 15 15 16 struct task_struct; 16 17 17 - #define current ((struct task_struct *const)get_lowcore()->current_task) 18 + static __always_inline struct task_struct *get_current(void) 19 + { 20 + unsigned long ptr, lc_current; 21 + 22 + lc_current = offsetof(struct lowcore, current_task); 23 + asm_inline( 24 + ALTERNATIVE(" lg %[ptr],%[offzero](%%r0)\n", 25 + " lg %[ptr],%[offalt](%%r0)\n", 26 + ALT_FEATURE(MFEATURE_LOWCORE)) 27 + : [ptr] "=d" (ptr) 28 + : [offzero] "i" (lc_current), 29 + [offalt] "i" (lc_current + LOWCORE_ALT_ADDRESS)); 30 + return (struct task_struct *)ptr; 31 + } 32 + 33 + #define current get_current() 18 34 19 35 #endif /* !(_S390_CURRENT_H) */
+1 -1
arch/s390/include/asm/diag.h
··· 66 66 end_addr = pfn_to_phys(start_pfn + num_pfn - 1); 67 67 68 68 diag_stat_inc(DIAG_STAT_X010); 69 - asm volatile( 69 + asm_inline volatile( 70 70 "0: diag %0,%1,0x10\n" 71 71 "1: nopr %%r7\n" 72 72 EX_TABLE(0b, 1b)
-32
arch/s390/include/asm/elf.h
··· 158 158 #define ELF_DATA ELFDATA2MSB 159 159 #define ELF_ARCH EM_S390 160 160 161 - /* s390 specific phdr types */ 162 - #define PT_S390_PGSTE 0x70000000 163 - 164 161 /* 165 162 * ELF register definitions.. 166 163 */ ··· 187 190 (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \ 188 191 && (x)->e_ident[EI_CLASS] == ELF_CLASS) 189 192 #define compat_start_thread start_thread31 190 - 191 - struct arch_elf_state { 192 - int rc; 193 - }; 194 - 195 - #define INIT_ARCH_ELF_STATE { .rc = 0 } 196 - 197 - #define arch_check_elf(ehdr, interp, interp_ehdr, state) (0) 198 - #ifdef CONFIG_PGSTE 199 - #define arch_elf_pt_proc(ehdr, phdr, elf, interp, state) \ 200 - ({ \ 201 - struct arch_elf_state *_state = state; \ 202 - if ((phdr)->p_type == PT_S390_PGSTE && \ 203 - !page_table_allocate_pgste && \ 204 - !test_thread_flag(TIF_PGSTE) && \ 205 - !current->mm->context.alloc_pgste) { \ 206 - set_thread_flag(TIF_PGSTE); \ 207 - set_pt_regs_flag(task_pt_regs(current), \ 208 - PIF_EXECVE_PGSTE_RESTART); \ 209 - _state->rc = -EAGAIN; \ 210 - } \ 211 - _state->rc; \ 212 - }) 213 - #else 214 - #define arch_elf_pt_proc(ehdr, phdr, elf, interp, state) \ 215 - ({ \ 216 - (state)->rc; \ 217 - }) 218 - #endif 219 193 220 194 /* For SVR4/S390 the function pointer to be registered with `atexit` is 221 195 passed in R14. */
+1 -6
arch/s390/include/asm/fpu.h
··· 44 44 #ifndef _ASM_S390_FPU_H 45 45 #define _ASM_S390_FPU_H 46 46 47 + #include <linux/cpufeature.h> 47 48 #include <linux/processor.h> 48 49 #include <linux/preempt.h> 49 50 #include <linux/string.h> ··· 52 51 #include <asm/sigcontext.h> 53 52 #include <asm/fpu-types.h> 54 53 #include <asm/fpu-insn.h> 55 - #include <asm/facility.h> 56 - 57 - static inline bool cpu_has_vx(void) 58 - { 59 - return likely(test_facility(129)); 60 - } 61 54 62 55 enum { 63 56 KERNEL_FPC_BIT = 0,
+2 -1
arch/s390/include/asm/hugetlb.h
··· 9 9 #ifndef _ASM_S390_HUGETLB_H 10 10 #define _ASM_S390_HUGETLB_H 11 11 12 + #include <linux/cpufeature.h> 12 13 #include <linux/pgtable.h> 13 14 #include <linux/swap.h> 14 15 #include <linux/swapops.h> 15 16 #include <asm/page.h> 16 17 17 - #define hugepages_supported() (MACHINE_HAS_EDAT1) 18 + #define hugepages_supported() cpu_has_edat1() 18 19 19 20 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT 20 21 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-2
arch/s390/include/asm/io.h
··· 34 34 35 35 #define ioremap_wc(addr, size) \ 36 36 ioremap_prot((addr), (size), pgprot_val(pgprot_writecombine(PAGE_KERNEL))) 37 - #define ioremap_wt(addr, size) \ 38 - ioremap_prot((addr), (size), pgprot_val(pgprot_writethrough(PAGE_KERNEL))) 39 37 40 38 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) 41 39 {
+3 -14
arch/s390/include/asm/kfence.h
··· 12 12 13 13 static __always_inline bool arch_kfence_init_pool(void) 14 14 { 15 - return true; 16 - } 17 - 18 - #define arch_kfence_test_address(addr) ((addr) & PAGE_MASK) 19 - 20 - /* 21 - * Do not split kfence pool to 4k mapping with arch_kfence_init_pool(), 22 - * but earlier where page table allocations still happen with memblock. 23 - * Reason is that arch_kfence_init_pool() gets called when the system 24 - * is still in a limbo state - disabling and enabling bottom halves is 25 - * not yet allowed, but that is what our page_table_alloc() would do. 26 - */ 27 - static __always_inline void kfence_split_mapping(void) 28 - { 29 15 #ifdef CONFIG_KFENCE 30 16 unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT; 31 17 32 18 set_memory_4k((unsigned long)__kfence_pool, pool_pages); 33 19 #endif 20 + return true; 34 21 } 22 + 23 + #define arch_kfence_test_address(addr) ((addr) & PAGE_MASK) 35 24 36 25 static inline bool kfence_protect_page(unsigned long addr, bool protect) 37 26 {
+11 -7
arch/s390/include/asm/lowcore.h
··· 10 10 #define _ASM_S390_LOWCORE_H 11 11 12 12 #include <linux/types.h> 13 + #include <asm/machine.h> 13 14 #include <asm/ptrace.h> 14 15 #include <asm/ctlreg.h> 15 16 #include <asm/cpu.h> ··· 127 126 __u64 int_clock; /* 0x0318 */ 128 127 __u8 pad_0x0320[0x0328-0x0320]; /* 0x0320 */ 129 128 __u64 clock_comparator; /* 0x0328 */ 130 - __u64 boot_clock[2]; /* 0x0330 */ 129 + __u8 pad_0x0330[0x0340-0x0330]; /* 0x0330 */ 131 130 132 131 /* Current process. */ 133 132 __u64 current_task; /* 0x0340 */ ··· 223 222 224 223 if (__is_defined(__DECOMPRESSOR)) 225 224 return NULL; 226 - asm(ALTERNATIVE("llilh %[lc],0", "llilh %[lc],%[alt]", ALT_LOWCORE) 227 - : [lc] "=d" (lc) 228 - : [alt] "i" (LOWCORE_ALT_ADDRESS >> 16)); 225 + asm_inline( 226 + ALTERNATIVE(" lghi %[lc],0", 227 + " llilh %[lc],%[alt]", 228 + ALT_FEATURE(MFEATURE_LOWCORE)) 229 + : [lc] "=d" (lc) 230 + : [alt] "i" (LOWCORE_ALT_ADDRESS >> 16)); 229 231 return lc; 230 232 } 231 233 ··· 242 238 #else /* __ASSEMBLY__ */ 243 239 244 240 .macro GET_LC reg 245 - ALTERNATIVE "llilh \reg,0", \ 241 + ALTERNATIVE "lghi \reg,0", \ 246 242 __stringify(llilh \reg, LOWCORE_ALT_ADDRESS >> 16), \ 247 - ALT_LOWCORE 243 + ALT_FEATURE(MFEATURE_LOWCORE) 248 244 .endm 249 245 250 246 .macro STMG_LC start, end, savearea 251 247 ALTERNATIVE "stmg \start, \end, \savearea", \ 252 248 __stringify(stmg \start, \end, LOWCORE_ALT_ADDRESS + \savearea), \ 253 - ALT_LOWCORE 249 + ALT_FEATURE(MFEATURE_LOWCORE) 254 250 .endm 255 251 256 252 #endif /* __ASSEMBLY__ */
+103
arch/s390/include/asm/machine.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright IBM Corp. 2024 4 + */ 5 + 6 + #ifndef __ASM_S390_MACHINE_H 7 + #define __ASM_S390_MACHINE_H 8 + 9 + #include <linux/const.h> 10 + 11 + #define MFEATURE_LOWCORE 0 12 + #define MFEATURE_PCI_MIO 1 13 + #define MFEATURE_SCC 2 14 + #define MFEATURE_TLB_GUEST 3 15 + #define MFEATURE_TX 4 16 + #define MFEATURE_ESOP 5 17 + #define MFEATURE_DIAG9C 6 18 + #define MFEATURE_VM 7 19 + #define MFEATURE_KVM 8 20 + #define MFEATURE_LPAR 9 21 + 22 + #ifndef __ASSEMBLY__ 23 + 24 + #include <linux/bitops.h> 25 + #include <asm/alternative.h> 26 + 27 + extern unsigned long machine_features[1]; 28 + 29 + #define MAX_MFEATURE_BIT (sizeof(machine_features) * BITS_PER_BYTE) 30 + 31 + static inline void __set_machine_feature(unsigned int nr, unsigned long *mfeatures) 32 + { 33 + if (nr >= MAX_MFEATURE_BIT) 34 + return; 35 + __set_bit(nr, mfeatures); 36 + } 37 + 38 + static inline void set_machine_feature(unsigned int nr) 39 + { 40 + __set_machine_feature(nr, machine_features); 41 + } 42 + 43 + static inline void __clear_machine_feature(unsigned int nr, unsigned long *mfeatures) 44 + { 45 + if (nr >= MAX_MFEATURE_BIT) 46 + return; 47 + __clear_bit(nr, mfeatures); 48 + } 49 + 50 + static inline void clear_machine_feature(unsigned int nr) 51 + { 52 + __clear_machine_feature(nr, machine_features); 53 + } 54 + 55 + static bool __test_machine_feature(unsigned int nr, unsigned long *mfeatures) 56 + { 57 + if (nr >= MAX_MFEATURE_BIT) 58 + return false; 59 + return test_bit(nr, mfeatures); 60 + } 61 + 62 + static bool test_machine_feature(unsigned int nr) 63 + { 64 + return __test_machine_feature(nr, machine_features); 65 + } 66 + 67 + static __always_inline bool __test_machine_feature_constant(unsigned int nr) 68 + { 69 + asm goto( 70 + ALTERNATIVE("brcl 15,%l[l_no]", "brcl 0,0", ALT_FEATURE(%[nr])) 71 + : 72 + : [nr] "i" (nr) 73 + : 74 + : l_no); 75 + return true; 76 + l_no: 77 + return false; 78 + } 79 + 80 + #define 
DEFINE_MACHINE_HAS_FEATURE(name, feature) \ 81 + static __always_inline bool machine_has_##name(void) \ 82 + { \ 83 + if (!__is_defined(__DECOMPRESSOR) && __builtin_constant_p(feature)) \ 84 + return __test_machine_feature_constant(feature); \ 85 + return test_machine_feature(feature); \ 86 + } 87 + 88 + DEFINE_MACHINE_HAS_FEATURE(relocated_lowcore, MFEATURE_LOWCORE) 89 + DEFINE_MACHINE_HAS_FEATURE(scc, MFEATURE_SCC) 90 + DEFINE_MACHINE_HAS_FEATURE(tlb_guest, MFEATURE_TLB_GUEST) 91 + DEFINE_MACHINE_HAS_FEATURE(tx, MFEATURE_TX) 92 + DEFINE_MACHINE_HAS_FEATURE(esop, MFEATURE_ESOP) 93 + DEFINE_MACHINE_HAS_FEATURE(diag9c, MFEATURE_DIAG9C) 94 + DEFINE_MACHINE_HAS_FEATURE(vm, MFEATURE_VM) 95 + DEFINE_MACHINE_HAS_FEATURE(kvm, MFEATURE_KVM) 96 + DEFINE_MACHINE_HAS_FEATURE(lpar, MFEATURE_LPAR) 97 + 98 + #define machine_is_vm machine_has_vm 99 + #define machine_is_kvm machine_has_kvm 100 + #define machine_is_lpar machine_has_lpar 101 + 102 + #endif /* __ASSEMBLY__ */ 103 + #endif /* __ASM_S390_MACHINE_H */
-3
arch/s390/include/asm/mmu.h
··· 22 22 * The following bitfields need a down_write on the mm 23 23 * semaphore when they are written to. As they are only 24 24 * written once, they can be read without a lock. 25 - * 26 - * The mmu context allocates 4K page tables. 27 25 */ 28 - unsigned int alloc_pgste:1; 29 26 /* The mmu context uses extended page tables. */ 30 27 unsigned int has_pgste:1; 31 28 /* The mmu context uses storage keys. */
-3
arch/s390/include/asm/mmu_context.h
··· 29 29 mm->context.gmap_asce = 0; 30 30 mm->context.flush_mm = 0; 31 31 #ifdef CONFIG_PGSTE 32 - mm->context.alloc_pgste = page_table_allocate_pgste || 33 - test_thread_flag(TIF_PGSTE) || 34 - (current->mm && current->mm->context.alloc_pgste); 35 32 mm->context.has_pgste = 0; 36 33 mm->context.uses_skeys = 0; 37 34 mm->context.uses_cmm = 0;
+39 -32
arch/s390/include/asm/page.h
··· 71 71 #define vma_alloc_zeroed_movable_folio(vma, vaddr) \ 72 72 vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr) 73 73 74 - /* 75 - * These are used to make use of C type-checking.. 76 - */ 74 + #ifdef CONFIG_STRICT_MM_TYPECHECKS 75 + #define STRICT_MM_TYPECHECKS 76 + #endif 77 + 78 + #ifdef STRICT_MM_TYPECHECKS 77 79 78 80 typedef struct { unsigned long pgprot; } pgprot_t; 79 81 typedef struct { unsigned long pgste; } pgste_t; ··· 84 82 typedef struct { unsigned long pud; } pud_t; 85 83 typedef struct { unsigned long p4d; } p4d_t; 86 84 typedef struct { unsigned long pgd; } pgd_t; 85 + 86 + #define DEFINE_PGVAL_FUNC(name) \ 87 + static __always_inline unsigned long name ## _val(name ## _t name) \ 88 + { \ 89 + return name.name; \ 90 + } 91 + 92 + #else /* STRICT_MM_TYPECHECKS */ 93 + 94 + typedef unsigned long pgprot_t; 95 + typedef unsigned long pgste_t; 96 + typedef unsigned long pte_t; 97 + typedef unsigned long pmd_t; 98 + typedef unsigned long pud_t; 99 + typedef unsigned long p4d_t; 100 + typedef unsigned long pgd_t; 101 + 102 + #define DEFINE_PGVAL_FUNC(name) \ 103 + static __always_inline unsigned long name ## _val(name ## _t name) \ 104 + { \ 105 + return name; \ 106 + } 107 + 108 + #endif /* STRICT_MM_TYPECHECKS */ 109 + 110 + DEFINE_PGVAL_FUNC(pgprot) 111 + DEFINE_PGVAL_FUNC(pgste) 112 + DEFINE_PGVAL_FUNC(pte) 113 + DEFINE_PGVAL_FUNC(pmd) 114 + DEFINE_PGVAL_FUNC(pud) 115 + DEFINE_PGVAL_FUNC(p4d) 116 + DEFINE_PGVAL_FUNC(pgd) 117 + 87 118 typedef pte_t *pgtable_t; 88 119 89 - #define pgprot_val(x) ((x).pgprot) 90 - #define pgste_val(x) ((x).pgste) 91 - 92 - static inline unsigned long pte_val(pte_t pte) 93 - { 94 - return pte.pte; 95 - } 96 - 97 - static inline unsigned long pmd_val(pmd_t pmd) 98 - { 99 - return pmd.pmd; 100 - } 101 - 102 - static inline unsigned long pud_val(pud_t pud) 103 - { 104 - return pud.pud; 105 - } 106 - 107 - static inline unsigned long p4d_val(p4d_t p4d) 108 - { 109 - return p4d.p4d; 110 - } 111 - 112 - 
static inline unsigned long pgd_val(pgd_t pgd) 113 - { 114 - return pgd.pgd; 115 - } 116 - 120 + #define __pgprot(x) ((pgprot_t) { (x) } ) 117 121 #define __pgste(x) ((pgste_t) { (x) } ) 118 122 #define __pte(x) ((pte_t) { (x) } ) 119 123 #define __pmd(x) ((pmd_t) { (x) } ) 120 124 #define __pud(x) ((pud_t) { (x) } ) 121 125 #define __p4d(x) ((p4d_t) { (x) } ) 122 126 #define __pgd(x) ((pgd_t) { (x) } ) 123 - #define __pgprot(x) ((pgprot_t) { (x) } ) 124 127 125 128 static inline void page_set_storage_key(unsigned long addr, 126 129 unsigned char skey, int mapped)
-1
arch/s390/include/asm/pgalloc.h
··· 26 26 struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm); 27 27 void page_table_free(struct mm_struct *, unsigned long *); 28 28 void page_table_free_pgste(struct ptdesc *ptdesc); 29 - extern int page_table_allocate_pgste; 30 29 31 30 static inline void crst_table_init(unsigned long *crst, unsigned long entry) 32 31 {
+11 -12
arch/s390/include/asm/pgtable.h
··· 14 14 15 15 #include <linux/sched.h> 16 16 #include <linux/mm_types.h> 17 + #include <linux/cpufeature.h> 17 18 #include <linux/page-flags.h> 18 19 #include <linux/radix-tree.h> 19 20 #include <linux/atomic.h> ··· 584 583 return 0; 585 584 } 586 585 587 - static inline int mm_alloc_pgste(struct mm_struct *mm) 586 + static inline pgste_t clear_pgste_bit(pgste_t pgste, unsigned long mask) 588 587 { 589 - #ifdef CONFIG_PGSTE 590 - if (unlikely(mm->context.alloc_pgste)) 591 - return 1; 592 - #endif 593 - return 0; 588 + return __pgste(pgste_val(pgste) & ~mask); 589 + } 590 + 591 + static inline pgste_t set_pgste_bit(pgste_t pgste, unsigned long mask) 592 + { 593 + return __pgste(pgste_val(pgste) | mask); 594 594 } 595 595 596 596 static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) ··· 1341 1339 * PTE does not have _PAGE_PROTECT set, to avoid unnecessary overhead. 1342 1340 * A local RDP can be used to do the flush. 1343 1341 */ 1344 - if (MACHINE_HAS_RDP && !(pte_val(*ptep) & _PAGE_PROTECT)) 1342 + if (cpu_has_rdp() && !(pte_val(*ptep) & _PAGE_PROTECT)) 1345 1343 __ptep_rdp(address, ptep, 0, 0, 1); 1346 1344 } 1347 1345 #define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault ··· 1356 1354 { 1357 1355 if (pte_same(*ptep, entry)) 1358 1356 return 0; 1359 - if (MACHINE_HAS_RDP && !mm_has_pgste(vma->vm_mm) && pte_allow_rdp(*ptep, entry)) 1357 + if (cpu_has_rdp() && !mm_has_pgste(vma->vm_mm) && pte_allow_rdp(*ptep, entry)) 1360 1358 ptep_reset_dat_prot(vma->vm_mm, addr, ptep, entry); 1361 1359 else 1362 1360 ptep_xchg_direct(vma->vm_mm, addr, ptep, entry); ··· 1403 1401 1404 1402 #define pgprot_writecombine pgprot_writecombine 1405 1403 pgprot_t pgprot_writecombine(pgprot_t prot); 1406 - 1407 - #define pgprot_writethrough pgprot_writethrough 1408 - pgprot_t pgprot_writethrough(pgprot_t prot); 1409 1404 1410 1405 #define PFN_PTE_SHIFT PAGE_SHIFT 1411 1406 ··· 1889 1890 #define has_transparent_hugepage has_transparent_hugepage 1890 1891 static inline int 
has_transparent_hugepage(void) 1891 1892 { 1892 - return MACHINE_HAS_EDAT1 ? 1 : 0; 1893 + return cpu_has_edat1() ? 1 : 0; 1893 1894 } 1894 1895 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1895 1896
+5 -1
arch/s390/include/asm/processor.h
··· 416 416 417 417 static __always_inline void bpon(void) 418 418 { 419 - asm volatile(ALTERNATIVE("nop", ".insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82))); 419 + asm_inline volatile( 420 + ALTERNATIVE(" nop\n", 421 + " .insn rrf,0xb2e80000,0,0,13,0\n", 422 + ALT_SPEC(82)) 423 + ); 420 424 } 421 425 422 426 #endif /* __ASSEMBLY__ */
-2
arch/s390/include/asm/ptrace.h
··· 12 12 #include <asm/tpi.h> 13 13 14 14 #define PIF_SYSCALL 0 /* inside a system call */ 15 - #define PIF_EXECVE_PGSTE_RESTART 1 /* restart execve for PGSTE binaries */ 16 15 #define PIF_SYSCALL_RET_SET 2 /* return value was set via ptrace */ 17 16 #define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */ 18 17 #define PIF_FTRACE_FULL_REGS 4 /* all register contents valid (ftrace) */ 19 18 20 19 #define _PIF_SYSCALL BIT(PIF_SYSCALL) 21 - #define _PIF_EXECVE_PGSTE_RESTART BIT(PIF_EXECVE_PGSTE_RESTART) 22 20 #define _PIF_SYSCALL_RET_SET BIT(PIF_SYSCALL_RET_SET) 23 21 #define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT) 24 22 #define _PIF_FTRACE_FULL_REGS BIT(PIF_FTRACE_FULL_REGS)
+1
arch/s390/include/asm/sclp.h
··· 168 168 int sclp_early_get_core_info(struct sclp_core_info *info); 169 169 void sclp_early_get_ipl_info(struct sclp_ipl_info *info); 170 170 void sclp_early_detect(void); 171 + void sclp_early_detect_machine_features(void); 171 172 void sclp_early_printk(const char *s); 172 173 void __sclp_early_printk(const char *s, unsigned int len); 173 174 void sclp_emergency_printk(const char *s);
-42
arch/s390/include/asm/setup.h
··· 13 13 #define PARMAREA 0x10400 14 14 15 15 #define COMMAND_LINE_SIZE CONFIG_COMMAND_LINE_SIZE 16 - /* 17 - * Machine features detected in early.c 18 - */ 19 - 20 - #define MACHINE_FLAG_VM BIT(0) 21 - #define MACHINE_FLAG_KVM BIT(1) 22 - #define MACHINE_FLAG_LPAR BIT(2) 23 - #define MACHINE_FLAG_DIAG9C BIT(3) 24 - #define MACHINE_FLAG_ESOP BIT(4) 25 - #define MACHINE_FLAG_IDTE BIT(5) 26 - #define MACHINE_FLAG_EDAT1 BIT(7) 27 - #define MACHINE_FLAG_EDAT2 BIT(8) 28 - #define MACHINE_FLAG_TOPOLOGY BIT(10) 29 - #define MACHINE_FLAG_TE BIT(11) 30 - #define MACHINE_FLAG_TLB_LC BIT(12) 31 - #define MACHINE_FLAG_TLB_GUEST BIT(14) 32 - #define MACHINE_FLAG_NX BIT(15) 33 - #define MACHINE_FLAG_GS BIT(16) 34 - #define MACHINE_FLAG_SCC BIT(17) 35 - #define MACHINE_FLAG_PCI_MIO BIT(18) 36 - #define MACHINE_FLAG_RDP BIT(19) 37 - #define MACHINE_FLAG_SEQ_INSN BIT(20) 38 16 39 17 #define LPP_MAGIC BIT(31) 40 18 #define LPP_PID_MASK _AC(0xffffffff, UL) ··· 55 77 56 78 /* The Write Back bit position in the physaddr is given by the SLPC PCI */ 57 79 extern unsigned long mio_wb_bit_mask; 58 - 59 - #define MACHINE_IS_VM (get_lowcore()->machine_flags & MACHINE_FLAG_VM) 60 - #define MACHINE_IS_KVM (get_lowcore()->machine_flags & MACHINE_FLAG_KVM) 61 - #define MACHINE_IS_LPAR (get_lowcore()->machine_flags & MACHINE_FLAG_LPAR) 62 - 63 - #define MACHINE_HAS_DIAG9C (get_lowcore()->machine_flags & MACHINE_FLAG_DIAG9C) 64 - #define MACHINE_HAS_ESOP (get_lowcore()->machine_flags & MACHINE_FLAG_ESOP) 65 - #define MACHINE_HAS_IDTE (get_lowcore()->machine_flags & MACHINE_FLAG_IDTE) 66 - #define MACHINE_HAS_EDAT1 (get_lowcore()->machine_flags & MACHINE_FLAG_EDAT1) 67 - #define MACHINE_HAS_EDAT2 (get_lowcore()->machine_flags & MACHINE_FLAG_EDAT2) 68 - #define MACHINE_HAS_TOPOLOGY (get_lowcore()->machine_flags & MACHINE_FLAG_TOPOLOGY) 69 - #define MACHINE_HAS_TE (get_lowcore()->machine_flags & MACHINE_FLAG_TE) 70 - #define MACHINE_HAS_TLB_LC (get_lowcore()->machine_flags & MACHINE_FLAG_TLB_LC) 71 
- #define MACHINE_HAS_TLB_GUEST (get_lowcore()->machine_flags & MACHINE_FLAG_TLB_GUEST) 72 - #define MACHINE_HAS_NX (get_lowcore()->machine_flags & MACHINE_FLAG_NX) 73 - #define MACHINE_HAS_GS (get_lowcore()->machine_flags & MACHINE_FLAG_GS) 74 - #define MACHINE_HAS_SCC (get_lowcore()->machine_flags & MACHINE_FLAG_SCC) 75 - #define MACHINE_HAS_PCI_MIO (get_lowcore()->machine_flags & MACHINE_FLAG_PCI_MIO) 76 - #define MACHINE_HAS_RDP (get_lowcore()->machine_flags & MACHINE_FLAG_RDP) 77 - #define MACHINE_HAS_SEQ_INSN (get_lowcore()->machine_flags & MACHINE_FLAG_SEQ_INSN) 78 80 79 81 /* 80 82 * Console mode. Override with conmode=
+21 -3
arch/s390/include/asm/smp.h
··· 7 7 #ifndef __ASM_SMP_H 8 8 #define __ASM_SMP_H 9 9 10 - #include <asm/sigp.h> 11 - #include <asm/lowcore.h> 12 10 #include <asm/processor.h> 11 + #include <asm/lowcore.h> 12 + #include <asm/machine.h> 13 + #include <asm/sigp.h> 13 14 14 - #define raw_smp_processor_id() (get_lowcore()->cpu_nr) 15 + static __always_inline unsigned int raw_smp_processor_id(void) 16 + { 17 + unsigned long lc_cpu_nr; 18 + unsigned int cpu; 19 + 20 + BUILD_BUG_ON(sizeof_field(struct lowcore, cpu_nr) != sizeof(cpu)); 21 + lc_cpu_nr = offsetof(struct lowcore, cpu_nr); 22 + asm_inline( 23 + ALTERNATIVE(" ly %[cpu],%[offzero](%%r0)\n", 24 + " ly %[cpu],%[offalt](%%r0)\n", 25 + ALT_FEATURE(MFEATURE_LOWCORE)) 26 + : [cpu] "=d" (cpu) 27 + : [offzero] "i" (lc_cpu_nr), 28 + [offalt] "i" (lc_cpu_nr + LOWCORE_ALT_ADDRESS), 29 + "m" (((struct lowcore *)0)->cpu_nr)); 30 + return cpu; 31 + } 32 + 15 33 #define arch_scale_cpu_capacity smp_cpu_get_capacity 16 34 17 35 extern struct mutex smp_cpu_state_mutex;
+18 -2
arch/s390/include/asm/spinlock.h
··· 16 16 #include <asm/processor.h> 17 17 #include <asm/alternative.h> 18 18 19 - #define SPINLOCK_LOCKVAL (get_lowcore()->spinlock_lockval) 19 + static __always_inline unsigned int spinlock_lockval(void) 20 + { 21 + unsigned long lc_lockval; 22 + unsigned int lockval; 23 + 24 + BUILD_BUG_ON(sizeof_field(struct lowcore, spinlock_lockval) != sizeof(lockval)); 25 + lc_lockval = offsetof(struct lowcore, spinlock_lockval); 26 + asm_inline( 27 + ALTERNATIVE(" ly %[lockval],%[offzero](%%r0)\n", 28 + " ly %[lockval],%[offalt](%%r0)\n", 29 + ALT_FEATURE(MFEATURE_LOWCORE)) 30 + : [lockval] "=d" (lockval) 31 + : [offzero] "i" (lc_lockval), 32 + [offalt] "i" (lc_lockval + LOWCORE_ALT_ADDRESS), 33 + "m" (((struct lowcore *)0)->spinlock_lockval)); 34 + return lockval; 35 + } 20 36 21 37 extern int spin_retry; 22 38 ··· 76 60 int old = 0; 77 61 78 62 barrier(); 79 - return likely(arch_try_cmpxchg(&lp->lock, &old, SPINLOCK_LOCKVAL)); 63 + return likely(arch_try_cmpxchg(&lp->lock, &old, spinlock_lockval())); 80 64 } 81 65 82 66 static inline void arch_spin_lock(arch_spinlock_t *lp)
+2 -4
arch/s390/include/asm/syscall.h
··· 65 65 unsigned long *args) 66 66 { 67 67 unsigned long mask = -1UL; 68 - unsigned int n = 6; 69 68 70 69 #ifdef CONFIG_COMPAT 71 70 if (test_tsk_thread_flag(task, TIF_31BIT)) 72 71 mask = 0xffffffff; 73 72 #endif 74 - while (n-- > 0) 75 - if (n > 0) 76 - args[n] = regs->gprs[2 + n] & mask; 73 + for (int i = 1; i < 6; i++) 74 + args[i] = regs->gprs[2 + i] & mask; 77 75 78 76 args[0] = regs->orig_gpr2 & mask; 79 77 }
+27 -1
arch/s390/include/asm/sysinfo.h
··· 11 11 #ifndef __ASM_S390_SYSINFO_H 12 12 #define __ASM_S390_SYSINFO_H 13 13 14 - #include <asm/bitsperlong.h> 15 14 #include <linux/uuid.h> 15 + #include <asm/bitsperlong.h> 16 + #include <asm/asm.h> 17 + 18 + /* 19 + * stsi - store system information 20 + * 21 + * Returns the current configuration level if function code 0 was specified. 22 + * Otherwise returns 0 on success or a negative value on error. 23 + */ 24 + static inline int stsi(void *sysinfo, int fc, int sel1, int sel2) 25 + { 26 + int r0 = (fc << 28) | sel1; 27 + int cc; 28 + 29 + asm volatile( 30 + " lr %%r0,%[r0]\n" 31 + " lr %%r1,%[r1]\n" 32 + " stsi %[sysinfo]\n" 33 + " lr %[r0],%%r0\n" 34 + CC_IPM(cc) 35 + : CC_OUT(cc, cc), [r0] "+d" (r0), [sysinfo] "=Q" (*(char *)sysinfo) 36 + : [r1] "d" (sel2) 37 + : CC_CLOBBER_LIST("0", "1", "memory")); 38 + if (cc == 3) 39 + return -EOPNOTSUPP; 40 + return fc ? 0 : (unsigned int)r0 >> 28; 41 + } 16 42 17 43 struct sysinfo_1_1_1 { 18 44 unsigned char p:1;
-2
arch/s390/include/asm/thread_info.h
··· 67 67 #define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling needed */ 68 68 #define TIF_UPROBE 4 /* breakpointed or single-stepping */ 69 69 #define TIF_PATCH_PENDING 5 /* pending live patching update */ 70 - #define TIF_PGSTE 6 /* New mm's will use 4K page tables */ 71 70 #define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ 72 71 #define TIF_GUARDED_STORAGE 8 /* load guarded storage control block */ 73 72 #define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */ ··· 88 89 #define _TIF_NEED_RESCHED_LAZY BIT(TIF_NEED_RESCHED_LAZY) 89 90 #define _TIF_UPROBE BIT(TIF_UPROBE) 90 91 #define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING) 91 - #define _TIF_PGSTE BIT(TIF_PGSTE) 92 92 #define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL) 93 93 #define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE) 94 94 #define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST)
+3 -2
arch/s390/include/asm/timex.h
··· 13 13 #include <linux/preempt.h> 14 14 #include <linux/time64.h> 15 15 #include <asm/lowcore.h> 16 + #include <asm/machine.h> 16 17 #include <asm/asm.h> 17 18 18 19 /* The value of the TOD clock for 1.1.1970. */ ··· 268 267 */ 269 268 static inline int tod_after(unsigned long a, unsigned long b) 270 269 { 271 - if (MACHINE_HAS_SCC) 270 + if (machine_has_scc()) 272 271 return (long) a > (long) b; 273 272 return a > b; 274 273 } ··· 282 281 */ 283 282 static inline int tod_after_eq(unsigned long a, unsigned long b) 284 283 { 285 - if (MACHINE_HAS_SCC) 284 + if (machine_has_scc()) 286 285 return (long) a >= (long) b; 287 286 return a >= b; 288 287 }
+1 -1
arch/s390/include/asm/tlb.h
··· 84 84 tlb->mm->context.flush_mm = 1; 85 85 tlb->freed_tables = 1; 86 86 tlb->cleared_pmds = 1; 87 - if (mm_alloc_pgste(tlb->mm)) 87 + if (mm_has_pgste(tlb->mm)) 88 88 gmap_unlink(tlb->mm, (unsigned long *)pte, address); 89 89 tlb_remove_ptdesc(tlb, virt_to_ptdesc(pte)); 90 90 }
+5 -3
arch/s390/include/asm/tlbflush.h
··· 2 2 #ifndef _S390_TLBFLUSH_H 3 3 #define _S390_TLBFLUSH_H 4 4 5 + #include <linux/cpufeature.h> 5 6 #include <linux/mm.h> 6 7 #include <linux/sched.h> 7 8 #include <asm/processor.h> 9 + #include <asm/machine.h> 8 10 9 11 /* 10 12 * Flush all TLB entries on the local CPU. ··· 24 22 unsigned long opt; 25 23 26 24 opt = IDTE_PTOA; 27 - if (MACHINE_HAS_TLB_GUEST) 25 + if (machine_has_tlb_guest()) 28 26 opt |= IDTE_GUEST_ASCE; 29 27 /* Global TLB flush for the mm */ 30 28 asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc"); ··· 54 52 cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask); 55 53 barrier(); 56 54 gmap_asce = READ_ONCE(mm->context.gmap_asce); 57 - if (MACHINE_HAS_IDTE && gmap_asce != -1UL) { 55 + if (cpu_has_idte() && gmap_asce != -1UL) { 58 56 if (gmap_asce) 59 57 __tlb_flush_idte(gmap_asce); 60 58 __tlb_flush_idte(mm->context.asce); ··· 68 66 69 67 static inline void __tlb_flush_kernel(void) 70 68 { 71 - if (MACHINE_HAS_IDTE) 69 + if (cpu_has_idte()) 72 70 __tlb_flush_idte(init_mm.context.asce); 73 71 else 74 72 __tlb_flush_global();
+91 -121
arch/s390/include/asm/uaccess.h
··· 13 13 /* 14 14 * User space memory access functions 15 15 */ 16 + #include <linux/pgtable.h> 16 17 #include <asm/asm-extable.h> 17 18 #include <asm/processor.h> 18 19 #include <asm/extable.h> ··· 23 22 24 23 void debug_user_asce(int exit); 25 24 26 - union oac { 27 - unsigned int val; 28 - struct { 29 - struct { 30 - unsigned short key : 4; 31 - unsigned short : 4; 32 - unsigned short as : 2; 33 - unsigned short : 4; 34 - unsigned short k : 1; 35 - unsigned short a : 1; 36 - } oac1; 37 - struct { 38 - unsigned short key : 4; 39 - unsigned short : 4; 40 - unsigned short as : 2; 41 - unsigned short : 4; 42 - unsigned short k : 1; 43 - unsigned short a : 1; 44 - } oac2; 45 - }; 46 - }; 25 + #ifdef CONFIG_KMSAN 26 + #define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory 27 + #else 28 + #define uaccess_kmsan_or_inline __always_inline 29 + #endif 47 30 48 - static __always_inline __must_check unsigned long 49 - raw_copy_from_user_key(void *to, const void __user *from, unsigned long size, unsigned long key) 31 + #define INLINE_COPY_FROM_USER 32 + #define INLINE_COPY_TO_USER 33 + 34 + static uaccess_kmsan_or_inline __must_check unsigned long 35 + raw_copy_from_user(void *to, const void __user *from, unsigned long size) 50 36 { 51 - unsigned long rem; 52 - union oac spec = { 53 - .oac2.key = key, 54 - .oac2.as = PSW_BITS_AS_SECONDARY, 55 - .oac2.k = 1, 56 - .oac2.a = 1, 57 - }; 37 + unsigned long osize; 38 + int cc; 58 39 59 - asm_inline volatile( 60 - " lr %%r0,%[spec]\n" 61 - "0: mvcos 0(%[to]),0(%[from]),%[size]\n" 62 - "1: jz 5f\n" 63 - " algr %[size],%[val]\n" 64 - " slgr %[from],%[val]\n" 65 - " slgr %[to],%[val]\n" 66 - " j 0b\n" 67 - "2: la %[rem],4095(%[from])\n" /* rem = from + 4095 */ 68 - " nr %[rem],%[val]\n" /* rem = (from + 4095) & -4096 */ 69 - " slgr %[rem],%[from]\n" 70 - " clgr %[size],%[rem]\n" /* copy crosses next page boundary? 
*/ 71 - " jnh 6f\n" 72 - "3: mvcos 0(%[to]),0(%[from]),%[rem]\n" 73 - "4: slgr %[size],%[rem]\n" 74 - " j 6f\n" 75 - "5: lghi %[size],0\n" 76 - "6:\n" 77 - EX_TABLE(0b, 2b) 78 - EX_TABLE(1b, 2b) 79 - EX_TABLE(3b, 6b) 80 - EX_TABLE(4b, 6b) 81 - : [size] "+&a" (size), [from] "+&a" (from), [to] "+&a" (to), [rem] "=&a" (rem) 82 - : [val] "a" (-4096UL), [spec] "d" (spec.val) 83 - : "cc", "memory", "0"); 84 - return size; 40 + while (1) { 41 + osize = size; 42 + asm_inline volatile( 43 + " lhi %%r0,%[spec]\n" 44 + "0: mvcos %[to],%[from],%[size]\n" 45 + "1: nopr %%r7\n" 46 + CC_IPM(cc) 47 + EX_TABLE_UA_MVCOS_FROM(0b, 0b) 48 + EX_TABLE_UA_MVCOS_FROM(1b, 0b) 49 + : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char *)to) 50 + : [spec] "I" (0x81), [from] "Q" (*(const char __user *)from) 51 + : CC_CLOBBER_LIST("memory", "0")); 52 + if (__builtin_constant_p(osize) && osize <= 4096) 53 + return osize - size; 54 + if (likely(CC_TRANSFORM(cc) == 0)) 55 + return osize - size; 56 + size -= 4096; 57 + to += 4096; 58 + from += 4096; 59 + } 85 60 } 86 61 87 - static __always_inline __must_check unsigned long 88 - raw_copy_from_user(void *to, const void __user *from, unsigned long n) 62 + static uaccess_kmsan_or_inline __must_check unsigned long 63 + raw_copy_to_user(void __user *to, const void *from, unsigned long size) 89 64 { 90 - return raw_copy_from_user_key(to, from, n, 0); 91 - } 65 + unsigned long osize; 66 + int cc; 92 67 93 - static __always_inline __must_check unsigned long 94 - raw_copy_to_user_key(void __user *to, const void *from, unsigned long size, unsigned long key) 95 - { 96 - unsigned long rem; 97 - union oac spec = { 98 - .oac1.key = key, 99 - .oac1.as = PSW_BITS_AS_SECONDARY, 100 - .oac1.k = 1, 101 - .oac1.a = 1, 102 - }; 103 - 104 - asm_inline volatile( 105 - " lr %%r0,%[spec]\n" 106 - "0: mvcos 0(%[to]),0(%[from]),%[size]\n" 107 - "1: jz 5f\n" 108 - " algr %[size],%[val]\n" 109 - " slgr %[to],%[val]\n" 110 - " slgr %[from],%[val]\n" 111 - " j 0b\n" 112 - "2: 
la %[rem],4095(%[to])\n" /* rem = to + 4095 */ 113 - " nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */ 114 - " slgr %[rem],%[to]\n" 115 - " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */ 116 - " jnh 6f\n" 117 - "3: mvcos 0(%[to]),0(%[from]),%[rem]\n" 118 - "4: slgr %[size],%[rem]\n" 119 - " j 6f\n" 120 - "5: lghi %[size],0\n" 121 - "6:\n" 122 - EX_TABLE(0b, 2b) 123 - EX_TABLE(1b, 2b) 124 - EX_TABLE(3b, 6b) 125 - EX_TABLE(4b, 6b) 126 - : [size] "+&a" (size), [to] "+&a" (to), [from] "+&a" (from), [rem] "=&a" (rem) 127 - : [val] "a" (-4096UL), [spec] "d" (spec.val) 128 - : "cc", "memory", "0"); 129 - return size; 130 - } 131 - 132 - static __always_inline __must_check unsigned long 133 - raw_copy_to_user(void __user *to, const void *from, unsigned long n) 134 - { 135 - return raw_copy_to_user_key(to, from, n, 0); 68 + while (1) { 69 + osize = size; 70 + asm_inline volatile( 71 + " llilh %%r0,%[spec]\n" 72 + "0: mvcos %[to],%[from],%[size]\n" 73 + "1: nopr %%r7\n" 74 + CC_IPM(cc) 75 + EX_TABLE_UA_MVCOS_TO(0b, 0b) 76 + EX_TABLE_UA_MVCOS_TO(1b, 0b) 77 + : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to) 78 + : [spec] "I" (0x81), [from] "Q" (*(const char *)from) 79 + : CC_CLOBBER_LIST("memory", "0")); 80 + if (__builtin_constant_p(osize) && osize <= 4096) 81 + return osize - size; 82 + if (likely(CC_TRANSFORM(cc) == 0)) 83 + return osize - size; 84 + size -= 4096; 85 + to += 4096; 86 + from += 4096; 87 + } 136 88 } 137 89 138 90 unsigned long __must_check ··· 111 157 } 112 158 113 159 int __noreturn __put_user_bad(void); 114 - 115 - #ifdef CONFIG_KMSAN 116 - #define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory 117 - #else 118 - #define uaccess_kmsan_or_inline __always_inline 119 - #endif 120 160 121 161 #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT 122 162 ··· 147 199 { \ 148 200 int rc; \ 149 201 \ 150 - asm volatile( \ 202 + asm_inline volatile( \ 151 203 " llilh %%r0,%[spec]\n" \ 152 204 "0: mvcos 
%[to],%[from],%[size]\n" \ 153 205 "1: lhi %[rc],0\n" \ ··· 263 315 { \ 264 316 int rc; \ 265 317 \ 266 - asm volatile( \ 318 + asm_inline volatile( \ 267 319 " lhi %%r0,%[spec]\n" \ 268 320 "0: mvcos %[to],%[from],%[size]\n" \ 269 321 "1: lhi %[rc],0\n" \ ··· 363 415 364 416 long __must_check strnlen_user(const char __user *src, long count); 365 417 366 - /* 367 - * Zero Userspace 368 - */ 369 - unsigned long __must_check __clear_user(void __user *to, unsigned long size); 418 + static uaccess_kmsan_or_inline __must_check unsigned long 419 + __clear_user(void __user *to, unsigned long size) 420 + { 421 + unsigned long osize; 422 + int cc; 370 423 371 - static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) 424 + while (1) { 425 + osize = size; 426 + asm_inline volatile( 427 + " llilh %%r0,%[spec]\n" 428 + "0: mvcos %[to],%[from],%[size]\n" 429 + "1: nopr %%r7\n" 430 + CC_IPM(cc) 431 + EX_TABLE_UA_MVCOS_TO(0b, 0b) 432 + EX_TABLE_UA_MVCOS_TO(1b, 0b) 433 + : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to) 434 + : [spec] "I" (0x81), [from] "Q" (*(const char *)empty_zero_page) 435 + : CC_CLOBBER_LIST("memory", "0")); 436 + if (__builtin_constant_p(osize) && osize <= 4096) 437 + return osize - size; 438 + if (CC_TRANSFORM(cc) == 0) 439 + return osize - size; 440 + size -= 4096; 441 + to += 4096; 442 + } 443 + } 444 + 445 + static __always_inline unsigned long __must_check clear_user(void __user *to, unsigned long n) 372 446 { 373 447 might_fault(); 374 448 return __clear_user(to, n); ··· 490 520 _old = ((unsigned int)old & 0xff) << shift; 491 521 _new = ((unsigned int)new & 0xff) << shift; 492 522 mask = ~(0xff << shift); 493 - asm volatile( 523 + asm_inline volatile( 494 524 " spka 0(%[key])\n" 495 525 " sacf 256\n" 496 526 " llill %[count],%[max_loops]\n" ··· 538 568 _old = ((unsigned int)old & 0xffff) << shift; 539 569 _new = ((unsigned int)new & 0xffff) << shift; 540 570 mask = ~(0xffff << shift); 541 - asm volatile( 
571 + asm_inline volatile( 542 572 " spka 0(%[key])\n" 543 573 " sacf 256\n" 544 574 " llill %[count],%[max_loops]\n" ··· 580 610 case 4: { 581 611 unsigned int prev = old; 582 612 583 - asm volatile( 613 + asm_inline volatile( 584 614 " spka 0(%[key])\n" 585 615 " sacf 256\n" 586 616 "0: cs %[prev],%[new],%[address]\n" ··· 601 631 case 8: { 602 632 unsigned long prev = old; 603 633 604 - asm volatile( 634 + asm_inline volatile( 605 635 " spka 0(%[key])\n" 606 636 " sacf 256\n" 607 637 "0: csg %[prev],%[new],%[address]\n" ··· 622 652 case 16: { 623 653 __uint128_t prev = old; 624 654 625 - asm volatile( 655 + asm_inline volatile( 626 656 " spka 0(%[key])\n" 627 657 " sacf 256\n" 628 658 "0: cdsg %[prev],%[new],%[address]\n"
+1 -1
arch/s390/include/asm/word-at-a-time.h
··· 52 52 { 53 53 unsigned long data; 54 54 55 - asm volatile( 55 + asm_inline volatile( 56 56 "0: lg %[data],0(%[addr])\n" 57 57 "1: nopr %%r7\n" 58 58 EX_TABLE_ZEROPAD(0b, 1b, %[data], %[addr])
-1
arch/s390/kernel/abs_lowcore.c
··· 5 5 #include <asm/sections.h> 6 6 7 7 unsigned long __bootdata_preserved(__abs_lowcore); 8 - int __bootdata_preserved(relocate_lowcore); 9 8 10 9 int abs_lowcore_map(int cpu, struct lowcore *lc, bool alloc) 11 10 {
+57 -8
arch/s390/kernel/alternative.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 3 + #ifndef pr_fmt 4 + #define pr_fmt(fmt) "alt: " fmt 5 + #endif 6 + 3 7 #include <linux/uaccess.h> 8 + #include <linux/printk.h> 4 9 #include <asm/nospec-branch.h> 5 10 #include <asm/abs_lowcore.h> 6 11 #include <asm/alternative.h> 7 12 #include <asm/facility.h> 13 + #include <asm/sections.h> 14 + #include <asm/machine.h> 15 + 16 + #ifndef a_debug 17 + #define a_debug pr_debug 18 + #endif 19 + 20 + #ifndef __kernel_va 21 + #define __kernel_va(x) (void *)(x) 22 + #endif 23 + 24 + unsigned long __bootdata_preserved(machine_features[1]); 25 + 26 + struct alt_debug { 27 + unsigned long facilities[MAX_FACILITY_BIT / BITS_PER_LONG]; 28 + unsigned long mfeatures[MAX_MFEATURE_BIT / BITS_PER_LONG]; 29 + int spec; 30 + }; 31 + 32 + static struct alt_debug __bootdata_preserved(alt_debug); 33 + 34 + static void alternative_dump(u8 *old, u8 *new, unsigned int len, unsigned int type, unsigned int data) 35 + { 36 + char oinsn[33], ninsn[33]; 37 + unsigned long kptr; 38 + unsigned int pos; 39 + 40 + for (pos = 0; pos < len && 2 * pos < sizeof(oinsn) - 3; pos++) 41 + hex_byte_pack(&oinsn[2 * pos], old[pos]); 42 + oinsn[2 * pos] = 0; 43 + for (pos = 0; pos < len && 2 * pos < sizeof(ninsn) - 3; pos++) 44 + hex_byte_pack(&ninsn[2 * pos], new[pos]); 45 + ninsn[2 * pos] = 0; 46 + kptr = (unsigned long)__kernel_va(old); 47 + a_debug("[%d/%3d] %016lx: %s -> %s\n", type, data, kptr, oinsn, ninsn); 48 + } 8 49 9 50 void __apply_alternatives(struct alt_instr *start, struct alt_instr *end, unsigned int ctx) 10 51 { 11 - u8 *instr, *replacement; 52 + struct alt_debug *d; 12 53 struct alt_instr *a; 13 - bool replace; 54 + bool debug, replace; 55 + u8 *old, *new; 14 56 15 57 /* 16 58 * The scan order should be from start to end. A later scanned 17 59 * alternative code can overwrite previously scanned alternative code. 
18 60 */ 61 + d = &alt_debug; 19 62 for (a = start; a < end; a++) { 20 63 if (!(a->ctx & ctx)) 21 64 continue; 22 65 switch (a->type) { 23 66 case ALT_TYPE_FACILITY: 24 67 replace = test_facility(a->data); 68 + debug = __test_facility(a->data, d->facilities); 69 + break; 70 + case ALT_TYPE_FEATURE: 71 + replace = test_machine_feature(a->data); 72 + debug = __test_machine_feature(a->data, d->mfeatures); 25 73 break; 26 74 case ALT_TYPE_SPEC: 27 75 replace = nobp_enabled(); 28 - break; 29 - case ALT_TYPE_LOWCORE: 30 - replace = have_relocated_lowcore(); 76 + debug = d->spec; 31 77 break; 32 78 default: 33 79 replace = false; 80 + debug = false; 34 81 } 35 82 if (!replace) 36 83 continue; 37 - instr = (u8 *)&a->instr_offset + a->instr_offset; 38 - replacement = (u8 *)&a->repl_offset + a->repl_offset; 39 - s390_kernel_write(instr, replacement, a->instrlen); 84 + old = (u8 *)&a->instr_offset + a->instr_offset; 85 + new = (u8 *)&a->repl_offset + a->repl_offset; 86 + if (debug) 87 + alternative_dump(old, new, a->instrlen, a->type, a->data); 88 + s390_kernel_write(old, new, a->instrlen); 40 89 } 41 90 }
+3 -2
arch/s390/kernel/asm-offsets.c
··· 49 49 OFFSET(__PT_R14, pt_regs, gprs[14]); 50 50 OFFSET(__PT_R15, pt_regs, gprs[15]); 51 51 OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2); 52 + OFFSET(__PT_INT_CODE, pt_regs, int_code); 52 53 OFFSET(__PT_FLAGS, pt_regs, flags); 53 54 OFFSET(__PT_CR1, pt_regs, cr1); 54 55 OFFSET(__PT_LAST_BREAK, pt_regs, last_break); ··· 77 76 OFFSET(__LC_EXT_CPU_ADDR, lowcore, ext_cpu_addr); 78 77 OFFSET(__LC_EXT_INT_CODE, lowcore, ext_int_code); 79 78 OFFSET(__LC_PGM_ILC, lowcore, pgm_ilc); 80 - OFFSET(__LC_PGM_INT_CODE, lowcore, pgm_code); 79 + OFFSET(__LC_PGM_CODE, lowcore, pgm_code); 80 + OFFSET(__LC_PGM_INT_CODE, lowcore, pgm_int_code); 81 81 OFFSET(__LC_DATA_EXC_CODE, lowcore, data_exc_code); 82 82 OFFSET(__LC_MON_CLASS_NR, lowcore, mon_class_num); 83 83 OFFSET(__LC_PER_CODE, lowcore, per_code); ··· 124 122 OFFSET(__LC_LAST_UPDATE_TIMER, lowcore, last_update_timer); 125 123 OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock); 126 124 OFFSET(__LC_INT_CLOCK, lowcore, int_clock); 127 - OFFSET(__LC_BOOT_CLOCK, lowcore, boot_clock); 128 125 OFFSET(__LC_CURRENT, lowcore, current_task); 129 126 OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack); 130 127 OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
+1 -1
arch/s390/kernel/cert_store.c
··· 235 235 { 236 236 union register_pair rp = { .even = (unsigned long)addr, }; 237 237 238 - asm volatile( 238 + asm_inline volatile( 239 239 " diag %[rp],%[subcode],0x320\n" 240 240 "0: nopr %%r7\n" 241 241 EX_TABLE(0b, 0b)
+2 -2
arch/s390/kernel/diag/diag.c
··· 195 195 { 196 196 union register_pair rp = { .even = *subcode, .odd = size }; 197 197 198 - asm volatile( 198 + asm_inline volatile( 199 199 " diag %[addr],%[rp],0x204\n" 200 200 "0: nopr %%r7\n" 201 201 EX_TABLE(0b,0b) ··· 286 286 int rc = -EOPNOTSUPP; 287 287 288 288 diag_stat_inc(DIAG_STAT_X224); 289 - asm volatile("\n" 289 + asm_inline volatile("\n" 290 290 " diag %[type],%[addr],0x224\n" 291 291 "0: lhi %[rc],0\n" 292 292 "1:\n"
+13 -103
arch/s390/kernel/early.c
··· 8 8 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 9 10 10 #include <linux/sched/debug.h> 11 + #include <linux/cpufeature.h> 11 12 #include <linux/compiler.h> 12 13 #include <linux/init.h> 13 14 #include <linux/errno.h> ··· 22 21 #include <asm/asm-extable.h> 23 22 #include <linux/memblock.h> 24 23 #include <asm/access-regs.h> 24 + #include <asm/machine.h> 25 25 #include <asm/diag.h> 26 26 #include <asm/ebcdic.h> 27 27 #include <asm/fpu.h> ··· 38 36 #include <asm/boot_data.h> 39 37 #include "entry.h" 40 38 41 - #define decompressor_handled_param(param) \ 42 - static int __init ignore_decompressor_param_##param(char *s) \ 39 + #define __decompressor_handled_param(func, param) \ 40 + static int __init ignore_decompressor_param_##func(char *s) \ 43 41 { \ 44 42 return 0; \ 45 43 } \ 46 - early_param(#param, ignore_decompressor_param_##param) 44 + early_param(#param, ignore_decompressor_param_##func) 45 + 46 + #define decompressor_handled_param(param) __decompressor_handled_param(param, param) 47 47 48 48 decompressor_handled_param(mem); 49 49 decompressor_handled_param(vmalloc); ··· 55 51 decompressor_handled_param(cmma); 56 52 decompressor_handled_param(relocate_lowcore); 57 53 decompressor_handled_param(bootdebug); 54 + __decompressor_handled_param(debug_alternative, debug-alternative); 58 55 #if IS_ENABLED(CONFIG_KVM) 59 56 decompressor_handled_param(prot_virt); 60 57 #endif ··· 66 61 init_task.kasan_depth = 0; 67 62 pr_info("KernelAddressSanitizer initialized\n"); 68 63 #endif 69 - } 70 - 71 - static void __init reset_tod_clock(void) 72 - { 73 - union tod_clock clk; 74 - 75 - if (store_tod_clock_ext_cc(&clk) == 0) 76 - return; 77 - /* TOD clock not running. Set the clock to Unix Epoch. 
*/ 78 - if (set_tod_clock(TOD_UNIX_EPOCH) || store_tod_clock_ext_cc(&clk)) 79 - disabled_wait(); 80 - 81 - memset(&tod_clock_base, 0, sizeof(tod_clock_base)); 82 - tod_clock_base.tod = TOD_UNIX_EPOCH; 83 - get_lowcore()->last_update_clock = TOD_UNIX_EPOCH; 84 64 } 85 65 86 66 /* ··· 85 95 } 86 96 87 97 static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE); 88 - 89 - static noinline __init void detect_machine_type(void) 90 - { 91 - struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page; 92 - 93 - /* Check current-configuration-level */ 94 - if (stsi(NULL, 0, 0, 0) <= 2) { 95 - get_lowcore()->machine_flags |= MACHINE_FLAG_LPAR; 96 - return; 97 - } 98 - /* Get virtual-machine cpu information. */ 99 - if (stsi(vmms, 3, 2, 2) || !vmms->count) 100 - return; 101 - 102 - /* Detect known hypervisors */ 103 - if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3)) 104 - get_lowcore()->machine_flags |= MACHINE_FLAG_KVM; 105 - else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4)) 106 - get_lowcore()->machine_flags |= MACHINE_FLAG_VM; 107 - } 108 98 109 99 /* Remove leading, trailing and double whitespace. */ 110 100 static inline void strim_all(char *str) ··· 126 156 strim_all(hvstr); 127 157 } else { 128 158 sprintf(hvstr, "%s", 129 - MACHINE_IS_LPAR ? "LPAR" : 130 - MACHINE_IS_VM ? "z/VM" : 131 - MACHINE_IS_KVM ? "KVM" : "unknown"); 159 + machine_is_lpar() ? "LPAR" : 160 + machine_is_vm() ? "z/VM" : 161 + machine_is_kvm() ? 
"KVM" : "unknown"); 132 162 } 133 163 dump_stack_set_arch_desc("%s (%s)", mstr, hvstr); 134 164 } ··· 137 167 { 138 168 int max_mnest; 139 169 140 - if (!test_facility(11)) 170 + if (!cpu_has_topology()) 141 171 return; 142 - get_lowcore()->machine_flags |= MACHINE_FLAG_TOPOLOGY; 143 172 for (max_mnest = 6; max_mnest > 1; max_mnest--) { 144 173 if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0) 145 174 break; ··· 187 218 lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); 188 219 } 189 220 190 - static __init void detect_diag9c(void) 191 - { 192 - unsigned int cpu_address; 193 - int rc; 194 - 195 - cpu_address = stap(); 196 - diag_stat_inc(DIAG_STAT_X09C); 197 - asm volatile( 198 - " diag %2,0,0x9c\n" 199 - "0: la %0,0\n" 200 - "1:\n" 201 - EX_TABLE(0b,1b) 202 - : "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc"); 203 - if (!rc) 204 - get_lowcore()->machine_flags |= MACHINE_FLAG_DIAG9C; 205 - } 206 - 207 - static __init void detect_machine_facilities(void) 208 - { 209 - if (test_facility(8)) { 210 - get_lowcore()->machine_flags |= MACHINE_FLAG_EDAT1; 211 - system_ctl_set_bit(0, CR0_EDAT_BIT); 212 - } 213 - if (test_facility(78)) 214 - get_lowcore()->machine_flags |= MACHINE_FLAG_EDAT2; 215 - if (test_facility(3)) 216 - get_lowcore()->machine_flags |= MACHINE_FLAG_IDTE; 217 - if (test_facility(50) && test_facility(73)) { 218 - get_lowcore()->machine_flags |= MACHINE_FLAG_TE; 219 - system_ctl_set_bit(0, CR0_TRANSACTIONAL_EXECUTION_BIT); 220 - } 221 - if (test_facility(51)) 222 - get_lowcore()->machine_flags |= MACHINE_FLAG_TLB_LC; 223 - if (test_facility(129)) 224 - system_ctl_set_bit(0, CR0_VECTOR_BIT); 225 - if (test_facility(130)) 226 - get_lowcore()->machine_flags |= MACHINE_FLAG_NX; 227 - if (test_facility(133)) 228 - get_lowcore()->machine_flags |= MACHINE_FLAG_GS; 229 - if (test_facility(139) && (tod_clock_base.tod >> 63)) { 230 - /* Enabled signed clock comparator comparisons */ 231 - get_lowcore()->machine_flags |= MACHINE_FLAG_SCC; 232 - 
clock_comparator_max = -1ULL >> 1; 233 - system_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SIGN_BIT); 234 - } 235 - if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) { 236 - get_lowcore()->machine_flags |= MACHINE_FLAG_PCI_MIO; 237 - /* the control bit is set during PCI initialization */ 238 - } 239 - if (test_facility(194)) 240 - get_lowcore()->machine_flags |= MACHINE_FLAG_RDP; 241 - if (test_facility(85)) 242 - get_lowcore()->machine_flags |= MACHINE_FLAG_SEQ_INSN; 243 - } 244 - 245 221 static inline void save_vector_registers(void) 246 222 { 247 223 #ifdef CONFIG_CRASH_DUMP 248 - if (test_facility(129)) 224 + if (cpu_has_vx()) 249 225 save_vx_regs(boot_cpu_vector_save_area); 250 226 #endif 251 227 } ··· 222 308 void __init startup_init(void) 223 309 { 224 310 kasan_early_init(); 225 - reset_tod_clock(); 226 311 time_early_init(); 227 312 init_kernel_storage_key(); 228 313 lockdep_off(); 229 314 sort_amode31_extable(); 230 315 setup_lowcore_early(); 231 - detect_machine_type(); 232 316 setup_arch_string(); 233 317 setup_boot_command_line(); 234 - detect_diag9c(); 235 - detect_machine_facilities(); 236 318 save_vector_registers(); 237 319 setup_topology(); 238 320 sclp_early_detect();
+10 -9
arch/s390/kernel/entry.S
··· 29 29 #include <asm/nmi.h> 30 30 #include <asm/nospec-insn.h> 31 31 #include <asm/lowcore.h> 32 + #include <asm/machine.h> 32 33 33 34 _LPP_OFFSET = __LC_LPP 34 35 ··· 45 44 ALTERNATIVE_2 "b \lpswe;nopr", \ 46 45 ".insn siy,0xeb0000000071,\address,0", ALT_FACILITY(193), \ 47 46 __stringify(.insn siy,0xeb0000000071,LOWCORE_ALT_ADDRESS+\address,0), \ 48 - ALT_LOWCORE 47 + ALT_FEATURE(MFEATURE_LOWCORE) 49 48 .endm 50 49 51 50 .macro MBEAR reg, lowcore ··· 68 67 clg %r14,__LC_RESTART_STACK(\lowcore) 69 68 je \oklabel 70 69 la %r14,\savearea(\lowcore) 71 - j stack_overflow 70 + j stack_invalid 72 71 .endm 73 72 74 73 /* ··· 316 315 tm __LC_PGM_ILC+3(%r13),0x80 # check for per exception 317 316 jnz .Lpgm_svcper # -> single stepped svc 318 317 2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 319 - # CHECK_VMAP_STACK branches to stack_overflow or 4f 318 + # CHECK_VMAP_STACK branches to stack_invalid or 4f 320 319 CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f 321 320 3: lg %r15,__LC_KERNEL_STACK(%r13) 322 321 4: la %r11,STACK_FRAME_OVERHEAD(%r15) ··· 591 590 .section .kprobes.text, "ax" 592 591 593 592 /* 594 - * The synchronous or the asynchronous stack overflowed. We are dead. 593 + * The synchronous or the asynchronous stack pointer is invalid. We are dead. 595 594 * No need to properly save the registers, we are going to panic anyway. 596 595 * Setup a pt_regs so that show_trace can provide a good call trace. 
597 596 */ 598 - SYM_CODE_START(stack_overflow) 597 + SYM_CODE_START(stack_invalid) 599 598 GET_LC %r15 600 599 lg %r15,__LC_NODAT_STACK(%r15) # change to panic stack 601 600 la %r11,STACK_FRAME_OVERHEAD(%r15) ··· 605 604 stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 606 605 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 607 606 lgr %r2,%r11 # pass pointer to pt_regs 608 - jg kernel_stack_overflow 609 - SYM_CODE_END(stack_overflow) 607 + jg kernel_stack_invalid 608 + SYM_CODE_END(stack_invalid) 610 609 611 610 .section .data, "aw" 612 611 .balign 4 ··· 622 621 .balign 8 623 622 #define SYSCALL(esame,emu) .quad __s390x_ ## esame 624 623 SYM_DATA_START(sys_call_table) 625 - #include "asm/syscall_table.h" 624 + #include <asm/syscall_table.h> 626 625 SYM_DATA_END(sys_call_table) 627 626 #undef SYSCALL 628 627 ··· 630 629 631 630 #define SYSCALL(esame,emu) .quad __s390_ ## emu 632 631 SYM_DATA_START(sys_call_table_emu) 633 - #include "asm/syscall_table.h" 632 + #include <asm/syscall_table.h> 634 633 SYM_DATA_END(sys_call_table_emu) 635 634 #undef SYSCALL 636 635 #endif
+1 -1
arch/s390/kernel/entry.h
··· 31 31 void do_non_secure_storage_access(struct pt_regs *regs); 32 32 void do_secure_storage_violation(struct pt_regs *regs); 33 33 void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str); 34 - void kernel_stack_overflow(struct pt_regs * regs); 34 + void kernel_stack_invalid(struct pt_regs *regs); 35 35 void handle_signal32(struct ksignal *ksig, sigset_t *oldset, 36 36 struct pt_regs *regs); 37 37
+6 -5
arch/s390/kernel/ftrace.c
··· 13 13 #include <linux/kernel.h> 14 14 #include <linux/types.h> 15 15 #include <linux/kmsan-checks.h> 16 + #include <linux/cpufeature.h> 16 17 #include <linux/kprobes.h> 17 18 #include <linux/execmem.h> 18 19 #include <trace/syscall.h> ··· 70 69 71 70 bool ftrace_need_init_nop(void) 72 71 { 73 - return !MACHINE_HAS_SEQ_INSN; 72 + return !cpu_has_seq_insn(); 74 73 } 75 74 76 75 int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) ··· 190 189 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, 191 190 unsigned long addr) 192 191 { 193 - if (MACHINE_HAS_SEQ_INSN) 192 + if (cpu_has_seq_insn()) 194 193 return ftrace_patch_branch_insn(rec->ip, old_addr, addr); 195 194 else 196 195 return ftrace_modify_trampoline_call(rec, old_addr, addr); ··· 214 213 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, 215 214 unsigned long addr) 216 215 { 217 - /* Expect brcl 0xf,... for the !MACHINE_HAS_SEQ_INSN case */ 218 - if (MACHINE_HAS_SEQ_INSN) 216 + /* Expect brcl 0xf,... for the !cpu_has_seq_insn() case */ 217 + if (cpu_has_seq_insn()) 219 218 return ftrace_patch_branch_insn(rec->ip, addr, 0); 220 219 else 221 220 return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false); ··· 235 234 236 235 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 237 236 { 238 - if (MACHINE_HAS_SEQ_INSN) 237 + if (cpu_has_seq_insn()) 239 238 return ftrace_patch_branch_insn(rec->ip, 0, addr); 240 239 else 241 240 return ftrace_make_trampoline_call(rec, addr);
+2 -1
arch/s390/kernel/guarded_storage.c
··· 4 4 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 5 5 */ 6 6 7 + #include <linux/cpufeature.h> 7 8 #include <linux/kernel.h> 8 9 #include <linux/syscalls.h> 9 10 #include <linux/signal.h> ··· 110 109 SYSCALL_DEFINE2(s390_guarded_storage, int, command, 111 110 struct gs_cb __user *, gs_cb) 112 111 { 113 - if (!MACHINE_HAS_GS) 112 + if (!cpu_has_gs()) 114 113 return -EOPNOTSUPP; 115 114 switch (command) { 116 115 case GS_ENABLE:
+1 -3
arch/s390/kernel/head64.S
··· 18 18 19 19 __HEAD 20 20 SYM_CODE_START(startup_continue) 21 - larl %r1,tod_clock_base 22 - GET_LC %r2 23 - mvc 0(16,%r1),__LC_BOOT_CLOCK(%r2) 24 21 # 25 22 # Setup stack 26 23 # 24 + GET_LC %r2 27 25 larl %r14,init_task 28 26 stg %r14,__LC_CURRENT(%r2) 29 27 larl %r15,init_thread_union+STACK_INIT_OFFSET
+2 -1
arch/s390/kernel/hiperdispatch.c
··· 45 45 * therefore delaying the throughput loss caused by using SMP threads. 46 46 */ 47 47 48 + #include <linux/cpufeature.h> 48 49 #include <linux/cpumask.h> 49 50 #include <linux/debugfs.h> 50 51 #include <linux/device.h> ··· 88 87 89 88 static int hd_set_hiperdispatch_mode(int enable) 90 89 { 91 - if (!MACHINE_HAS_TOPOLOGY) 90 + if (!cpu_has_topology()) 92 91 enable = 0; 93 92 if (hd_enabled == enable) 94 93 return 0;
+12 -11
arch/s390/kernel/ipl.c
··· 22 22 #include <linux/debug_locks.h> 23 23 #include <linux/vmalloc.h> 24 24 #include <asm/asm-extable.h> 25 + #include <asm/machine.h> 25 26 #include <asm/diag.h> 26 27 #include <asm/ipl.h> 27 28 #include <asm/smp.h> ··· 186 185 187 186 r1.even = addr; 188 187 r1.odd = 0; 189 - asm volatile( 188 + asm_inline volatile( 190 189 " diag %[r1],%[subcode],0x308\n" 191 190 "0: nopr %%r7\n" 192 191 EX_TABLE(0b,0b) ··· 686 685 goto out; 687 686 switch (ipl_info.type) { 688 687 case IPL_TYPE_CCW: 689 - if (MACHINE_IS_VM) 688 + if (machine_is_vm()) 690 689 rc = sysfs_create_group(&ipl_kset->kobj, 691 690 &ipl_ccw_attr_group_vm); 692 691 else ··· 1273 1272 ipb->ccw.flags = IPL_PB0_FLAG_LOADPARM; 1274 1273 1275 1274 /* VM PARM */ 1276 - if (MACHINE_IS_VM && ipl_block_valid && 1275 + if (machine_is_vm() && ipl_block_valid && 1277 1276 (ipl_block.ccw.vm_flags & IPL_PB0_CCW_VM_FLAG_VP)) { 1278 1277 1279 1278 ipb->ccw.vm_flags |= IPL_PB0_CCW_VM_FLAG_VP; ··· 1287 1286 { 1288 1287 int rc; 1289 1288 1290 - if (!MACHINE_IS_VM) 1289 + if (!machine_is_vm()) 1291 1290 return 0; 1292 1291 1293 1292 reipl_block_nss = (void *) get_zeroed_page(GFP_KERNEL); ··· 1312 1311 return -ENOMEM; 1313 1312 1314 1313 rc = sysfs_create_group(&reipl_kset->kobj, 1315 - MACHINE_IS_VM ? &reipl_ccw_attr_group_vm 1316 - : &reipl_ccw_attr_group_lpar); 1314 + machine_is_vm() ? 
&reipl_ccw_attr_group_vm 1315 + : &reipl_ccw_attr_group_lpar); 1317 1316 if (rc) 1318 1317 return rc; 1319 1318 ··· 1988 1987 1989 1988 static int vmcmd_init(void) 1990 1989 { 1991 - if (!MACHINE_IS_VM) 1990 + if (!machine_is_vm()) 1992 1991 return -EOPNOTSUPP; 1993 1992 vmcmd_kset = kset_create_and_add("vmcmd", NULL, firmware_kobj); 1994 1993 if (!vmcmd_kset) ··· 2265 2264 2266 2265 static int __init vmcmd_on_reboot_setup(char *str) 2267 2266 { 2268 - if (!MACHINE_IS_VM) 2267 + if (!machine_is_vm()) 2269 2268 return 1; 2270 2269 strncpy_skip_quote(vmcmd_on_reboot, str, VMCMD_MAX_SIZE); 2271 2270 vmcmd_on_reboot[VMCMD_MAX_SIZE] = 0; ··· 2276 2275 2277 2276 static int __init vmcmd_on_panic_setup(char *str) 2278 2277 { 2279 - if (!MACHINE_IS_VM) 2278 + if (!machine_is_vm()) 2280 2279 return 1; 2281 2280 strncpy_skip_quote(vmcmd_on_panic, str, VMCMD_MAX_SIZE); 2282 2281 vmcmd_on_panic[VMCMD_MAX_SIZE] = 0; ··· 2287 2286 2288 2287 static int __init vmcmd_on_halt_setup(char *str) 2289 2288 { 2290 - if (!MACHINE_IS_VM) 2289 + if (!machine_is_vm()) 2291 2290 return 1; 2292 2291 strncpy_skip_quote(vmcmd_on_halt, str, VMCMD_MAX_SIZE); 2293 2292 vmcmd_on_halt[VMCMD_MAX_SIZE] = 0; ··· 2298 2297 2299 2298 static int __init vmcmd_on_poff_setup(char *str) 2300 2299 { 2301 - if (!MACHINE_IS_VM) 2300 + if (!machine_is_vm()) 2302 2301 return 1; 2303 2302 strncpy_skip_quote(vmcmd_on_poff, str, VMCMD_MAX_SIZE); 2304 2303 vmcmd_on_poff[VMCMD_MAX_SIZE] = 0;
+5 -3
arch/s390/kernel/irq.c
··· 9 9 */ 10 10 11 11 #include <linux/kernel_stat.h> 12 + #include <linux/cpufeature.h> 12 13 #include <linux/interrupt.h> 13 14 #include <linux/seq_file.h> 14 15 #include <linux/proc_fs.h> ··· 26 25 #include <asm/irq_regs.h> 27 26 #include <asm/cputime.h> 28 27 #include <asm/lowcore.h> 28 + #include <asm/machine.h> 29 29 #include <asm/irq.h> 30 30 #include <asm/hw_irq.h> 31 31 #include <asm/stacktrace.h> ··· 150 148 151 149 if (user_mode(regs)) { 152 150 update_timer_sys(); 153 - if (static_branch_likely(&cpu_has_bear)) 151 + if (cpu_has_bear()) 154 152 current->thread.last_break = regs->last_break; 155 153 } 156 154 ··· 165 163 do_irq_async(regs, THIN_INTERRUPT); 166 164 else 167 165 do_irq_async(regs, IO_INTERRUPT); 168 - } while (MACHINE_IS_LPAR && irq_pending(regs)); 166 + } while (machine_is_lpar() && irq_pending(regs)); 169 167 170 168 irq_exit_rcu(); 171 169 ··· 186 184 187 185 if (user_mode(regs)) { 188 186 update_timer_sys(); 189 - if (static_branch_likely(&cpu_has_bear)) 187 + if (cpu_has_bear()) 190 188 current->thread.last_break = regs->last_break; 191 189 } 192 190
+3 -2
arch/s390/kernel/kprobes.c
··· 13 13 #include <linux/ptrace.h> 14 14 #include <linux/preempt.h> 15 15 #include <linux/stop_machine.h> 16 + #include <linux/cpufeature.h> 16 17 #include <linux/kdebug.h> 17 18 #include <linux/uaccess.h> 18 19 #include <linux/extable.h> ··· 154 153 { 155 154 struct swap_insn_args args = {.p = p, .arm_kprobe = 1}; 156 155 157 - if (MACHINE_HAS_SEQ_INSN) { 156 + if (cpu_has_seq_insn()) { 158 157 swap_instruction(&args); 159 158 text_poke_sync(); 160 159 } else { ··· 167 166 { 168 167 struct swap_insn_args args = {.p = p, .arm_kprobe = 0}; 169 168 170 - if (MACHINE_HAS_SEQ_INSN) { 169 + if (cpu_has_seq_insn()) { 171 170 swap_instruction(&args); 172 171 text_poke_sync(); 173 172 } else {
+4 -2
arch/s390/kernel/machine_kexec.c
··· 13 13 #include <linux/reboot.h> 14 14 #include <linux/ftrace.h> 15 15 #include <linux/debug_locks.h> 16 + #include <linux/cpufeature.h> 16 17 #include <asm/guarded_storage.h> 18 + #include <asm/machine.h> 17 19 #include <asm/pfault.h> 18 20 #include <asm/cio.h> 19 21 #include <asm/fpu.h> ··· 96 94 mcesa = __va(get_lowcore()->mcesad & MCESA_ORIGIN_MASK); 97 95 if (cpu_has_vx()) 98 96 save_vx_regs((__vector128 *) mcesa->vector_save_area); 99 - if (MACHINE_HAS_GS) { 97 + if (cpu_has_gs()) { 100 98 local_ctl_store(2, &cr2_old.reg); 101 99 cr2_new = cr2_old; 102 100 cr2_new.gse = 1; ··· 180 178 static int machine_kexec_prepare_kdump(void) 181 179 { 182 180 #ifdef CONFIG_CRASH_DUMP 183 - if (MACHINE_IS_VM) 181 + if (machine_is_vm()) 184 182 diag10_range(PFN_DOWN(crashk_res.start), 185 183 PFN_DOWN(crashk_res.end - crashk_res.start + 1)); 186 184 return 0;
+5 -4
arch/s390/kernel/nmi.c
··· 9 9 */ 10 10 11 11 #include <linux/kernel_stat.h> 12 + #include <linux/cpufeature.h> 12 13 #include <linux/init.h> 13 14 #include <linux/errno.h> 14 15 #include <linux/entry-common.h> ··· 46 45 47 46 static inline int nmi_needs_mcesa(void) 48 47 { 49 - return cpu_has_vx() || MACHINE_HAS_GS; 48 + return cpu_has_vx() || cpu_has_gs(); 50 49 } 51 50 52 51 /* ··· 62 61 if (!nmi_needs_mcesa()) 63 62 return; 64 63 *mcesad = __pa(&boot_mcesa); 65 - if (MACHINE_HAS_GS) 64 + if (cpu_has_gs()) 66 65 *mcesad |= ilog2(MCESA_MAX_SIZE); 67 66 } 68 67 ··· 74 73 *mcesad = 0; 75 74 if (!nmi_needs_mcesa()) 76 75 return 0; 77 - size = MACHINE_HAS_GS ? MCESA_MAX_SIZE : MCESA_MIN_SIZE; 76 + size = cpu_has_gs() ? MCESA_MAX_SIZE : MCESA_MIN_SIZE; 78 77 origin = kmalloc(size, GFP_KERNEL); 79 78 if (!origin) 80 79 return -ENOMEM; 81 80 /* The pointer is stored with mcesa_bits ORed in */ 82 81 kmemleak_not_leak(origin); 83 82 *mcesad = __pa(origin); 84 - if (MACHINE_HAS_GS) 83 + if (cpu_has_gs()) 85 84 *mcesad |= ilog2(MCESA_MAX_SIZE); 86 85 return 0; 87 86 }
+6 -4
arch/s390/kernel/processor.c
··· 8 8 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 9 10 10 #include <linux/stop_machine.h> 11 + #include <linux/cpufeature.h> 11 12 #include <linux/bitops.h> 12 13 #include <linux/kernel.h> 13 14 #include <linux/random.h> ··· 20 19 #include <linux/cpu.h> 21 20 #include <linux/smp.h> 22 21 #include <asm/text-patching.h> 22 + #include <asm/machine.h> 23 23 #include <asm/diag.h> 24 24 #include <asm/facility.h> 25 25 #include <asm/elf.h> ··· 211 209 elf_hwcap |= HWCAP_DFP; 212 210 213 211 /* huge page support */ 214 - if (MACHINE_HAS_EDAT1) 212 + if (cpu_has_edat1()) 215 213 elf_hwcap |= HWCAP_HPAGE; 216 214 217 215 /* 64-bit register support for 31-bit processes */ 218 216 elf_hwcap |= HWCAP_HIGH_GPRS; 219 217 220 218 /* transactional execution */ 221 - if (MACHINE_HAS_TE) 219 + if (machine_has_tx()) 222 220 elf_hwcap |= HWCAP_TE; 223 221 224 222 /* vector */ ··· 246 244 elf_hwcap |= HWCAP_NNPA; 247 245 248 246 /* guarded storage */ 249 - if (MACHINE_HAS_GS) 247 + if (cpu_has_gs()) 250 248 elf_hwcap |= HWCAP_GS; 251 249 252 - if (MACHINE_HAS_PCI_MIO) 250 + if (test_machine_feature(MFEATURE_PCI_MIO)) 253 251 elf_hwcap |= HWCAP_PCI_MIO; 254 252 255 253 /* virtualization support */
+14 -11
arch/s390/kernel/ptrace.c
··· 7 7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 8 */ 9 9 10 - #include "asm/ptrace.h" 11 10 #include <linux/kernel.h> 12 11 #include <linux/sched.h> 13 12 #include <linux/sched/task_stack.h> 13 + #include <linux/cpufeature.h> 14 14 #include <linux/mm.h> 15 15 #include <linux/smp.h> 16 16 #include <linux/errno.h> ··· 31 31 #include <asm/unistd.h> 32 32 #include <asm/runtime_instr.h> 33 33 #include <asm/facility.h> 34 + #include <asm/machine.h> 35 + #include <asm/ptrace.h> 36 + #include <asm/rwonce.h> 34 37 #include <asm/fpu.h> 35 38 36 39 #include "entry.h" ··· 63 60 cr0_new = cr0_old; 64 61 cr2_new = cr2_old; 65 62 /* Take care of the enable/disable of transactional execution. */ 66 - if (MACHINE_HAS_TE) { 63 + if (machine_has_tx()) { 67 64 /* Set or clear transaction execution TXC bit 8. */ 68 65 cr0_new.tcx = 1; 69 66 if (task->thread.per_flags & PER_FLAG_NO_TE) ··· 78 75 } 79 76 } 80 77 /* Take care of enable/disable of guarded storage. */ 81 - if (MACHINE_HAS_GS) { 78 + if (cpu_has_gs()) { 82 79 cr2_new.gse = 0; 83 80 if (task->thread.gs_cb) 84 81 cr2_new.gse = 1; ··· 473 470 case PTRACE_GET_LAST_BREAK: 474 471 return put_user(child->thread.last_break, (unsigned long __user *)data); 475 472 case PTRACE_ENABLE_TE: 476 - if (!MACHINE_HAS_TE) 473 + if (!machine_has_tx()) 477 474 return -EIO; 478 475 child->thread.per_flags &= ~PER_FLAG_NO_TE; 479 476 return 0; 480 477 case PTRACE_DISABLE_TE: 481 - if (!MACHINE_HAS_TE) 478 + if (!machine_has_tx()) 482 479 return -EIO; 483 480 child->thread.per_flags |= PER_FLAG_NO_TE; 484 481 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; 485 482 return 0; 486 483 case PTRACE_TE_ABORT_RAND: 487 - if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE)) 484 + if (!machine_has_tx() || (child->thread.per_flags & PER_FLAG_NO_TE)) 488 485 return -EIO; 489 486 switch (data) { 490 487 case 0UL: ··· 1036 1033 { 1037 1034 struct gs_cb *data = target->thread.gs_cb; 1038 1035 1039 - if (!MACHINE_HAS_GS) 1036 + if 
(!cpu_has_gs()) 1040 1037 return -ENODEV; 1041 1038 if (!data) 1042 1039 return -ENODATA; ··· 1053 1050 struct gs_cb gs_cb = { }, *data = NULL; 1054 1051 int rc; 1055 1052 1056 - if (!MACHINE_HAS_GS) 1053 + if (!cpu_has_gs()) 1057 1054 return -ENODEV; 1058 1055 if (!target->thread.gs_cb) { 1059 1056 data = kzalloc(sizeof(*data), GFP_KERNEL); ··· 1090 1087 { 1091 1088 struct gs_cb *data = target->thread.gs_bc_cb; 1092 1089 1093 - if (!MACHINE_HAS_GS) 1090 + if (!cpu_has_gs()) 1094 1091 return -ENODEV; 1095 1092 if (!data) 1096 1093 return -ENODATA; ··· 1104 1101 { 1105 1102 struct gs_cb *data = target->thread.gs_bc_cb; 1106 1103 1107 - if (!MACHINE_HAS_GS) 1104 + if (!cpu_has_gs()) 1108 1105 return -ENODEV; 1109 1106 if (!data) { 1110 1107 data = kzalloc(sizeof(*data), GFP_KERNEL); ··· 1574 1571 addr = kernel_stack_pointer(regs) + n * sizeof(long); 1575 1572 if (!regs_within_kernel_stack(regs, addr)) 1576 1573 return 0; 1577 - return *(unsigned long *)addr; 1574 + return READ_ONCE_NOCHECK(addr); 1578 1575 }
+9 -14
arch/s390/kernel/setup.c
··· 54 54 55 55 #include <asm/archrandom.h> 56 56 #include <asm/boot_data.h> 57 + #include <asm/machine.h> 57 58 #include <asm/ipl.h> 58 59 #include <asm/facility.h> 59 60 #include <asm/smp.h> ··· 181 180 struct lowcore *lowcore_ptr[NR_CPUS]; 182 181 EXPORT_SYMBOL(lowcore_ptr); 183 182 184 - DEFINE_STATIC_KEY_FALSE(cpu_has_bear); 185 - 186 183 /* 187 184 * The Write Back bit position in the physaddr is given by the SLPC PCI. 188 185 * Leaving the mask zero always uses write through which is safe ··· 250 251 char query_buffer[1024]; 251 252 char *ptr; 252 253 253 - if (MACHINE_IS_VM) { 254 + if (machine_is_vm()) { 254 255 cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL); 255 256 console_devno = simple_strtoul(query_buffer + 5, NULL, 16); 256 257 ptr = strstr(query_buffer, "SUBCHANNEL ="); ··· 288 289 SET_CONSOLE_SCLP; 289 290 #endif 290 291 } 291 - } else if (MACHINE_IS_KVM) { 292 + } else if (machine_is_kvm()) { 292 293 if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE)) 293 294 SET_CONSOLE_VT220; 294 295 else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE)) ··· 651 652 return; 652 653 } 653 654 654 - if (!oldmem_data.start && MACHINE_IS_VM) 655 + if (!oldmem_data.start && machine_is_vm()) 655 656 diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size)); 656 657 crashk_res.start = crash_base; 657 658 crashk_res.end = crash_base + crash_size - 1; ··· 897 898 /* 898 899 * print what head.S has found out about the machine 899 900 */ 900 - if (MACHINE_IS_VM) 901 + if (machine_is_vm()) 901 902 pr_info("Linux is running as a z/VM " 902 903 "guest operating system in 64-bit mode\n"); 903 - else if (MACHINE_IS_KVM) 904 + else if (machine_is_kvm()) 904 905 pr_info("Linux is running under KVM in 64-bit mode\n"); 905 - else if (MACHINE_IS_LPAR) 906 + else if (machine_is_lpar()) 906 907 pr_info("Linux is running natively in 64-bit mode\n"); 907 908 else 908 909 pr_info("Linux is running as a guest in 64-bit mode\n"); ··· 910 911 if (!boot_earlyprintk) 911 912 
boot_rb_foreach(print_rb_entry); 912 913 913 - if (have_relocated_lowcore()) 914 + if (machine_has_relocated_lowcore()) 914 915 pr_info("Lowcore relocated to 0x%px\n", get_lowcore()); 915 916 916 917 log_component_list(); ··· 960 961 setup_uv(); 961 962 dma_contiguous_reserve(ident_map_size); 962 963 vmcp_cma_reserve(); 963 - if (MACHINE_HAS_EDAT2) 964 + if (cpu_has_edat2()) 964 965 hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT); 965 966 966 967 reserve_crashkernel(); ··· 980 981 numa_setup(); 981 982 smp_detect_cpus(); 982 983 topology_init_early(); 983 - 984 - if (test_facility(193)) 985 - static_branch_enable(&cpu_has_bear); 986 - 987 984 setup_protection_map(); 988 985 /* 989 986 * Create kernel page tables.
+5 -3
arch/s390/kernel/smp.c
··· 18 18 #define KMSG_COMPONENT "cpu" 19 19 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 20 20 21 + #include <linux/cpufeature.h> 21 22 #include <linux/workqueue.h> 22 23 #include <linux/memblock.h> 23 24 #include <linux/export.h> ··· 39 38 #include <linux/kprobes.h> 40 39 #include <asm/access-regs.h> 41 40 #include <asm/asm-offsets.h> 41 + #include <asm/machine.h> 42 42 #include <asm/ctlreg.h> 43 43 #include <asm/pfault.h> 44 44 #include <asm/diag.h> ··· 418 416 419 417 void notrace smp_yield_cpu(int cpu) 420 418 { 421 - if (!MACHINE_HAS_DIAG9C) 419 + if (!machine_has_diag9c()) 422 420 return; 423 421 diag_stat_inc_norecursion(DIAG_STAT_X09C); 424 422 asm volatile("diag %0,0,0x9c" ··· 563 561 if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS, 564 562 pa) != SIGP_CC_ORDER_CODE_ACCEPTED) 565 563 return -EIO; 566 - if (!cpu_has_vx() && !MACHINE_HAS_GS) 564 + if (!cpu_has_vx() && !cpu_has_gs()) 567 565 return 0; 568 566 pa = lc->mcesad & MCESA_ORIGIN_MASK; 569 - if (MACHINE_HAS_GS) 567 + if (cpu_has_gs()) 570 568 pa |= lc->mcesad & MCESA_LC_MASK; 571 569 if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS, 572 570 pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
+18 -33
arch/s390/kernel/syscall.c
··· 12 12 * platform. 13 13 */ 14 14 15 + #include <linux/cpufeature.h> 15 16 #include <linux/errno.h> 16 17 #include <linux/sched.h> 17 18 #include <linux/mm.h> ··· 82 81 return -ENOSYS; 83 82 } 84 83 85 - static void do_syscall(struct pt_regs *regs) 84 + void noinstr __do_syscall(struct pt_regs *regs, int per_trap) 86 85 { 87 86 unsigned long nr; 88 87 88 + add_random_kstack_offset(); 89 + enter_from_user_mode(regs); 90 + regs->psw = get_lowcore()->svc_old_psw; 91 + regs->int_code = get_lowcore()->svc_int_code; 92 + update_timer_sys(); 93 + if (cpu_has_bear()) 94 + current->thread.last_break = regs->last_break; 95 + local_irq_enable(); 96 + regs->orig_gpr2 = regs->gprs[2]; 97 + if (unlikely(per_trap)) 98 + set_thread_flag(TIF_PER_TRAP); 99 + regs->flags = 0; 100 + set_pt_regs_flag(regs, PIF_SYSCALL); 89 101 nr = regs->int_code & 0xffff; 90 - if (!nr) { 102 + if (likely(!nr)) { 91 103 nr = regs->gprs[1] & 0xffff; 92 104 regs->int_code &= ~0xffffUL; 93 105 regs->int_code |= nr; 94 106 } 95 - 96 107 regs->gprs[2] = nr; 97 - 98 108 if (nr == __NR_restart_syscall && !(current->restart_block.arch_data & 1)) { 99 109 regs->psw.addr = current->restart_block.arch_data; 100 110 current->restart_block.arch_data = 1; 101 111 } 102 112 nr = syscall_enter_from_user_mode_work(regs, nr); 103 - 104 113 /* 105 114 * In the s390 ptrace ABI, both the syscall number and the return value 106 115 * use gpr2. However, userspace puts the syscall number either in the ··· 118 107 * work, the ptrace code sets PIF_SYSCALL_RET_SET, which is checked here 119 108 * and if set, the syscall will be skipped. 
120 109 */ 121 - 122 110 if (unlikely(test_and_clear_pt_regs_flag(regs, PIF_SYSCALL_RET_SET))) 123 111 goto out; 124 112 regs->gprs[2] = -ENOSYS; 125 - if (likely(nr >= NR_syscalls)) 126 - goto out; 127 - do { 113 + if (likely(nr < NR_syscalls)) 128 114 regs->gprs[2] = current->thread.sys_call_table[nr](regs); 129 - } while (test_and_clear_pt_regs_flag(regs, PIF_EXECVE_PGSTE_RESTART)); 130 115 out: 131 - syscall_exit_to_user_mode_work(regs); 132 - } 133 - 134 - void noinstr __do_syscall(struct pt_regs *regs, int per_trap) 135 - { 136 - add_random_kstack_offset(); 137 - enter_from_user_mode(regs); 138 - regs->psw = get_lowcore()->svc_old_psw; 139 - regs->int_code = get_lowcore()->svc_int_code; 140 - update_timer_sys(); 141 - if (static_branch_likely(&cpu_has_bear)) 142 - current->thread.last_break = regs->last_break; 143 - 144 - local_irq_enable(); 145 - regs->orig_gpr2 = regs->gprs[2]; 146 - 147 - if (per_trap) 148 - set_thread_flag(TIF_PER_TRAP); 149 - 150 - regs->flags = 0; 151 - set_pt_regs_flag(regs, PIF_SYSCALL); 152 - do_syscall(regs); 153 - exit_to_user_mode(); 116 + syscall_exit_to_user_mode(regs); 154 117 }
+6 -42
arch/s390/kernel/sysinfo.c
··· 5 5 * Martin Schwidefsky <schwidefsky@de.ibm.com>, 6 6 */ 7 7 8 + #include <linux/cpufeature.h> 8 9 #include <linux/debugfs.h> 9 10 #include <linux/kernel.h> 10 11 #include <linux/mm.h> ··· 16 15 #include <linux/export.h> 17 16 #include <linux/slab.h> 18 17 #include <asm/asm-extable.h> 18 + #include <asm/machine.h> 19 19 #include <asm/ebcdic.h> 20 20 #include <asm/debug.h> 21 21 #include <asm/sysinfo.h> 22 22 #include <asm/cpcmd.h> 23 23 #include <asm/topology.h> 24 24 #include <asm/fpu.h> 25 + #include <asm/asm.h> 25 26 26 27 int topology_max_mnest; 27 - 28 - static inline int __stsi(void *sysinfo, int fc, int sel1, int sel2, int *lvl) 29 - { 30 - int r0 = (fc << 28) | sel1; 31 - int rc = 0; 32 - 33 - asm volatile( 34 - " lr 0,%[r0]\n" 35 - " lr 1,%[r1]\n" 36 - " stsi 0(%[sysinfo])\n" 37 - "0: jz 2f\n" 38 - "1: lhi %[rc],%[retval]\n" 39 - "2: lr %[r0],0\n" 40 - EX_TABLE(0b, 1b) 41 - : [r0] "+d" (r0), [rc] "+d" (rc) 42 - : [r1] "d" (sel2), 43 - [sysinfo] "a" (sysinfo), 44 - [retval] "K" (-EOPNOTSUPP) 45 - : "cc", "0", "1", "memory"); 46 - *lvl = ((unsigned int) r0) >> 28; 47 - return rc; 48 - } 49 - 50 - /* 51 - * stsi - store system information 52 - * 53 - * Returns the current configuration level if function code 0 was specified. 54 - * Otherwise returns 0 on success or a negative value on error. 55 - */ 56 - int stsi(void *sysinfo, int fc, int sel1, int sel2) 57 - { 58 - int lvl, rc; 59 - 60 - rc = __stsi(sysinfo, fc, sel1, sel2, &lvl); 61 - if (rc) 62 - return rc; 63 - return fc ? 
0 : lvl; 64 - } 65 - EXPORT_SYMBOL(stsi); 66 28 67 29 #ifdef CONFIG_PROC_FS 68 30 ··· 118 154 int i; 119 155 120 156 seq_putc(m, '\n'); 121 - if (!MACHINE_HAS_TOPOLOGY) 157 + if (!cpu_has_topology()) 122 158 return; 123 159 if (stsi(info, 15, 1, topology_max_mnest)) 124 160 return; ··· 379 415 static __init int create_proc_service_level(void) 380 416 { 381 417 proc_create_seq("service_levels", 0, NULL, &service_level_seq_ops); 382 - if (MACHINE_IS_VM) 418 + if (machine_is_vm()) 383 419 register_service_level(&service_level_vm); 384 420 return 0; 385 421 } ··· 523 559 sf = &stsi_file[i]; 524 560 debugfs_create_file(sf->name, 0400, stsi_root, NULL, sf->fops); 525 561 } 526 - if (IS_ENABLED(CONFIG_SCHED_TOPOLOGY) && MACHINE_HAS_TOPOLOGY) { 562 + if (IS_ENABLED(CONFIG_SCHED_TOPOLOGY) && cpu_has_topology()) { 527 563 char link_to[10]; 528 564 529 565 sprintf(link_to, "15_1_%d", topology_mnest_limit());
+2 -2
arch/s390/kernel/time.c
··· 54 54 #include <asm/cio.h> 55 55 #include "entry.h" 56 56 57 - union tod_clock tod_clock_base __section(".data"); 57 + union tod_clock __bootdata_preserved(tod_clock_base); 58 58 EXPORT_SYMBOL_GPL(tod_clock_base); 59 59 60 - u64 clock_comparator_max = -1ULL; 60 + u64 __bootdata_preserved(clock_comparator_max); 61 61 EXPORT_SYMBOL_GPL(clock_comparator_max); 62 62 63 63 static DEFINE_PER_CPU(struct clock_event_device, comparators);
+10 -9
arch/s390/kernel/topology.c
··· 6 6 #define KMSG_COMPONENT "cpu" 7 7 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 8 9 + #include <linux/cpufeature.h> 9 10 #include <linux/workqueue.h> 10 11 #include <linux/memblock.h> 11 12 #include <linux/uaccess.h> ··· 241 240 { 242 241 int cpu, rc; 243 242 244 - if (!MACHINE_HAS_TOPOLOGY) 243 + if (!cpu_has_topology()) 245 244 return -EOPNOTSUPP; 246 245 if (fc) 247 246 rc = ptf(PTF_VERTICAL); ··· 316 315 hd_status = 0; 317 316 rc = 0; 318 317 mutex_lock(&smp_cpu_state_mutex); 319 - if (MACHINE_HAS_TOPOLOGY) { 318 + if (cpu_has_topology()) { 320 319 rc = 1; 321 320 store_topology(info); 322 321 tl_to_masks(info); 323 322 } 324 323 update_cpu_masks(); 325 - if (!MACHINE_HAS_TOPOLOGY) 324 + if (!cpu_has_topology()) 326 325 topology_update_polarization_simple(); 327 326 if (cpu_management == 1) 328 327 hd_status = hd_enable_hiperdispatch(); ··· 377 376 378 377 void topology_expect_change(void) 379 378 { 380 - if (!MACHINE_HAS_TOPOLOGY) 379 + if (!cpu_has_topology()) 381 380 return; 382 381 /* This is racy, but it doesn't matter since it is just a heuristic. 383 382 * Worst case is that we poll in a higher frequency for a bit longer. 
··· 501 500 int rc; 502 501 503 502 rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group); 504 - if (rc || !MACHINE_HAS_TOPOLOGY) 503 + if (rc || !cpu_has_topology()) 505 504 return rc; 506 505 rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group); 507 506 if (rc) ··· 570 569 571 570 set_sched_topology(s390_topology); 572 571 if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) { 573 - if (MACHINE_HAS_TOPOLOGY) 572 + if (cpu_has_topology()) 574 573 topology_mode = TOPOLOGY_MODE_HW; 575 574 else 576 575 topology_mode = TOPOLOGY_MODE_SINGLE; 577 576 } 578 - if (!MACHINE_HAS_TOPOLOGY) 577 + if (!cpu_has_topology()) 579 578 goto out; 580 579 tl_info = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); 581 580 info = tl_info; ··· 597 596 { 598 597 if (!enabled) 599 598 return TOPOLOGY_MODE_SINGLE; 600 - return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE; 599 + return cpu_has_topology() ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE; 601 600 } 602 601 603 602 static inline int topology_is_enabled(void) ··· 687 686 int rc = 0; 688 687 689 688 timer_setup(&topology_timer, topology_timer_fn, TIMER_DEFERRABLE); 690 - if (MACHINE_HAS_TOPOLOGY) 689 + if (cpu_has_topology()) 691 690 set_topology_timer(); 692 691 else 693 692 topology_update_polarization_simple();
+39 -72
arch/s390/kernel/traps.c
··· 3 3 * S390 version 4 4 * Copyright IBM Corp. 1999, 2000 5 5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 - * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 6 + * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 7 7 * 8 8 * Derived from "arch/i386/kernel/traps.c" 9 9 * Copyright (C) 1991, 1992 Linus Torvalds 10 10 */ 11 11 12 - /* 13 - * 'Traps.c' handles hardware traps and faults after we have saved some 14 - * state in 'asm.s'. 15 - */ 16 - #include "asm/irqflags.h" 17 - #include "asm/ptrace.h" 12 + #include <linux/cpufeature.h> 18 13 #include <linux/kprobes.h> 19 14 #include <linux/kdebug.h> 20 15 #include <linux/randomize_kstack.h> ··· 24 29 #include <linux/entry-common.h> 25 30 #include <linux/kmsan.h> 26 31 #include <asm/asm-extable.h> 32 + #include <asm/irqflags.h> 33 + #include <asm/ptrace.h> 27 34 #include <asm/vtime.h> 28 35 #include <asm/fpu.h> 29 36 #include <asm/fault.h> ··· 39 42 address = current->thread.trap_tdb.data[3]; 40 43 else 41 44 address = regs->psw.addr; 42 - return (void __user *) (address - (regs->int_code >> 16)); 45 + return (void __user *)(address - (regs->int_code >> 16)); 43 46 } 44 47 45 48 #ifdef CONFIG_GENERIC_BUG ··· 54 57 if (user_mode(regs)) { 55 58 force_sig_fault(si_signo, si_code, get_trap_ip(regs)); 56 59 report_user_fault(regs, si_signo, 0); 57 - } else { 60 + } else { 58 61 if (!fixup_exception(regs)) 59 62 die(regs, str); 60 - } 63 + } 61 64 } 62 65 63 66 static void do_trap(struct pt_regs *regs, int si_signo, int si_code, char *str) 64 67 { 65 - if (notify_die(DIE_TRAP, str, regs, 0, 66 - regs->int_code, si_signo) == NOTIFY_STOP) 68 + if (notify_die(DIE_TRAP, str, regs, 0, regs->int_code, si_signo) == NOTIFY_STOP) 67 69 return; 68 70 do_report_trap(regs, si_signo, si_code, str); 69 71 } ··· 74 78 return; 75 79 if (!current->ptrace) 76 80 return; 77 - force_sig_fault(SIGTRAP, TRAP_HWBKPT, 78 - (void __force __user *) current->thread.per_event.address); 81 + 
force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __force __user *)current->thread.per_event.address); 79 82 } 80 83 NOKPROBE_SYMBOL(do_per_trap); 81 84 ··· 93 98 do_trap(regs, signr, sicode, str); \ 94 99 } 95 100 96 - DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR, 97 - "addressing exception") 98 - DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN, 99 - "execute exception") 100 - DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV, 101 - "fixpoint divide exception") 102 - DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF, 103 - "fixpoint overflow exception") 104 - DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF, 105 - "HFP overflow exception") 106 - DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND, 107 - "HFP underflow exception") 108 - DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES, 109 - "HFP significance exception") 110 - DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV, 111 - "HFP divide exception") 112 - DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV, 113 - "HFP square root exception") 114 - DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN, 115 - "operand exception") 116 - DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC, 117 - "privileged operation") 118 - DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, 119 - "special operation exception") 120 - DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, 121 - "transaction constraint exception") 101 + DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR, "addressing exception") 102 + DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV, "fixpoint divide exception") 103 + DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN, "execute exception") 104 + DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV, "HFP divide exception") 105 + DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF, "HFP overflow exception") 106 + DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES, "HFP significance exception") 107 + DO_ERROR_INFO(hfp_sqrt_exception, 
SIGFPE, FPE_FLTINV, "HFP square root exception") 108 + DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND, "HFP underflow exception") 109 + DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN, "operand exception") 110 + DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF, "fixpoint overflow exception") 111 + DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC, "privileged operation") 112 + DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, "special operation exception") 113 + DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, "specification exception"); 114 + DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, "transaction constraint exception") 122 115 123 116 static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc) 124 117 { 125 118 int si_code = 0; 119 + 126 120 /* FPC[2] is Data Exception Code */ 127 121 if ((fpc & 0x00000300) == 0) { 128 122 /* bits 6 and 7 of DXC are 0 iff IEEE exception */ ··· 137 153 138 154 static void illegal_op(struct pt_regs *regs) 139 155 { 140 - __u8 opcode[6]; 141 - __u16 __user *location; 142 156 int is_uprobe_insn = 0; 157 + u16 __user *location; 143 158 int signal = 0; 159 + u16 opcode; 144 160 145 161 location = get_trap_ip(regs); 146 - 147 162 if (user_mode(regs)) { 148 - if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) 163 + if (get_user(opcode, location)) 149 164 return; 150 - if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { 165 + if (opcode == S390_BREAKPOINT_U16) { 151 166 if (current->ptrace) 152 167 force_sig_fault(SIGTRAP, TRAP_BRKPT, location); 153 168 else 154 169 signal = SIGILL; 155 170 #ifdef CONFIG_UPROBES 156 - } else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) { 171 + } else if (opcode == UPROBE_SWBP_INSN) { 157 172 is_uprobe_insn = 1; 158 173 #endif 159 - } else 174 + } else { 160 175 signal = SIGILL; 176 + } 161 177 } 162 178 /* 163 - * We got either an illegal op in kernel mode, or user space trapped 179 + * This is either an illegal op in kernel mode, or user space 
trapped 164 180 * on a uprobes illegal instruction. See if kprobes or uprobes picks 165 181 * it up. If not, SIGILL. 166 182 */ 167 183 if (is_uprobe_insn || !user_mode(regs)) { 168 - if (notify_die(DIE_BPT, "bpt", regs, 0, 169 - 3, SIGTRAP) != NOTIFY_STOP) 184 + if (notify_die(DIE_BPT, "bpt", regs, 0, 3, SIGTRAP) != NOTIFY_STOP) 170 185 signal = SIGILL; 171 186 } 172 187 if (signal) ··· 173 190 } 174 191 NOKPROBE_SYMBOL(illegal_op); 175 192 176 - DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, 177 - "specification exception"); 178 - 179 193 static void vector_exception(struct pt_regs *regs) 180 194 { 181 195 int si_code, vic; 182 - 183 - if (!cpu_has_vx()) { 184 - do_trap(regs, SIGILL, ILL_ILLOPN, "illegal operation"); 185 - return; 186 - } 187 196 188 197 /* get vector interrupt code from fpc */ 189 198 save_user_fpu_regs(); ··· 224 249 { 225 250 if (user_mode(regs)) 226 251 return; 227 - 228 252 switch (report_bug(regs->psw.addr - (regs->int_code >> 16), regs)) { 229 253 case BUG_TRAP_TYPE_NONE: 230 254 fixup_exception(regs); ··· 236 262 } 237 263 } 238 264 239 - void kernel_stack_overflow(struct pt_regs *regs) 265 + void kernel_stack_invalid(struct pt_regs *regs) 240 266 { 241 267 /* 242 268 * Normally regs are unpoisoned by the generic entry code, but ··· 244 270 */ 245 271 kmsan_unpoison_entry_regs(regs); 246 272 bust_spinlocks(1); 247 - printk("Kernel stack overflow.\n"); 273 + pr_emerg("Kernel stack pointer invalid\n"); 248 274 show_regs(regs); 249 275 bust_spinlocks(0); 250 - panic("Corrupt kernel stack, can't continue."); 276 + panic("Invalid kernel stack pointer, cannot continue"); 251 277 } 252 - NOKPROBE_SYMBOL(kernel_stack_overflow); 278 + NOKPROBE_SYMBOL(kernel_stack_invalid); 253 279 254 280 static void __init test_monitor_call(void) 255 281 { ··· 257 283 258 284 if (!IS_ENABLED(CONFIG_BUG)) 259 285 return; 260 - asm volatile( 286 + asm_inline volatile( 261 287 " mc 0,0\n" 262 288 "0: lhi %[val],0\n" 263 289 "1:\n" ··· 297 323 teid.val = 
lc->trans_exc_code; 298 324 regs->int_code = lc->pgm_int_code; 299 325 regs->int_parm_long = teid.val; 300 - 301 326 /* 302 327 * In case of a guest fault, short-circuit the fault handler and return. 303 328 * This way the sie64a() function will return 0; fault address and ··· 309 336 current->thread.gmap_int_code = regs->int_code & 0xffff; 310 337 return; 311 338 } 312 - 313 339 state = irqentry_enter(regs); 314 - 315 340 if (user_mode(regs)) { 316 341 update_timer_sys(); 317 - if (!static_branch_likely(&cpu_has_bear)) { 342 + if (!cpu_has_bear()) { 318 343 if (regs->last_break < 4096) 319 344 regs->last_break = 1; 320 345 } 321 346 current->thread.last_break = regs->last_break; 322 347 } 323 - 324 348 if (lc->pgm_code & 0x0200) { 325 349 /* transaction abort */ 326 350 current->thread.trap_tdb = lc->pgm_tdb; 327 351 } 328 - 329 352 if (lc->pgm_code & PGM_INT_CODE_PER) { 330 353 if (user_mode(regs)) { 331 354 struct per_event *ev = &current->thread.per_event; ··· 337 368 goto out; 338 369 } 339 370 } 340 - 341 371 if (!irqs_disabled_flags(regs->psw.mask)) 342 372 trace_hardirqs_on(); 343 373 __arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER); 344 - 345 374 trapnr = regs->int_code & PGM_INT_CODE_MASK; 346 375 if (trapnr) 347 376 pgm_check_table[trapnr](regs);
+3 -3
arch/s390/kvm/interrupt.c
··· 10 10 #define KMSG_COMPONENT "kvm-s390" 11 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 12 13 + #include <linux/cpufeature.h> 13 14 #include <linux/interrupt.h> 14 15 #include <linux/kvm_host.h> 15 16 #include <linux/hrtimer.h> ··· 578 577 /* take care of lazy register loading */ 579 578 kvm_s390_fpu_store(vcpu->run); 580 579 save_access_regs(vcpu->run->s.regs.acrs); 581 - if (MACHINE_HAS_GS && vcpu->arch.gs_enabled) 580 + if (cpu_has_gs() && vcpu->arch.gs_enabled) 582 581 save_gs_cb(current->thread.gs_cb); 583 582 584 583 /* Extended save area */ ··· 949 948 rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC); 950 949 rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea, 951 950 (u64 *) __LC_PGM_LAST_BREAK); 952 - rc |= put_guest_lc(vcpu, pgm_info.code, 953 - (u16 *)__LC_PGM_INT_CODE); 951 + rc |= put_guest_lc(vcpu, pgm_info.code, (u16 *)__LC_PGM_CODE); 954 952 rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, 955 953 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 956 954 rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
+10 -8
arch/s390/kvm/kvm-s390.c
··· 23 23 #include <linux/mman.h> 24 24 #include <linux/module.h> 25 25 #include <linux/moduleparam.h> 26 + #include <linux/cpufeature.h> 26 27 #include <linux/random.h> 27 28 #include <linux/slab.h> 28 29 #include <linux/timer.h> ··· 37 36 #include <asm/access-regs.h> 38 37 #include <asm/asm-offsets.h> 39 38 #include <asm/lowcore.h> 39 + #include <asm/machine.h> 40 40 #include <asm/stp.h> 41 41 #include <asm/gmap.h> 42 42 #include <asm/nmi.h> ··· 445 443 if (test_facility(201)) /* PFCR */ 446 444 pfcr_query(&kvm_s390_available_subfunc.pfcr); 447 445 448 - if (MACHINE_HAS_ESOP) 446 + if (machine_has_esop()) 449 447 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP); 450 448 /* 451 449 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow), 452 450 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing). 453 451 */ 454 - if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao || 452 + if (!sclp.has_sief2 || !machine_has_esop() || !sclp.has_64bscao || 455 453 !test_facility(3) || !nested) 456 454 return; 457 455 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2); ··· 640 638 r = min_t(unsigned int, num_online_cpus(), r); 641 639 break; 642 640 case KVM_CAP_S390_COW: 643 - r = MACHINE_HAS_ESOP; 641 + r = machine_has_esop(); 644 642 break; 645 643 case KVM_CAP_S390_VECTOR_REGISTERS: 646 644 r = test_facility(129); ··· 3398 3396 /* we emulate STHYI in kvm */ 3399 3397 set_kvm_facility(kvm->arch.model.fac_mask, 74); 3400 3398 set_kvm_facility(kvm->arch.model.fac_list, 74); 3401 - if (MACHINE_HAS_TLB_GUEST) { 3399 + if (machine_has_tlb_guest()) { 3402 3400 set_kvm_facility(kvm->arch.model.fac_mask, 147); 3403 3401 set_kvm_facility(kvm->arch.model.fac_list, 147); 3404 3402 } ··· 3894 3892 3895 3893 kvm_s390_vcpu_setup_model(vcpu); 3896 3894 3897 - /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */ 3898 - if (MACHINE_HAS_ESOP) 3895 + /* pgste_set_pte has special handling for !machine_has_esop() */ 3896 + if (machine_has_esop()) 3899 3897 
vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT; 3900 3898 if (test_kvm_facility(vcpu->kvm, 9)) 3901 3899 vcpu->arch.sie_block->ecb |= ECB_SRSI; ··· 5178 5176 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; 5179 5177 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0; 5180 5178 } 5181 - if (MACHINE_HAS_GS) { 5179 + if (cpu_has_gs()) { 5182 5180 preempt_disable(); 5183 5181 local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT); 5184 5182 if (current->thread.gs_cb) { ··· 5244 5242 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; 5245 5243 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; 5246 5244 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val; 5247 - if (MACHINE_HAS_GS) { 5245 + if (cpu_has_gs()) { 5248 5246 preempt_disable(); 5249 5247 local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT); 5250 5248 if (vcpu->arch.gs_enabled)
+25 -6
arch/s390/lib/spinlock.c
··· 10 10 #include <linux/export.h> 11 11 #include <linux/spinlock.h> 12 12 #include <linux/jiffies.h> 13 + #include <linux/sysctl.h> 13 14 #include <linux/init.h> 14 15 #include <linux/smp.h> 15 16 #include <linux/percpu.h> 16 17 #include <linux/io.h> 17 18 #include <asm/alternative.h> 19 + #include <asm/machine.h> 18 20 #include <asm/asm.h> 19 21 20 22 int spin_retry = -1; ··· 38 36 return 1; 39 37 } 40 38 __setup("spin_retry=", spin_retry_setup); 39 + 40 + static const struct ctl_table s390_spin_sysctl_table[] = { 41 + { 42 + .procname = "spin_retry", 43 + .data = &spin_retry, 44 + .maxlen = sizeof(int), 45 + .mode = 0644, 46 + .proc_handler = proc_dointvec, 47 + }, 48 + }; 49 + 50 + static int __init init_s390_spin_sysctls(void) 51 + { 52 + register_sysctl_init("kernel", s390_spin_sysctl_table); 53 + return 0; 54 + } 55 + arch_initcall(init_s390_spin_sysctls); 41 56 42 57 struct spin_wait { 43 58 struct spin_wait *next, *prev; ··· 160 141 161 142 ix = get_lowcore()->spinlock_index++; 162 143 barrier(); 163 - lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */ 144 + lockval = spinlock_lockval(); /* cpu + 1 */ 164 145 node = this_cpu_ptr(&spin_wait[ix]); 165 146 node->prev = node->next = NULL; 166 147 node_id = node->node_id; ··· 231 212 if (count-- >= 0) 232 213 continue; 233 214 count = spin_retry; 234 - if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1)) 215 + if (!machine_is_lpar() || arch_vcpu_is_preempted(owner - 1)) 235 216 smp_yield_cpu(owner - 1); 236 217 } 237 218 ··· 251 232 { 252 233 int lockval, old, new, owner, count; 253 234 254 - lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */ 235 + lockval = spinlock_lockval(); /* cpu + 1 */ 255 236 256 237 /* Pass the virtual CPU to the lock holder if it is not running */ 257 238 owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL); ··· 274 255 if (count-- >= 0) 275 256 continue; 276 257 count = spin_retry; 277 - if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1)) 258 + if (!machine_is_lpar() || 
arch_vcpu_is_preempted(owner - 1)) 278 259 smp_yield_cpu(owner - 1); 279 260 } 280 261 } ··· 290 271 291 272 int arch_spin_trylock_retry(arch_spinlock_t *lp) 292 273 { 293 - int cpu = SPINLOCK_LOCKVAL; 274 + int cpu = spinlock_lockval(); 294 275 int owner, count; 295 276 296 277 for (count = spin_retry; count > 0; count--) { ··· 356 337 cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK; 357 338 if (!cpu) 358 339 return; 359 - if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1)) 340 + if (machine_is_lpar() && !arch_vcpu_is_preempted(cpu - 1)) 360 341 return; 361 342 smp_yield_cpu(cpu - 1); 362 343 }
+86 -36
arch/s390/lib/uaccess.c
··· 31 31 } 32 32 #endif /*CONFIG_DEBUG_ENTRY */ 33 33 34 + union oac { 35 + unsigned int val; 36 + struct { 37 + struct { 38 + unsigned short key : 4; 39 + unsigned short : 4; 40 + unsigned short as : 2; 41 + unsigned short : 4; 42 + unsigned short k : 1; 43 + unsigned short a : 1; 44 + } oac1; 45 + struct { 46 + unsigned short key : 4; 47 + unsigned short : 4; 48 + unsigned short as : 2; 49 + unsigned short : 4; 50 + unsigned short k : 1; 51 + unsigned short a : 1; 52 + } oac2; 53 + }; 54 + }; 55 + 56 + static uaccess_kmsan_or_inline __must_check unsigned long 57 + raw_copy_from_user_key(void *to, const void __user *from, unsigned long size, unsigned long key) 58 + { 59 + unsigned long osize; 60 + union oac spec = { 61 + .oac2.key = key, 62 + .oac2.as = PSW_BITS_AS_SECONDARY, 63 + .oac2.k = 1, 64 + .oac2.a = 1, 65 + }; 66 + int cc; 67 + 68 + while (1) { 69 + osize = size; 70 + asm_inline volatile( 71 + " lr %%r0,%[spec]\n" 72 + "0: mvcos %[to],%[from],%[size]\n" 73 + "1: nopr %%r7\n" 74 + CC_IPM(cc) 75 + EX_TABLE_UA_MVCOS_FROM(0b, 0b) 76 + EX_TABLE_UA_MVCOS_FROM(1b, 0b) 77 + : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char *)to) 78 + : [spec] "d" (spec.val), [from] "Q" (*(const char __user *)from) 79 + : CC_CLOBBER_LIST("memory", "0")); 80 + if (CC_TRANSFORM(cc) == 0) 81 + return osize - size; 82 + size -= 4096; 83 + to += 4096; 84 + from += 4096; 85 + } 86 + } 87 + 34 88 unsigned long _copy_from_user_key(void *to, const void __user *from, 35 89 unsigned long n, unsigned long key) 36 90 { ··· 102 48 } 103 49 EXPORT_SYMBOL(_copy_from_user_key); 104 50 51 + static uaccess_kmsan_or_inline __must_check unsigned long 52 + raw_copy_to_user_key(void __user *to, const void *from, unsigned long size, unsigned long key) 53 + { 54 + unsigned long osize; 55 + union oac spec = { 56 + .oac1.key = key, 57 + .oac1.as = PSW_BITS_AS_SECONDARY, 58 + .oac1.k = 1, 59 + .oac1.a = 1, 60 + }; 61 + int cc; 62 + 63 + while (1) { 64 + osize = size; 65 + asm_inline volatile( 66 + " 
lr %%r0,%[spec]\n" 67 + "0: mvcos %[to],%[from],%[size]\n" 68 + "1: nopr %%r7\n" 69 + CC_IPM(cc) 70 + EX_TABLE_UA_MVCOS_TO(0b, 0b) 71 + EX_TABLE_UA_MVCOS_TO(1b, 0b) 72 + : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to) 73 + : [spec] "d" (spec.val), [from] "Q" (*(const char *)from) 74 + : CC_CLOBBER_LIST("memory", "0")); 75 + if (CC_TRANSFORM(cc) == 0) 76 + return osize - size; 77 + size -= 4096; 78 + to += 4096; 79 + from += 4096; 80 + } 81 + } 82 + 105 83 unsigned long _copy_to_user_key(void __user *to, const void *from, 106 84 unsigned long n, unsigned long key) 107 85 { ··· 144 58 return raw_copy_to_user_key(to, from, n, key); 145 59 } 146 60 EXPORT_SYMBOL(_copy_to_user_key); 147 - 148 - unsigned long __clear_user(void __user *to, unsigned long size) 149 - { 150 - unsigned long rem; 151 - union oac spec = { 152 - .oac1.as = PSW_BITS_AS_SECONDARY, 153 - .oac1.a = 1, 154 - }; 155 - 156 - asm volatile( 157 - " lr 0,%[spec]\n" 158 - "0: mvcos 0(%[to]),0(%[zeropg]),%[size]\n" 159 - "1: jz 5f\n" 160 - " algr %[size],%[val]\n" 161 - " slgr %[to],%[val]\n" 162 - " j 0b\n" 163 - "2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */ 164 - " nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */ 165 - " slgr %[rem],%[to]\n" 166 - " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */ 167 - " jnh 6f\n" 168 - "3: mvcos 0(%[to]),0(%[zeropg]),%[rem]\n" 169 - "4: slgr %[size],%[rem]\n" 170 - " j 6f\n" 171 - "5: slgr %[size],%[size]\n" 172 - "6:\n" 173 - EX_TABLE(0b, 2b) 174 - EX_TABLE(1b, 2b) 175 - EX_TABLE(3b, 6b) 176 - EX_TABLE(4b, 6b) 177 - : [size] "+&a" (size), [to] "+&a" (to), [rem] "=&a" (rem) 178 - : [val] "a" (-4096UL), [zeropg] "a" (empty_zero_page), [spec] "d" (spec.val) 179 - : "cc", "memory", "0"); 180 - return size; 181 - } 182 - EXPORT_SYMBOL(__clear_user);
+5 -3
arch/s390/mm/dump_pagetables.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/cpufeature.h> 2 4 #include <linux/set_memory.h> 3 5 #include <linux/ptdump.h> 4 6 #include <linux/seq_file.h> ··· 84 82 * in which case we have two lpswe instructions in lowcore that need 85 83 * to be executable. 86 84 */ 87 - if (addr == PAGE_SIZE && (nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear))) 85 + if (addr == PAGE_SIZE && (nospec_uses_trampoline() || !cpu_has_bear())) 88 86 return; 89 87 WARN_ONCE(IS_ENABLED(CONFIG_DEBUG_WX), 90 88 "s390/mm: Found insecure W+X mapping at address %pS\n", ··· 169 167 }, 170 168 }; 171 169 172 - if (!MACHINE_HAS_NX) 170 + if (!cpu_has_nx()) 173 171 return true; 174 172 ptdump_walk_pgd(&st.ptdump, &init_mm, NULL); 175 173 if (st.wx_pages) { ··· 178 176 return false; 179 177 } else { 180 178 pr_info("Checked W+X mappings: passed, no %sW+X pages found\n", 181 - (nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)) ? 179 + (nospec_uses_trampoline() || !cpu_has_bear()) ? 182 180 "unexpected " : ""); 183 181 184 182 return true;
+47
arch/s390/mm/extable.c
··· 73 73 return true; 74 74 } 75 75 76 + struct insn_ssf { 77 + u64 opc1 : 8; 78 + u64 r3 : 4; 79 + u64 opc2 : 4; 80 + u64 b1 : 4; 81 + u64 d1 : 12; 82 + u64 b2 : 4; 83 + u64 d2 : 12; 84 + } __packed; 85 + 86 + static bool ex_handler_ua_mvcos(const struct exception_table_entry *ex, 87 + bool from, struct pt_regs *regs) 88 + { 89 + unsigned long uaddr, remainder; 90 + struct insn_ssf *insn; 91 + 92 + /* 93 + * If the faulting user space access crossed a page boundary retry by 94 + * limiting the access to the first page (adjust length accordingly). 95 + * Then the mvcos instruction will either complete with condition code 96 + * zero, or generate another fault where the user space access did not 97 + * cross a page boundary. 98 + * If the faulting user space access did not cross a page boundary set 99 + * length to zero and retry. In this case no user space access will 100 + * happen, and the mvcos instruction will complete with condition code 101 + * zero. 102 + * In both cases the instruction will complete with condition code 103 + * zero (copying finished), and the register which contains the 104 + * length, indicates the number of bytes copied. 
105 + */ 106 + regs->psw.addr = extable_fixup(ex); 107 + insn = (struct insn_ssf *)regs->psw.addr; 108 + if (from) 109 + uaddr = regs->gprs[insn->b2] + insn->d2; 110 + else 111 + uaddr = regs->gprs[insn->b1] + insn->d1; 112 + remainder = PAGE_SIZE - (uaddr & (PAGE_SIZE - 1)); 113 + if (regs->gprs[insn->r3] <= remainder) 114 + remainder = 0; 115 + regs->gprs[insn->r3] = remainder; 116 + return true; 117 + } 118 + 76 119 bool fixup_exception(struct pt_regs *regs) 77 120 { 78 121 const struct exception_table_entry *ex; ··· 138 95 return ex_handler_zeropad(ex, regs); 139 96 case EX_TYPE_FPC: 140 97 return ex_handler_fpc(ex, regs); 98 + case EX_TYPE_UA_MVCOS_TO: 99 + return ex_handler_ua_mvcos(ex, false, regs); 100 + case EX_TYPE_UA_MVCOS_FROM: 101 + return ex_handler_ua_mvcos(ex, true, regs); 141 102 } 142 103 panic("invalid exception table entry"); 143 104 }
+5 -4
arch/s390/mm/extmem.c
··· 21 21 #include <linux/ioport.h> 22 22 #include <linux/refcount.h> 23 23 #include <linux/pgtable.h> 24 + #include <asm/machine.h> 24 25 #include <asm/diag.h> 25 26 #include <asm/page.h> 26 27 #include <asm/ebcdic.h> ··· 256 255 int rc; 257 256 struct dcss_segment seg; 258 257 259 - if (!MACHINE_IS_VM) 258 + if (!machine_is_vm()) 260 259 return -ENOSYS; 261 260 262 261 dcss_mkname(name, seg.dcss_name); ··· 419 418 struct dcss_segment *seg; 420 419 int rc; 421 420 422 - if (!MACHINE_IS_VM) 421 + if (!machine_is_vm()) 423 422 return -ENOSYS; 424 423 425 424 mutex_lock(&dcss_lock); ··· 541 540 unsigned long dummy; 542 541 struct dcss_segment *seg; 543 542 544 - if (!MACHINE_IS_VM) 543 + if (!machine_is_vm()) 545 544 return; 546 545 547 546 mutex_lock(&dcss_lock); ··· 573 572 char cmd2[80]; 574 573 int i, response; 575 574 576 - if (!MACHINE_IS_VM) 575 + if (!machine_is_vm()) 577 576 return; 578 577 579 578 mutex_lock(&dcss_lock);
+20 -13
arch/s390/mm/fault.c
··· 11 11 12 12 #include <linux/kernel_stat.h> 13 13 #include <linux/mmu_context.h> 14 + #include <linux/cpufeature.h> 14 15 #include <linux/perf_event.h> 15 16 #include <linux/signal.h> 16 17 #include <linux/sched.h> 17 18 #include <linux/sched/debug.h> 18 - #include <linux/jump_label.h> 19 19 #include <linux/kernel.h> 20 20 #include <linux/errno.h> 21 21 #include <linux/string.h> ··· 46 46 #include <asm/uv.h> 47 47 #include "../kernel/entry.h" 48 48 49 - static DEFINE_STATIC_KEY_FALSE(have_store_indication); 50 - 51 - static int __init fault_init(void) 52 - { 53 - if (test_facility(75)) 54 - static_branch_enable(&have_store_indication); 55 - return 0; 56 - } 57 - early_initcall(fault_init); 58 - 59 49 /* 60 50 * Find out which address space caused the exception. 61 51 */ ··· 71 81 { 72 82 union teid teid = { .val = regs->int_parm_long }; 73 83 74 - if (static_branch_likely(&have_store_indication)) 84 + if (test_facility(75)) 75 85 return teid.fsi == TEID_FSI_STORE; 76 86 return false; 77 87 } ··· 164 174 } 165 175 166 176 int show_unhandled_signals = 1; 177 + 178 + static const struct ctl_table s390_fault_sysctl_table[] = { 179 + { 180 + .procname = "userprocess_debug", 181 + .data = &show_unhandled_signals, 182 + .maxlen = sizeof(int), 183 + .mode = 0644, 184 + .proc_handler = proc_dointvec, 185 + }, 186 + }; 187 + 188 + static int __init init_s390_fault_sysctls(void) 189 + { 190 + register_sysctl_init("kernel", s390_fault_sysctl_table); 191 + return 0; 192 + } 193 + arch_initcall(init_s390_fault_sysctls); 167 194 168 195 void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault) 169 196 { ··· 384 377 */ 385 378 return handle_fault_error_nolock(regs, 0); 386 379 } 387 - if (unlikely(MACHINE_HAS_NX && teid.b56)) { 380 + if (unlikely(cpu_has_nx() && teid.b56)) { 388 381 regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK); 389 382 return handle_fault_error_nolock(regs, SEGV_ACCERR); 390 383 }
+9 -10
arch/s390/mm/gmap.c
··· 8 8 * Janosch Frank <frankja@linux.vnet.ibm.com> 9 9 */ 10 10 11 + #include <linux/cpufeature.h> 11 12 #include <linux/kernel.h> 12 13 #include <linux/pagewalk.h> 13 14 #include <linux/swap.h> ··· 21 20 #include <linux/pgtable.h> 22 21 #include <asm/page-states.h> 23 22 #include <asm/pgalloc.h> 23 + #include <asm/machine.h> 24 24 #include <asm/gmap.h> 25 25 #include <asm/page.h> 26 26 #include <asm/tlb.h> ··· 137 135 138 136 static void gmap_flush_tlb(struct gmap *gmap) 139 137 { 140 - if (MACHINE_HAS_IDTE) 138 + if (cpu_has_idte()) 141 139 __tlb_flush_idte(gmap->asce); 142 140 else 143 141 __tlb_flush_global(); ··· 2027 2025 gaddr &= HPAGE_MASK; 2028 2026 pmdp_notify_gmap(gmap, pmdp, gaddr); 2029 2027 new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_GMAP_IN)); 2030 - if (MACHINE_HAS_TLB_GUEST) 2028 + if (machine_has_tlb_guest()) 2031 2029 __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce, 2032 2030 IDTE_GLOBAL); 2033 - else if (MACHINE_HAS_IDTE) 2031 + else if (cpu_has_idte()) 2034 2032 __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL); 2035 2033 else 2036 2034 __pmdp_csp(pmdp); ··· 2105 2103 WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE | 2106 2104 _SEGMENT_ENTRY_GMAP_UC | 2107 2105 _SEGMENT_ENTRY)); 2108 - if (MACHINE_HAS_TLB_GUEST) 2106 + if (machine_has_tlb_guest()) 2109 2107 __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE, 2110 2108 gmap->asce, IDTE_LOCAL); 2111 - else if (MACHINE_HAS_IDTE) 2109 + else if (cpu_has_idte()) 2112 2110 __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL); 2113 2111 *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY); 2114 2112 } ··· 2138 2136 WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE | 2139 2137 _SEGMENT_ENTRY_GMAP_UC | 2140 2138 _SEGMENT_ENTRY)); 2141 - if (MACHINE_HAS_TLB_GUEST) 2139 + if (machine_has_tlb_guest()) 2142 2140 __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE, 2143 2141 gmap->asce, IDTE_GLOBAL); 2144 - else if (MACHINE_HAS_IDTE) 2142 + else if (cpu_has_idte()) 2145 2143 __pmdp_idte(gaddr, pmdp, 0, 0, 
IDTE_GLOBAL); 2146 2144 else 2147 2145 __pmdp_csp(pmdp); ··· 2260 2258 /* Do we have pgstes? if yes, we are done */ 2261 2259 if (mm_has_pgste(mm)) 2262 2260 return 0; 2263 - /* Fail if the page tables are 2K */ 2264 - if (!mm_alloc_pgste(mm)) 2265 - return -EINVAL; 2266 2261 mmap_write_lock(mm); 2267 2262 mm->context.has_pgste = 1; 2268 2263 /* split thp mappings and disable thp for future mappings */
+4 -3
arch/s390/mm/hugetlbpage.c
··· 9 9 #define KMSG_COMPONENT "hugetlb" 10 10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11 11 12 - #include <asm/pgalloc.h> 12 + #include <linux/cpufeature.h> 13 13 #include <linux/mm.h> 14 14 #include <linux/hugetlb.h> 15 15 #include <linux/mman.h> 16 16 #include <linux/sched/mm.h> 17 17 #include <linux/security.h> 18 + #include <asm/pgalloc.h> 18 19 19 20 /* 20 21 * If the bit selected by single-bit bitmask "a" is set within "x", move ··· 249 248 250 249 bool __init arch_hugetlb_valid_size(unsigned long size) 251 250 { 252 - if (MACHINE_HAS_EDAT1 && size == PMD_SIZE) 251 + if (cpu_has_edat1() && size == PMD_SIZE) 253 252 return true; 254 - else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) 253 + else if (cpu_has_edat2() && size == PUD_SIZE) 255 254 return true; 256 255 else 257 256 return false;
+3 -3
arch/s390/mm/init.c
··· 8 8 * Copyright (C) 1995 Linus Torvalds 9 9 */ 10 10 11 + #include <linux/cpufeature.h> 11 12 #include <linux/signal.h> 12 13 #include <linux/sched.h> 13 14 #include <linux/kernel.h> ··· 118 117 { 119 118 unsigned long size = __end_ro_after_init - __start_ro_after_init; 120 119 121 - if (MACHINE_HAS_NX) 120 + if (cpu_has_nx()) 122 121 system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT); 123 122 __set_memory_ro(__start_ro_after_init, __end_ro_after_init); 124 123 pr_info("Write protected read-only-after-init data: %luk\n", size >> 10); ··· 175 174 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 176 175 177 176 pv_init(); 178 - kfence_split_mapping(); 179 177 180 178 /* this will put all low memory onto the freelists */ 181 179 memblock_free_all(); ··· 285 285 unsigned long size_pages = PFN_DOWN(size); 286 286 int rc; 287 287 288 - if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot)) 288 + if (WARN_ON_ONCE(pgprot_val(params->pgprot) != pgprot_val(PAGE_KERNEL))) 289 289 return -EINVAL; 290 290 291 291 VM_BUG_ON(!mhp_range_allowed(start, size, true));
+1 -8
arch/s390/mm/mmap.c
··· 51 51 { 52 52 unsigned long gap = rlim_stack->rlim_cur; 53 53 unsigned long pad = stack_maxrandom_size() + stack_guard_gap; 54 - unsigned long gap_min, gap_max; 55 54 56 55 /* Values close to RLIM_INFINITY can overflow. */ 57 56 if (gap + pad > gap) ··· 60 61 * Top of mmap area (just below the process stack). 61 62 * Leave at least a ~128 MB hole. 62 63 */ 63 - gap_min = SZ_128M; 64 - gap_max = (STACK_TOP / 6) * 5; 65 - 66 - if (gap < gap_min) 67 - gap = gap_min; 68 - else if (gap > gap_max) 69 - gap = gap_max; 64 + gap = clamp(gap, SZ_128M, (STACK_TOP / 6) * 5); 70 65 71 66 return PAGE_ALIGN(STACK_TOP - gap - rnd); 72 67 }
+5 -4
arch/s390/mm/pageattr.c
··· 3 3 * Copyright IBM Corp. 2011 4 4 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com> 5 5 */ 6 + #include <linux/cpufeature.h> 6 7 #include <linux/hugetlb.h> 7 8 #include <linux/proc_fs.h> 8 9 #include <linux/vmalloc.h> ··· 28 27 unsigned long boundary, size; 29 28 30 29 while (start < end) { 31 - if (MACHINE_HAS_EDAT1) { 30 + if (cpu_has_edat1()) { 32 31 /* set storage keys for a 1MB frame */ 33 32 size = 1UL << 20; 34 33 boundary = (start + size) & ~(size - 1); ··· 64 63 unsigned long *table, mask; 65 64 66 65 mask = 0; 67 - if (MACHINE_HAS_EDAT2) { 66 + if (cpu_has_edat2()) { 68 67 switch (dtt) { 69 68 case CRDTE_DTT_REGION3: 70 69 mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1); ··· 78 77 } 79 78 table = (unsigned long *)((unsigned long)old & mask); 80 79 crdte(*old, new, table, dtt, addr, get_lowcore()->kernel_asce.val); 81 - } else if (MACHINE_HAS_IDTE) { 80 + } else if (cpu_has_idte()) { 82 81 cspg(old, *old, new); 83 82 } else { 84 83 csp((unsigned int *)old + 1, *old, new); ··· 374 373 unsigned long end; 375 374 int rc; 376 375 377 - if (!MACHINE_HAS_NX) 376 + if (!cpu_has_nx()) 378 377 flags &= ~(SET_MEMORY_NX | SET_MEMORY_X); 379 378 if (!flags) 380 379 return 0;
+2 -2
arch/s390/mm/pfault.c
··· 56 56 if (pfault_disable) 57 57 return rc; 58 58 diag_stat_inc(DIAG_STAT_X258); 59 - asm volatile( 59 + asm_inline volatile( 60 60 " diag %[refbk],%[rc],0x258\n" 61 61 "0: nopr %%r7\n" 62 62 EX_TABLE(0b, 0b) ··· 78 78 if (pfault_disable) 79 79 return; 80 80 diag_stat_inc(DIAG_STAT_X258); 81 - asm volatile( 81 + asm_inline volatile( 82 82 " diag %[refbk],0,0x258\n" 83 83 "0: nopr %%r7\n" 84 84 EX_TABLE(0b, 0b)
-25
arch/s390/mm/pgalloc.c
··· 16 16 #include <asm/tlb.h> 17 17 #include <asm/tlbflush.h> 18 18 19 - #ifdef CONFIG_PGSTE 20 - 21 - int page_table_allocate_pgste = 0; 22 - EXPORT_SYMBOL(page_table_allocate_pgste); 23 - 24 - static const struct ctl_table page_table_sysctl[] = { 25 - { 26 - .procname = "allocate_pgste", 27 - .data = &page_table_allocate_pgste, 28 - .maxlen = sizeof(int), 29 - .mode = S_IRUGO | S_IWUSR, 30 - .proc_handler = proc_dointvec_minmax, 31 - .extra1 = SYSCTL_ZERO, 32 - .extra2 = SYSCTL_ONE, 33 - }, 34 - }; 35 - 36 - static int __init page_table_register_sysctl(void) 37 - { 38 - return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM; 39 - } 40 - __initcall(page_table_register_sysctl); 41 - 42 - #endif /* CONFIG_PGSTE */ 43 - 44 19 unsigned long *crst_table_alloc(struct mm_struct *mm) 45 20 { 46 21 struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
+40 -48
arch/s390/mm/pgtable.c
··· 4 4 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 5 5 */ 6 6 7 + #include <linux/cpufeature.h> 7 8 #include <linux/sched.h> 8 9 #include <linux/kernel.h> 9 10 #include <linux/errno.h> ··· 24 23 #include <asm/tlbflush.h> 25 24 #include <asm/mmu_context.h> 26 25 #include <asm/page-states.h> 26 + #include <asm/machine.h> 27 27 28 28 pgprot_t pgprot_writecombine(pgprot_t prot) 29 29 { ··· 36 34 } 37 35 EXPORT_SYMBOL_GPL(pgprot_writecombine); 38 36 39 - pgprot_t pgprot_writethrough(pgprot_t prot) 40 - { 41 - /* 42 - * mio_wb_bit_mask may be set on a different CPU, but it is only set 43 - * once at init and only read afterwards. 44 - */ 45 - return __pgprot(pgprot_val(prot) & ~mio_wb_bit_mask); 46 - } 47 - EXPORT_SYMBOL_GPL(pgprot_writethrough); 48 - 49 37 static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr, 50 38 pte_t *ptep, int nodat) 51 39 { 52 40 unsigned long opt, asce; 53 41 54 - if (MACHINE_HAS_TLB_GUEST) { 42 + if (machine_has_tlb_guest()) { 55 43 opt = 0; 56 44 asce = READ_ONCE(mm->context.gmap_asce); 57 45 if (asce == 0UL || nodat) ··· 61 69 { 62 70 unsigned long opt, asce; 63 71 64 - if (MACHINE_HAS_TLB_GUEST) { 72 + if (machine_has_tlb_guest()) { 65 73 opt = 0; 66 74 asce = READ_ONCE(mm->context.gmap_asce); 67 75 if (asce == 0UL || nodat) ··· 86 94 if (unlikely(pte_val(old) & _PAGE_INVALID)) 87 95 return old; 88 96 atomic_inc(&mm->context.flush_count); 89 - if (MACHINE_HAS_TLB_LC && 97 + if (cpu_has_tlb_lc() && 90 98 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) 91 99 ptep_ipte_local(mm, addr, ptep, nodat); 92 100 else ··· 165 173 skey = (unsigned long) page_get_storage_key(address); 166 174 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); 167 175 /* Transfer page changed & referenced bit to guest bits in pgste */ 168 - pgste_val(pgste) |= bits << 48; /* GR bit & GC bit */ 176 + pgste = set_pgste_bit(pgste, bits << 48); /* GR bit & GC bit */ 169 177 /* Copy page access key and fetch protection bit to 
pgste */ 170 - pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT); 171 - pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56; 178 + pgste = clear_pgste_bit(pgste, PGSTE_ACC_BITS | PGSTE_FP_BIT); 179 + pgste = set_pgste_bit(pgste, (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56); 172 180 #endif 173 181 return pgste; 174 182 ··· 202 210 if ((pte_val(entry) & _PAGE_PRESENT) && 203 211 (pte_val(entry) & _PAGE_WRITE) && 204 212 !(pte_val(entry) & _PAGE_INVALID)) { 205 - if (!MACHINE_HAS_ESOP) { 213 + if (!machine_has_esop()) { 206 214 /* 207 215 * Without enhanced suppression-on-protection force 208 216 * the dirty bit on for all writable ptes. ··· 212 220 } 213 221 if (!(pte_val(entry) & _PAGE_PROTECT)) 214 222 /* This pte allows write access, set user-dirty */ 215 - pgste_val(pgste) |= PGSTE_UC_BIT; 223 + pgste = set_pgste_bit(pgste, PGSTE_UC_BIT); 216 224 } 217 225 #endif 218 226 set_pte(ptep, entry); ··· 228 236 229 237 bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT); 230 238 if (bits) { 231 - pgste_val(pgste) ^= bits; 239 + pgste = __pgste(pgste_val(pgste) ^ bits); 232 240 ptep_notify(mm, addr, ptep, bits); 233 241 } 234 242 #endif ··· 366 374 static inline void pmdp_idte_local(struct mm_struct *mm, 367 375 unsigned long addr, pmd_t *pmdp) 368 376 { 369 - if (MACHINE_HAS_TLB_GUEST) 377 + if (machine_has_tlb_guest()) 370 378 __pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE, 371 379 mm->context.asce, IDTE_LOCAL); 372 380 else ··· 378 386 static inline void pmdp_idte_global(struct mm_struct *mm, 379 387 unsigned long addr, pmd_t *pmdp) 380 388 { 381 - if (MACHINE_HAS_TLB_GUEST) { 389 + if (machine_has_tlb_guest()) { 382 390 __pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE, 383 391 mm->context.asce, IDTE_GLOBAL); 384 392 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) 385 393 gmap_pmdp_idte_global(mm, addr); 386 - } else if (MACHINE_HAS_IDTE) { 394 + } else if (cpu_has_idte()) { 387 395 __pmdp_idte(addr, pmdp, 0, 0, 
IDTE_GLOBAL); 388 396 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) 389 397 gmap_pmdp_idte_global(mm, addr); ··· 403 411 if (pmd_val(old) & _SEGMENT_ENTRY_INVALID) 404 412 return old; 405 413 atomic_inc(&mm->context.flush_count); 406 - if (MACHINE_HAS_TLB_LC && 414 + if (cpu_has_tlb_lc() && 407 415 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) 408 416 pmdp_idte_local(mm, addr, pmdp); 409 417 else ··· 497 505 static inline void pudp_idte_local(struct mm_struct *mm, 498 506 unsigned long addr, pud_t *pudp) 499 507 { 500 - if (MACHINE_HAS_TLB_GUEST) 508 + if (machine_has_tlb_guest()) 501 509 __pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE, 502 510 mm->context.asce, IDTE_LOCAL); 503 511 else ··· 507 515 static inline void pudp_idte_global(struct mm_struct *mm, 508 516 unsigned long addr, pud_t *pudp) 509 517 { 510 - if (MACHINE_HAS_TLB_GUEST) 518 + if (machine_has_tlb_guest()) 511 519 __pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE, 512 520 mm->context.asce, IDTE_GLOBAL); 513 - else if (MACHINE_HAS_IDTE) 521 + else if (cpu_has_idte()) 514 522 __pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL); 515 523 else 516 524 /* ··· 529 537 if (pud_val(old) & _REGION_ENTRY_INVALID) 530 538 return old; 531 539 atomic_inc(&mm->context.flush_count); 532 - if (MACHINE_HAS_TLB_LC && 540 + if (cpu_has_tlb_lc() && 533 541 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) 534 542 pudp_idte_local(mm, addr, pudp); 535 543 else ··· 601 609 /* the mm_has_pgste() check is done in set_pte_at() */ 602 610 preempt_disable(); 603 611 pgste = pgste_get_lock(ptep); 604 - pgste_val(pgste) &= ~_PGSTE_GPS_ZERO; 612 + pgste = clear_pgste_bit(pgste, _PGSTE_GPS_ZERO); 605 613 pgste_set_key(ptep, pgste, entry, mm); 606 614 pgste = pgste_set_pte(ptep, pgste, entry); 607 615 pgste_set_unlock(ptep, pgste); ··· 614 622 615 623 preempt_disable(); 616 624 pgste = pgste_get_lock(ptep); 617 - pgste_val(pgste) |= PGSTE_IN_BIT; 625 + pgste = set_pgste_bit(pgste, 
PGSTE_IN_BIT); 618 626 pgste_set_unlock(ptep, pgste); 619 627 preempt_enable(); 620 628 } ··· 659 667 entry = clear_pte_bit(entry, __pgprot(_PAGE_INVALID)); 660 668 entry = set_pte_bit(entry, __pgprot(_PAGE_PROTECT)); 661 669 } 662 - pgste_val(pgste) |= bit; 670 + pgste = set_pgste_bit(pgste, bit); 663 671 pgste = pgste_set_pte(ptep, pgste, entry); 664 672 pgste_set_unlock(ptep, pgste); 665 673 return 0; ··· 679 687 if (!(pte_val(spte) & _PAGE_INVALID) && 680 688 !((pte_val(spte) & _PAGE_PROTECT) && 681 689 !(pte_val(pte) & _PAGE_PROTECT))) { 682 - pgste_val(spgste) |= PGSTE_VSIE_BIT; 690 + spgste = set_pgste_bit(spgste, PGSTE_VSIE_BIT); 683 691 tpgste = pgste_get_lock(tptep); 684 692 tpte = __pte((pte_val(spte) & PAGE_MASK) | 685 693 (pte_val(pte) & _PAGE_PROTECT)); ··· 737 745 pte_clear(mm, addr, ptep); 738 746 } 739 747 if (reset) 740 - pgste_val(pgste) &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT); 748 + pgste = clear_pgste_bit(pgste, _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT); 741 749 pgste_set_unlock(ptep, pgste); 742 750 preempt_enable(); 743 751 } ··· 750 758 /* Clear storage key ACC and F, but set R/C */ 751 759 preempt_disable(); 752 760 pgste = pgste_get_lock(ptep); 753 - pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT); 754 - pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT; 761 + pgste = clear_pgste_bit(pgste, PGSTE_ACC_BITS | PGSTE_FP_BIT); 762 + pgste = set_pgste_bit(pgste, PGSTE_GR_BIT | PGSTE_GC_BIT); 755 763 ptev = pte_val(*ptep); 756 764 if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE)) 757 765 page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0); ··· 772 780 773 781 pgste = pgste_get_lock(ptep); 774 782 dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT); 775 - pgste_val(pgste) &= ~PGSTE_UC_BIT; 783 + pgste = clear_pgste_bit(pgste, PGSTE_UC_BIT); 776 784 pte = *ptep; 777 785 if (dirty && (pte_val(pte) & _PAGE_PRESENT)) { 778 786 pgste = pgste_pte_notify(mm, addr, ptep, pgste); 779 787 nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT); 780 788 
ptep_ipte_global(mm, addr, ptep, nodat); 781 - if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE)) 789 + if (machine_has_esop() || !(pte_val(pte) & _PAGE_WRITE)) 782 790 pte = set_pte_bit(pte, __pgprot(_PAGE_PROTECT)); 783 791 else 784 792 pte = set_pte_bit(pte, __pgprot(_PAGE_INVALID)); ··· 834 842 if (!ptep) 835 843 goto again; 836 844 new = old = pgste_get_lock(ptep); 837 - pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT | 838 - PGSTE_ACC_BITS | PGSTE_FP_BIT); 845 + new = clear_pgste_bit(new, PGSTE_GR_BIT | PGSTE_GC_BIT | 846 + PGSTE_ACC_BITS | PGSTE_FP_BIT); 839 847 keyul = (unsigned long) key; 840 - pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48; 841 - pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56; 848 + new = set_pgste_bit(new, (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48); 849 + new = set_pgste_bit(new, (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56); 842 850 if (!(pte_val(*ptep) & _PAGE_INVALID)) { 843 851 unsigned long bits, skey; 844 852 ··· 849 857 /* Set storage key ACC and FP */ 850 858 page_set_storage_key(paddr, skey, !nq); 851 859 /* Merge host changed & referenced into pgste */ 852 - pgste_val(new) |= bits << 52; 860 + new = set_pgste_bit(new, bits << 52); 853 861 } 854 862 /* changing the guest storage key is considered a change of the page */ 855 863 if ((pgste_val(new) ^ pgste_val(old)) & 856 864 (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT)) 857 - pgste_val(new) |= PGSTE_UC_BIT; 865 + new = set_pgste_bit(new, PGSTE_UC_BIT); 858 866 859 867 pgste_set_unlock(ptep, new); 860 868 pte_unmap_unlock(ptep, ptl); ··· 942 950 goto again; 943 951 new = old = pgste_get_lock(ptep); 944 952 /* Reset guest reference bit only */ 945 - pgste_val(new) &= ~PGSTE_GR_BIT; 953 + new = clear_pgste_bit(new, PGSTE_GR_BIT); 946 954 947 955 if (!(pte_val(*ptep) & _PAGE_INVALID)) { 948 956 paddr = pte_val(*ptep) & PAGE_MASK; 949 957 cc = page_reset_referenced(paddr); 950 958 /* Merge real referenced bit 
into host-set */ 951 - pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT; 959 + new = set_pgste_bit(new, ((unsigned long)cc << 53) & PGSTE_HR_BIT); 952 960 } 953 961 /* Reflect guest's logical view, not physical */ 954 962 cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49; 955 963 /* Changing the guest storage key is considered a change of the page */ 956 964 if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT) 957 - pgste_val(new) |= PGSTE_UC_BIT; 965 + new = set_pgste_bit(new, PGSTE_UC_BIT); 958 966 959 967 pgste_set_unlock(ptep, new); 960 968 pte_unmap_unlock(ptep, ptl); ··· 1118 1126 if (res) 1119 1127 pgstev |= _PGSTE_GPS_ZERO; 1120 1128 1121 - pgste_val(pgste) = pgstev; 1129 + pgste = __pgste(pgstev); 1122 1130 pgste_set_unlock(ptep, pgste); 1123 1131 pte_unmap_unlock(ptep, ptl); 1124 1132 return res; ··· 1151 1159 return -EFAULT; 1152 1160 new = pgste_get_lock(ptep); 1153 1161 1154 - pgste_val(new) &= ~bits; 1155 - pgste_val(new) |= value & bits; 1162 + new = clear_pgste_bit(new, bits); 1163 + new = set_pgste_bit(new, value & bits); 1156 1164 1157 1165 pgste_set_unlock(ptep, new); 1158 1166 pte_unmap_unlock(ptep, ptl);
+5 -4
arch/s390/mm/vmem.c
··· 4 4 */ 5 5 6 6 #include <linux/memory_hotplug.h> 7 + #include <linux/cpufeature.h> 7 8 #include <linux/memblock.h> 8 9 #include <linux/pfn.h> 9 10 #include <linux/mm.h> ··· 250 249 } else if (pmd_none(*pmd)) { 251 250 if (IS_ALIGNED(addr, PMD_SIZE) && 252 251 IS_ALIGNED(next, PMD_SIZE) && 253 - MACHINE_HAS_EDAT1 && direct && 252 + cpu_has_edat1() && direct && 254 253 !debug_pagealloc_enabled()) { 255 254 set_pmd(pmd, __pmd(__pa(addr) | prot)); 256 255 pages++; 257 256 continue; 258 - } else if (!direct && MACHINE_HAS_EDAT1) { 257 + } else if (!direct && cpu_has_edat1()) { 259 258 void *new_page; 260 259 261 260 /* ··· 336 335 } else if (pud_none(*pud)) { 337 336 if (IS_ALIGNED(addr, PUD_SIZE) && 338 337 IS_ALIGNED(next, PUD_SIZE) && 339 - MACHINE_HAS_EDAT2 && direct && 338 + cpu_has_edat2() && direct && 340 339 !debug_pagealloc_enabled()) { 341 340 set_pud(pud, __pud(__pa(addr) | prot)); 342 341 pages++; ··· 660 659 * prefix page is used to return to the previous context with 661 660 * an LPSWE instruction and therefore must be executable. 662 661 */ 663 - if (!static_key_enabled(&cpu_has_bear)) 662 + if (!cpu_has_bear()) 664 663 set_memory_x(0, 1); 665 664 if (debug_pagealloc_enabled()) 666 665 __set_memory_4k(__va(0), absolute_pointer(__va(0)) + ident_map_size);
+3 -2
arch/s390/pci/pci.c
··· 31 31 #include <linux/lockdep.h> 32 32 #include <linux/list_sort.h> 33 33 34 + #include <asm/machine.h> 34 35 #include <asm/isc.h> 35 36 #include <asm/airq.h> 36 37 #include <asm/facility.h> ··· 1079 1078 return NULL; 1080 1079 } 1081 1080 if (!strcmp(str, "nomio")) { 1082 - get_lowcore()->machine_flags &= ~MACHINE_FLAG_PCI_MIO; 1081 + clear_machine_feature(MFEATURE_PCI_MIO); 1083 1082 return NULL; 1084 1083 } 1085 1084 if (!strcmp(str, "force_floating")) { ··· 1154 1153 return 0; 1155 1154 } 1156 1155 1157 - if (MACHINE_HAS_PCI_MIO) { 1156 + if (test_machine_feature(MFEATURE_PCI_MIO)) { 1158 1157 static_branch_enable(&have_mio); 1159 1158 system_ctl_set_bit(2, CR2_MIO_ADDRESSING_BIT); 1160 1159 }
+2 -2
arch/s390/pci/pci_clp.c
··· 56 56 int cc, exception; 57 57 58 58 exception = 1; 59 - asm volatile ( 59 + asm_inline volatile ( 60 60 " .insn rrf,0xb9a00000,%[mask],%[cmd],8,0\n" 61 61 "0: lhi %[exc],0\n" 62 62 "1:\n" ··· 79 79 u64 ignored; 80 80 81 81 exception = 1; 82 - asm volatile ( 82 + asm_inline volatile ( 83 83 " .insn rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n" 84 84 "0: lhi %[exc],0\n" 85 85 "1:\n"
+6 -6
arch/s390/pci/pci_insn.c
··· 160 160 u64 __data; 161 161 162 162 exception = 1; 163 - asm volatile ( 163 + asm_inline volatile ( 164 164 " .insn rre,0xb9d20000,%[data],%[req_off]\n" 165 165 "0: lhi %[exc],0\n" 166 166 "1:\n" ··· 229 229 u64 __data; 230 230 231 231 exception = 1; 232 - asm volatile ( 232 + asm_inline volatile ( 233 233 " .insn rre,0xb9d60000,%[data],%[ioaddr_len]\n" 234 234 "0: lhi %[exc],0\n" 235 235 "1:\n" ··· 267 267 int cc, exception; 268 268 269 269 exception = 1; 270 - asm volatile ( 270 + asm_inline volatile ( 271 271 " .insn rre,0xb9d00000,%[data],%[req_off]\n" 272 272 "0: lhi %[exc],0\n" 273 273 "1:\n" ··· 321 321 int cc, exception; 322 322 323 323 exception = 1; 324 - asm volatile ( 324 + asm_inline volatile ( 325 325 " .insn rre,0xb9d40000,%[data],%[ioaddr_len]\n" 326 326 "0: lhi %[exc],0\n" 327 327 "1:\n" ··· 356 356 int cc, exception; 357 357 358 358 exception = 1; 359 - asm volatile ( 359 + asm_inline volatile ( 360 360 " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n" 361 361 "0: lhi %[exc],0\n" 362 362 "1:\n" ··· 410 410 int cc, exception; 411 411 412 412 exception = 1; 413 - asm volatile ( 413 + asm_inline volatile ( 414 414 " .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[data]\n" 415 415 "0: lhi %[exc],0\n" 416 416 "1:\n"
+3 -3
arch/s390/pci/pci_mmio.c
··· 34 34 int cc, exception; 35 35 36 36 exception = 1; 37 - asm volatile ( 37 + asm_inline volatile ( 38 38 " sacf 256\n" 39 39 "0: .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n" 40 40 "1: lhi %[exc],0\n" ··· 64 64 * address space. pcistg then uses the user mappings. 65 65 */ 66 66 exception = 1; 67 - asm volatile ( 67 + asm_inline volatile ( 68 68 " sacf 256\n" 69 69 "0: llgc %[tmp],0(%[src])\n" 70 70 "4: sllg %[val],%[val],8\n" ··· 215 215 * user address @dst 216 216 */ 217 217 exception = 1; 218 - asm volatile ( 218 + asm_inline volatile ( 219 219 " sacf 256\n" 220 220 "0: .insn rre,0xb9d60000,%[val],%[ioaddr_len]\n" 221 221 "1: lhi %[exc],0\n"
+2 -1
drivers/s390/block/dasd.c
··· 21 21 #include <linux/seq_file.h> 22 22 #include <linux/vmalloc.h> 23 23 24 + #include <asm/machine.h> 24 25 #include <asm/ccwdev.h> 25 26 #include <asm/ebcdic.h> 26 27 #include <asm/idals.h> ··· 3383 3382 struct diag210 diag_data; 3384 3383 int rc; 3385 3384 3386 - if (!MACHINE_IS_VM) 3385 + if (!machine_is_vm()) 3387 3386 return 0; 3388 3387 ccw_device_get_id(device->cdev, &dev_id); 3389 3388 memset(&diag_data, 0, sizeof(diag_data));
+2 -1
drivers/s390/block/dasd_devmap.c
··· 18 18 #include <linux/module.h> 19 19 #include <linux/slab.h> 20 20 21 + #include <asm/machine.h> 21 22 #include <asm/debug.h> 22 23 #include <linux/uaccess.h> 23 24 #include <asm/ipl.h> ··· 235 234 return 0; 236 235 } 237 236 if (strncmp("nopav", keyword, length) == 0) { 238 - if (MACHINE_IS_VM) 237 + if (machine_is_vm()) 239 238 pr_info("'nopav' is not supported on z/VM\n"); 240 239 else { 241 240 dasd_nopav = 1;
+3 -2
drivers/s390/block/dasd_diag.c
··· 18 18 #include <linux/init.h> 19 19 #include <linux/jiffies.h> 20 20 #include <asm/asm-extable.h> 21 + #include <asm/machine.h> 21 22 #include <asm/dasd.h> 22 23 #include <asm/debug.h> 23 24 #include <asm/diag.h> ··· 76 75 } addr_type; 77 76 78 77 exception = 1; 79 - asm volatile( 78 + asm_inline volatile( 80 79 " diag %[rx],%[cmd],0x250\n" 81 80 "0: lhi %[exc],0\n" 82 81 "1:\n" ··· 655 654 static int __init 656 655 dasd_diag_init(void) 657 656 { 658 - if (!MACHINE_IS_VM) { 657 + if (!machine_is_vm()) { 659 658 pr_info("Discipline %s cannot be used without z/VM\n", 660 659 dasd_diag_discipline.name); 661 660 return -ENODEV;
+2 -1
drivers/s390/block/dasd_eckd.c
··· 23 23 #include <linux/io.h> 24 24 25 25 #include <asm/css_chars.h> 26 + #include <asm/machine.h> 26 27 #include <asm/debug.h> 27 28 #include <asm/idals.h> 28 29 #include <asm/ebcdic.h> ··· 1954 1953 if (private->uid.type == UA_BASE_PAV_ALIAS || 1955 1954 private->uid.type == UA_HYPER_PAV_ALIAS) 1956 1955 return 0; 1957 - if (dasd_nopav || MACHINE_IS_VM) 1956 + if (dasd_nopav || machine_is_vm()) 1958 1957 enable_pav = 0; 1959 1958 else 1960 1959 enable_pav = 1;
+2 -1
drivers/s390/char/con3215.c
··· 23 23 #include <linux/reboot.h> 24 24 #include <linux/serial.h> /* ASYNC_* flags */ 25 25 #include <linux/slab.h> 26 + #include <asm/machine.h> 26 27 #include <asm/ccwdev.h> 27 28 #include <asm/cio.h> 28 29 #include <linux/io.h> ··· 908 907 return -ENODEV; 909 908 910 909 /* Set the console mode for VM */ 911 - if (MACHINE_IS_VM) { 910 + if (machine_is_vm()) { 912 911 cpcmd("TERM CONMODE 3215", NULL, 0, NULL); 913 912 cpcmd("TERM AUTOCR OFF", NULL, 0, NULL); 914 913 }
+2 -1
drivers/s390/char/con3270.c
··· 23 23 #include <linux/memblock.h> 24 24 #include <linux/compat.h> 25 25 26 + #include <asm/machine.h> 26 27 #include <asm/ccwdev.h> 27 28 #include <asm/cio.h> 28 29 #include <asm/ebcdic.h> ··· 2157 2156 return -ENODEV; 2158 2157 2159 2158 /* Set the console mode for VM */ 2160 - if (MACHINE_IS_VM) { 2159 + if (machine_is_vm()) { 2161 2160 cpcmd("TERM CONMODE 3270", NULL, 0, NULL); 2162 2161 cpcmd("TERM AUTOCR OFF", NULL, 0, NULL); 2163 2162 }
+1 -1
drivers/s390/char/diag_ftp.c
··· 106 106 int rc; 107 107 108 108 diag_stat_inc(DIAG_STAT_X2C4); 109 - asm volatile( 109 + asm_inline volatile( 110 110 " diag %[addr],%[cmd],0x2c4\n" 111 111 "0: j 2f\n" 112 112 "1: la %[rc],%[err]\n"
+4 -2
drivers/s390/char/hmcdrv_ftp.c
··· 17 17 #include <linux/ctype.h> 18 18 #include <linux/crc16.h> 19 19 20 + #include <asm/machine.h> 21 + 20 22 #include "hmcdrv_ftp.h" 21 23 #include "hmcdrv_cache.h" 22 24 #include "sclp_ftp.h" ··· 310 308 mutex_lock(&hmcdrv_ftp_mutex); /* block transfers while start-up */ 311 309 312 310 if (hmcdrv_ftp_refcnt == 0) { 313 - if (MACHINE_IS_VM) 311 + if (machine_is_vm()) 314 312 hmcdrv_ftp_funcs = &hmcdrv_ftp_zvm; 315 - else if (MACHINE_IS_LPAR || MACHINE_IS_KVM) 313 + else if (machine_is_lpar() || machine_is_kvm()) 316 314 hmcdrv_ftp_funcs = &hmcdrv_ftp_lpar; 317 315 else 318 316 rc = -EOPNOTSUPP;
+2 -1
drivers/s390/char/monreader.c
··· 24 24 #include <linux/slab.h> 25 25 #include <net/iucv/iucv.h> 26 26 #include <linux/uaccess.h> 27 + #include <asm/machine.h> 27 28 #include <asm/ebcdic.h> 28 29 #include <asm/extmem.h> 29 30 ··· 457 456 { 458 457 int rc; 459 458 460 - if (!MACHINE_IS_VM) { 459 + if (!machine_is_vm()) { 461 460 pr_err("The z/VM *MONITOR record device driver cannot be " 462 461 "loaded without z/VM\n"); 463 462 return -ENODEV;
+2 -1
drivers/s390/char/monwriter.c
··· 23 23 #include <linux/slab.h> 24 24 #include <linux/uaccess.h> 25 25 #include <linux/io.h> 26 + #include <asm/machine.h> 26 27 #include <asm/ebcdic.h> 27 28 #include <asm/appldata.h> 28 29 #include <asm/monwriter.h> ··· 294 293 295 294 static int __init mon_init(void) 296 295 { 297 - if (!MACHINE_IS_VM) 296 + if (!machine_is_vm()) 298 297 return -ENODEV; 299 298 /* 300 299 * misc_register() has to be the last action in module_init(), because
+2 -1
drivers/s390/char/raw3270.c
··· 17 17 #include <linux/types.h> 18 18 #include <linux/wait.h> 19 19 20 + #include <asm/machine.h> 20 21 #include <asm/ccwdev.h> 21 22 #include <asm/cio.h> 22 23 #include <asm/ebcdic.h> ··· 619 618 if (rq->rc) { 620 619 /* Reset command failed. */ 621 620 rp->state = RAW3270_STATE_INIT; 622 - } else if (MACHINE_IS_VM) { 621 + } else if (machine_is_vm()) { 623 622 raw3270_size_device_vm(rp); 624 623 raw3270_size_device_done(rp); 625 624 } else {
+5 -4
drivers/s390/char/sclp.h
··· 12 12 #include <linux/types.h> 13 13 #include <linux/list.h> 14 14 #include <asm/asm-extable.h> 15 + #include <asm/machine.h> 15 16 #include <asm/sclp.h> 16 17 #include <asm/ebcdic.h> 17 18 #include <asm/asm.h> ··· 318 317 int cc, exception; 319 318 320 319 exception = 1; 321 - asm volatile( 320 + asm_inline volatile( 322 321 "0: .insn rre,0xb2200000,%[cmd],%[sccb]\n" /* servc */ 323 322 "1: lhi %[exc],0\n" 324 323 "2:\n" ··· 343 342 static inline unsigned char 344 343 sclp_ascebc(unsigned char ch) 345 344 { 346 - return (MACHINE_IS_VM) ? _ascebc[ch] : _ascebc_500[ch]; 345 + return (machine_is_vm()) ? _ascebc[ch] : _ascebc_500[ch]; 347 346 } 348 347 349 348 /* translate string from EBCDIC to ASCII */ 350 349 static inline void 351 350 sclp_ebcasc_str(char *str, int nr) 352 351 { 353 - (MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr); 352 + (machine_is_vm()) ? EBCASC(str, nr) : EBCASC_500(str, nr); 354 353 } 355 354 356 355 /* translate string from ASCII to EBCDIC */ 357 356 static inline void 358 357 sclp_ascebc_str(char *str, int nr) 359 358 { 360 - (MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr); 359 + (machine_is_vm()) ? ASCEBC(str, nr) : ASCEBC_500(str, nr); 361 360 } 362 361 363 362 static inline struct gds_vector *
+2 -1
drivers/s390/char/sclp_cmd.c
··· 8 8 #define KMSG_COMPONENT "sclp_cmd" 9 9 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 10 11 + #include <linux/cpufeature.h> 11 12 #include <linux/completion.h> 12 13 #include <linux/init.h> 13 14 #include <linux/errno.h> ··· 429 428 goto skip_add; 430 429 for (addr = start; addr < start + size; addr += block_size) 431 430 add_memory(0, addr, block_size, 432 - MACHINE_HAS_EDAT1 ? 431 + cpu_has_edat1() ? 433 432 MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE); 434 433 skip_add: 435 434 first_rn = rn;
+17
drivers/s390/char/sclp_con.c
··· 264 264 }; 265 265 266 266 /* 267 + * Release allocated pages. 268 + */ 269 + static void __init __sclp_console_free_pages(void) 270 + { 271 + struct list_head *page, *p; 272 + 273 + list_for_each_safe(page, p, &sclp_con_pages) { 274 + list_del(page); 275 + free_page((unsigned long)page); 276 + } 277 + } 278 + 279 + /* 267 280 * called by console_init() in drivers/char/tty_io.c at boot-time. 268 281 */ 269 282 static int __init ··· 295 282 /* Allocate pages for output buffering */ 296 283 for (i = 0; i < sclp_console_pages; i++) { 297 284 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 285 + if (!page) { 286 + __sclp_console_free_pages(); 287 + return -ENOMEM; 288 + } 298 289 list_add_tail(page, &sclp_con_pages); 299 290 } 300 291 sclp_conbuf = NULL;
+1 -5
drivers/s390/char/sclp_early.c
··· 50 50 sclp.has_aeni = !!(sccb->fac118 & 0x20); 51 51 sclp.has_aisi = !!(sccb->fac118 & 0x10); 52 52 sclp.has_zpci_lsi = !!(sccb->fac118 & 0x01); 53 - if (sccb->fac85 & 0x02) 54 - get_lowcore()->machine_flags |= MACHINE_FLAG_ESOP; 55 - if (sccb->fac91 & 0x40) 56 - get_lowcore()->machine_flags |= MACHINE_FLAG_TLB_GUEST; 57 53 sclp.has_diag204_bif = !!(sccb->fac98 & 0x80); 58 54 sclp.has_diag310 = !!(sccb->fac91 & 0x80); 59 55 if (sccb->cpuoff > 134) { ··· 74 78 sclp.hamax = U64_MAX; 75 79 76 80 if (!sccb->hcpua) { 77 - if (MACHINE_IS_VM) 81 + if (machine_is_vm()) 78 82 sclp.max_cores = 64; 79 83 else 80 84 sclp.max_cores = sccb->ncpurl;
+13
drivers/s390/char/sclp_early_core.c
··· 13 13 #include <asm/sections.h> 14 14 #include <asm/physmem_info.h> 15 15 #include <asm/facility.h> 16 + #include <asm/machine.h> 16 17 #include "sclp.h" 17 18 #include "sclp_rw.h" 18 19 ··· 334 333 if (sclp_info_sccb.hsa_size) 335 334 *hsa_size = (sclp_info_sccb.hsa_size - 1) * PAGE_SIZE; 336 335 return 0; 336 + } 337 + 338 + void __init sclp_early_detect_machine_features(void) 339 + { 340 + struct read_info_sccb *sccb = &sclp_info_sccb; 341 + 342 + if (!sclp_info_sccb_valid) 343 + return; 344 + if (sccb->fac85 & 0x02) 345 + set_machine_feature(MFEATURE_ESOP); 346 + if (sccb->fac91 & 0x40) 347 + set_machine_feature(MFEATURE_TLB_GUEST); 337 348 } 338 349 339 350 #define SCLP_STORAGE_INFO_FACILITY 0x0000400000000000UL
+14 -2
drivers/s390/char/sclp_tty.c
··· 490 490 .flush_buffer = sclp_tty_flush_buffer, 491 491 }; 492 492 493 + /* Release allocated pages. */ 494 + static void __init __sclp_tty_free_pages(void) 495 + { 496 + struct list_head *page, *p; 497 + 498 + list_for_each_safe(page, p, &sclp_tty_pages) { 499 + list_del(page); 500 + free_page((unsigned long)page); 501 + } 502 + } 503 + 493 504 static int __init 494 505 sclp_tty_init(void) 495 506 { ··· 510 499 int rc; 511 500 512 501 /* z/VM multiplexes the line mode output on the 32xx screen */ 513 - if (MACHINE_IS_VM && !CONSOLE_IS_SCLP) 502 + if (machine_is_vm() && !CONSOLE_IS_SCLP) 514 503 return 0; 515 504 if (!sclp.has_linemode) 516 505 return 0; ··· 527 516 for (i = 0; i < MAX_KMEM_PAGES; i++) { 528 517 page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 529 518 if (page == NULL) { 519 + __sclp_tty_free_pages(); 530 520 tty_driver_kref_put(driver); 531 521 return -ENOMEM; 532 522 } ··· 536 524 timer_setup(&sclp_tty_timer, sclp_tty_timeout, 0); 537 525 sclp_ttybuf = NULL; 538 526 sclp_tty_buffer_count = 0; 539 - if (MACHINE_IS_VM) { 527 + if (machine_is_vm()) { 540 528 /* case input lines to lowercase */ 541 529 sclp_tty_tolower = 1; 542 530 }
+3 -2
drivers/s390/char/vmcp.c
··· 23 23 #include <linux/mutex.h> 24 24 #include <linux/cma.h> 25 25 #include <linux/mm.h> 26 + #include <asm/machine.h> 26 27 #include <asm/cpcmd.h> 27 28 #include <asm/debug.h> 28 29 #include <asm/vmcp.h> ··· 53 52 54 53 void __init vmcp_cma_reserve(void) 55 54 { 56 - if (!MACHINE_IS_VM) 55 + if (!machine_is_vm()) 57 56 return; 58 57 cma_declare_contiguous(0, vmcp_cma_size, 0, 0, 0, false, "vmcp", &vmcp_cma); 59 58 } ··· 255 254 { 256 255 int ret; 257 256 258 - if (!MACHINE_IS_VM) 257 + if (!machine_is_vm()) 259 258 return 0; 260 259 261 260 vmcp_debug = debug_register("vmcp", 1, 1, 240);
+2 -1
drivers/s390/char/vmlogrdr.c
··· 23 23 #include <linux/spinlock.h> 24 24 #include <linux/atomic.h> 25 25 #include <linux/uaccess.h> 26 + #include <asm/machine.h> 26 27 #include <asm/cpcmd.h> 27 28 #include <asm/debug.h> 28 29 #include <asm/ebcdic.h> ··· 810 809 int i; 811 810 dev_t dev; 812 811 813 - if (! MACHINE_IS_VM) { 812 + if (!machine_is_vm()) { 814 813 pr_err("not running under VM, driver not loaded.\n"); 815 814 return -ENODEV; 816 815 }
+2 -1
drivers/s390/char/vmur.c
··· 18 18 #include <linux/kobject.h> 19 19 20 20 #include <linux/uaccess.h> 21 + #include <asm/machine.h> 21 22 #include <asm/cio.h> 22 23 #include <asm/ccwdev.h> 23 24 #include <asm/debug.h> ··· 1010 1009 int rc; 1011 1010 dev_t dev; 1012 1011 1013 - if (!MACHINE_IS_VM) { 1012 + if (!machine_is_vm()) { 1014 1013 pr_err("The %s cannot be loaded without z/VM\n", 1015 1014 ur_banner); 1016 1015 return -ENODEV;
+2 -3
drivers/s390/cio/crw.c
··· 77 77 if (unlikely(chain > 1)) { 78 78 struct crw tmp_crw; 79 79 80 - printk(KERN_WARNING"%s: Code does not support more " 81 - "than two chained crws; please report to " 82 - "linux390@de.ibm.com!\n", __func__); 80 + printk(KERN_WARNING "%s: Code does not support more than two chained crws\n", 81 + __func__); 83 82 ccode = stcrw(&tmp_crw); 84 83 printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, " 85 84 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+2 -1
drivers/s390/cio/device_id.c
··· 12 12 #include <linux/string.h> 13 13 #include <linux/types.h> 14 14 #include <linux/errno.h> 15 + #include <asm/machine.h> 15 16 #include <asm/ccwdev.h> 16 17 #include <asm/setup.h> 17 18 #include <asm/cio.h> ··· 176 175 struct senseid *senseid = &cdev->private->dma_area->senseid; 177 176 int vm = 0; 178 177 179 - if (rc && MACHINE_IS_VM) { 178 + if (rc && machine_is_vm()) { 180 179 /* Try diag 0x210 fallback on z/VM. */ 181 180 snsid_init(cdev); 182 181 if (diag210_get_dev_info(cdev) == 0) {
+4 -4
drivers/s390/cio/ioasm.c
··· 22 22 int ccode, exception; 23 23 24 24 exception = 1; 25 - asm volatile( 25 + asm_inline volatile( 26 26 " lgr 1,%[r1]\n" 27 27 " stsch %[addr]\n" 28 28 "0: lhi %[exc],0\n" ··· 52 52 int ccode, exception; 53 53 54 54 exception = 1; 55 - asm volatile( 55 + asm_inline volatile( 56 56 " lgr 1,%[r1]\n" 57 57 " msch %[addr]\n" 58 58 "0: lhi %[exc],0\n" ··· 106 106 int ccode, exception; 107 107 108 108 exception = 1; 109 - asm volatile( 109 + asm_inline volatile( 110 110 " lgr 1,%[r1]\n" 111 111 " ssch %[addr]\n" 112 112 "0: lhi %[exc],0\n" ··· 178 178 int cc, exception; 179 179 180 180 exception = 1; 181 - asm volatile( 181 + asm_inline volatile( 182 182 " .insn rre,0xb25f0000,%[chsc_area],0\n" 183 183 "0: lhi %[exc],0\n" 184 184 "1:\n"
+3 -3
drivers/s390/cio/vfio_ccw_drv.c
··· 171 171 return -ENODEV; 172 172 } 173 173 174 - parent = kzalloc(struct_size(parent, mdev_types, 1), GFP_KERNEL); 174 + parent = kzalloc(sizeof(*parent), GFP_KERNEL); 175 175 if (!parent) 176 176 return -ENOMEM; 177 177 ··· 186 186 187 187 parent->mdev_type.sysfs_name = "io"; 188 188 parent->mdev_type.pretty_name = "I/O subchannel (Non-QDIO)"; 189 - parent->mdev_types[0] = &parent->mdev_type; 189 + parent->mdev_types = &parent->mdev_type; 190 190 ret = mdev_register_parent(&parent->parent, &sch->dev, 191 191 &vfio_ccw_mdev_driver, 192 - parent->mdev_types, 1); 192 + &parent->mdev_types, 1); 193 193 if (ret) 194 194 goto out_unreg; 195 195
+1 -1
drivers/s390/cio/vfio_ccw_private.h
··· 79 79 80 80 struct mdev_parent parent; 81 81 struct mdev_type mdev_type; 82 - struct mdev_type *mdev_types[]; 82 + struct mdev_type *mdev_types; 83 83 }; 84 84 85 85 /**
+2 -1
drivers/s390/crypto/ap_bus.c
··· 26 26 #include <linux/notifier.h> 27 27 #include <linux/kthread.h> 28 28 #include <linux/mutex.h> 29 + #include <asm/machine.h> 29 30 #include <asm/airq.h> 30 31 #include <asm/tpi.h> 31 32 #include <linux/atomic.h> ··· 2325 2324 * Setup the high resolution poll timer. 2326 2325 * If we are running under z/VM adjust polling to z/VM polling rate. 2327 2326 */ 2328 - if (MACHINE_IS_VM) 2327 + if (machine_is_vm()) 2329 2328 poll_high_timeout = 1500000; 2330 2329 hrtimer_setup(&ap_poll_timer, ap_poll_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 2331 2330
+63 -5
drivers/s390/crypto/vfio_ap_ops.c
··· 650 650 matrix->adm_max = info->apxa ? info->nd : 15; 651 651 } 652 652 653 + static void signal_guest_ap_cfg_changed(struct ap_matrix_mdev *matrix_mdev) 654 + { 655 + if (matrix_mdev->cfg_chg_trigger) 656 + eventfd_signal(matrix_mdev->cfg_chg_trigger); 657 + } 658 + 653 659 static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev) 654 660 { 655 - if (matrix_mdev->kvm) 661 + if (matrix_mdev->kvm) { 656 662 kvm_arch_crypto_set_masks(matrix_mdev->kvm, 657 663 matrix_mdev->shadow_apcb.apm, 658 664 matrix_mdev->shadow_apcb.aqm, 659 665 matrix_mdev->shadow_apcb.adm); 666 + 667 + signal_guest_ap_cfg_changed(matrix_mdev); 668 + } 660 669 } 661 670 662 671 static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev) ··· 801 792 if (ret) 802 793 goto err_put_vdev; 803 794 matrix_mdev->req_trigger = NULL; 795 + matrix_mdev->cfg_chg_trigger = NULL; 804 796 dev_set_drvdata(&mdev->dev, matrix_mdev); 805 797 mutex_lock(&matrix_dev->mdevs_lock); 806 798 list_add(&matrix_mdev->node, &matrix_dev->mdev_list); ··· 2056 2046 2057 2047 matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev); 2058 2048 2049 + get_update_locks_for_mdev(matrix_mdev); 2050 + 2051 + if (matrix_mdev->kvm) { 2052 + kvm_arch_crypto_clear_masks(matrix_mdev->kvm); 2053 + signal_guest_ap_cfg_changed(matrix_mdev); 2054 + } 2055 + 2059 2056 if (matrix_mdev->req_trigger) { 2060 2057 if (!(count % 10)) 2061 2058 dev_notice_ratelimited(dev, ··· 2074 2057 dev_notice(dev, 2075 2058 "No device request registered, blocked until released by user\n"); 2076 2059 } 2060 + 2061 + release_update_locks_for_mdev(matrix_mdev); 2077 2062 } 2078 2063 2079 2064 static int vfio_ap_mdev_get_device_info(unsigned long arg) ··· 2113 2094 2114 2095 switch (info.index) { 2115 2096 case VFIO_AP_REQ_IRQ_INDEX: 2097 + info.count = 1; 2098 + info.flags = VFIO_IRQ_INFO_EVENTFD; 2099 + break; 2100 + case VFIO_AP_CFG_CHG_IRQ_INDEX: 2116 2101 info.count = 1; 2117 2102 info.flags = VFIO_IRQ_INFO_EVENTFD; 
2118 2103 break; ··· 2183 2160 return 0; 2184 2161 } 2185 2162 2163 + static int vfio_ap_set_cfg_change_irq(struct ap_matrix_mdev *matrix_mdev, unsigned long arg) 2164 + { 2165 + s32 fd; 2166 + void __user *data; 2167 + unsigned long minsz; 2168 + struct eventfd_ctx *cfg_chg_trigger; 2169 + 2170 + minsz = offsetofend(struct vfio_irq_set, count); 2171 + data = (void __user *)(arg + minsz); 2172 + 2173 + if (get_user(fd, (s32 __user *)data)) 2174 + return -EFAULT; 2175 + 2176 + if (fd == -1) { 2177 + if (matrix_mdev->cfg_chg_trigger) 2178 + eventfd_ctx_put(matrix_mdev->cfg_chg_trigger); 2179 + matrix_mdev->cfg_chg_trigger = NULL; 2180 + } else if (fd >= 0) { 2181 + cfg_chg_trigger = eventfd_ctx_fdget(fd); 2182 + if (IS_ERR(cfg_chg_trigger)) 2183 + return PTR_ERR(cfg_chg_trigger); 2184 + 2185 + if (matrix_mdev->cfg_chg_trigger) 2186 + eventfd_ctx_put(matrix_mdev->cfg_chg_trigger); 2187 + 2188 + matrix_mdev->cfg_chg_trigger = cfg_chg_trigger; 2189 + } else { 2190 + return -EINVAL; 2191 + } 2192 + 2193 + return 0; 2194 + } 2195 + 2186 2196 static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev, 2187 2197 unsigned long arg) 2188 2198 { ··· 2231 2175 switch (irq_set.index) { 2232 2176 case VFIO_AP_REQ_IRQ_INDEX: 2233 2177 return vfio_ap_set_request_irq(matrix_mdev, arg); 2178 + case VFIO_AP_CFG_CHG_IRQ_INDEX: 2179 + return vfio_ap_set_cfg_change_irq(matrix_mdev, arg); 2234 2180 default: 2235 2181 return -EINVAL; 2236 2182 } ··· 2257 2199 ret = vfio_ap_mdev_reset_queues(matrix_mdev); 2258 2200 break; 2259 2201 case VFIO_DEVICE_GET_IRQ_INFO: 2260 - ret = vfio_ap_get_irq_info(arg); 2261 - break; 2202 + ret = vfio_ap_get_irq_info(arg); 2203 + break; 2262 2204 case VFIO_DEVICE_SET_IRQS: 2263 2205 ret = vfio_ap_set_irqs(matrix_mdev, arg); 2264 2206 break; ··· 2374 2316 2375 2317 matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT; 2376 2318 matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT; 2377 - matrix_dev->mdev_types[0] = &matrix_dev->mdev_type; 
2319 + matrix_dev->mdev_types = &matrix_dev->mdev_type; 2378 2320 ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device, 2379 2321 &vfio_ap_matrix_driver, 2380 - matrix_dev->mdev_types, 1); 2322 + &matrix_dev->mdev_types, 1); 2381 2323 if (ret) 2382 2324 goto err_driver; 2383 2325 return 0;
+3 -1
drivers/s390/crypto/vfio_ap_private.h
··· 53 53 struct mutex guests_lock; /* serializes access to each KVM guest */ 54 54 struct mdev_parent parent; 55 55 struct mdev_type mdev_type; 56 - struct mdev_type *mdev_types[1]; 56 + struct mdev_type *mdev_types; 57 57 }; 58 58 59 59 extern struct ap_matrix_dev *matrix_dev; ··· 105 105 * @mdev: the mediated device 106 106 * @qtable: table of queues (struct vfio_ap_queue) assigned to the mdev 107 107 * @req_trigger eventfd ctx for signaling userspace to return a device 108 + * @cfg_chg_trigger eventfd ctx to signal AP config changed to userspace 108 109 * @apm_add: bitmap of APIDs added to the host's AP configuration 109 110 * @aqm_add: bitmap of APQIs added to the host's AP configuration 110 111 * @adm_add: bitmap of control domain numbers added to the host's AP ··· 121 120 struct mdev_device *mdev; 122 121 struct ap_queue_table qtable; 123 122 struct eventfd_ctx *req_trigger; 123 + struct eventfd_ctx *cfg_chg_trigger; 124 124 DECLARE_BITMAP(apm_add, AP_DEVICES); 125 125 DECLARE_BITMAP(aqm_add, AP_DOMAINS); 126 126 DECLARE_BITMAP(adm_add, AP_DOMAINS);
+2 -1
drivers/s390/net/qeth_l2_main.c
··· 22 22 #include <linux/hash.h> 23 23 #include <linux/hashtable.h> 24 24 #include <net/switchdev.h> 25 + #include <asm/machine.h> 25 26 #include <asm/chsc.h> 26 27 #include <asm/css_chars.h> 27 28 #include <asm/setup.h> ··· 300 299 301 300 QETH_CARD_TEXT(card, 2, "l2reqmac"); 302 301 303 - if (MACHINE_IS_VM) { 302 + if (machine_is_vm()) { 304 303 rc = qeth_vm_request_mac(card); 305 304 if (!rc) 306 305 goto out;
+2 -1
drivers/s390/net/smsgiucv.c
··· 13 13 #include <linux/device.h> 14 14 #include <linux/slab.h> 15 15 #include <net/iucv/iucv.h> 16 + #include <asm/machine.h> 16 17 #include <asm/cpcmd.h> 17 18 #include <asm/ebcdic.h> 18 19 #include "smsgiucv.h" ··· 139 138 { 140 139 int rc; 141 140 142 - if (!MACHINE_IS_VM) { 141 + if (!machine_is_vm()) { 143 142 rc = -EPROTONOSUPPORT; 144 143 goto out; 145 144 }
+2 -1
drivers/s390/net/smsgiucv_app.c
··· 23 23 #include <linux/spinlock.h> 24 24 #include <linux/workqueue.h> 25 25 #include <net/iucv/iucv.h> 26 + #include <asm/machine.h> 26 27 #include "smsgiucv.h" 27 28 28 29 /* prefix used for SMSG registration */ ··· 154 153 struct device_driver *smsgiucv_drv; 155 154 int rc; 156 155 157 - if (!MACHINE_IS_VM) 156 + if (!machine_is_vm()) 158 157 return -ENODEV; 159 158 160 159 smsgiucv_drv = driver_find(SMSGIUCV_DRV_NAME, &iucv_bus);
+1 -1
drivers/s390/scsi/zfcp_aux.c
··· 41 41 42 42 #define ZFCP_BUS_ID_SIZE 20 43 43 44 - MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com"); 44 + MODULE_AUTHOR("IBM Corporation"); 45 45 MODULE_DESCRIPTION("FCP HBA driver"); 46 46 MODULE_LICENSE("GPL"); 47 47
+4 -3
drivers/tty/hvc/hvc_iucv.c
··· 24 24 #include <linux/tty.h> 25 25 #include <linux/wait.h> 26 26 #include <net/iucv/iucv.h> 27 + #include <asm/machine.h> 27 28 28 29 #include "hvc_console.h" 29 30 ··· 1241 1240 { 1242 1241 int rc; 1243 1242 1244 - if (!MACHINE_IS_VM || !hvc_iucv_devices) 1243 + if (!machine_is_vm() || !hvc_iucv_devices) 1245 1244 return -ENODEV; 1246 1245 1247 1246 if (!val) ··· 1270 1269 size_t index, len; 1271 1270 void *start, *end; 1272 1271 1273 - if (!MACHINE_IS_VM || !hvc_iucv_devices) 1272 + if (!machine_is_vm() || !hvc_iucv_devices) 1274 1273 return -ENODEV; 1275 1274 1276 1275 rc = 0; ··· 1307 1306 if (!hvc_iucv_devices) 1308 1307 return -ENODEV; 1309 1308 1310 - if (!MACHINE_IS_VM) { 1309 + if (!machine_is_vm()) { 1311 1310 pr_notice("The z/VM IUCV HVC device driver cannot " 1312 1311 "be used without z/VM\n"); 1313 1312 rc = -ENODEV;
+4 -3
drivers/watchdog/diag288_wdt.c
··· 27 27 #include <linux/moduleparam.h> 28 28 #include <linux/slab.h> 29 29 #include <linux/watchdog.h> 30 + #include <asm/machine.h> 30 31 #include <asm/ebcdic.h> 31 32 #include <asm/diag.h> 32 33 #include <linux/io.h> ··· 111 110 int ret; 112 111 unsigned int func; 113 112 114 - if (MACHINE_IS_VM) { 113 + if (machine_is_vm()) { 115 114 func = conceal_on ? (WDT_FUNC_INIT | WDT_FUNC_CONCEAL) 116 115 : WDT_FUNC_INIT; 117 116 ret = diag288_str(func, dev->timeout, wdt_cmd); ··· 137 136 int ret; 138 137 unsigned int func; 139 138 140 - if (MACHINE_IS_VM) { 139 + if (machine_is_vm()) { 141 140 /* 142 141 * It seems to be ok to z/VM to use the init function to 143 142 * retrigger the watchdog. On LPAR WDT_FUNC_CHANGE must ··· 193 192 194 193 watchdog_set_nowayout(&wdt_dev, nowayout_info); 195 194 196 - if (MACHINE_IS_VM) { 195 + if (machine_is_vm()) { 197 196 cmd_buf = kmalloc(MAX_CMDLEN, GFP_KERNEL); 198 197 if (!cmd_buf) { 199 198 pr_err("The watchdog cannot be initialized\n");
+1
include/uapi/linux/vfio.h
··· 671 671 */ 672 672 enum { 673 673 VFIO_AP_REQ_IRQ_INDEX, 674 + VFIO_AP_CFG_CHG_IRQ_INDEX, 674 675 VFIO_AP_NUM_IRQS 675 676 }; 676 677
-18
kernel/sysctl.c
··· 1772 1772 .extra1 = SYSCTL_ZERO, 1773 1773 .extra2 = SYSCTL_MAXOLDUID, 1774 1774 }, 1775 - #ifdef CONFIG_S390 1776 - { 1777 - .procname = "userprocess_debug", 1778 - .data = &show_unhandled_signals, 1779 - .maxlen = sizeof(int), 1780 - .mode = 0644, 1781 - .proc_handler = proc_dointvec, 1782 - }, 1783 - #endif 1784 1775 { 1785 1776 .procname = "panic_on_oops", 1786 1777 .data = &panic_on_oops, ··· 1815 1824 .procname = "randomize_va_space", 1816 1825 .data = &randomize_va_space, 1817 1826 .maxlen = sizeof(int), 1818 - .mode = 0644, 1819 - .proc_handler = proc_dointvec, 1820 - }, 1821 - #endif 1822 - #if defined(CONFIG_S390) && defined(CONFIG_SMP) 1823 - { 1824 - .procname = "spin_retry", 1825 - .data = &spin_retry, 1826 - .maxlen = sizeof (int), 1827 1827 .mode = 0644, 1828 1828 .proc_handler = proc_dointvec, 1829 1829 },
+1
lib/raid6/s390vx.uc
··· 11 11 * This file is postprocessed using unroll.awk. 12 12 */ 13 13 14 + #include <linux/cpufeature.h> 14 15 #include <linux/raid/pq.h> 15 16 #include <asm/fpu.h> 16 17
+2 -1
net/iucv/af_iucv.c
··· 28 28 #include <linux/poll.h> 29 29 #include <linux/security.h> 30 30 #include <net/sock.h> 31 + #include <asm/machine.h> 31 32 #include <asm/ebcdic.h> 32 33 #include <asm/cpcmd.h> 33 34 #include <linux/kmod.h> ··· 2273 2272 { 2274 2273 int err; 2275 2274 2276 - if (MACHINE_IS_VM && IS_ENABLED(CONFIG_IUCV)) { 2275 + if (machine_is_vm() && IS_ENABLED(CONFIG_IUCV)) { 2277 2276 cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err); 2278 2277 if (unlikely(err)) { 2279 2278 WARN_ON(err);
+2 -1
net/iucv/iucv.c
··· 39 39 #include <linux/reboot.h> 40 40 #include <net/iucv/iucv.h> 41 41 #include <linux/atomic.h> 42 + #include <asm/machine.h> 42 43 #include <asm/ebcdic.h> 43 44 #include <asm/io.h> 44 45 #include <asm/irq.h> ··· 1866 1865 { 1867 1866 int rc; 1868 1867 1869 - if (!MACHINE_IS_VM) { 1868 + if (!machine_is_vm()) { 1870 1869 rc = -EPROTONOSUPPORT; 1871 1870 goto out; 1872 1871 }