Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-6.17-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Alexander Gordeev:

- Standardize on the __ASSEMBLER__ macro that is provided by GCC and
Clang compilers and replace __ASSEMBLY__ with __ASSEMBLER__ in both
uapi and non-uapi headers

- Explicitly include <linux/export.h> in architecture and driver files
which contain an EXPORT_SYMBOL() and remove the include from the
files which do not contain the EXPORT_SYMBOL()

- Use the full title of "z/Architecture Principles of Operation" manual
and the name of a section where facility bits are listed

- Use -D__DISABLE_EXPORTS for files in arch/s390/boot to avoid
unnecessary slowing down of the build and confusing external kABI
tools that process symtypes data

- Print additional unrecoverable machine check information to make the
root cause analysis easier

- Move cmpxchg_user_key() handling to uaccess library code, since the
generated code is large anyway and there is no benefit if it is
inlined

- Fix a problem when cmpxchg_user_key() is executing code with a
non-default key: if a system is IPL-ed with "LOAD NORMAL", and the
previous system used storage keys where the fetch-protection bit was
set for some pages, and the cmpxchg_user_key() is located within such
page, a protection exception happens

- Either the external call or emergency signal order is used to send an
  IPI to a remote CPU. Use the external call order only, since it is at
  least as good as, and sometimes even better than, the emergency signal

- In case of an early crash the early program check handler prints more
or less random value of the last breaking event address, since it is
not initialized properly. Copy the last breaking event address from
the lowcore to pt_regs to address this

- During STP synchronization check udelay() can not be used, since the
first CPU modifies tod_clock_base and get_tod_clock_monotonic() might
return a non-monotonic time. Instead, busy-loop on other CPUs, while
  the first CPU actually handles the synchronization operation

- When debugging the early kernel boot using QEMU with the -S flag and
GDB attached, skip the decompressor and start directly in kernel

- Rename PAI Crypto event 4210 according to z16 and z17 "z/Architecture
Principles of Operation" manual

- Remove the in-kernel time steering support in favour of the new s390
  PTP driver, which allows the kernel clock to be steered more precisely

- Remove a possible false-positive warning in pte_free_defer(), which
  could be triggered in a valid case while a KVM guest process is initializing

* tag 's390-6.17-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (29 commits)
s390/mm: Remove possible false-positive warning in pte_free_defer()
s390/stp: Default to enabled
s390/stp: Remove leap second support
s390/time: Remove in-kernel time steering
s390/sclp: Use monotonic clock in sclp_sync_wait()
s390/smp: Use monotonic clock in smp_emergency_stop()
s390/time: Use monotonic clock in get_cycles()
s390/pai_crypto: Rename PAI Crypto event 4210
scripts/gdb/symbols: make lx-symbols skip the s390 decompressor
s390/boot: Introduce jump_to_kernel() function
s390/stp: Remove udelay from stp_sync_clock()
s390/early: Copy last breaking event address to pt_regs
s390/smp: Remove conditional emergency signal order code usage
s390/uaccess: Merge cmpxchg_user_key() inline assemblies
s390/uaccess: Prevent kprobes on cmpxchg_user_key() functions
s390/uaccess: Initialize code pages executed with non-default access key
s390/skey: Provide infrastructure for executing with non-default access key
s390/uaccess: Make cmpxchg_user_key() library code
s390/page: Add memory clobber to page_set_storage_key()
s390/page: Cleanup page_set_storage_key() inline assemblies
...

+601 -429
+1
arch/s390/appldata/appldata_base.c
··· 12 12 #define KMSG_COMPONENT "appldata" 13 13 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 14 14 15 + #include <linux/export.h> 15 16 #include <linux/module.h> 16 17 #include <linux/sched/stat.h> 17 18 #include <linux/init.h>
+3 -3
arch/s390/boot/Makefile
··· 19 19 20 20 KBUILD_AFLAGS := $(filter-out $(CC_FLAGS_MARCH),$(KBUILD_AFLAGS_DECOMPRESSOR)) 21 21 KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_MARCH),$(KBUILD_CFLAGS_DECOMPRESSOR)) 22 - KBUILD_AFLAGS += $(CC_FLAGS_MARCH_MINIMUM) 23 - KBUILD_CFLAGS += $(CC_FLAGS_MARCH_MINIMUM) 22 + KBUILD_AFLAGS += $(CC_FLAGS_MARCH_MINIMUM) -D__DISABLE_EXPORTS 23 + KBUILD_CFLAGS += $(CC_FLAGS_MARCH_MINIMUM) -D__DISABLE_EXPORTS 24 24 25 25 CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char 26 26 27 27 obj-y := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o 28 28 obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o 29 29 obj-y += version.o pgm_check.o ctype.o ipl_data.o relocs.o alternative.o 30 - obj-y += uv.o printk.o 30 + obj-y += uv.o printk.o trampoline.o 31 31 obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o 32 32 obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o 33 33 obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o
+1 -1
arch/s390/boot/als.c
··· 65 65 boot_emerg("The Linux kernel requires more recent processor hardware\n"); 66 66 boot_emerg("Detected machine-type number: %4x\n", id.machine); 67 67 print_missing_facilities(); 68 - boot_emerg("See Principles of Operations for facility bits\n"); 68 + boot_emerg("See z/Architecture Principles of Operation - Facility Indications\n"); 69 69 disabled_wait(); 70 70 } 71 71
+3 -2
arch/s390/boot/boot.h
··· 6 6 7 7 #define IPL_START 0x200 8 8 9 - #ifndef __ASSEMBLY__ 9 + #ifndef __ASSEMBLER__ 10 10 11 11 #include <linux/printk.h> 12 12 #include <asm/physmem_info.h> ··· 74 74 void error(char *m); 75 75 int get_random(unsigned long limit, unsigned long *value); 76 76 void boot_rb_dump(void); 77 + void __noreturn jump_to_kernel(psw_t *psw); 77 78 78 79 #ifndef boot_fmt 79 80 #define boot_fmt(fmt) fmt ··· 122 121 { 123 122 return addr0 + size0 > addr1 && addr1 + size1 > addr0; 124 123 } 125 - #endif /* __ASSEMBLY__ */ 124 + #endif /* __ASSEMBLER__ */ 126 125 #endif /* BOOT_BOOT_H */
+8 -1
arch/s390/boot/ipl_data.c
··· 16 16 struct ccw0 ccwpgm[2]; /* 0x0008 */ 17 17 u8 fill[56]; /* 0x0018 */ 18 18 struct ccw0 ccwpgmcc[20]; /* 0x0050 */ 19 - u8 pad_0xf0[0x01a0-0x00f0]; /* 0x00f0 */ 19 + u8 pad_0xf0[0x0140-0x00f0]; /* 0x00f0 */ 20 + psw_t svc_old_psw; /* 0x0140 */ 21 + u8 pad_0x150[0x01a0-0x0150]; /* 0x0150 */ 20 22 psw_t restart_psw; /* 0x01a0 */ 21 23 psw_t external_new_psw; /* 0x01b0 */ 22 24 psw_t svc_new_psw; /* 0x01c0 */ ··· 77 75 [18] = CCW0(CCW_CMD_READ_IPL, 0x690, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 78 76 [19] = CCW0(CCW_CMD_READ_IPL, 0x6e0, 0x50, CCW_FLAG_SLI), 79 77 }, 78 + /* 79 + * Let the GDB's lx-symbols command find the jump_to_kernel symbol 80 + * without having to load decompressor symbols. 81 + */ 82 + .svc_old_psw = { .mask = 0, .addr = (unsigned long)jump_to_kernel }, 80 83 .restart_psw = { .mask = 0, .addr = IPL_START, }, 81 84 .external_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_EXT_NEW_PSW, }, 82 85 .svc_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_SVC_NEW_PSW, },
+1 -1
arch/s390/boot/startup.c
··· 642 642 psw.addr = __kaslr_offset + vmlinux.entry; 643 643 psw.mask = PSW_KERNEL_BITS; 644 644 boot_debug("Starting kernel at: 0x%016lx\n", psw.addr); 645 - __load_psw(psw); 645 + jump_to_kernel(&psw); 646 646 }
+9
arch/s390/boot/trampoline.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #include <linux/linkage.h> 4 + 5 + # This function is identical to __load_psw(), but the lx-symbols GDB command 6 + # puts a breakpoint on it, so it needs to be kept separate. 7 + SYM_CODE_START(jump_to_kernel) 8 + lpswe 0(%r2) 9 + SYM_CODE_END(jump_to_kernel)
+1
arch/s390/crypto/arch_random.c
··· 6 6 * Author(s): Harald Freudenberger 7 7 */ 8 8 9 + #include <linux/export.h> 9 10 #include <linux/kernel.h> 10 11 #include <linux/atomic.h> 11 12 #include <linux/random.h>
+1
arch/s390/crypto/sha_common.c
··· 9 9 */ 10 10 11 11 #include <crypto/internal/hash.h> 12 + #include <linux/export.h> 12 13 #include <linux/module.h> 13 14 #include <asm/cpacf.h> 14 15 #include "sha.h"
+3 -3
arch/s390/include/asm/alternative.h
··· 51 51 ALT_TYPE_SPEC << ALT_TYPE_SHIFT | \ 52 52 (facility) << ALT_DATA_SHIFT) 53 53 54 - #ifndef __ASSEMBLY__ 54 + #ifndef __ASSEMBLER__ 55 55 56 56 #include <linux/types.h> 57 57 #include <linux/stddef.h> ··· 183 183 /* Use this macro if clobbers are needed without inputs. */ 184 184 #define ASM_NO_INPUT_CLOBBER(clobber...) : clobber 185 185 186 - #else /* __ASSEMBLY__ */ 186 + #else /* __ASSEMBLER__ */ 187 187 188 188 /* 189 189 * Issue one struct alt_instr descriptor entry (need to put it into ··· 233 233 .popsection 234 234 .endm 235 235 236 - #endif /* __ASSEMBLY__ */ 236 + #endif /* __ASSEMBLER__ */ 237 237 238 238 #endif /* _ASM_S390_ALTERNATIVE_H */
+1 -1
arch/s390/include/asm/asm-const.h
··· 2 2 #ifndef _ASM_S390_ASM_CONST_H 3 3 #define _ASM_S390_ASM_CONST_H 4 4 5 - #ifdef __ASSEMBLY__ 5 + #ifdef __ASSEMBLER__ 6 6 # define stringify_in_c(...) __VA_ARGS__ 7 7 #else 8 8 /* This version of stringify will deal with commas... */
+2 -2
arch/s390/include/asm/cpu.h
··· 9 9 #ifndef _ASM_S390_CPU_H 10 10 #define _ASM_S390_CPU_H 11 11 12 - #ifndef __ASSEMBLY__ 12 + #ifndef __ASSEMBLER__ 13 13 14 14 #include <linux/types.h> 15 15 #include <linux/jump_label.h> ··· 24 24 25 25 DECLARE_STATIC_KEY_FALSE(cpu_has_bear); 26 26 27 - #endif /* __ASSEMBLY__ */ 27 + #endif /* __ASSEMBLER__ */ 28 28 #endif /* _ASM_S390_CPU_H */
+2 -2
arch/s390/include/asm/cpu_mf-insn.h
··· 8 8 #ifndef _ASM_S390_CPU_MF_INSN_H 9 9 #define _ASM_S390_CPU_MF_INSN_H 10 10 11 - #ifdef __ASSEMBLY__ 11 + #ifdef __ASSEMBLER__ 12 12 13 13 /* Macro to generate the STCCTM instruction with a customized 14 14 * M3 field designating the counter set. ··· 17 17 .insn rsy,0xeb0000000017,\r1,\m3 & 0xf,\db2 18 18 .endm 19 19 20 - #endif /* __ASSEMBLY__ */ 20 + #endif /* __ASSEMBLER__ */ 21 21 22 22 #endif
+2 -2
arch/s390/include/asm/ctlreg.h
··· 80 80 #define CR14_EXTERNAL_DAMAGE_SUBMASK BIT(CR14_EXTERNAL_DAMAGE_SUBMASK_BIT) 81 81 #define CR14_WARNING_SUBMASK BIT(CR14_WARNING_SUBMASK_BIT) 82 82 83 - #ifndef __ASSEMBLY__ 83 + #ifndef __ASSEMBLER__ 84 84 85 85 #include <linux/bug.h> 86 86 ··· 252 252 }; 253 253 }; 254 254 255 - #endif /* __ASSEMBLY__ */ 255 + #endif /* __ASSEMBLER__ */ 256 256 #endif /* __ASM_S390_CTLREG_H */
+2 -2
arch/s390/include/asm/dwarf.h
··· 2 2 #ifndef _ASM_S390_DWARF_H 3 3 #define _ASM_S390_DWARF_H 4 4 5 - #ifdef __ASSEMBLY__ 5 + #ifdef __ASSEMBLER__ 6 6 7 7 #define CFI_STARTPROC .cfi_startproc 8 8 #define CFI_ENDPROC .cfi_endproc ··· 33 33 .cfi_sections .eh_frame, .debug_frame 34 34 #endif 35 35 36 - #endif /* __ASSEMBLY__ */ 36 + #endif /* __ASSEMBLER__ */ 37 37 38 38 #endif /* _ASM_S390_DWARF_H */
+1 -1
arch/s390/include/asm/extmem.h
··· 6 6 7 7 #ifndef _ASM_S390X_DCSS_H 8 8 #define _ASM_S390X_DCSS_H 9 - #ifndef __ASSEMBLY__ 9 + #ifndef __ASSEMBLER__ 10 10 11 11 /* 12 12 * DCSS segment is defined as a contiguous range of pages using DEFSEG command.
+2 -2
arch/s390/include/asm/fpu-insn-asm.h
··· 16 16 #error only <asm/fpu-insn.h> can be included directly 17 17 #endif 18 18 19 - #ifdef __ASSEMBLY__ 19 + #ifdef __ASSEMBLER__ 20 20 21 21 /* Macros to generate vector instruction byte code */ 22 22 ··· 750 750 MRXBOPC 0, 0x77, v1, v2, v3 751 751 .endm 752 752 753 - #endif /* __ASSEMBLY__ */ 753 + #endif /* __ASSEMBLER__ */ 754 754 #endif /* __ASM_S390_FPU_INSN_ASM_H */
+2 -2
arch/s390/include/asm/fpu-insn.h
··· 9 9 10 10 #include <asm/fpu-insn-asm.h> 11 11 12 - #ifndef __ASSEMBLY__ 12 + #ifndef __ASSEMBLER__ 13 13 14 14 #include <linux/instrumented.h> 15 15 #include <asm/asm-extable.h> ··· 475 475 : "memory"); 476 476 } 477 477 478 - #endif /* __ASSEMBLY__ */ 478 + #endif /* __ASSEMBLER__ */ 479 479 #endif /* __ASM_S390_FPU_INSN_H */
+2 -2
arch/s390/include/asm/ftrace.h
··· 5 5 #define ARCH_SUPPORTS_FTRACE_OPS 1 6 6 #define MCOUNT_INSN_SIZE 6 7 7 8 - #ifndef __ASSEMBLY__ 8 + #ifndef __ASSEMBLER__ 9 9 #include <asm/stacktrace.h> 10 10 11 11 static __always_inline unsigned long return_address(unsigned int n) ··· 134 134 struct ftrace_ops *op, struct ftrace_regs *fregs); 135 135 #define ftrace_graph_func ftrace_graph_func 136 136 137 - #endif /* __ASSEMBLY__ */ 137 + #endif /* __ASSEMBLER__ */ 138 138 139 139 #ifdef CONFIG_FUNCTION_TRACER 140 140
+2 -2
arch/s390/include/asm/irq.h
··· 25 25 #define EXT_IRQ_CP_SERVICE 0x2603 26 26 #define EXT_IRQ_IUCV 0x4000 27 27 28 - #ifndef __ASSEMBLY__ 28 + #ifndef __ASSEMBLER__ 29 29 30 30 #include <linux/hardirq.h> 31 31 #include <linux/percpu.h> ··· 120 120 121 121 #define irq_canonicalize(irq) (irq) 122 122 123 - #endif /* __ASSEMBLY__ */ 123 + #endif /* __ASSEMBLER__ */ 124 124 125 125 #endif /* _ASM_IRQ_H */
+2 -2
arch/s390/include/asm/jump_label.h
··· 4 4 5 5 #define HAVE_JUMP_LABEL_BATCH 6 6 7 - #ifndef __ASSEMBLY__ 7 + #ifndef __ASSEMBLER__ 8 8 9 9 #include <linux/types.h> 10 10 #include <linux/stringify.h> ··· 51 51 return true; 52 52 } 53 53 54 - #endif /* __ASSEMBLY__ */ 54 + #endif /* __ASSEMBLER__ */ 55 55 #endif
+3 -3
arch/s390/include/asm/lowcore.h
··· 22 22 23 23 #define LOWCORE_ALT_ADDRESS _AC(0x70000, UL) 24 24 25 - #ifndef __ASSEMBLY__ 25 + #ifndef __ASSEMBLER__ 26 26 27 27 struct pgm_tdb { 28 28 u64 data[32]; ··· 237 237 asm volatile("spx %0" : : "Q" (address) : "memory"); 238 238 } 239 239 240 - #else /* __ASSEMBLY__ */ 240 + #else /* __ASSEMBLER__ */ 241 241 242 242 .macro GET_LC reg 243 243 ALTERNATIVE "lghi \reg,0", \ ··· 251 251 ALT_FEATURE(MFEATURE_LOWCORE) 252 252 .endm 253 253 254 - #endif /* __ASSEMBLY__ */ 254 + #endif /* __ASSEMBLER__ */ 255 255 #endif /* _ASM_S390_LOWCORE_H */
+2 -2
arch/s390/include/asm/machine.h
··· 20 20 #define MFEATURE_LPAR 9 21 21 #define MFEATURE_DIAG288 10 22 22 23 - #ifndef __ASSEMBLY__ 23 + #ifndef __ASSEMBLER__ 24 24 25 25 #include <linux/bitops.h> 26 26 #include <asm/alternative.h> ··· 100 100 #define machine_is_kvm machine_has_kvm 101 101 #define machine_is_lpar machine_has_lpar 102 102 103 - #endif /* __ASSEMBLY__ */ 103 + #endif /* __ASSEMBLER__ */ 104 104 #endif /* __ASM_S390_MACHINE_H */
+2 -2
arch/s390/include/asm/mem_encrypt.h
··· 2 2 #ifndef S390_MEM_ENCRYPT_H__ 3 3 #define S390_MEM_ENCRYPT_H__ 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 7 7 int set_memory_encrypted(unsigned long vaddr, int numpages); 8 8 int set_memory_decrypted(unsigned long vaddr, int numpages); 9 9 10 - #endif /* __ASSEMBLY__ */ 10 + #endif /* __ASSEMBLER__ */ 11 11 12 12 #endif /* S390_MEM_ENCRYPT_H__ */
+2 -2
arch/s390/include/asm/nmi.h
··· 33 33 #define MCCK_CODE_FC_VALID BIT(63 - 43) 34 34 #define MCCK_CODE_CPU_TIMER_VALID BIT(63 - 46) 35 35 36 - #ifndef __ASSEMBLY__ 36 + #ifndef __ASSEMBLER__ 37 37 38 38 union mci { 39 39 unsigned long val; ··· 104 104 void s390_handle_mcck(void); 105 105 void s390_do_machine_check(struct pt_regs *regs); 106 106 107 - #endif /* __ASSEMBLY__ */ 107 + #endif /* __ASSEMBLER__ */ 108 108 #endif /* _ASM_S390_NMI_H */
+2 -2
arch/s390/include/asm/nospec-branch.h
··· 2 2 #ifndef _ASM_S390_EXPOLINE_H 3 3 #define _ASM_S390_EXPOLINE_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 7 7 #include <linux/types.h> 8 8 #include <asm/facility.h> ··· 42 42 void __s390_indirect_jump_r14(void); 43 43 void __s390_indirect_jump_r15(void); 44 44 45 - #endif /* __ASSEMBLY__ */ 45 + #endif /* __ASSEMBLER__ */ 46 46 47 47 #endif /* _ASM_S390_EXPOLINE_H */
+3 -2
arch/s390/include/asm/nospec-insn.h
··· 3 3 #define _ASM_S390_NOSPEC_ASM_H 4 4 5 5 #include <linux/linkage.h> 6 + #include <linux/export.h> 6 7 #include <asm/dwarf.h> 7 8 8 - #ifdef __ASSEMBLY__ 9 + #ifdef __ASSEMBLER__ 9 10 10 11 #ifdef CC_USING_EXPOLINE 11 12 ··· 129 128 .endm 130 129 #endif /* CC_USING_EXPOLINE */ 131 130 132 - #endif /* __ASSEMBLY__ */ 131 + #endif /* __ASSEMBLER__ */ 133 132 134 133 #endif /* _ASM_S390_NOSPEC_ASM_H */
+15 -7
arch/s390/include/asm/page.h
··· 33 33 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA 34 34 35 35 #include <asm/setup.h> 36 - #ifndef __ASSEMBLY__ 36 + #ifndef __ASSEMBLER__ 37 37 38 38 void __storage_key_init_range(unsigned long start, unsigned long end); 39 39 ··· 130 130 static inline void page_set_storage_key(unsigned long addr, 131 131 unsigned char skey, int mapped) 132 132 { 133 - if (!mapped) 134 - asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0" 135 - : : "d" (skey), "a" (addr)); 136 - else 137 - asm volatile("sske %0,%1" : : "d" (skey), "a" (addr)); 133 + if (!mapped) { 134 + asm volatile( 135 + " .insn rrf,0xb22b0000,%[skey],%[addr],8,0" 136 + : 137 + : [skey] "d" (skey), [addr] "a" (addr) 138 + : "memory"); 139 + } else { 140 + asm volatile( 141 + " sske %[skey],%[addr]" 142 + : 143 + : [skey] "d" (skey), [addr] "a" (addr) 144 + : "memory"); 145 + } 138 146 } 139 147 140 148 static inline unsigned char page_get_storage_key(unsigned long addr) ··· 282 274 283 275 #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC 284 276 285 - #endif /* !__ASSEMBLY__ */ 277 + #endif /* !__ASSEMBLER__ */ 286 278 287 279 #include <asm-generic/memory_model.h> 288 280 #include <asm-generic/getorder.h>
+2 -2
arch/s390/include/asm/processor.h
··· 26 26 27 27 #define RESTART_FLAG_CTLREGS _AC(1 << 0, U) 28 28 29 - #ifndef __ASSEMBLY__ 29 + #ifndef __ASSEMBLER__ 30 30 31 31 #include <linux/cpumask.h> 32 32 #include <linux/linkage.h> ··· 418 418 ); 419 419 } 420 420 421 - #endif /* __ASSEMBLY__ */ 421 + #endif /* __ASSEMBLER__ */ 422 422 423 423 #endif /* __ASM_S390_PROCESSOR_H */
+2 -2
arch/s390/include/asm/ptrace.h
··· 54 54 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \ 55 55 PSW_MASK_PSTATE | PSW_ASC_PRIMARY) 56 56 57 - #ifndef __ASSEMBLY__ 57 + #ifndef __ASSEMBLER__ 58 58 59 59 struct psw_bits { 60 60 unsigned long : 1; ··· 292 292 regs->gprs[2] = rc; 293 293 } 294 294 295 - #endif /* __ASSEMBLY__ */ 295 + #endif /* __ASSEMBLER__ */ 296 296 #endif /* _S390_PTRACE_H */
+2 -2
arch/s390/include/asm/purgatory.h
··· 7 7 8 8 #ifndef _S390_PURGATORY_H_ 9 9 #define _S390_PURGATORY_H_ 10 - #ifndef __ASSEMBLY__ 10 + #ifndef __ASSEMBLER__ 11 11 12 12 #include <linux/purgatory.h> 13 13 14 14 int verify_sha256_digest(void); 15 15 16 - #endif /* __ASSEMBLY__ */ 16 + #endif /* __ASSEMBLER__ */ 17 17 #endif /* _S390_PURGATORY_H_ */
+2 -2
arch/s390/include/asm/sclp.h
··· 21 21 #define SCLP_ERRNOTIFY_AQ_INFO_LOG 2 22 22 #define SCLP_ERRNOTIFY_AQ_OPTICS_DATA 3 23 23 24 - #ifndef __ASSEMBLY__ 24 + #ifndef __ASSEMBLER__ 25 25 #include <linux/uio.h> 26 26 #include <asm/chpid.h> 27 27 #include <asm/cpu.h> ··· 199 199 return _sclp_get_core_info(info); 200 200 } 201 201 202 - #endif /* __ASSEMBLY__ */ 202 + #endif /* __ASSEMBLER__ */ 203 203 #endif /* _ASM_S390_SCLP_H */
+4 -2
arch/s390/include/asm/setup.h
··· 24 24 25 25 #define LEGACY_COMMAND_LINE_SIZE 896 26 26 27 - #ifndef __ASSEMBLY__ 27 + #ifndef __ASSEMBLER__ 28 28 29 29 #include <asm/lowcore.h> 30 30 #include <asm/types.h> ··· 40 40 char pad1[0x10480-0x10438]; /* 0x10438 - 0x10480 */ 41 41 char command_line[COMMAND_LINE_SIZE]; /* 0x10480 */ 42 42 }; 43 + 44 + extern char arch_hw_string[128]; 43 45 44 46 extern struct parmarea parmarea; 45 47 ··· 102 100 BUILD_BUG_ON(addr > 0xfff); 103 101 return 0xb2b20000 | addr; 104 102 } 105 - #endif /* __ASSEMBLY__ */ 103 + #endif /* __ASSEMBLER__ */ 106 104 #endif /* _ASM_S390_SETUP_H */
+2 -2
arch/s390/include/asm/sigp.h
··· 36 36 #define SIGP_STATUS_INCORRECT_STATE 0x00000200UL 37 37 #define SIGP_STATUS_NOT_RUNNING 0x00000400UL 38 38 39 - #ifndef __ASSEMBLY__ 39 + #ifndef __ASSEMBLER__ 40 40 41 41 #include <asm/asm.h> 42 42 ··· 68 68 return cc; 69 69 } 70 70 71 - #endif /* __ASSEMBLY__ */ 71 + #endif /* __ASSEMBLER__ */ 72 72 73 73 #endif /* __S390_ASM_SIGP_H */
+32
arch/s390/include/asm/skey.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __ASM_SKEY_H 3 + #define __ASM_SKEY_H 4 + 5 + #include <asm/rwonce.h> 6 + 7 + struct skey_region { 8 + unsigned long start; 9 + unsigned long end; 10 + }; 11 + 12 + #define SKEY_REGION(_start, _end) \ 13 + stringify_in_c(.section .skey_region,"a";) \ 14 + stringify_in_c(.balign 8;) \ 15 + stringify_in_c(.quad (_start);) \ 16 + stringify_in_c(.quad (_end);) \ 17 + stringify_in_c(.previous) 18 + 19 + extern int skey_regions_initialized; 20 + extern struct skey_region __skey_region_start[]; 21 + extern struct skey_region __skey_region_end[]; 22 + 23 + void __skey_regions_initialize(void); 24 + 25 + static inline void skey_regions_initialize(void) 26 + { 27 + if (READ_ONCE(skey_regions_initialized)) 28 + return; 29 + __skey_regions_initialize(); 30 + } 31 + 32 + #endif /* __ASM_SKEY_H */
+1 -1
arch/s390/include/asm/thread_info.h
··· 24 24 25 25 #define STACK_INIT_OFFSET (THREAD_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE) 26 26 27 - #ifndef __ASSEMBLY__ 27 + #ifndef __ASSEMBLER__ 28 28 29 29 /* 30 30 * low level task data that entry.S needs immediate access to
+6 -7
arch/s390/include/asm/timex.h
··· 196 196 asm volatile("stckf %0" : "=Q" (clk) : : "cc"); 197 197 return clk; 198 198 } 199 - 200 - static inline cycles_t get_cycles(void) 201 - { 202 - return (cycles_t) get_tod_clock() >> 2; 203 - } 204 - #define get_cycles get_cycles 205 - 206 199 int get_phys_clock(unsigned long *clock); 207 200 void init_cpu_timer(void); 208 201 ··· 222 229 preempt_enable_notrace(); 223 230 return tod; 224 231 } 232 + 233 + static inline cycles_t get_cycles(void) 234 + { 235 + return (cycles_t)get_tod_clock_monotonic() >> 2; 236 + } 237 + #define get_cycles get_cycles 225 238 226 239 /** 227 240 * tod_to_ns - convert a TOD format value to nanoseconds
+2 -2
arch/s390/include/asm/tpi.h
··· 5 5 #include <linux/types.h> 6 6 #include <uapi/asm/schid.h> 7 7 8 - #ifndef __ASSEMBLY__ 8 + #ifndef __ASSEMBLER__ 9 9 10 10 /* I/O-Interruption Code as stored by TEST PENDING INTERRUPTION (TPI). */ 11 11 struct tpi_info { ··· 32 32 u32 :27; 33 33 } __packed __aligned(4); 34 34 35 - #endif /* __ASSEMBLY__ */ 35 + #endif /* __ASSEMBLER__ */ 36 36 37 37 #endif /* _ASM_S390_TPI_H */
+2 -2
arch/s390/include/asm/types.h
··· 5 5 6 6 #include <uapi/asm/types.h> 7 7 8 - #ifndef __ASSEMBLY__ 8 + #ifndef __ASSEMBLER__ 9 9 10 10 union register_pair { 11 11 unsigned __int128 pair; ··· 15 15 }; 16 16 }; 17 17 18 - #endif /* __ASSEMBLY__ */ 18 + #endif /* __ASSEMBLER__ */ 19 19 #endif /* _ASM_S390_TYPES_H */
+22 -180
arch/s390/include/asm/uaccess.h
··· 473 473 474 474 void __cmpxchg_user_key_called_with_bad_pointer(void); 475 475 476 - #define CMPXCHG_USER_KEY_MAX_LOOPS 128 476 + int __cmpxchg_user_key1(unsigned long address, unsigned char *uval, 477 + unsigned char old, unsigned char new, unsigned long key); 478 + int __cmpxchg_user_key2(unsigned long address, unsigned short *uval, 479 + unsigned short old, unsigned short new, unsigned long key); 480 + int __cmpxchg_user_key4(unsigned long address, unsigned int *uval, 481 + unsigned int old, unsigned int new, unsigned long key); 482 + int __cmpxchg_user_key8(unsigned long address, unsigned long *uval, 483 + unsigned long old, unsigned long new, unsigned long key); 484 + int __cmpxchg_user_key16(unsigned long address, __uint128_t *uval, 485 + __uint128_t old, __uint128_t new, unsigned long key); 477 486 478 - static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, 479 - __uint128_t old, __uint128_t new, 480 - unsigned long key, int size) 487 + static __always_inline int _cmpxchg_user_key(unsigned long address, void *uval, 488 + __uint128_t old, __uint128_t new, 489 + unsigned long key, int size) 481 490 { 482 - bool sacf_flag; 483 - int rc = 0; 484 - 485 491 switch (size) { 486 - case 1: { 487 - unsigned int prev, shift, mask, _old, _new; 488 - unsigned long count; 489 - 490 - shift = (3 ^ (address & 3)) << 3; 491 - address ^= address & 3; 492 - _old = ((unsigned int)old & 0xff) << shift; 493 - _new = ((unsigned int)new & 0xff) << shift; 494 - mask = ~(0xff << shift); 495 - sacf_flag = enable_sacf_uaccess(); 496 - asm_inline volatile( 497 - " spka 0(%[key])\n" 498 - " sacf 256\n" 499 - " llill %[count],%[max_loops]\n" 500 - "0: l %[prev],%[address]\n" 501 - "1: nr %[prev],%[mask]\n" 502 - " xilf %[mask],0xffffffff\n" 503 - " or %[new],%[prev]\n" 504 - " or %[prev],%[tmp]\n" 505 - "2: lr %[tmp],%[prev]\n" 506 - "3: cs %[prev],%[new],%[address]\n" 507 - "4: jnl 5f\n" 508 - " xr %[tmp],%[prev]\n" 509 - " xr %[new],%[tmp]\n" 510 - " nr 
%[tmp],%[mask]\n" 511 - " jnz 5f\n" 512 - " brct %[count],2b\n" 513 - "5: sacf 768\n" 514 - " spka %[default_key]\n" 515 - EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev]) 516 - EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev]) 517 - EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev]) 518 - EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev]) 519 - : [rc] "+&d" (rc), 520 - [prev] "=&d" (prev), 521 - [address] "+Q" (*(int *)address), 522 - [tmp] "+&d" (_old), 523 - [new] "+&d" (_new), 524 - [mask] "+&d" (mask), 525 - [count] "=a" (count) 526 - : [key] "%[count]" (key << 4), 527 - [default_key] "J" (PAGE_DEFAULT_KEY), 528 - [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS) 529 - : "memory", "cc"); 530 - disable_sacf_uaccess(sacf_flag); 531 - *(unsigned char *)uval = prev >> shift; 532 - if (!count) 533 - rc = -EAGAIN; 534 - return rc; 492 + case 1: return __cmpxchg_user_key1(address, uval, old, new, key); 493 + case 2: return __cmpxchg_user_key2(address, uval, old, new, key); 494 + case 4: return __cmpxchg_user_key4(address, uval, old, new, key); 495 + case 8: return __cmpxchg_user_key8(address, uval, old, new, key); 496 + case 16: return __cmpxchg_user_key16(address, uval, old, new, key); 497 + default: __cmpxchg_user_key_called_with_bad_pointer(); 535 498 } 536 - case 2: { 537 - unsigned int prev, shift, mask, _old, _new; 538 - unsigned long count; 539 - 540 - shift = (2 ^ (address & 2)) << 3; 541 - address ^= address & 2; 542 - _old = ((unsigned int)old & 0xffff) << shift; 543 - _new = ((unsigned int)new & 0xffff) << shift; 544 - mask = ~(0xffff << shift); 545 - sacf_flag = enable_sacf_uaccess(); 546 - asm_inline volatile( 547 - " spka 0(%[key])\n" 548 - " sacf 256\n" 549 - " llill %[count],%[max_loops]\n" 550 - "0: l %[prev],%[address]\n" 551 - "1: nr %[prev],%[mask]\n" 552 - " xilf %[mask],0xffffffff\n" 553 - " or %[new],%[prev]\n" 554 - " or %[prev],%[tmp]\n" 555 - "2: lr %[tmp],%[prev]\n" 556 - "3: cs %[prev],%[new],%[address]\n" 557 - "4: jnl 5f\n" 558 - " xr %[tmp],%[prev]\n" 559 - " xr 
%[new],%[tmp]\n" 560 - " nr %[tmp],%[mask]\n" 561 - " jnz 5f\n" 562 - " brct %[count],2b\n" 563 - "5: sacf 768\n" 564 - " spka %[default_key]\n" 565 - EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev]) 566 - EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev]) 567 - EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev]) 568 - EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev]) 569 - : [rc] "+&d" (rc), 570 - [prev] "=&d" (prev), 571 - [address] "+Q" (*(int *)address), 572 - [tmp] "+&d" (_old), 573 - [new] "+&d" (_new), 574 - [mask] "+&d" (mask), 575 - [count] "=a" (count) 576 - : [key] "%[count]" (key << 4), 577 - [default_key] "J" (PAGE_DEFAULT_KEY), 578 - [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS) 579 - : "memory", "cc"); 580 - disable_sacf_uaccess(sacf_flag); 581 - *(unsigned short *)uval = prev >> shift; 582 - if (!count) 583 - rc = -EAGAIN; 584 - return rc; 585 - } 586 - case 4: { 587 - unsigned int prev = old; 588 - 589 - sacf_flag = enable_sacf_uaccess(); 590 - asm_inline volatile( 591 - " spka 0(%[key])\n" 592 - " sacf 256\n" 593 - "0: cs %[prev],%[new],%[address]\n" 594 - "1: sacf 768\n" 595 - " spka %[default_key]\n" 596 - EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev]) 597 - EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev]) 598 - : [rc] "+&d" (rc), 599 - [prev] "+&d" (prev), 600 - [address] "+Q" (*(int *)address) 601 - : [new] "d" ((unsigned int)new), 602 - [key] "a" (key << 4), 603 - [default_key] "J" (PAGE_DEFAULT_KEY) 604 - : "memory", "cc"); 605 - disable_sacf_uaccess(sacf_flag); 606 - *(unsigned int *)uval = prev; 607 - return rc; 608 - } 609 - case 8: { 610 - unsigned long prev = old; 611 - 612 - sacf_flag = enable_sacf_uaccess(); 613 - asm_inline volatile( 614 - " spka 0(%[key])\n" 615 - " sacf 256\n" 616 - "0: csg %[prev],%[new],%[address]\n" 617 - "1: sacf 768\n" 618 - " spka %[default_key]\n" 619 - EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev]) 620 - EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev]) 621 - : [rc] "+&d" (rc), 622 - [prev] "+&d" (prev), 623 - [address] "+QS" (*(long 
*)address) 624 - : [new] "d" ((unsigned long)new), 625 - [key] "a" (key << 4), 626 - [default_key] "J" (PAGE_DEFAULT_KEY) 627 - : "memory", "cc"); 628 - disable_sacf_uaccess(sacf_flag); 629 - *(unsigned long *)uval = prev; 630 - return rc; 631 - } 632 - case 16: { 633 - __uint128_t prev = old; 634 - 635 - sacf_flag = enable_sacf_uaccess(); 636 - asm_inline volatile( 637 - " spka 0(%[key])\n" 638 - " sacf 256\n" 639 - "0: cdsg %[prev],%[new],%[address]\n" 640 - "1: sacf 768\n" 641 - " spka %[default_key]\n" 642 - EX_TABLE_UA_LOAD_REGPAIR(0b, 1b, %[rc], %[prev]) 643 - EX_TABLE_UA_LOAD_REGPAIR(1b, 1b, %[rc], %[prev]) 644 - : [rc] "+&d" (rc), 645 - [prev] "+&d" (prev), 646 - [address] "+QS" (*(__int128_t *)address) 647 - : [new] "d" (new), 648 - [key] "a" (key << 4), 649 - [default_key] "J" (PAGE_DEFAULT_KEY) 650 - : "memory", "cc"); 651 - disable_sacf_uaccess(sacf_flag); 652 - *(__uint128_t *)uval = prev; 653 - return rc; 654 - } 655 - } 656 - __cmpxchg_user_key_called_with_bad_pointer(); 657 - return rc; 499 + return 0; 658 500 } 659 501 660 502 /** ··· 528 686 BUILD_BUG_ON(sizeof(*(__ptr)) != sizeof(*(__uval))); \ 529 687 might_fault(); \ 530 688 __chk_user_ptr(__ptr); \ 531 - __cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval), \ 532 - (old), (new), (key), sizeof(*(__ptr))); \ 689 + _cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval), \ 690 + (old), (new), (key), sizeof(*(__ptr))); \ 533 691 }) 534 692 535 693 #endif /* __S390_UACCESS_H */
+2 -2
arch/s390/include/asm/vdso.h
··· 4 4 5 5 #include <vdso/datapage.h> 6 6 7 - #ifndef __ASSEMBLY__ 7 + #ifndef __ASSEMBLER__ 8 8 9 9 int vdso_getcpu_init(void); 10 10 11 - #endif /* __ASSEMBLY__ */ 11 + #endif /* __ASSEMBLER__ */ 12 12 13 13 #define __VDSO_PAGES 4 14 14
+2 -2
arch/s390/include/asm/vdso/getrandom.h
··· 3 3 #ifndef __ASM_VDSO_GETRANDOM_H 4 4 #define __ASM_VDSO_GETRANDOM_H 5 5 6 - #ifndef __ASSEMBLY__ 6 + #ifndef __ASSEMBLER__ 7 7 8 8 #include <vdso/datapage.h> 9 9 #include <asm/vdso/vsyscall.h> ··· 23 23 return syscall3(__NR_getrandom, (long)buffer, (long)len, (long)flags); 24 24 } 25 25 26 - #endif /* !__ASSEMBLY__ */ 26 + #endif /* !__ASSEMBLER__ */ 27 27 28 28 #endif /* __ASM_VDSO_GETRANDOM_H */
+1 -7
arch/s390/include/asm/vdso/gettimeofday.h
··· 16 16 17 17 static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_time_data *vd) 18 18 { 19 - u64 adj, now; 20 - 21 - now = get_tod_clock(); 22 - adj = vd->arch_data.tod_steering_end - now; 23 - if (unlikely((s64) adj > 0)) 24 - now += (vd->arch_data.tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15); 25 - return now; 19 + return get_tod_clock() - vd->arch_data.tod_delta; 26 20 } 27 21 28 22 static __always_inline
+1 -2
arch/s390/include/asm/vdso/time_data.h
··· 5 5 #include <linux/types.h> 6 6 7 7 struct arch_vdso_time_data { 8 - __s64 tod_steering_delta; 9 - __u64 tod_steering_end; 8 + __s64 tod_delta; 10 9 }; 11 10 12 11 #endif /* __S390_ASM_VDSO_TIME_DATA_H */
+2 -2
arch/s390/include/asm/vdso/vsyscall.h
··· 2 2 #ifndef __ASM_VDSO_VSYSCALL_H 3 3 #define __ASM_VDSO_VSYSCALL_H 4 4 5 - #ifndef __ASSEMBLY__ 5 + #ifndef __ASSEMBLER__ 6 6 7 7 #include <linux/hrtimer.h> 8 8 #include <vdso/datapage.h> ··· 11 11 /* The asm-generic header needs to be included after the definitions above */ 12 12 #include <asm-generic/vdso/vsyscall.h> 13 13 14 - #endif /* !__ASSEMBLY__ */ 14 + #endif /* !__ASSEMBLER__ */ 15 15 16 16 #endif /* __ASM_VDSO_VSYSCALL_H */
+3 -2
arch/s390/include/uapi/asm/ptrace.h
··· 242 242 #define PTRACE_OLDSETOPTIONS 21 243 243 #define PTRACE_SYSEMU 31 244 244 #define PTRACE_SYSEMU_SINGLESTEP 32 245 - #ifndef __ASSEMBLY__ 245 + 246 + #ifndef __ASSEMBLER__ 246 247 #include <linux/stddef.h> 247 248 #include <linux/types.h> 248 249 ··· 451 450 unsigned long ieee_instruction_pointer; /* obsolete, always 0 */ 452 451 }; 453 452 454 - #endif /* __ASSEMBLY__ */ 453 + #endif /* __ASSEMBLER__ */ 455 454 456 455 #endif /* _UAPI_S390_PTRACE_H */
+2 -2
arch/s390/include/uapi/asm/schid.h
··· 4 4 5 5 #include <linux/types.h> 6 6 7 - #ifndef __ASSEMBLY__ 7 + #ifndef __ASSEMBLER__ 8 8 9 9 struct subchannel_id { 10 10 __u32 cssid : 8; ··· 15 15 __u32 sch_no : 16; 16 16 } __attribute__ ((packed, aligned(4))); 17 17 18 - #endif /* __ASSEMBLY__ */ 18 + #endif /* __ASSEMBLER__ */ 19 19 20 20 #endif /* _UAPIASM_SCHID_H */
+2 -2
arch/s390/include/uapi/asm/types.h
··· 10 10 11 11 #include <asm-generic/int-ll64.h> 12 12 13 - #ifndef __ASSEMBLY__ 13 + #ifndef __ASSEMBLER__ 14 14 15 15 typedef unsigned long addr_t; 16 16 typedef __signed__ long saddr_t; ··· 25 25 }; 26 26 } __attribute__((packed, aligned(4))) __vector128; 27 27 28 - #endif /* __ASSEMBLY__ */ 28 + #endif /* __ASSEMBLER__ */ 29 29 30 30 #endif /* _UAPI_S390_TYPES_H */
+1 -1
arch/s390/kernel/Makefile
··· 41 41 obj-y += debug.o irq.o ipl.o dis.o vdso.o cpufeature.o 42 42 obj-y += sysinfo.o lgr.o os_info.o ctlreg.o 43 43 obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o 44 - obj-y += entry.o reipl.o kdebugfs.o alternative.o 44 + obj-y += entry.o reipl.o kdebugfs.o alternative.o skey.o 45 45 obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o 46 46 obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o facility.o uv.o wti.o 47 47 obj-y += diag/
+1
arch/s390/kernel/cpufeature.c
··· 4 4 */ 5 5 6 6 #include <linux/cpufeature.h> 7 + #include <linux/export.h> 7 8 #include <linux/bug.h> 8 9 #include <asm/machine.h> 9 10 #include <asm/elf.h>
+1
arch/s390/kernel/crash_dump.c
··· 7 7 */ 8 8 9 9 #include <linux/crash_dump.h> 10 + #include <linux/export.h> 10 11 #include <asm/lowcore.h> 11 12 #include <linux/kernel.h> 12 13 #include <linux/init.h>
+1
arch/s390/kernel/ctlreg.c
··· 5 5 6 6 #include <linux/irqflags.h> 7 7 #include <linux/spinlock.h> 8 + #include <linux/export.h> 8 9 #include <linux/kernel.h> 9 10 #include <linux/init.h> 10 11 #include <linux/smp.h>
-1
arch/s390/kernel/dis.c
··· 17 17 #include <linux/init.h> 18 18 #include <linux/interrupt.h> 19 19 #include <linux/delay.h> 20 - #include <linux/export.h> 21 20 #include <linux/kallsyms.h> 22 21 #include <linux/reboot.h> 23 22 #include <linux/kprobes.h>
+4
arch/s390/kernel/early.c
··· 105 105 } 106 106 } 107 107 108 + char arch_hw_string[128]; 109 + 108 110 static noinline __init void setup_arch_string(void) 109 111 { 110 112 struct sysinfo_1_1_1 *mach = (struct sysinfo_1_1_1 *)&sysinfo_page; ··· 133 131 machine_is_vm() ? "z/VM" : 134 132 machine_is_kvm() ? "KVM" : "unknown"); 135 133 } 134 + sprintf(arch_hw_string, "HW: %s (%s)", mstr, hvstr); 136 135 dump_stack_set_arch_desc("%s (%s)", mstr, hvstr); 137 136 } 138 137 ··· 157 154 158 155 regs->int_code = lc->pgm_int_code; 159 156 regs->int_parm_long = lc->trans_exc_code; 157 + regs->last_break = lc->pgm_last_break; 160 158 ip = __rewind_psw(regs->psw, regs->int_code >> 16); 161 159 162 160 /* Monitor Event? Might be a warning */
+1
arch/s390/kernel/facility.c
··· 3 3 * Copyright IBM Corp. 2023 4 4 */ 5 5 6 + #include <linux/export.h> 6 7 #include <asm/facility.h> 7 8 8 9 unsigned int stfle_size(void)
+2
arch/s390/kernel/fpu.c
··· 5 5 * Copyright IBM Corp. 2015 6 6 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 7 7 */ 8 + 9 + #include <linux/export.h> 8 10 #include <linux/kernel.h> 9 11 #include <linux/cpu.h> 10 12 #include <linux/sched.h>
+70 -6
arch/s390/kernel/nmi.c
··· 9 9 */ 10 10 11 11 #include <linux/kernel_stat.h> 12 + #include <linux/utsname.h> 12 13 #include <linux/cpufeature.h> 13 14 #include <linux/init.h> 14 15 #include <linux/errno.h> ··· 22 21 #include <linux/module.h> 23 22 #include <linux/sched/signal.h> 24 23 #include <linux/kvm_host.h> 25 - #include <linux/export.h> 26 24 #include <asm/lowcore.h> 27 25 #include <asm/ctlreg.h> 28 26 #include <asm/fpu.h> ··· 116 116 return dest; 117 117 } 118 118 119 + static notrace void nmi_print_info(void) 120 + { 121 + struct lowcore *lc = get_lowcore(); 122 + char message[100]; 123 + char *ptr; 124 + int i; 125 + 126 + ptr = nmi_puts(message, "Unrecoverable machine check, code: "); 127 + ptr = u64_to_hex(ptr, lc->mcck_interruption_code); 128 + ptr = nmi_puts(ptr, "\n"); 129 + sclp_emergency_printk(message); 130 + 131 + ptr = nmi_puts(message, init_utsname()->release); 132 + ptr = nmi_puts(ptr, "\n"); 133 + sclp_emergency_printk(message); 134 + 135 + ptr = nmi_puts(message, arch_hw_string); 136 + ptr = nmi_puts(ptr, "\n"); 137 + sclp_emergency_printk(message); 138 + 139 + ptr = nmi_puts(message, "PSW: "); 140 + ptr = u64_to_hex(ptr, lc->mcck_old_psw.mask); 141 + ptr = nmi_puts(ptr, " "); 142 + ptr = u64_to_hex(ptr, lc->mcck_old_psw.addr); 143 + ptr = nmi_puts(ptr, " PFX: "); 144 + ptr = u64_to_hex(ptr, (u64)get_lowcore()); 145 + ptr = nmi_puts(ptr, "\n"); 146 + sclp_emergency_printk(message); 147 + 148 + ptr = nmi_puts(message, "LBA: "); 149 + ptr = u64_to_hex(ptr, lc->last_break_save_area); 150 + ptr = nmi_puts(ptr, " EDC: "); 151 + ptr = u64_to_hex(ptr, lc->external_damage_code); 152 + ptr = nmi_puts(ptr, " FSA: "); 153 + ptr = u64_to_hex(ptr, lc->failing_storage_address); 154 + ptr = nmi_puts(ptr, "\n"); 155 + sclp_emergency_printk(message); 156 + 157 + ptr = nmi_puts(message, "CRS:\n"); 158 + sclp_emergency_printk(message); 159 + ptr = message; 160 + for (i = 0; i < 16; i++) { 161 + ptr = u64_to_hex(ptr, lc->cregs_save_area[i].val); 162 + ptr = nmi_puts(ptr, " "); 163 + 
if ((i + 1) % 4 == 0) { 164 + ptr = nmi_puts(ptr, "\n"); 165 + sclp_emergency_printk(message); 166 + ptr = message; 167 + } 168 + } 169 + 170 + ptr = nmi_puts(message, "GPRS:\n"); 171 + sclp_emergency_printk(message); 172 + ptr = message; 173 + for (i = 0; i < 16; i++) { 174 + ptr = u64_to_hex(ptr, lc->gpregs_save_area[i]); 175 + ptr = nmi_puts(ptr, " "); 176 + if ((i + 1) % 4 == 0) { 177 + ptr = nmi_puts(ptr, "\n"); 178 + sclp_emergency_printk(message); 179 + ptr = message; 180 + } 181 + } 182 + 183 + ptr = nmi_puts(message, "System stopped\n"); 184 + sclp_emergency_printk(message); 185 + } 186 + 119 187 static notrace void s390_handle_damage(void) 120 188 { 121 189 struct lowcore *lc = get_lowcore(); 122 190 union ctlreg0 cr0, cr0_new; 123 - char message[100]; 124 191 psw_t psw_save; 125 - char *ptr; 126 192 127 193 smp_emergency_stop(); 128 194 diag_amode31_ops.diag308_reset(); 129 - ptr = nmi_puts(message, "System stopped due to unrecoverable machine check, code: 0x"); 130 - u64_to_hex(ptr, lc->mcck_interruption_code); 131 195 132 196 /* 133 197 * Disable low address protection and make machine check new PSW a ··· 205 141 psw_bits(lc->mcck_new_psw).io = 0; 206 142 psw_bits(lc->mcck_new_psw).ext = 0; 207 143 psw_bits(lc->mcck_new_psw).wait = 1; 208 - sclp_emergency_printk(message); 144 + nmi_print_info(); 209 145 210 146 /* 211 147 * Restore machine check new PSW and control register 0 to original
-1
arch/s390/kernel/perf_cpum_cf.c
··· 14 14 #include <linux/percpu.h> 15 15 #include <linux/notifier.h> 16 16 #include <linux/init.h> 17 - #include <linux/export.h> 18 17 #include <linux/miscdevice.h> 19 18 #include <linux/perf_event.h> 20 19
-1
arch/s390/kernel/perf_cpum_sf.c
··· 14 14 #include <linux/percpu.h> 15 15 #include <linux/pid.h> 16 16 #include <linux/notifier.h> 17 - #include <linux/export.h> 18 17 #include <linux/slab.h> 19 18 #include <linux/mm.h> 20 19 #include <linux/moduleparam.h>
-1
arch/s390/kernel/perf_event.c
··· 12 12 #include <linux/perf_event.h> 13 13 #include <linux/kvm_host.h> 14 14 #include <linux/percpu.h> 15 - #include <linux/export.h> 16 15 #include <linux/seq_file.h> 17 16 #include <linux/spinlock.h> 18 17 #include <linux/uaccess.h>
+1 -2
arch/s390/kernel/perf_pai_crypto.c
··· 13 13 #include <linux/percpu.h> 14 14 #include <linux/notifier.h> 15 15 #include <linux/init.h> 16 - #include <linux/export.h> 17 16 #include <linux/io.h> 18 17 #include <linux/perf_event.h> 19 18 #include <asm/ctlreg.h> ··· 695 696 [111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256", 696 697 [112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128", 697 698 [113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192", 698 - [114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A", 699 + [114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256", 699 700 [115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128", 700 701 [116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256", 701 702 [117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
-1
arch/s390/kernel/perf_pai_ext.c
··· 14 14 #include <linux/percpu.h> 15 15 #include <linux/notifier.h> 16 16 #include <linux/init.h> 17 - #include <linux/export.h> 18 17 #include <linux/io.h> 19 18 #include <linux/perf_event.h> 20 19 #include <asm/ctlreg.h>
-1
arch/s390/kernel/process.c
··· 27 27 #include <linux/compat.h> 28 28 #include <linux/kprobes.h> 29 29 #include <linux/random.h> 30 - #include <linux/export.h> 31 30 #include <linux/init_task.h> 32 31 #include <linux/entry-common.h> 33 32 #include <linux/io.h>
+48
arch/s390/kernel/skey.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <asm/rwonce.h> 4 + #include <asm/page.h> 5 + #include <asm/skey.h> 6 + 7 + int skey_regions_initialized; 8 + 9 + static inline unsigned long load_real_address(unsigned long address) 10 + { 11 + unsigned long real; 12 + 13 + asm volatile( 14 + " lra %[real],0(%[address])\n" 15 + : [real] "=d" (real) 16 + : [address] "a" (address) 17 + : "cc"); 18 + return real; 19 + } 20 + 21 + /* 22 + * Initialize storage keys of registered memory regions with the 23 + * default key. This is useful for code which is executed with a 24 + * non-default access key. 25 + */ 26 + void __skey_regions_initialize(void) 27 + { 28 + unsigned long address, real; 29 + struct skey_region *r, *end; 30 + 31 + r = __skey_region_start; 32 + end = __skey_region_end; 33 + while (r < end) { 34 + address = r->start & PAGE_MASK; 35 + do { 36 + real = load_real_address(address); 37 + page_set_storage_key(real, PAGE_DEFAULT_KEY, 1); 38 + address += PAGE_SIZE; 39 + } while (address < r->end); 40 + r++; 41 + } 42 + /* 43 + * Make sure storage keys are initialized before 44 + * skey_regions_initialized is changed. 45 + */ 46 + barrier(); 47 + WRITE_ONCE(skey_regions_initialized, 1); 48 + }
+4 -7
arch/s390/kernel/smp.c
··· 175 175 176 176 static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit) 177 177 { 178 - int order; 179 - 180 178 if (test_and_set_bit(ec_bit, &pcpu->ec_mask)) 181 179 return; 182 - order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL; 183 180 pcpu->ec_clk = get_tod_clock_fast(); 184 - pcpu_sigp_retry(pcpu, order, 0); 181 + pcpu_sigp_retry(pcpu, SIGP_EXTERNAL_CALL, 0); 185 182 } 186 183 187 184 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) ··· 430 433 cpumask_copy(&cpumask, cpu_online_mask); 431 434 cpumask_clear_cpu(smp_processor_id(), &cpumask); 432 435 433 - end = get_tod_clock() + (1000000UL << 12); 436 + end = get_tod_clock_monotonic() + (1000000UL << 12); 434 437 for_each_cpu(cpu, &cpumask) { 435 438 struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu); 436 439 set_bit(ec_stop_cpu, &pcpu->ec_mask); 437 440 while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL, 438 441 0, NULL) == SIGP_CC_BUSY && 439 - get_tod_clock() < end) 442 + get_tod_clock_monotonic() < end) 440 443 cpu_relax(); 441 444 } 442 - while (get_tod_clock() < end) { 445 + while (get_tod_clock_monotonic() < end) { 443 446 for_each_cpu(cpu, &cpumask) 444 447 if (pcpu_stopped(per_cpu_ptr(&pcpu_devices, cpu))) 445 448 cpumask_clear_cpu(cpu, &cpumask);
+2
arch/s390/kernel/sthyi.c
··· 5 5 * Copyright IBM Corp. 2016 6 6 * Author(s): Janosch Frank <frankja@linux.vnet.ibm.com> 7 7 */ 8 + 9 + #include <linux/export.h> 8 10 #include <linux/errno.h> 9 11 #include <linux/pagemap.h> 10 12 #include <linux/vmalloc.h>
+5 -116
arch/s390/kernel/time.c
··· 69 69 70 70 static unsigned long lpar_offset; 71 71 static unsigned long initial_leap_seconds; 72 - static unsigned long tod_steering_end; 73 - static long tod_steering_delta; 74 72 75 73 /* 76 74 * Get time offsets with PTFF ··· 78 80 struct ptff_qto qto; 79 81 struct ptff_qui qui; 80 82 81 - /* Initialize TOD steering parameters */ 82 - tod_steering_end = tod_clock_base.tod; 83 - vdso_k_time_data->arch_data.tod_steering_end = tod_steering_end; 83 + vdso_k_time_data->arch_data.tod_delta = tod_clock_base.tod; 84 84 85 85 if (!test_facility(28)) 86 86 return; ··· 222 226 223 227 static u64 read_tod_clock(struct clocksource *cs) 224 228 { 225 - unsigned long now, adj; 226 - 227 - preempt_disable(); /* protect from changes to steering parameters */ 228 - now = get_tod_clock(); 229 - adj = tod_steering_end - now; 230 - if (unlikely((s64) adj > 0)) 231 - /* 232 - * manually steer by 1 cycle every 2^16 cycles. This 233 - * corresponds to shifting the tod delta by 15. 1s is 234 - * therefore steered in ~9h. The adjust will decrease 235 - * over time, until it finally reaches 0. 236 - */ 237 - now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15); 238 - preempt_enable(); 239 - return now; 229 + return get_tod_clock_monotonic(); 240 230 } 241 231 242 232 static struct clocksource clocksource_tod = { ··· 351 369 */ 352 370 static void clock_sync_global(long delta) 353 371 { 354 - unsigned long now, adj; 355 372 struct ptff_qto qto; 356 373 357 374 /* Fixup the monotonic sched clock. */ 358 375 tod_clock_base.eitod += delta; 359 - /* Adjust TOD steering parameters. */ 360 - now = get_tod_clock(); 361 - adj = tod_steering_end - now; 362 - if (unlikely((s64) adj >= 0)) 363 - /* Calculate how much of the old adjustment is left. */ 364 - tod_steering_delta = (tod_steering_delta < 0) ? 
365 - -(adj >> 15) : (adj >> 15); 366 - tod_steering_delta += delta; 367 - if ((abs(tod_steering_delta) >> 48) != 0) 368 - panic("TOD clock sync offset %li is too large to drift\n", 369 - tod_steering_delta); 370 - tod_steering_end = now + (abs(tod_steering_delta) << 15); 371 - vdso_k_time_data->arch_data.tod_steering_end = tod_steering_end; 372 - vdso_k_time_data->arch_data.tod_steering_delta = tod_steering_delta; 373 - 376 + vdso_k_time_data->arch_data.tod_delta = tod_clock_base.tod; 374 377 /* Update LPAR offset. */ 375 378 if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0) 376 379 lpar_offset = qto.tod_epoch_difference; ··· 397 430 /* 398 431 * Server Time Protocol (STP) code. 399 432 */ 400 - static bool stp_online; 433 + static bool stp_online = true; 401 434 static struct stp_sstpi stp_info; 402 435 static void *stp_page; 403 436 ··· 423 456 if (rc == 0) 424 457 set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags); 425 458 else if (stp_online) { 426 - pr_warn("The real or virtual hardware system does not provide an STP interface\n"); 427 459 free_page((unsigned long) stp_page); 428 460 stp_page = NULL; 429 461 stp_online = false; ··· 546 580 atomic_dec(&sync->cpus); 547 581 /* Wait for in_sync to be set. */ 548 582 while (READ_ONCE(sync->in_sync) == 0) 549 - __udelay(1); 583 + ; 550 584 } 551 585 if (sync->in_sync != 1) 552 586 /* Didn't work. Clear per-cpu in sync bit again. 
*/ ··· 555 589 clock_sync_local(sync->clock_delta); 556 590 557 591 return 0; 558 - } 559 - 560 - static int stp_clear_leap(void) 561 - { 562 - struct __kernel_timex txc; 563 - int ret; 564 - 565 - memset(&txc, 0, sizeof(txc)); 566 - 567 - ret = do_adjtimex(&txc); 568 - if (ret < 0) 569 - return ret; 570 - 571 - txc.modes = ADJ_STATUS; 572 - txc.status &= ~(STA_INS|STA_DEL); 573 - return do_adjtimex(&txc); 574 - } 575 - 576 - static void stp_check_leap(void) 577 - { 578 - struct stp_stzi stzi; 579 - struct stp_lsoib *lsoib = &stzi.lsoib; 580 - struct __kernel_timex txc; 581 - int64_t timediff; 582 - int leapdiff, ret; 583 - 584 - if (!stp_info.lu || !check_sync_clock()) { 585 - /* 586 - * Either a scheduled leap second was removed by the operator, 587 - * or STP is out of sync. In both cases, clear the leap second 588 - * kernel flags. 589 - */ 590 - if (stp_clear_leap() < 0) 591 - pr_err("failed to clear leap second flags\n"); 592 - return; 593 - } 594 - 595 - if (chsc_stzi(stp_page, &stzi, sizeof(stzi))) { 596 - pr_err("stzi failed\n"); 597 - return; 598 - } 599 - 600 - timediff = tod_to_ns(lsoib->nlsout - get_tod_clock()) / NSEC_PER_SEC; 601 - leapdiff = lsoib->nlso - lsoib->also; 602 - 603 - if (leapdiff != 1 && leapdiff != -1) { 604 - pr_err("Cannot schedule %d leap seconds\n", leapdiff); 605 - return; 606 - } 607 - 608 - if (timediff < 0) { 609 - if (stp_clear_leap() < 0) 610 - pr_err("failed to clear leap second flags\n"); 611 - } else if (timediff < 7200) { 612 - memset(&txc, 0, sizeof(txc)); 613 - ret = do_adjtimex(&txc); 614 - if (ret < 0) 615 - return; 616 - 617 - txc.modes = ADJ_STATUS; 618 - if (leapdiff > 0) 619 - txc.status |= STA_INS; 620 - else 621 - txc.status |= STA_DEL; 622 - ret = do_adjtimex(&txc); 623 - if (ret < 0) 624 - pr_err("failed to set leap second flags\n"); 625 - /* arm Timer to clear leap second flags */ 626 - mod_timer(&stp_timer, jiffies + secs_to_jiffies(14400)); 627 - } else { 628 - /* The day the leap second is scheduled for 
hasn't been reached. Retry 629 - * in one hour. 630 - */ 631 - mod_timer(&stp_timer, jiffies + secs_to_jiffies(3600)); 632 - } 633 592 } 634 593 635 594 /* ··· 598 707 * Retry after a second. 599 708 */ 600 709 mod_timer(&stp_timer, jiffies + msecs_to_jiffies(MSEC_PER_SEC)); 601 - else if (stp_info.lu) 602 - stp_check_leap(); 603 710 604 711 out_unlock: 605 712 mutex_unlock(&stp_mutex);
+2
arch/s390/kernel/unwind_bc.c
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #include <linux/export.h> 2 4 #include <linux/sched.h> 3 5 #include <linux/sched/task.h> 4 6 #include <linux/sched/task_stack.h>
+1
arch/s390/kernel/uv.c
··· 7 7 #define KMSG_COMPONENT "prot_virt" 8 8 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 9 10 + #include <linux/export.h> 10 11 #include <linux/kernel.h> 11 12 #include <linux/types.h> 12 13 #include <linux/sizes.h>
+7
arch/s390/kernel/vmlinux.lds.S
··· 71 71 . = ALIGN(PAGE_SIZE); 72 72 __end_ro_after_init = .; 73 73 74 + . = ALIGN(8); 75 + .skey_region_table : { 76 + __skey_region_start = .; 77 + KEEP(*(.skey_region)) 78 + __skey_region_end = .; 79 + } 80 + 74 81 .data.rel.ro : { 75 82 *(.data.rel.ro .data.rel.ro.*) 76 83 }
+1
arch/s390/kvm/interrupt.c
··· 14 14 #include <linux/interrupt.h> 15 15 #include <linux/kvm_host.h> 16 16 #include <linux/hrtimer.h> 17 + #include <linux/export.h> 17 18 #include <linux/mmu_context.h> 18 19 #include <linux/nospec.h> 19 20 #include <linux/signal.h>
+1
arch/s390/kvm/kvm-s390.c
··· 14 14 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 15 15 16 16 #include <linux/compiler.h> 17 + #include <linux/export.h> 17 18 #include <linux/err.h> 18 19 #include <linux/fs.h> 19 20 #include <linux/hrtimer.h>
+2
arch/s390/kvm/pv.c
··· 5 5 * Copyright IBM Corp. 2019, 2020 6 6 * Author(s): Janosch Frank <frankja@linux.ibm.com> 7 7 */ 8 + 9 + #include <linux/export.h> 8 10 #include <linux/kvm.h> 9 11 #include <linux/kvm_host.h> 10 12 #include <linux/minmax.h>
+1
arch/s390/lib/delay.c
··· 7 7 */ 8 8 9 9 #include <linux/processor.h> 10 + #include <linux/export.h> 10 11 #include <linux/delay.h> 11 12 #include <asm/div64.h> 12 13 #include <asm/timex.h>
+188
arch/s390/lib/uaccess.c
··· 8 8 * Gerald Schaefer (gerald.schaefer@de.ibm.com) 9 9 */ 10 10 11 + #include <linux/kprobes.h> 11 12 #include <linux/uaccess.h> 12 13 #include <linux/export.h> 13 14 #include <linux/mm.h> 14 15 #include <asm/asm-extable.h> 15 16 #include <asm/ctlreg.h> 17 + #include <asm/skey.h> 16 18 17 19 #ifdef CONFIG_DEBUG_ENTRY 18 20 void debug_user_asce(int exit) ··· 147 145 return raw_copy_to_user_key(to, from, n, key); 148 146 } 149 147 EXPORT_SYMBOL(_copy_to_user_key); 148 + 149 + #define CMPXCHG_USER_KEY_MAX_LOOPS 128 150 + 151 + static nokprobe_inline int __cmpxchg_user_key_small(unsigned long address, unsigned int *uval, 152 + unsigned int old, unsigned int new, 153 + unsigned int mask, unsigned long key) 154 + { 155 + unsigned long count; 156 + unsigned int prev; 157 + bool sacf_flag; 158 + int rc = 0; 159 + 160 + skey_regions_initialize(); 161 + sacf_flag = enable_sacf_uaccess(); 162 + asm_inline volatile( 163 + "20: spka 0(%[key])\n" 164 + " sacf 256\n" 165 + " llill %[count],%[max_loops]\n" 166 + "0: l %[prev],%[address]\n" 167 + "1: nr %[prev],%[mask]\n" 168 + " xilf %[mask],0xffffffff\n" 169 + " or %[new],%[prev]\n" 170 + " or %[prev],%[tmp]\n" 171 + "2: lr %[tmp],%[prev]\n" 172 + "3: cs %[prev],%[new],%[address]\n" 173 + "4: jnl 5f\n" 174 + " xr %[tmp],%[prev]\n" 175 + " xr %[new],%[tmp]\n" 176 + " nr %[tmp],%[mask]\n" 177 + " jnz 5f\n" 178 + " brct %[count],2b\n" 179 + "5: sacf 768\n" 180 + " spka %[default_key]\n" 181 + "21:\n" 182 + EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev]) 183 + EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev]) 184 + EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev]) 185 + EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev]) 186 + SKEY_REGION(20b, 21b) 187 + : [rc] "+&d" (rc), 188 + [prev] "=&d" (prev), 189 + [address] "+Q" (*(int *)address), 190 + [tmp] "+&d" (old), 191 + [new] "+&d" (new), 192 + [mask] "+&d" (mask), 193 + [count] "=a" (count) 194 + : [key] "%[count]" (key << 4), 195 + [default_key] "J" (PAGE_DEFAULT_KEY), 196 + [max_loops] "J" 
(CMPXCHG_USER_KEY_MAX_LOOPS) 197 + : "memory", "cc"); 198 + disable_sacf_uaccess(sacf_flag); 199 + *uval = prev; 200 + if (!count) 201 + rc = -EAGAIN; 202 + return rc; 203 + } 204 + 205 + int __kprobes __cmpxchg_user_key1(unsigned long address, unsigned char *uval, 206 + unsigned char old, unsigned char new, unsigned long key) 207 + { 208 + unsigned int prev, shift, mask, _old, _new; 209 + int rc; 210 + 211 + shift = (3 ^ (address & 3)) << 3; 212 + address ^= address & 3; 213 + _old = (unsigned int)old << shift; 214 + _new = (unsigned int)new << shift; 215 + mask = ~(0xff << shift); 216 + rc = __cmpxchg_user_key_small(address, &prev, _old, _new, mask, key); 217 + *uval = prev >> shift; 218 + return rc; 219 + } 220 + EXPORT_SYMBOL(__cmpxchg_user_key1); 221 + 222 + int __kprobes __cmpxchg_user_key2(unsigned long address, unsigned short *uval, 223 + unsigned short old, unsigned short new, unsigned long key) 224 + { 225 + unsigned int prev, shift, mask, _old, _new; 226 + int rc; 227 + 228 + shift = (2 ^ (address & 2)) << 3; 229 + address ^= address & 2; 230 + _old = (unsigned int)old << shift; 231 + _new = (unsigned int)new << shift; 232 + mask = ~(0xffff << shift); 233 + rc = __cmpxchg_user_key_small(address, &prev, _old, _new, mask, key); 234 + *uval = prev >> shift; 235 + return rc; 236 + } 237 + EXPORT_SYMBOL(__cmpxchg_user_key2); 238 + 239 + int __kprobes __cmpxchg_user_key4(unsigned long address, unsigned int *uval, 240 + unsigned int old, unsigned int new, unsigned long key) 241 + { 242 + unsigned int prev = old; 243 + bool sacf_flag; 244 + int rc = 0; 245 + 246 + skey_regions_initialize(); 247 + sacf_flag = enable_sacf_uaccess(); 248 + asm_inline volatile( 249 + "20: spka 0(%[key])\n" 250 + " sacf 256\n" 251 + "0: cs %[prev],%[new],%[address]\n" 252 + "1: sacf 768\n" 253 + " spka %[default_key]\n" 254 + "21:\n" 255 + EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev]) 256 + EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev]) 257 + SKEY_REGION(20b, 21b) 258 + : [rc] "+&d" 
(rc), 259 + [prev] "+&d" (prev), 260 + [address] "+Q" (*(int *)address) 261 + : [new] "d" (new), 262 + [key] "a" (key << 4), 263 + [default_key] "J" (PAGE_DEFAULT_KEY) 264 + : "memory", "cc"); 265 + disable_sacf_uaccess(sacf_flag); 266 + *uval = prev; 267 + return rc; 268 + } 269 + EXPORT_SYMBOL(__cmpxchg_user_key4); 270 + 271 + int __kprobes __cmpxchg_user_key8(unsigned long address, unsigned long *uval, 272 + unsigned long old, unsigned long new, unsigned long key) 273 + { 274 + unsigned long prev = old; 275 + bool sacf_flag; 276 + int rc = 0; 277 + 278 + skey_regions_initialize(); 279 + sacf_flag = enable_sacf_uaccess(); 280 + asm_inline volatile( 281 + "20: spka 0(%[key])\n" 282 + " sacf 256\n" 283 + "0: csg %[prev],%[new],%[address]\n" 284 + "1: sacf 768\n" 285 + " spka %[default_key]\n" 286 + "21:\n" 287 + EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev]) 288 + EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev]) 289 + SKEY_REGION(20b, 21b) 290 + : [rc] "+&d" (rc), 291 + [prev] "+&d" (prev), 292 + [address] "+QS" (*(long *)address) 293 + : [new] "d" (new), 294 + [key] "a" (key << 4), 295 + [default_key] "J" (PAGE_DEFAULT_KEY) 296 + : "memory", "cc"); 297 + disable_sacf_uaccess(sacf_flag); 298 + *uval = prev; 299 + return rc; 300 + } 301 + EXPORT_SYMBOL(__cmpxchg_user_key8); 302 + 303 + int __kprobes __cmpxchg_user_key16(unsigned long address, __uint128_t *uval, 304 + __uint128_t old, __uint128_t new, unsigned long key) 305 + { 306 + __uint128_t prev = old; 307 + bool sacf_flag; 308 + int rc = 0; 309 + 310 + skey_regions_initialize(); 311 + sacf_flag = enable_sacf_uaccess(); 312 + asm_inline volatile( 313 + "20: spka 0(%[key])\n" 314 + " sacf 256\n" 315 + "0: cdsg %[prev],%[new],%[address]\n" 316 + "1: sacf 768\n" 317 + " spka %[default_key]\n" 318 + "21:\n" 319 + EX_TABLE_UA_LOAD_REGPAIR(0b, 1b, %[rc], %[prev]) 320 + EX_TABLE_UA_LOAD_REGPAIR(1b, 1b, %[rc], %[prev]) 321 + SKEY_REGION(20b, 21b) 322 + : [rc] "+&d" (rc), 323 + [prev] "+&d" (prev), 324 + [address] "+QS" 
(*(__int128_t *)address) 325 + : [new] "d" (new), 326 + [key] "a" (key << 4), 327 + [default_key] "J" (PAGE_DEFAULT_KEY) 328 + : "memory", "cc"); 329 + disable_sacf_uaccess(sacf_flag); 330 + *uval = prev; 331 + return rc; 332 + } 333 + EXPORT_SYMBOL(__cmpxchg_user_key16);
+1
arch/s390/mm/gmap.c
··· 9 9 */ 10 10 11 11 #include <linux/cpufeature.h> 12 + #include <linux/export.h> 12 13 #include <linux/kernel.h> 13 14 #include <linux/pagewalk.h> 14 15 #include <linux/swap.h>
+2
arch/s390/mm/gmap_helpers.c
··· 4 4 * 5 5 * Copyright IBM Corp. 2007, 2025 6 6 */ 7 + 8 + #include <linux/export.h> 7 9 #include <linux/mm_types.h> 8 10 #include <linux/mmap_lock.h> 9 11 #include <linux/mm.h>
-5
arch/s390/mm/pgalloc.c
··· 173 173 struct ptdesc *ptdesc = virt_to_ptdesc(pgtable); 174 174 175 175 call_rcu(&ptdesc->pt_rcu_head, pte_free_now); 176 - /* 177 - * THPs are not allowed for KVM guests. Warn if pgste ever reaches here. 178 - * Turn to the generic pte_free_defer() version once gmap is removed. 179 - */ 180 - WARN_ON_ONCE(mm_has_pgste(mm)); 181 176 } 182 177 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 183 178
+1
arch/s390/mm/pgtable.c
··· 5 5 */ 6 6 7 7 #include <linux/cpufeature.h> 8 + #include <linux/export.h> 8 9 #include <linux/sched.h> 9 10 #include <linux/kernel.h> 10 11 #include <linux/errno.h>
+2 -2
arch/s390/net/bpf_jit.h
··· 11 11 #ifndef __ARCH_S390_NET_BPF_JIT_H 12 12 #define __ARCH_S390_NET_BPF_JIT_H 13 13 14 - #ifndef __ASSEMBLY__ 14 + #ifndef __ASSEMBLER__ 15 15 16 16 #include <linux/filter.h> 17 17 #include <linux/types.h> 18 18 19 - #endif /* __ASSEMBLY__ */ 19 + #endif /* __ASSEMBLER__ */ 20 20 21 21 /* 22 22 * Stackframe layout (packed stack):
+1
arch/s390/net/pnet.c
··· 6 6 */ 7 7 8 8 #include <linux/device.h> 9 + #include <linux/export.h> 9 10 #include <linux/module.h> 10 11 #include <linux/pci.h> 11 12 #include <linux/types.h>
-1
arch/s390/pci/pci_bus.c
··· 13 13 #include <linux/kernel.h> 14 14 #include <linux/slab.h> 15 15 #include <linux/err.h> 16 - #include <linux/export.h> 17 16 #include <linux/delay.h> 18 17 #include <linux/seq_file.h> 19 18 #include <linux/jump_label.h>
+2
arch/s390/pci/pci_kvm_hook.c
··· 5 5 * Copyright (C) IBM Corp. 2022. All rights reserved. 6 6 * Author(s): Pierre Morel <pmorel@linux.ibm.com> 7 7 */ 8 + 8 9 #include <linux/kvm_host.h> 10 + #include <linux/export.h> 9 11 10 12 struct zpci_kvm_hook zpci_kvm_hook; 11 13 EXPORT_SYMBOL_GPL(zpci_kvm_hook);
+1
drivers/s390/block/dasd.c
··· 8 8 * Copyright IBM Corp. 1999, 2009 9 9 */ 10 10 11 + #include <linux/export.h> 11 12 #include <linux/kmod.h> 12 13 #include <linux/init.h> 13 14 #include <linux/interrupt.h>
+1
drivers/s390/block/dasd_devmap.c
··· 13 13 * 14 14 */ 15 15 16 + #include <linux/export.h> 16 17 #include <linux/ctype.h> 17 18 #include <linux/init.h> 18 19 #include <linux/module.h>
+1
drivers/s390/block/dasd_eer.c
··· 7 7 * Author(s): Stefan Weinhuber <wein@de.ibm.com> 8 8 */ 9 9 10 + #include <linux/export.h> 10 11 #include <linux/init.h> 11 12 #include <linux/fs.h> 12 13 #include <linux/kernel.h>
+1
drivers/s390/block/dasd_erp.c
··· 9 9 * 10 10 */ 11 11 12 + #include <linux/export.h> 12 13 #include <linux/ctype.h> 13 14 #include <linux/init.h> 14 15
+1
drivers/s390/block/dasd_ioctl.c
··· 12 12 13 13 #include <linux/interrupt.h> 14 14 #include <linux/compat.h> 15 + #include <linux/export.h> 15 16 #include <linux/major.h> 16 17 #include <linux/fs.h> 17 18 #include <linux/blkpg.h>
+1
drivers/s390/char/keyboard.c
··· 7 7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 8 8 */ 9 9 10 + #include <linux/export.h> 10 11 #include <linux/module.h> 11 12 #include <linux/sched/signal.h> 12 13 #include <linux/slab.h>
+1
drivers/s390/char/raw3270.c
··· 8 8 * Copyright IBM Corp. 2003, 2009 9 9 */ 10 10 11 + #include <linux/export.h> 11 12 #include <linux/module.h> 12 13 #include <linux/err.h> 13 14 #include <linux/init.h>
+3 -2
drivers/s390/char/sclp.c
··· 9 9 */ 10 10 11 11 #include <linux/kernel_stat.h> 12 + #include <linux/export.h> 12 13 #include <linux/module.h> 13 14 #include <linux/err.h> 14 15 #include <linux/panic_notifier.h> ··· 720 719 timeout = 0; 721 720 if (timer_pending(&sclp_request_timer)) { 722 721 /* Get timeout TOD value */ 723 - timeout = get_tod_clock_fast() + 722 + timeout = get_tod_clock_monotonic() + 724 723 sclp_tod_from_jiffies(sclp_request_timer.expires - 725 724 jiffies); 726 725 } ··· 740 739 /* Loop until driver state indicates finished request */ 741 740 while (sclp_running_state != sclp_running_state_idle) { 742 741 /* Check for expired request timer */ 743 - if (get_tod_clock_fast() > timeout && timer_delete(&sclp_request_timer)) 742 + if (get_tod_clock_monotonic() > timeout && timer_delete(&sclp_request_timer)) 744 743 sclp_request_timer.function(&sclp_request_timer); 745 744 cpu_relax(); 746 745 }
-1
drivers/s390/char/sclp_cmd.c
··· 13 13 #include <linux/init.h> 14 14 #include <linux/errno.h> 15 15 #include <linux/err.h> 16 - #include <linux/export.h> 17 16 #include <linux/slab.h> 18 17 #include <linux/string.h> 19 18 #include <linux/mm.h>
+1
drivers/s390/char/sclp_early.c
··· 8 8 #define KMSG_COMPONENT "sclp_early" 9 9 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 10 11 + #include <linux/export.h> 11 12 #include <linux/errno.h> 12 13 #include <linux/memblock.h> 13 14 #include <asm/ctlreg.h>
+1
drivers/s390/char/sclp_ocf.c
··· 9 9 #define KMSG_COMPONENT "sclp_ocf" 10 10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11 11 12 + #include <linux/export.h> 12 13 #include <linux/kernel.h> 13 14 #include <linux/init.h> 14 15 #include <linux/stat.h>
-1
drivers/s390/char/sclp_sd.c
··· 16 16 #include <linux/slab.h> 17 17 #include <linux/vmalloc.h> 18 18 #include <linux/async.h> 19 - #include <linux/export.h> 20 19 #include <linux/mutex.h> 21 20 22 21 #include <asm/pgalloc.h>
+1
drivers/s390/char/tape_34xx.c
··· 11 11 #define KMSG_COMPONENT "tape_34xx" 12 12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 13 14 + #include <linux/export.h> 14 15 #include <linux/module.h> 15 16 #include <linux/init.h> 16 17 #include <linux/bio.h>
+1
drivers/s390/char/tape_3590.c
··· 11 11 #define KMSG_COMPONENT "tape_3590" 12 12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 13 14 + #include <linux/export.h> 14 15 #include <linux/module.h> 15 16 #include <linux/slab.h> 16 17 #include <linux/init.h>
+1
drivers/s390/char/tape_class.c
··· 11 11 #define KMSG_COMPONENT "tape" 12 12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 13 14 + #include <linux/export.h> 14 15 #include <linux/slab.h> 15 16 16 17 #include "tape_class.h"
+1
drivers/s390/char/tape_core.c
··· 14 14 #define KMSG_COMPONENT "tape" 15 15 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 16 16 17 + #include <linux/export.h> 17 18 #include <linux/module.h> 18 19 #include <linux/init.h> // for kernel parameters 19 20 #include <linux/kmod.h> // for requesting modules
+1
drivers/s390/char/tape_std.c
··· 14 14 #define KMSG_COMPONENT "tape" 15 15 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 16 16 17 + #include <linux/export.h> 17 18 #include <linux/stddef.h> 18 19 #include <linux/kernel.h> 19 20 #include <linux/bio.h>
-1
drivers/s390/char/vmcp.c
··· 19 19 #include <linux/miscdevice.h> 20 20 #include <linux/slab.h> 21 21 #include <linux/uaccess.h> 22 - #include <linux/export.h> 23 22 #include <linux/mutex.h> 24 23 #include <linux/cma.h> 25 24 #include <linux/mm.h>
+1
drivers/s390/cio/airq.c
··· 9 9 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 10 10 */ 11 11 12 + #include <linux/export.h> 12 13 #include <linux/init.h> 13 14 #include <linux/irq.h> 14 15 #include <linux/kernel_stat.h>
+2
drivers/s390/cio/ccwgroup.c
··· 7 7 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 8 8 * Cornelia Huck (cornelia.huck@de.ibm.com) 9 9 */ 10 + 11 + #include <linux/export.h> 10 12 #include <linux/module.h> 11 13 #include <linux/errno.h> 12 14 #include <linux/slab.h>
+1
drivers/s390/cio/chsc.c
··· 11 11 #define KMSG_COMPONENT "cio" 12 12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 13 14 + #include <linux/export.h> 14 15 #include <linux/module.h> 15 16 #include <linux/slab.h> 16 17 #include <linux/init.h>
+1
drivers/s390/cio/cio.c
··· 12 12 #define KMSG_COMPONENT "cio" 13 13 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 14 14 15 + #include <linux/export.h> 15 16 #include <linux/ftrace.h> 16 17 #include <linux/module.h> 17 18 #include <linux/init.h>
+1
drivers/s390/cio/device_fsm.c
··· 7 7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 8 */ 9 9 10 + #include <linux/export.h> 10 11 #include <linux/module.h> 11 12 #include <linux/init.h> 12 13 #include <linux/io.h>
+1
drivers/s390/cio/eadm_sch.c
··· 11 11 #include <linux/workqueue.h> 12 12 #include <linux/spinlock.h> 13 13 #include <linux/device.h> 14 + #include <linux/export.h> 14 15 #include <linux/module.h> 15 16 #include <linux/timer.h> 16 17 #include <linux/slab.h>
+1
drivers/s390/cio/fcx.c
··· 6 6 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 7 7 */ 8 8 9 + #include <linux/export.h> 9 10 #include <linux/kernel.h> 10 11 #include <linux/types.h> 11 12 #include <linux/string.h>
+1
drivers/s390/cio/isc.c
··· 7 7 */ 8 8 9 9 #include <linux/spinlock.h> 10 + #include <linux/export.h> 10 11 #include <linux/module.h> 11 12 #include <asm/isc.h> 12 13
+1
drivers/s390/cio/itcw.c
··· 6 6 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 7 7 */ 8 8 9 + #include <linux/export.h> 9 10 #include <linux/kernel.h> 10 11 #include <linux/types.h> 11 12 #include <linux/string.h>
-1
drivers/s390/cio/qdio_debug.c
··· 7 7 #include <linux/seq_file.h> 8 8 #include <linux/debugfs.h> 9 9 #include <linux/uaccess.h> 10 - #include <linux/export.h> 11 10 #include <linux/slab.h> 12 11 #include <asm/debug.h> 13 12 #include "qdio_debug.h"
+2
drivers/s390/cio/qdio_main.c
··· 7 7 * Jan Glauber <jang@linux.vnet.ibm.com> 8 8 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com> 9 9 */ 10 + 11 + #include <linux/export.h> 10 12 #include <linux/module.h> 11 13 #include <linux/init.h> 12 14 #include <linux/kernel.h>
+1
drivers/s390/cio/scm.c
··· 7 7 */ 8 8 9 9 #include <linux/device.h> 10 + #include <linux/export.h> 10 11 #include <linux/module.h> 11 12 #include <linux/mutex.h> 12 13 #include <linux/slab.h>
+1
drivers/s390/crypto/ap_bus.c
··· 16 16 17 17 #include <linux/kernel_stat.h> 18 18 #include <linux/moduleparam.h> 19 + #include <linux/export.h> 19 20 #include <linux/init.h> 20 21 #include <linux/delay.h> 21 22 #include <linux/err.h>
+1
drivers/s390/crypto/ap_queue.c
··· 9 9 #define KMSG_COMPONENT "ap" 10 10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11 11 12 + #include <linux/export.h> 12 13 #include <linux/init.h> 13 14 #include <linux/slab.h> 14 15 #include <asm/facility.h>
+1
drivers/s390/crypto/pkey_api.c
··· 12 12 13 13 #include <linux/init.h> 14 14 #include <linux/miscdevice.h> 15 + #include <linux/export.h> 15 16 #include <linux/slab.h> 16 17 17 18 #include "zcrypt_api.h"
+1
drivers/s390/crypto/pkey_base.c
··· 9 9 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 10 11 11 #include <linux/cpufeature.h> 12 + #include <linux/export.h> 12 13 #include <linux/init.h> 13 14 #include <linux/list.h> 14 15 #include <linux/module.h>
+1
drivers/s390/crypto/zcrypt_api.c
··· 15 15 #define KMSG_COMPONENT "zcrypt" 16 16 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 17 17 18 + #include <linux/export.h> 18 19 #include <linux/module.h> 19 20 #include <linux/init.h> 20 21 #include <linux/interrupt.h>
+1
drivers/s390/crypto/zcrypt_card.c
··· 11 11 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 12 12 */ 13 13 14 + #include <linux/export.h> 14 15 #include <linux/module.h> 15 16 #include <linux/init.h> 16 17 #include <linux/interrupt.h>
+1
drivers/s390/crypto/zcrypt_ccamisc.c
··· 10 10 #define KMSG_COMPONENT "zcrypt" 11 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 12 13 + #include <linux/export.h> 13 14 #include <linux/init.h> 14 15 #include <linux/mempool.h> 15 16 #include <linux/module.h>
+1
drivers/s390/crypto/zcrypt_ep11misc.c
··· 9 9 #define KMSG_COMPONENT "zcrypt" 10 10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11 11 12 + #include <linux/export.h> 12 13 #include <linux/init.h> 13 14 #include <linux/mempool.h> 14 15 #include <linux/module.h>
+1
drivers/s390/crypto/zcrypt_queue.c
··· 11 11 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 12 12 */ 13 13 14 + #include <linux/export.h> 14 15 #include <linux/module.h> 15 16 #include <linux/init.h> 16 17 #include <linux/interrupt.h>
+1
drivers/s390/net/ctcm_mpc.c
··· 21 21 #define KMSG_COMPONENT "ctcm" 22 22 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 23 23 24 + #include <linux/export.h> 24 25 #include <linux/module.h> 25 26 #include <linux/init.h> 26 27 #include <linux/kernel.h>
+1
drivers/s390/net/fsm.c
··· 5 5 */ 6 6 7 7 #include "fsm.h" 8 + #include <linux/export.h> 8 9 #include <linux/module.h> 9 10 #include <linux/slab.h> 10 11 #include <linux/timer.h>
+1
drivers/s390/net/ism_drv.c
··· 7 7 #define KMSG_COMPONENT "ism" 8 8 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 9 10 + #include <linux/export.h> 10 11 #include <linux/module.h> 11 12 #include <linux/types.h> 12 13 #include <linux/interrupt.h>
+1
drivers/s390/net/qeth_core_main.c
··· 11 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 12 13 13 #include <linux/compat.h> 14 + #include <linux/export.h> 14 15 #include <linux/module.h> 15 16 #include <linux/moduleparam.h> 16 17 #include <linux/string.h>
+1
drivers/s390/net/qeth_l2_main.c
··· 10 10 #define KMSG_COMPONENT "qeth" 11 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 12 13 + #include <linux/export.h> 13 14 #include <linux/module.h> 14 15 #include <linux/moduleparam.h> 15 16 #include <linux/string.h>
+1
drivers/s390/net/qeth_l3_main.c
··· 10 10 #define KMSG_COMPONENT "qeth" 11 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 12 13 + #include <linux/export.h> 13 14 #include <linux/module.h> 14 15 #include <linux/moduleparam.h> 15 16 #include <linux/bitops.h>
+1
drivers/s390/net/smsgiucv.c
··· 7 7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) 8 8 */ 9 9 10 + #include <linux/export.h> 10 11 #include <linux/module.h> 11 12 #include <linux/init.h> 12 13 #include <linux/errno.h>
+1
lib/crypto/s390/chacha-glue.c
··· 10 10 11 11 #include <crypto/chacha.h> 12 12 #include <linux/cpufeature.h> 13 + #include <linux/export.h> 13 14 #include <linux/kernel.h> 14 15 #include <linux/module.h> 15 16 #include <linux/sizes.h>
-1
lib/raid6/recov_s390xc.c
··· 6 6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 7 7 */ 8 8 9 - #include <linux/export.h> 10 9 #include <linux/raid/pq.h> 11 10 12 11 static inline void xor_block(u8 *p1, u8 *p2)
+1
net/iucv/iucv.c
··· 24 24 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 25 25 26 26 #include <linux/kernel_stat.h> 27 + #include <linux/export.h> 27 28 #include <linux/module.h> 28 29 #include <linux/moduleparam.h> 29 30 #include <linux/spinlock.h>
+26
scripts/gdb/linux/symbols.py
··· 84 84 return None 85 85 86 86 87 + def is_in_s390_decompressor(): 88 + # DAT is always off in decompressor. Use this as an indicator. 89 + # Note that in the kernel, DAT can be off during kexec() or restart. 90 + # Accept this imprecision in order to avoid complicating things. 91 + # It is unlikely that someone will run lx-symbols at these points. 92 + pswm = int(gdb.parse_and_eval("$pswm")) 93 + return (pswm & 0x0400000000000000) == 0 94 + 95 + 96 + def skip_decompressor(): 97 + if utils.is_target_arch("s390"): 98 + if is_in_s390_decompressor(): 99 + # The address of the jump_to_kernel function is statically placed 100 + # into svc_old_psw.addr (see ipl_data.c); read it from there. DAT 101 + # is off, so we do not need to care about lowcore relocation. 102 + svc_old_pswa = 0x148 103 + jump_to_kernel = int(gdb.parse_and_eval("*(unsigned long long *)" + 104 + hex(svc_old_pswa))) 105 + gdb.execute("tbreak *" + hex(jump_to_kernel)) 106 + gdb.execute("continue") 107 + while is_in_s390_decompressor(): 108 + gdb.execute("stepi") 109 + 110 + 87 111 class LxSymbols(gdb.Command): 88 112 """(Re-)load symbols of Linux kernel and currently loaded modules. 89 113 ··· 228 204 saved_state['breakpoint'].enabled = saved_state['enabled'] 229 205 230 206 def invoke(self, arg, from_tty): 207 + skip_decompressor() 208 + 231 209 self.module_paths = [os.path.abspath(os.path.expanduser(p)) 232 210 for p in arg.split()] 233 211 self.module_paths.append(os.getcwd())