Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-6.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Heiko Carstens:

- Provide a new interface for dynamic configuration and deconfiguration
  of hotplug memory, allowing operation both with and without
  memmap_on_memory support. This makes the way memory hotplug is handled
  on s390 much more similar to other architectures

- Remove compat support. There shouldn't be any compat user space
around anymore, therefore get rid of a lot of code which also doesn't
need to be tested anymore

- Add stackprotector support. GCC 16 will get new compiler options,
  which allow generating the code required for kernel stackprotector
  support

- Merge pai_crypto and pai_ext PMU drivers into a new driver. This
  removes a lot of duplicated code. The new driver is also extendable
  and allows new PMUs to be supported

- Add driver override support for AP queues

- Rework and extend zcrypt and AP trace events to allow for tracing of
crypto requests

- Support block sizes larger than 65535 bytes for CCW tape devices

- Since the rework of the virtual kernel address space the module area
  and the kernel image are within the same 4GB area. This eliminates
  the need for weak per cpu variables. Get rid of
  ARCH_MODULE_NEEDS_WEAK_PER_CPU

- Various other small improvements and fixes

* tag 's390-6.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (92 commits)
watchdog: diag288_wdt: Remove KMSG_COMPONENT macro
s390/entry: Use lay instead of aghik
s390/vdso: Get rid of -m64 flag handling
s390/vdso: Rename vdso64 to vdso
s390: Rename head64.S to head.S
s390/vdso: Use common STABS_DEBUG and DWARF_DEBUG macros
s390: Add stackprotector support
s390/modules: Simplify module_finalize() slightly
s390: Remove KMSG_COMPONENT macro
s390/percpu: Get rid of ARCH_MODULE_NEEDS_WEAK_PER_CPU
s390/ap: Restrict driver_override versus apmask and aqmask use
s390/ap: Rename mutex ap_perms_mutex to ap_attr_mutex
s390/ap: Support driver_override for AP queue devices
s390/ap: Use all-bits-one apmask/aqmask for vfio in_use() checks
s390/debug: Update description of resize operation
s390/syscalls: Switch to generic system call table generation
s390/syscalls: Remove system call table pointer from thread_struct
s390/uapi: Remove 31 bit support from uapi header files
s390: Remove compat support
tools: Remove s390 compat support
...

+3151 -6204
+2 -3
Documentation/arch/s390/s390dbf.rst
··· 243 243 244 244 Changing the size of debug areas 245 245 ------------------------------------ 246 - It is possible the change the size of debug areas through piping 247 - the number of pages to the debugfs file "pages". The resize request will 248 - also flush the debug areas. 246 + To resize a debug area, write the desired page count to the "pages" file. 247 + Existing data is preserved if it fits; otherwise, oldest entries are dropped. 249 248 250 249 Example: 251 250
+4 -17
arch/s390/Kconfig
··· 69 69 Clang versions before 19.1.0 do not support A, 70 70 O, and R inline assembly format flags. 71 71 72 + config CC_HAS_STACKPROTECTOR_GLOBAL 73 + def_bool $(cc-option, -mstack-protector-guard=global -mstack-protector-guard-record) 74 + 72 75 config S390 73 76 def_bool y 74 77 # ··· 143 140 select ARCH_INLINE_WRITE_UNLOCK_IRQ 144 141 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE 145 142 select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE 146 - select ARCH_MODULE_NEEDS_WEAK_PER_CPU 147 143 select ARCH_STACKWALK 148 144 select ARCH_SUPPORTS_ATOMIC_RMW 149 145 select ARCH_SUPPORTS_DEBUG_PAGEALLOC ··· 247 245 select HAVE_SAMPLE_FTRACE_DIRECT_MULTI 248 246 select HAVE_SETUP_PER_CPU_AREA 249 247 select HAVE_SOFTIRQ_ON_OWN_STACK 248 + select HAVE_STACKPROTECTOR if CC_HAS_STACKPROTECTOR_GLOBAL 250 249 select HAVE_SYSCALL_TRACEPOINTS 251 250 select HAVE_VIRT_CPU_ACCOUNTING 252 251 select HAVE_VIRT_CPU_ACCOUNTING_IDLE ··· 506 503 help 507 504 This allows you to specify the maximum length of the kernel command 508 505 line. 509 - 510 - config COMPAT 511 - def_bool n 512 - prompt "Kernel support for 31 bit emulation" 513 - select ARCH_WANT_OLD_COMPAT_IPC 514 - select COMPAT_OLD_SIGACTION 515 - select HAVE_UID16 516 - depends on MULTIUSER 517 - depends on !CC_IS_CLANG && !LD_IS_LLD 518 - help 519 - Select this option if you want to enable your system kernel to 520 - handle system-calls from ELF binaries for 31 bit ESA. This option 521 - (and some other stuff like libraries and such) is needed for 522 - executing 31 bit applications. 523 - 524 - If unsure say N. 525 506 526 507 config SMP 527 508 def_bool y
+7 -7
arch/s390/Makefile
··· 90 90 aflags-y += -DCC_USING_EXPOLINE 91 91 endif 92 92 93 + ifeq ($(CONFIG_STACKPROTECTOR),y) 94 + KBUILD_CFLAGS += -mstack-protector-guard=global -mstack-protector-guard-record 95 + endif 96 + 93 97 ifdef CONFIG_FUNCTION_TRACER 94 98 ifeq ($(call cc-option,-mfentry -mnop-mcount),) 95 99 # make use of hotpatch feature if the compiler supports it ··· 139 135 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 140 136 141 137 archheaders: 142 - $(Q)$(MAKE) $(build)=$(syscalls) uapi 138 + $(Q)$(MAKE) $(build)=$(syscalls) all 143 139 144 140 archprepare: 145 - $(Q)$(MAKE) $(build)=$(syscalls) kapi 146 141 $(Q)$(MAKE) $(build)=$(tools) kapi $(extra_tools) 147 142 ifeq ($(KBUILD_EXTMOD),) 148 143 # We need to generate vdso-offsets.h before compiling certain files in kernel/. ··· 152 149 # this hack. 153 150 prepare: vdso_prepare 154 151 vdso_prepare: prepare0 155 - $(Q)$(MAKE) $(build)=arch/s390/kernel/vdso64 include/generated/vdso64-offsets.h 156 - $(if $(CONFIG_COMPAT),$(Q)$(MAKE) \ 157 - $(build)=arch/s390/kernel/vdso32 include/generated/vdso32-offsets.h) 152 + $(Q)$(MAKE) $(build)=arch/s390/kernel/vdso include/generated/vdso-offsets.h 158 153 159 - vdso-install-y += arch/s390/kernel/vdso64/vdso64.so.dbg 160 - vdso-install-$(CONFIG_COMPAT) += arch/s390/kernel/vdso32/vdso32.so.dbg 154 + vdso-install-y += arch/s390/kernel/vdso/vdso.so.dbg 161 155 162 156 endif 163 157
+1 -2
arch/s390/appldata/appldata_base.c
··· 9 9 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> 10 10 */ 11 11 12 - #define KMSG_COMPONENT "appldata" 13 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 + #define pr_fmt(fmt) "appldata: " fmt 14 13 15 14 #include <linux/export.h> 16 15 #include <linux/module.h>
+1 -2
arch/s390/appldata/appldata_os.c
··· 8 8 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> 9 9 */ 10 10 11 - #define KMSG_COMPONENT "appldata" 12 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11 + #define pr_fmt(fmt) "appldata: " fmt 13 12 14 13 #include <linux/module.h> 15 14 #include <linux/init.h>
+1
arch/s390/boot/Makefile
··· 32 32 obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o 33 33 obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o 34 34 obj-$(CONFIG_KMSAN) += kmsan.o 35 + obj-$(CONFIG_STACKPROTECTOR) += stackprotector.o 35 36 obj-all := $(obj-y) piggy.o syms.o 36 37 37 38 targets := bzImage section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)
+4
arch/s390/boot/boot.h
··· 28 28 unsigned long invalid_pg_dir_off; 29 29 unsigned long alt_instructions; 30 30 unsigned long alt_instructions_end; 31 + #ifdef CONFIG_STACKPROTECTOR 32 + unsigned long stack_prot_start; 33 + unsigned long stack_prot_end; 34 + #endif 31 35 #ifdef CONFIG_KASAN 32 36 unsigned long kasan_early_shadow_page_off; 33 37 unsigned long kasan_early_shadow_pte_off;
+1 -2
arch/s390/boot/ipl_data.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 3 - #include <linux/compat.h> 4 3 #include <linux/ptrace.h> 5 4 #include <asm/cio.h> 6 5 #include <asm/asm-offsets.h> ··· 11 12 #define PSW_MASK_DISABLED (PSW_MASK_WAIT | PSW_MASK_EA | PSW_MASK_BA) 12 13 13 14 struct ipl_lowcore { 14 - psw_t32 ipl_psw; /* 0x0000 */ 15 + psw32_t ipl_psw; /* 0x0000 */ 15 16 struct ccw0 ccwpgm[2]; /* 0x0008 */ 16 17 u8 fill[56]; /* 0x0018 */ 17 18 struct ccw0 ccwpgmcc[20]; /* 0x0050 */
+6
arch/s390/boot/ipl_parm.c
··· 3 3 #include <linux/init.h> 4 4 #include <linux/ctype.h> 5 5 #include <linux/pgtable.h> 6 + #include <asm/arch-stackprotector.h> 6 7 #include <asm/abs_lowcore.h> 7 8 #include <asm/page-states.h> 8 9 #include <asm/machine.h> ··· 294 293 if (!rc && !enabled) 295 294 cmma_flag = 0; 296 295 } 296 + 297 + #ifdef CONFIG_STACKPROTECTOR 298 + if (!strcmp(param, "debug_stackprotector")) 299 + stack_protector_debug = 1; 300 + #endif 297 301 298 302 #if IS_ENABLED(CONFIG_KVM) 299 303 if (!strcmp(param, "prot_virt")) {
+6
arch/s390/boot/stackprotector.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #define boot_fmt(fmt) "stackprot: " fmt 4 + 5 + #include "boot.h" 6 + #include "../kernel/stackprotector.c"
+8
arch/s390/boot/startup.c
··· 20 20 #include <asm/uv.h> 21 21 #include <asm/abs_lowcore.h> 22 22 #include <asm/physmem_info.h> 23 + #include <asm/stacktrace.h> 24 + #include <asm/asm-offsets.h> 25 + #include <asm/arch-stackprotector.h> 23 26 #include "decompressor.h" 24 27 #include "boot.h" 25 28 #include "uv.h" ··· 480 477 vmlinux.invalid_pg_dir_off += offset; 481 478 vmlinux.alt_instructions += offset; 482 479 vmlinux.alt_instructions_end += offset; 480 + #ifdef CONFIG_STACKPROTECTOR 481 + vmlinux.stack_prot_start += offset; 482 + vmlinux.stack_prot_end += offset; 483 + #endif 483 484 #ifdef CONFIG_KASAN 484 485 vmlinux.kasan_early_shadow_page_off += offset; 485 486 vmlinux.kasan_early_shadow_pte_off += offset; ··· 629 622 __apply_alternatives((struct alt_instr *)_vmlinux_info.alt_instructions, 630 623 (struct alt_instr *)_vmlinux_info.alt_instructions_end, 631 624 ALT_CTX_EARLY); 625 + stack_protector_apply_early(text_lma); 632 626 633 627 /* 634 628 * Save KASLR offset for early dumps, before vmcore_info is set.
-3
arch/s390/configs/compat.config
··· 1 - # Help: Enable compat support 2 - CONFIG_COMPAT=y 3 - CONFIG_COMPAT_32BIT_TIME=y
+1 -2
arch/s390/crypto/aes_s390.c
··· 14 14 * Derived from "crypto/aes_generic.c" 15 15 */ 16 16 17 - #define KMSG_COMPONENT "aes_s390" 18 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 17 + #define pr_fmt(fmt) "aes_s390: " fmt 19 18 20 19 #include <crypto/aes.h> 21 20 #include <crypto/algapi.h>
+1 -2
arch/s390/crypto/hmac_s390.c
··· 5 5 * s390 specific HMAC support. 6 6 */ 7 7 8 - #define KMSG_COMPONENT "hmac_s390" 9 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + #define pr_fmt(fmt) "hmac_s390: " fmt 10 9 11 10 #include <asm/cpacf.h> 12 11 #include <crypto/internal/hash.h>
+1 -2
arch/s390/crypto/paes_s390.c
··· 10 10 * Harald Freudenberger <freude@de.ibm.com> 11 11 */ 12 12 13 - #define KMSG_COMPONENT "paes_s390" 14 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 + #define pr_fmt(fmt) "paes_s390: " fmt 15 14 16 15 #include <linux/atomic.h> 17 16 #include <linux/cpufeature.h>
+1 -2
arch/s390/crypto/phmac_s390.c
··· 5 5 * s390 specific HMAC support for protected keys. 6 6 */ 7 7 8 - #define KMSG_COMPONENT "phmac_s390" 9 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + #define pr_fmt(fmt) "phmac_s390: " fmt 10 9 11 10 #include <asm/cpacf.h> 12 11 #include <asm/pkey.h>
+1 -2
arch/s390/crypto/prng.c
··· 6 6 * Driver for the s390 pseudo random number generator 7 7 */ 8 8 9 - #define KMSG_COMPONENT "prng" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "prng: " fmt 11 10 12 11 #include <linux/fs.h> 13 12 #include <linux/fips.h>
+1 -2
arch/s390/hypfs/hypfs_diag.c
··· 7 7 * Author(s): Michael Holzheu <holzheu@de.ibm.com> 8 8 */ 9 9 10 - #define KMSG_COMPONENT "hypfs" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "hypfs: " fmt 12 11 13 12 #include <linux/types.h> 14 13 #include <linux/errno.h>
+1 -2
arch/s390/hypfs/hypfs_diag_fs.c
··· 7 7 * Author(s): Michael Holzheu <holzheu@de.ibm.com> 8 8 */ 9 9 10 - #define KMSG_COMPONENT "hypfs" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "hypfs: " fmt 12 11 13 12 #include <linux/types.h> 14 13 #include <linux/errno.h>
+1 -5
arch/s390/hypfs/hypfs_sprp.c
··· 7 7 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 8 8 */ 9 9 10 - #include <linux/compat.h> 11 10 #include <linux/errno.h> 12 11 #include <linux/gfp.h> 13 12 #include <linux/string.h> ··· 115 116 116 117 if (!capable(CAP_SYS_ADMIN)) 117 118 return -EACCES; 118 - if (is_compat_task()) 119 - argp = compat_ptr(arg); 120 - else 121 - argp = (void __user *) arg; 119 + argp = (void __user *)arg; 122 120 switch (cmd) { 123 121 case HYPFS_DIAG304: 124 122 return __hypfs_sprp_ioctl(argp);
+1 -2
arch/s390/hypfs/inode.c
··· 6 6 * Author(s): Michael Holzheu <holzheu@de.ibm.com> 7 7 */ 8 8 9 - #define KMSG_COMPONENT "hypfs" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "hypfs: " fmt 11 10 12 11 #include <linux/types.h> 13 12 #include <linux/errno.h>
+22 -8
arch/s390/include/asm/ap.h
··· 38 38 * The ap queue status word is returned by all three AP functions 39 39 * (PQAP, NQAP and DQAP). There's a set of flags in the first 40 40 * byte, followed by a 1 byte response code. 41 + * 42 + * For convenience the 'value' field is a 32 bit access of the 43 + * whole status and the 'status_bits' and 'rc' fields comprise 44 + * the leftmost 8 status bits and the response_code. 41 45 */ 42 46 struct ap_queue_status { 43 - unsigned int queue_empty : 1; 44 - unsigned int replies_waiting : 1; 45 - unsigned int queue_full : 1; 46 - unsigned int : 3; 47 - unsigned int async : 1; 48 - unsigned int irq_enabled : 1; 49 - unsigned int response_code : 8; 50 - unsigned int : 16; 47 + union { 48 + unsigned int value : 32; 49 + struct { 50 + unsigned int status_bits : 8; 51 + unsigned int rc : 8; 52 + unsigned int : 16; 53 + }; 54 + struct { 55 + unsigned int queue_empty : 1; 56 + unsigned int replies_waiting : 1; 57 + unsigned int queue_full : 1; 58 + unsigned int : 3; 59 + unsigned int async : 1; 60 + unsigned int irq_enabled : 1; 61 + unsigned int response_code : 8; 62 + unsigned int : 16; 63 + }; 64 + }; 51 65 }; 52 66 53 67 /*
+25
arch/s390/include/asm/arch-stackprotector.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef _ASM_S390_ARCH_STACKPROTECTOR_H 4 + #define _ASM_S390_ARCH_STACKPROTECTOR_H 5 + 6 + extern unsigned long __stack_chk_guard; 7 + extern int stack_protector_debug; 8 + 9 + void __stack_protector_apply_early(unsigned long kernel_start); 10 + int __stack_protector_apply(unsigned long *start, unsigned long *end, unsigned long kernel_start); 11 + 12 + static inline void stack_protector_apply_early(unsigned long kernel_start) 13 + { 14 + if (IS_ENABLED(CONFIG_STACKPROTECTOR)) 15 + __stack_protector_apply_early(kernel_start); 16 + } 17 + 18 + static inline int stack_protector_apply(unsigned long *start, unsigned long *end) 19 + { 20 + if (IS_ENABLED(CONFIG_STACKPROTECTOR)) 21 + return __stack_protector_apply(start, end, 0); 22 + return 0; 23 + } 24 + 25 + #endif /* _ASM_S390_ARCH_STACKPROTECTOR_H */
+2
arch/s390/include/asm/cio.h
··· 18 18 19 19 #include <asm/scsw.h> 20 20 21 + #define CCW_MAX_BYTE_COUNT 65535 22 + 21 23 /** 22 24 * struct ccw1 - channel command word 23 25 * @cmd_code: command code
-140
arch/s390/include/asm/compat.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _ASM_S390X_COMPAT_H 3 - #define _ASM_S390X_COMPAT_H 4 - /* 5 - * Architecture specific compatibility types 6 - */ 7 - #include <linux/types.h> 8 - #include <linux/sched.h> 9 - #include <linux/sched/task_stack.h> 10 - #include <linux/thread_info.h> 11 - #include <asm/ptrace.h> 12 - 13 - #define compat_mode_t compat_mode_t 14 - typedef u16 compat_mode_t; 15 - 16 - #define __compat_uid_t __compat_uid_t 17 - typedef u16 __compat_uid_t; 18 - typedef u16 __compat_gid_t; 19 - 20 - #define compat_dev_t compat_dev_t 21 - typedef u16 compat_dev_t; 22 - 23 - #define compat_ipc_pid_t compat_ipc_pid_t 24 - typedef u16 compat_ipc_pid_t; 25 - 26 - #define compat_statfs compat_statfs 27 - 28 - #include <asm-generic/compat.h> 29 - 30 - #define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p( \ 31 - typeof(0?(__force t)0:0ULL), u64)) 32 - 33 - #define __SC_DELOUSE(t,v) ({ \ 34 - BUILD_BUG_ON(sizeof(t) > 4 && !__TYPE_IS_PTR(t)); \ 35 - (__force t)(__TYPE_IS_PTR(t) ? 
((v) & 0x7fffffff) : (v)); \ 36 - }) 37 - 38 - #define PSW32_MASK_USER 0x0000FF00UL 39 - 40 - #define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \ 41 - PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \ 42 - PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \ 43 - PSW32_ASC_PRIMARY) 44 - 45 - #define COMPAT_UTS_MACHINE "s390\0\0\0\0" 46 - 47 - typedef u16 compat_nlink_t; 48 - 49 - typedef struct { 50 - u32 mask; 51 - u32 addr; 52 - } __aligned(8) psw_compat_t; 53 - 54 - typedef struct { 55 - psw_compat_t psw; 56 - u32 gprs[NUM_GPRS]; 57 - u32 acrs[NUM_ACRS]; 58 - u32 orig_gpr2; 59 - } s390_compat_regs; 60 - 61 - typedef struct { 62 - u32 gprs_high[NUM_GPRS]; 63 - } s390_compat_regs_high; 64 - 65 - struct compat_stat { 66 - compat_dev_t st_dev; 67 - u16 __pad1; 68 - compat_ino_t st_ino; 69 - compat_mode_t st_mode; 70 - compat_nlink_t st_nlink; 71 - __compat_uid_t st_uid; 72 - __compat_gid_t st_gid; 73 - compat_dev_t st_rdev; 74 - u16 __pad2; 75 - u32 st_size; 76 - u32 st_blksize; 77 - u32 st_blocks; 78 - u32 st_atime; 79 - u32 st_atime_nsec; 80 - u32 st_mtime; 81 - u32 st_mtime_nsec; 82 - u32 st_ctime; 83 - u32 st_ctime_nsec; 84 - u32 __unused4; 85 - u32 __unused5; 86 - }; 87 - 88 - struct compat_statfs { 89 - u32 f_type; 90 - u32 f_bsize; 91 - u32 f_blocks; 92 - u32 f_bfree; 93 - u32 f_bavail; 94 - u32 f_files; 95 - u32 f_ffree; 96 - compat_fsid_t f_fsid; 97 - u32 f_namelen; 98 - u32 f_frsize; 99 - u32 f_flags; 100 - u32 f_spare[4]; 101 - }; 102 - 103 - struct compat_statfs64 { 104 - u32 f_type; 105 - u32 f_bsize; 106 - u64 f_blocks; 107 - u64 f_bfree; 108 - u64 f_bavail; 109 - u64 f_files; 110 - u64 f_ffree; 111 - compat_fsid_t f_fsid; 112 - u32 f_namelen; 113 - u32 f_frsize; 114 - u32 f_flags; 115 - u32 f_spare[5]; 116 - }; 117 - 118 - /* 119 - * A pointer passed in from user mode. This should not 120 - * be used for syscall parameters, just declare them 121 - * as pointers because the syscall entry code will have 122 - * appropriately converted them already. 
123 - */ 124 - 125 - static inline void __user *compat_ptr(compat_uptr_t uptr) 126 - { 127 - return (void __user *)(unsigned long)(uptr & 0x7fffffffUL); 128 - } 129 - #define compat_ptr(uptr) compat_ptr(uptr) 130 - 131 - #ifdef CONFIG_COMPAT 132 - 133 - static inline int is_compat_task(void) 134 - { 135 - return test_thread_flag(TIF_31BIT); 136 - } 137 - 138 - #endif 139 - 140 - #endif /* _ASM_S390X_COMPAT_H */
-1
arch/s390/include/asm/cpufeature.h
··· 27 27 #define cpu_has_edat1() test_facility(8) 28 28 #define cpu_has_edat2() test_facility(78) 29 29 #define cpu_has_gs() test_facility(133) 30 - #define cpu_has_idte() test_facility(3) 31 30 #define cpu_has_nx() test_facility(130) 32 31 #define cpu_has_rdp() test_facility(194) 33 32 #define cpu_has_seq_insn() test_facility(85)
+7 -40
arch/s390/include/asm/elf.h
··· 162 162 * ELF register definitions.. 163 163 */ 164 164 165 - #include <linux/compat.h> 166 - 167 165 #include <asm/ptrace.h> 168 166 #include <asm/syscall.h> 169 167 #include <asm/user.h> 170 168 171 169 typedef s390_fp_regs elf_fpregset_t; 172 170 typedef s390_regs elf_gregset_t; 173 - 174 - typedef s390_fp_regs compat_elf_fpregset_t; 175 - typedef s390_compat_regs compat_elf_gregset_t; 176 171 177 172 #include <linux/sched/mm.h> /* for task_struct */ 178 173 #include <asm/mmu_context.h> ··· 178 183 #define elf_check_arch(x) \ 179 184 (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \ 180 185 && (x)->e_ident[EI_CLASS] == ELF_CLASS) 181 - #define compat_elf_check_arch(x) \ 182 - (((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \ 183 - && (x)->e_ident[EI_CLASS] == ELF_CLASS) 184 - #define compat_start_thread start_thread31 185 186 186 187 /* For SVR4/S390 the function pointer to be registered with `atexit` is 187 188 passed in R14. */ ··· 194 203 the loader. We need to make sure that it is out of the way of the program 195 204 that it will "exec", and that there is sufficient room for the brk. 64-bit 196 205 tasks are aligned to 4GB. */ 197 - #define ELF_ET_DYN_BASE (is_compat_task() ? \ 198 - (STACK_TOP / 3 * 2) : \ 199 - (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1)) 206 + #define ELF_ET_DYN_BASE ((STACK_TOP / 3 * 2) & ~((1UL << 32) - 1)) 200 207 201 208 /* This yields a mask that user programs can use to figure out what 202 209 instruction set this CPU supports. 
*/ ··· 213 224 extern char elf_platform[]; 214 225 #define ELF_PLATFORM (elf_platform) 215 226 216 - #ifndef CONFIG_COMPAT 217 227 #define SET_PERSONALITY(ex) \ 218 228 do { \ 219 229 set_personality(PER_LINUX | \ 220 230 (current->personality & (~PER_MASK))); \ 221 - current->thread.sys_call_table = sys_call_table; \ 222 231 } while (0) 223 - #else /* CONFIG_COMPAT */ 224 - #define SET_PERSONALITY(ex) \ 225 - do { \ 226 - if (personality(current->personality) != PER_LINUX32) \ 227 - set_personality(PER_LINUX | \ 228 - (current->personality & ~PER_MASK)); \ 229 - if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \ 230 - set_thread_flag(TIF_31BIT); \ 231 - current->thread.sys_call_table = \ 232 - sys_call_table_emu; \ 233 - } else { \ 234 - clear_thread_flag(TIF_31BIT); \ 235 - current->thread.sys_call_table = \ 236 - sys_call_table; \ 237 - } \ 238 - } while (0) 239 - #endif /* CONFIG_COMPAT */ 240 232 241 233 /* 242 234 * Cache aliasing on the latest machines calls for a mapping granularity 243 - * of 512KB for the anonymous mapping base. For 64-bit processes use a 244 - * 512KB alignment and a randomization of up to 1GB. For 31-bit processes 245 - * the virtual address space is limited, use no alignment and limit the 246 - * randomization to 8MB. 247 - * For the additional randomization of the program break use 32MB for 248 - * 64-bit and 8MB for 31-bit. 235 + * of 512KB for the anonymous mapping base. Use a 512KB alignment and a 236 + * randomization of up to 1GB. 237 + * For the additional randomization of the program break use 32MB. 249 238 */ 250 - #define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x1fffUL) 251 - #define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL) 252 - #define MMAP_ALIGN_MASK (is_compat_task() ? 
0 : 0x7fUL) 239 + #define BRK_RND_MASK (0x1fffUL) 240 + #define MMAP_RND_MASK (0x3ff80UL) 241 + #define MMAP_ALIGN_MASK (0x7fUL) 253 242 #define STACK_RND_MASK MMAP_RND_MASK 254 243 255 244 /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+3
arch/s390/include/asm/fpu-insn.h
··· 12 12 #ifndef __ASSEMBLER__ 13 13 14 14 #include <linux/instrumented.h> 15 + #include <linux/kmsan.h> 15 16 #include <asm/asm-extable.h> 16 17 17 18 asm(".include \"asm/fpu-insn-asm.h\"\n"); ··· 394 393 : [vxr] "=Q" (*(u8 *)vxr) 395 394 : [index] "d" (index), [v1] "I" (v1) 396 395 : "memory"); 396 + kmsan_unpoison_memory(vxr, size); 397 397 } 398 398 399 399 #else /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */ ··· 411 409 : [vxr] "=R" (*(u8 *)vxr) 412 410 : [index] "d" (index), [v1] "I" (v1) 413 411 : "memory", "1"); 412 + kmsan_unpoison_memory(vxr, size); 414 413 } 415 414 416 415 #endif /* CONFIG_CC_HAS_ASM_AOR_FORMAT_FLAGS */
+1 -18
arch/s390/include/asm/ftrace.h
··· 105 105 } 106 106 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ 107 107 108 - /* 109 - * Even though the system call numbers are identical for s390/s390x a 110 - * different system call table is used for compat tasks. This may lead 111 - * to e.g. incorrect or missing trace event sysfs files. 112 - * Therefore simply do not trace compat system calls at all. 113 - * See kernel/trace/trace_syscalls.c. 114 - */ 115 - #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 116 - static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) 117 - { 118 - return is_compat_task(); 119 - } 120 - 121 108 #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME 122 109 static inline bool arch_syscall_match_sym_name(const char *sym, 123 110 const char *name) 124 111 { 125 - /* 126 - * Skip __s390_ and __s390x_ prefix - due to compat wrappers 127 - * and aliasing some symbols of 64 bit system call functions 128 - * may get the __s390_ prefix instead of the __s390x_ prefix. 129 - */ 112 + /* Skip the __s390x_ prefix. */ 130 113 return !strcmp(sym + 7, name) || !strcmp(sym + 8, name); 131 114 } 132 115
+76
arch/s390/include/asm/idals.h
··· 181 181 } 182 182 183 183 /* 184 + * Allocate an array of IDAL buffers to cover a total data size of @size. The 185 + * resulting array is null-terminated. 186 + * 187 + * The amount of individual IDAL buffers is determined based on @size. 188 + * Each IDAL buffer can have a maximum size of @CCW_MAX_BYTE_COUNT. 189 + */ 190 + static inline struct idal_buffer **idal_buffer_array_alloc(size_t size, int page_order) 191 + { 192 + struct idal_buffer **ibs; 193 + size_t ib_size; /* Size of a single idal buffer */ 194 + int count; /* Amount of individual idal buffers */ 195 + int i; 196 + 197 + count = (size + CCW_MAX_BYTE_COUNT - 1) / CCW_MAX_BYTE_COUNT; 198 + ibs = kmalloc_array(count + 1, sizeof(*ibs), GFP_KERNEL); 199 + for (i = 0; i < count; i++) { 200 + /* Determine size for the current idal buffer */ 201 + ib_size = min(size, CCW_MAX_BYTE_COUNT); 202 + size -= ib_size; 203 + ibs[i] = idal_buffer_alloc(ib_size, page_order); 204 + if (IS_ERR(ibs[i])) { 205 + while (i--) 206 + idal_buffer_free(ibs[i]); 207 + kfree(ibs); 208 + ibs = NULL; 209 + return ERR_PTR(-ENOMEM); 210 + } 211 + } 212 + ibs[i] = NULL; 213 + return ibs; 214 + } 215 + 216 + /* 217 + * Free array of IDAL buffers 218 + */ 219 + static inline void idal_buffer_array_free(struct idal_buffer ***ibs) 220 + { 221 + struct idal_buffer **p; 222 + 223 + if (!ibs || !*ibs) 224 + return; 225 + for (p = *ibs; *p; p++) 226 + idal_buffer_free(*p); 227 + kfree(*ibs); 228 + *ibs = NULL; 229 + } 230 + 231 + /* 232 + * Determine size of IDAL buffer array 233 + */ 234 + static inline int idal_buffer_array_size(struct idal_buffer **ibs) 235 + { 236 + int size = 0; 237 + 238 + while (ibs && *ibs) { 239 + size++; 240 + ibs++; 241 + } 242 + return size; 243 + } 244 + 245 + /* 246 + * Determine total data size covered by IDAL buffer array 247 + */ 248 + static inline size_t idal_buffer_array_datasize(struct idal_buffer **ibs) 249 + { 250 + size_t size = 0; 251 + 252 + while (ibs && *ibs) { 253 + size += (*ibs)->size; 254 
+ ibs++; 255 + } 256 + return size; 257 + } 258 + 259 + /* 184 260 * Test if a idal list is really needed. 185 261 */ 186 262 static inline bool __idal_buffer_is_needed(struct idal_buffer *ib)
+2 -1
arch/s390/include/asm/lowcore.h
··· 100 100 101 101 /* Save areas. */ 102 102 __u64 save_area[8]; /* 0x0200 */ 103 - __u8 pad_0x0240[0x0280-0x0240]; /* 0x0240 */ 103 + __u64 stack_canary; /* 0x0240 */ 104 + __u8 pad_0x0248[0x0280-0x0248]; /* 0x0248 */ 104 105 __u64 save_area_restart[1]; /* 0x0280 */ 105 106 106 107 __u64 pcpu; /* 0x0288 */
+1
arch/s390/include/asm/pai.h
··· 77 77 78 78 #define PAI_SAVE_AREA(x) ((x)->hw.event_base) 79 79 #define PAI_CPU_MASK(x) ((x)->hw.addr_filters) 80 + #define PAI_PMU_IDX(x) ((x)->hw.last_tag) 80 81 #define PAI_SWLIST(x) (&(x)->hw.tp_list) 81 82 82 83 #endif
-8
arch/s390/include/asm/percpu.h
··· 13 13 #define __my_cpu_offset get_lowcore()->percpu_offset 14 14 15 15 /* 16 - * For 64 bit module code, the module may be more than 4G above the 17 - * per cpu area, use weak definitions to force the compiler to 18 - * generate external references. 19 - * Therefore, we have enabled CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU 20 - * in the Kconfig. 21 - */ 22 - 23 - /* 24 16 * We use a compare-and-swap loop since that uses less cpu cycles than 25 17 * disabling and enabling interrupts like the generic variant would do. 26 18 */
+3 -16
arch/s390/include/asm/pgtable.h
··· 648 648 return 0; 649 649 } 650 650 651 - static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new) 652 - { 653 - union register_pair r1 = { .even = old, .odd = new, }; 654 - unsigned long address = (unsigned long)ptr | 1; 655 - 656 - asm volatile( 657 - " csp %[r1],%[address]" 658 - : [r1] "+&d" (r1.pair), "+m" (*ptr) 659 - : [address] "d" (address) 660 - : "cc"); 661 - } 662 - 663 651 /** 664 652 * cspg() - Compare and Swap and Purge (CSPG) 665 653 * @ptr: Pointer to the value to be exchanged ··· 1388 1400 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep); 1389 1401 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, 1390 1402 unsigned long *oldpte, unsigned long *oldpgste); 1391 - void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr); 1392 1403 void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr); 1393 1404 void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr); 1394 1405 void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr); ··· 1677 1690 1678 1691 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ 1679 1692 1680 - static inline void __pmdp_csp(pmd_t *pmdp) 1693 + static inline void __pmdp_cspg(pmd_t *pmdp) 1681 1694 { 1682 - csp((unsigned int *)pmdp + 1, pmd_val(*pmdp), 1683 - pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); 1695 + cspg((unsigned long *)pmdp, pmd_val(*pmdp), 1696 + pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); 1684 1697 } 1685 1698 1686 1699 #define IDTE_GLOBAL 0
+10 -12
arch/s390/include/asm/processor.h
··· 119 119 unsigned long vdso_text_size(void); 120 120 unsigned long vdso_size(void); 121 121 122 - /* 123 - * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. 124 - */ 125 - 126 - #define TASK_SIZE (test_thread_flag(TIF_31BIT) ? \ 127 - _REGION3_SIZE : TASK_SIZE_MAX) 128 - #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ 129 - (_REGION3_SIZE >> 1) : (_REGION2_SIZE >> 1)) 122 + #define TASK_SIZE (TASK_SIZE_MAX) 123 + #define TASK_UNMAPPED_BASE (_REGION2_SIZE >> 1) 130 124 #define TASK_SIZE_MAX (-PAGE_SIZE) 131 125 132 126 #define VDSO_BASE (STACK_TOP + PAGE_SIZE) 133 - #define VDSO_LIMIT (test_thread_flag(TIF_31BIT) ? _REGION3_SIZE : _REGION2_SIZE) 127 + #define VDSO_LIMIT (_REGION2_SIZE) 134 128 #define STACK_TOP (VDSO_LIMIT - vdso_size() - PAGE_SIZE) 135 129 #define STACK_TOP_MAX (_REGION2_SIZE - vdso_size() - PAGE_SIZE) 136 130 ··· 175 181 unsigned long system_timer; /* task cputime in kernel space */ 176 182 unsigned long hardirq_timer; /* task cputime in hardirq context */ 177 183 unsigned long softirq_timer; /* task cputime in softirq context */ 178 - const sys_call_ptr_t *sys_call_table; /* system call table address */ 179 184 union teid gmap_teid; /* address and flags of last gmap fault */ 180 185 unsigned int gmap_int_code; /* int code of last gmap fault */ 181 186 int ufpu_flags; /* user fpu flags */ ··· 372 379 /* 373 380 * Rewind PSW instruction address by specified number of bytes. 374 381 */ 375 - static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc) 382 + static inline unsigned long __rewind_psw(psw_t psw, long ilen) 376 383 { 377 384 unsigned long mask; 378 385 379 386 mask = (psw.mask & PSW_MASK_EA) ? -1UL : 380 387 (psw.mask & PSW_MASK_BA) ? 
(1UL << 31) - 1 : 381 388 (1UL << 24) - 1; 382 - return (psw.addr - ilc) & mask; 389 + return (psw.addr - ilen) & mask; 390 + } 391 + 392 + static inline unsigned long __forward_psw(psw_t psw, long ilen) 393 + { 394 + return __rewind_psw(psw, -ilen); 383 395 } 384 396 385 397 /*
+4 -1
arch/s390/include/asm/ptrace.h
··· 8 8 #define _S390_PTRACE_H 9 9 10 10 #include <linux/bits.h> 11 + #include <linux/typecheck.h> 11 12 #include <uapi/asm/ptrace.h> 12 13 #include <asm/thread_info.h> 13 14 #include <asm/tpi.h> 14 15 15 16 #define PIF_SYSCALL 0 /* inside a system call */ 17 + #define PIF_PSW_ADDR_ADJUSTED 1 /* psw address has been adjusted */ 16 18 #define PIF_SYSCALL_RET_SET 2 /* return value was set via ptrace */ 17 19 #define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */ 18 20 #define PIF_FTRACE_FULL_REGS 4 /* all register contents valid (ftrace) */ 19 21 20 22 #define _PIF_SYSCALL BIT(PIF_SYSCALL) 23 + #define _PIF_ADDR_PSW_ADJUSTED BIT(PIF_PSW_ADDR_ADJUSTED) 21 24 #define _PIF_SYSCALL_RET_SET BIT(PIF_SYSCALL_RET_SET) 22 25 #define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT) 23 26 #define _PIF_FTRACE_FULL_REGS BIT(PIF_FTRACE_FULL_REGS) ··· 102 99 typedef struct { 103 100 unsigned int mask; 104 101 unsigned int addr; 105 - } psw_t32 __aligned(8); 102 + } psw32_t __aligned(8); 106 103 107 104 #define PGM_INT_CODE_MASK 0x7f 108 105 #define PGM_INT_CODE_PER 0x80
-5
arch/s390/include/asm/seccomp.h
··· 19 19 #define SECCOMP_ARCH_NATIVE AUDIT_ARCH_S390X 20 20 #define SECCOMP_ARCH_NATIVE_NR NR_syscalls 21 21 #define SECCOMP_ARCH_NATIVE_NAME "s390x" 22 - #ifdef CONFIG_COMPAT 23 - # define SECCOMP_ARCH_COMPAT AUDIT_ARCH_S390 24 - # define SECCOMP_ARCH_COMPAT_NR NR_syscalls 25 - # define SECCOMP_ARCH_COMPAT_NAME "s390" 26 - #endif 27 22 28 23 #endif /* _ASM_S390_SECCOMP_H */
+1 -1
arch/s390/include/asm/smp.h
··· 43 43 extern void arch_send_call_function_single_ipi(int cpu); 44 44 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 45 45 46 - extern void smp_call_ipl_cpu(void (*func)(void *), void *); 46 + extern void __noreturn smp_call_ipl_cpu(void (*func)(void *), void *data); 47 47 extern void smp_emergency_stop(void); 48 48 49 49 extern int smp_find_processor_id(u16 address);
+16
arch/s390/include/asm/stackprotector.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef _ASM_S390_STACKPROTECTOR_H 4 + #define _ASM_S390_STACKPROTECTOR_H 5 + 6 + #include <linux/sched.h> 7 + #include <asm/current.h> 8 + #include <asm/lowcore.h> 9 + 10 + static __always_inline void boot_init_stack_canary(void) 11 + { 12 + current->stack_canary = get_random_canary(); 13 + get_lowcore()->stack_canary = current->stack_canary; 14 + } 15 + 16 + #endif /* _ASM_S390_STACKPROTECTOR_H */
+1 -18
arch/s390/include/asm/syscall.h
··· 15 15 #include <asm/ptrace.h> 16 16 17 17 extern const sys_call_ptr_t sys_call_table[]; 18 - extern const sys_call_ptr_t sys_call_table_emu[]; 19 18 20 19 static inline long syscall_get_nr(struct task_struct *task, 21 20 struct pt_regs *regs) ··· 45 46 struct pt_regs *regs) 46 47 { 47 48 unsigned long error = regs->gprs[2]; 48 - #ifdef CONFIG_COMPAT 49 - if (test_tsk_thread_flag(task, TIF_31BIT)) { 50 - /* 51 - * Sign-extend the value so (int)-EFOO becomes (long)-EFOO 52 - * and will match correctly in comparisons. 53 - */ 54 - error = (long)(int)error; 55 - } 56 - #endif 49 + 57 50 return IS_ERR_VALUE(error) ? error : 0; 58 51 } 59 52 ··· 69 78 { 70 79 unsigned long mask = -1UL; 71 80 72 - #ifdef CONFIG_COMPAT 73 - if (test_tsk_thread_flag(task, TIF_31BIT)) 74 - mask = 0xffffffff; 75 - #endif 76 81 for (int i = 1; i < 6; i++) 77 82 args[i] = regs->gprs[2 + i] & mask; 78 83 ··· 86 99 87 100 static inline int syscall_get_arch(struct task_struct *task) 88 101 { 89 - #ifdef CONFIG_COMPAT 90 - if (test_tsk_thread_flag(task, TIF_31BIT)) 91 - return AUDIT_ARCH_S390; 92 - #endif 93 102 return AUDIT_ARCH_S390X; 94 103 } 95 104
+2 -93
arch/s390/include/asm/syscall_wrapper.h
··· 13 13 ,, regs->orig_gpr2,, regs->gprs[3],, regs->gprs[4] \ 14 14 ,, regs->gprs[5],, regs->gprs[6],, regs->gprs[7]) 15 15 16 - #ifdef CONFIG_COMPAT 17 - 18 - #define __SC_COMPAT_CAST(t, a) \ 19 - ({ \ 20 - long __ReS = a; \ 21 - \ 22 - BUILD_BUG_ON((sizeof(t) > 4) && !__TYPE_IS_L(t) && \ 23 - !__TYPE_IS_UL(t) && !__TYPE_IS_PTR(t) && \ 24 - !__TYPE_IS_LL(t)); \ 25 - if (__TYPE_IS_L(t)) \ 26 - __ReS = (s32)a; \ 27 - if (__TYPE_IS_UL(t)) \ 28 - __ReS = (u32)a; \ 29 - if (__TYPE_IS_PTR(t)) \ 30 - __ReS = a & 0x7fffffff; \ 31 - if (__TYPE_IS_LL(t)) \ 32 - return -ENOSYS; \ 33 - (t)__ReS; \ 34 - }) 35 - 36 - /* 37 - * To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias 38 - * named __s390x_sys_*() 39 - */ 40 - #define COMPAT_SYSCALL_DEFINE0(sname) \ 41 - long __s390_compat_sys_##sname(void); \ 42 - ALLOW_ERROR_INJECTION(__s390_compat_sys_##sname, ERRNO); \ 43 - long __s390_compat_sys_##sname(void) 44 - 45 16 #define SYSCALL_DEFINE0(sname) \ 46 17 SYSCALL_METADATA(_##sname, 0); \ 47 - long __s390_sys_##sname(void); \ 48 - ALLOW_ERROR_INJECTION(__s390_sys_##sname, ERRNO); \ 49 - long __s390x_sys_##sname(void); \ 18 + long __s390x_sys_##sname(struct pt_regs *__unused); \ 50 19 ALLOW_ERROR_INJECTION(__s390x_sys_##sname, ERRNO); \ 51 20 static inline long __do_sys_##sname(void); \ 52 - long __s390_sys_##sname(void) \ 53 - { \ 54 - return __do_sys_##sname(); \ 55 - } \ 56 - long __s390x_sys_##sname(void) \ 57 - { \ 58 - return __do_sys_##sname(); \ 59 - } \ 60 - static inline long __do_sys_##sname(void) 61 - 62 - #define COND_SYSCALL(name) \ 63 - cond_syscall(__s390x_sys_##name); \ 64 - cond_syscall(__s390_sys_##name) 65 - 66 - #define COMPAT_SYSCALL_DEFINEx(x, name, ...) 
\ 67 - long __s390_compat_sys##name(struct pt_regs *regs); \ 68 - ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \ 69 - static inline long __se_compat_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)); \ 70 - static inline long __do_compat_sys##name(__MAP(x, __SC_DECL, __VA_ARGS__)); \ 71 - long __s390_compat_sys##name(struct pt_regs *regs) \ 72 - { \ 73 - return __se_compat_sys##name(SC_S390_REGS_TO_ARGS(x, __VA_ARGS__)); \ 74 - } \ 75 - static inline long __se_compat_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)) \ 76 - { \ 77 - __MAP(x, __SC_TEST, __VA_ARGS__); \ 78 - return __do_compat_sys##name(__MAP(x, __SC_DELOUSE, __VA_ARGS__)); \ 79 - } \ 80 - static inline long __do_compat_sys##name(__MAP(x, __SC_DECL, __VA_ARGS__)) 81 - 82 - /* 83 - * As some compat syscalls may not be implemented, we need to expand 84 - * COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well. 85 - */ 86 - #define COND_SYSCALL_COMPAT(name) \ 87 - cond_syscall(__s390_compat_sys_##name) 88 - 89 - #define __S390_SYS_STUBx(x, name, ...) 
\ 90 - long __s390_sys##name(struct pt_regs *regs); \ 91 - ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \ 92 - static inline long ___se_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)); \ 93 - long __s390_sys##name(struct pt_regs *regs) \ 94 - { \ 95 - return ___se_sys##name(SC_S390_REGS_TO_ARGS(x, __VA_ARGS__)); \ 96 - } \ 97 - static inline long ___se_sys##name(__MAP(x, __SC_LONG, __VA_ARGS__)) \ 98 - { \ 99 - __MAP(x, __SC_TEST, __VA_ARGS__); \ 100 - return __do_sys##name(__MAP(x, __SC_COMPAT_CAST, __VA_ARGS__)); \ 101 - } 102 - 103 - #else /* CONFIG_COMPAT */ 104 - 105 - #define SYSCALL_DEFINE0(sname) \ 106 - SYSCALL_METADATA(_##sname, 0); \ 107 - long __s390x_sys_##sname(void); \ 108 - ALLOW_ERROR_INJECTION(__s390x_sys_##sname, ERRNO); \ 109 - static inline long __do_sys_##sname(void); \ 110 - long __s390x_sys_##sname(void) \ 21 + long __s390x_sys_##sname(struct pt_regs *__unused) \ 111 22 { \ 112 23 return __do_sys_##sname(); \ 113 24 } \ ··· 28 117 cond_syscall(__s390x_sys_##name) 29 118 30 119 #define __S390_SYS_STUBx(x, fullname, name, ...) 31 - 32 - #endif /* CONFIG_COMPAT */ 33 120 34 121 #define __SYSCALL_DEFINEx(x, name, ...) \ 35 122 long __s390x_sys##name(struct pt_regs *regs); \
-2
arch/s390/include/asm/thread_info.h
··· 69 69 #define TIF_GUARDED_STORAGE 17 /* load guarded storage control block */ 70 70 #define TIF_ISOLATE_BP_GUEST 18 /* Run KVM guests with isolated BP */ 71 71 #define TIF_PER_TRAP 19 /* Need to handle PER trap on exit to usermode */ 72 - #define TIF_31BIT 20 /* 32bit process */ 73 72 #define TIF_SINGLE_STEP 21 /* This task is single stepped */ 74 73 #define TIF_BLOCK_STEP 22 /* This task is block stepped */ 75 74 #define TIF_UPROBE_SINGLESTEP 23 /* This task is uprobe single stepped */ ··· 77 78 #define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE) 78 79 #define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST) 79 80 #define _TIF_PER_TRAP BIT(TIF_PER_TRAP) 80 - #define _TIF_31BIT BIT(TIF_31BIT) 81 81 #define _TIF_SINGLE_STEP BIT(TIF_SINGLE_STEP) 82 82 #define _TIF_BLOCK_STEP BIT(TIF_BLOCK_STEP) 83 83 #define _TIF_UPROBE_SINGLESTEP BIT(TIF_UPROBE_SINGLESTEP)
+4 -9
arch/s390/include/asm/tlbflush.h
··· 35 35 */ 36 36 static inline void __tlb_flush_global(void) 37 37 { 38 - unsigned int dummy = 0; 38 + unsigned long dummy = 0; 39 39 40 - csp(&dummy, 0, 0); 40 + cspg(&dummy, 0, 0); 41 41 } 42 42 43 43 /* ··· 54 54 cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask); 55 55 barrier(); 56 56 gmap_asce = READ_ONCE(mm->context.gmap_asce); 57 - if (cpu_has_idte() && gmap_asce != -1UL) { 57 + if (gmap_asce != -1UL) { 58 58 if (gmap_asce) 59 59 __tlb_flush_idte(gmap_asce); 60 60 __tlb_flush_idte(mm->context.asce); ··· 68 68 69 69 static inline void __tlb_flush_kernel(void) 70 70 { 71 - if (cpu_has_idte()) 72 - __tlb_flush_idte(init_mm.context.asce); 73 - else 74 - __tlb_flush_global(); 71 + __tlb_flush_idte(init_mm.context.asce); 75 72 } 76 73 77 74 static inline void __tlb_flush_mm_lazy(struct mm_struct * mm) ··· 83 86 84 87 /* 85 88 * TLB flushing: 86 - * flush_tlb() - flushes the current mm struct TLBs 87 89 * flush_tlb_all() - flushes all processes TLBs 88 90 * flush_tlb_mm(mm) - flushes the specified mm context TLB's 89 91 * flush_tlb_page(vma, vmaddr) - flushes one page ··· 98 102 * only one user. At the end of the update the flush_tlb_mm and 99 103 * flush_tlb_range functions need to do the flush. 100 104 */ 101 - #define flush_tlb() do { } while (0) 102 105 #define flush_tlb_all() do { } while (0) 103 106 #define flush_tlb_page(vma, addr) do { } while (0) 104 107
+87
arch/s390/include/asm/trace/ap.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Tracepoint definitions for s390 ap bus related trace events 4 + * 5 + * There are two AP bus related tracepoint events defined here: 6 + * There is a tracepoint s390_ap_nqap event immediately after a request 7 + * has been pushed into the AP firmware queue with the NQAP AP command. 8 + * The other tracepoint s390_ap_dqap event fires immediately after a 9 + * reply has been pulled out of the AP firmware queue via DQAP AP command. 10 + * The idea of these two trace events focuses on performance to measure 11 + * the runtime of a crypto request/reply as close as possible at the 12 + * firmware level. In combination with the two zcrypt tracepoints (see the 13 + * zcrypt.h trace event definition file) this gives measurement data about 14 + * the runtime of a request/reply within the zcrpyt and AP bus layer. 15 + */ 16 + 17 + #undef TRACE_SYSTEM 18 + #define TRACE_SYSTEM s390 19 + 20 + #if !defined(_TRACE_S390_AP_H) || defined(TRACE_HEADER_MULTI_READ) 21 + #define _TRACE_S390_AP_H 22 + 23 + #include <linux/tracepoint.h> 24 + 25 + DECLARE_EVENT_CLASS(s390_ap_nqapdqap_template, 26 + TP_PROTO(u16 card, u16 dom, u32 status, u64 psmid), 27 + TP_ARGS(card, dom, status, psmid), 28 + TP_STRUCT__entry( 29 + __field(u16, card) 30 + __field(u16, dom) 31 + __field(u32, status) 32 + __field(u64, psmid)), 33 + TP_fast_assign( 34 + __entry->card = card; 35 + __entry->dom = dom; 36 + __entry->status = status; 37 + __entry->psmid = psmid;), 38 + TP_printk("card=%u dom=%u status=0x%08x psmid=0x%016lx", 39 + (unsigned short)__entry->card, 40 + (unsigned short)__entry->dom, 41 + (unsigned int)__entry->status, 42 + (unsigned long)__entry->psmid) 43 + ); 44 + 45 + /** 46 + * trace_s390_ap_nqap - ap msg nqap tracepoint function 47 + * @card: Crypto card number addressed. 48 + * @dom: Domain within the crypto card addressed. 49 + * @status: AP queue status (GR1 on return of nqap). 
50 + * @psmid: Unique id identifying this request/reply. 51 + * 52 + * Called immediately after a request has been enqueued into 53 + * the AP firmware queue with the NQAP command. 54 + */ 55 + DEFINE_EVENT(s390_ap_nqapdqap_template, 56 + s390_ap_nqap, 57 + TP_PROTO(u16 card, u16 dom, u32 status, u64 psmid), 58 + TP_ARGS(card, dom, status, psmid) 59 + ); 60 + 61 + /** 62 + * trace_s390_ap_dqap - ap msg dqap tracepoint function 63 + * @card: Crypto card number addressed. 64 + * @dom: Domain within the crypto card addressed. 65 + * @status: AP queue status (GR1 on return of dqap). 66 + * @psmid: Unique id identifying this request/reply. 67 + * 68 + * Called immediately after a reply has been dequeued from 69 + * the AP firmware queue with the DQAP command. 70 + */ 71 + DEFINE_EVENT(s390_ap_nqapdqap_template, 72 + s390_ap_dqap, 73 + TP_PROTO(u16 card, u16 dom, u32 status, u64 psmid), 74 + TP_ARGS(card, dom, status, psmid) 75 + ); 76 + 77 + #endif /* _TRACE_S390_AP_H */ 78 + 79 + /* This part must be outside protection */ 80 + 81 + #undef TRACE_INCLUDE_PATH 82 + #undef TRACE_INCLUDE_FILE 83 + 84 + #define TRACE_INCLUDE_PATH asm/trace 85 + #define TRACE_INCLUDE_FILE ap 86 + 87 + #include <trace/define_trace.h>
+24 -20
arch/s390/include/asm/trace/zcrypt.h
··· 2 2 /* 3 3 * Tracepoint definitions for the s390 zcrypt device driver 4 4 * 5 - * Copyright IBM Corp. 2016 5 + * Copyright IBM Corp. 2016,2025 6 6 * Author(s): Harald Freudenberger <freude@de.ibm.com> 7 7 * 8 8 * Currently there are two tracepoint events defined here. ··· 73 73 74 74 /** 75 75 * trace_s390_zcrypt_rep - zcrypt reply tracepoint function 76 - * @ptr: Address of the local buffer where the request from userspace 77 - * is stored. Can be used as a unique id to match together 78 - * request and reply. 79 - * @fc: Function code. 80 - * @rc: The bare returncode as returned by the device driver ioctl 81 - * function. 82 - * @dev: The adapter nr where this request was actually processed. 83 - * @dom: Domain id of the device where this request was processed. 76 + * @ptr: Address of the local buffer where the request from userspace 77 + * is stored. Can be used as a unique id to match together 78 + * request and reply. 79 + * @fc: Function code. 80 + * @rc: The bare returncode as returned by the device driver ioctl 81 + * function. 82 + * @card: The adapter nr where this request was actually processed. 83 + * @dom: Domain id of the device where this request was processed. 84 + * @psmid: Unique id identifying this request/reply. 84 85 * 85 86 * Called upon recognising the reply from the crypto adapter. This 86 87 * message may act as the exit timestamp for the request but also ··· 89 88 * and the returncode from the device driver. 
90 89 */ 91 90 TRACE_EVENT(s390_zcrypt_rep, 92 - TP_PROTO(void *ptr, u32 fc, u32 rc, u16 dev, u16 dom), 93 - TP_ARGS(ptr, fc, rc, dev, dom), 91 + TP_PROTO(void *ptr, u32 fc, u32 rc, u16 card, u16 dom, u64 psmid), 92 + TP_ARGS(ptr, fc, rc, card, dom, psmid), 94 93 TP_STRUCT__entry( 95 94 __field(void *, ptr) 96 95 __field(u32, fc) 97 96 __field(u32, rc) 98 - __field(u16, device) 99 - __field(u16, domain)), 97 + __field(u16, card) 98 + __field(u16, dom) 99 + __field(u64, psmid)), 100 100 TP_fast_assign( 101 101 __entry->ptr = ptr; 102 102 __entry->fc = fc; 103 103 __entry->rc = rc; 104 - __entry->device = dev; 105 - __entry->domain = dom;), 106 - TP_printk("ptr=%p fc=0x%04x rc=%d dev=0x%02hx domain=0x%04hx", 104 + __entry->card = card; 105 + __entry->dom = dom; 106 + __entry->psmid = psmid;), 107 + TP_printk("ptr=%p fc=0x%04x rc=%d card=%u dom=%u psmid=0x%016lx", 107 108 __entry->ptr, 108 - (unsigned int) __entry->fc, 109 - (int) __entry->rc, 110 - (unsigned short) __entry->device, 111 - (unsigned short) __entry->domain) 109 + (unsigned int)__entry->fc, 110 + (int)__entry->rc, 111 + (unsigned short)__entry->card, 112 + (unsigned short)__entry->dom, 113 + (unsigned long)__entry->psmid) 112 114 ); 113 115 114 116 #endif /* _TRACE_S390_ZCRYPT_H */
+2 -6
arch/s390/include/asm/unistd.h
··· 8 8 #define _ASM_S390_UNISTD_H_ 9 9 10 10 #include <uapi/asm/unistd.h> 11 - #include <asm/unistd_nr.h> 11 + 12 + #define NR_syscalls (__NR_syscalls) 12 13 13 14 #define __ARCH_WANT_NEW_STAT 14 15 #define __ARCH_WANT_OLD_READDIR ··· 28 27 #define __ARCH_WANT_SYS_OLDUMOUNT 29 28 #define __ARCH_WANT_SYS_SIGPENDING 30 29 #define __ARCH_WANT_SYS_SIGPROCMASK 31 - # ifdef CONFIG_COMPAT 32 - # define __ARCH_WANT_COMPAT_STAT 33 - # define __ARCH_WANT_SYS_TIME32 34 - # define __ARCH_WANT_SYS_UTIME32 35 - # endif 36 30 #define __ARCH_WANT_SYS_FORK 37 31 #define __ARCH_WANT_SYS_VFORK 38 32 #define __ARCH_WANT_SYS_CLONE
+2 -10
arch/s390/include/asm/vdso-symbols.h
··· 2 2 #ifndef __S390_VDSO_SYMBOLS_H__ 3 3 #define __S390_VDSO_SYMBOLS_H__ 4 4 5 - #include <generated/vdso64-offsets.h> 6 - #ifdef CONFIG_COMPAT 7 - #include <generated/vdso32-offsets.h> 8 - #endif 5 + #include <generated/vdso-offsets.h> 9 6 10 - #define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name)) 11 - #ifdef CONFIG_COMPAT 12 - #define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name)) 13 - #else 14 - #define VDSO32_SYMBOL(tsk, name) (-1UL) 15 - #endif 7 + #define VDSO_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso_offset_##name)) 16 8 17 9 #endif /* __S390_VDSO_SYMBOLS_H__ */
-4
arch/s390/include/uapi/asm/bitsperlong.h
··· 2 2 #ifndef __ASM_S390_BITSPERLONG_H 3 3 #define __ASM_S390_BITSPERLONG_H 4 4 5 - #ifndef __s390x__ 6 - #define __BITS_PER_LONG 32 7 - #else 8 5 #define __BITS_PER_LONG 64 9 - #endif 10 6 11 7 #include <asm-generic/bitsperlong.h> 12 8
-3
arch/s390/include/uapi/asm/ipcbuf.h
··· 24 24 __kernel_mode_t mode; 25 25 unsigned short __pad1; 26 26 unsigned short seq; 27 - #ifndef __s390x__ 28 - unsigned short __pad2; 29 - #endif /* ! __s390x__ */ 30 27 unsigned long __unused1; 31 28 unsigned long __unused2; 32 29 };
-13
arch/s390/include/uapi/asm/posix_types.h
··· 26 26 #define __kernel_old_uid_t __kernel_old_uid_t 27 27 #endif 28 28 29 - #ifndef __s390x__ 30 - 31 - typedef unsigned long __kernel_ino_t; 32 - typedef unsigned short __kernel_mode_t; 33 - typedef unsigned short __kernel_ipc_pid_t; 34 - typedef unsigned short __kernel_uid_t; 35 - typedef unsigned short __kernel_gid_t; 36 - typedef int __kernel_ptrdiff_t; 37 - 38 - #else /* __s390x__ */ 39 - 40 29 typedef unsigned int __kernel_ino_t; 41 30 typedef unsigned int __kernel_mode_t; 42 31 typedef int __kernel_ipc_pid_t; ··· 33 44 typedef unsigned int __kernel_gid_t; 34 45 typedef long __kernel_ptrdiff_t; 35 46 typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ 36 - 37 - #endif /* __s390x__ */ 38 47 39 48 #define __kernel_ino_t __kernel_ino_t 40 49 #define __kernel_mode_t __kernel_mode_t
-124
arch/s390/include/uapi/asm/ptrace.h
··· 14 14 * Offsets in the user_regs_struct. They are used for the ptrace 15 15 * system call and in entry.S 16 16 */ 17 - #ifndef __s390x__ 18 - 19 - #define PT_PSWMASK 0x00 20 - #define PT_PSWADDR 0x04 21 - #define PT_GPR0 0x08 22 - #define PT_GPR1 0x0C 23 - #define PT_GPR2 0x10 24 - #define PT_GPR3 0x14 25 - #define PT_GPR4 0x18 26 - #define PT_GPR5 0x1C 27 - #define PT_GPR6 0x20 28 - #define PT_GPR7 0x24 29 - #define PT_GPR8 0x28 30 - #define PT_GPR9 0x2C 31 - #define PT_GPR10 0x30 32 - #define PT_GPR11 0x34 33 - #define PT_GPR12 0x38 34 - #define PT_GPR13 0x3C 35 - #define PT_GPR14 0x40 36 - #define PT_GPR15 0x44 37 - #define PT_ACR0 0x48 38 - #define PT_ACR1 0x4C 39 - #define PT_ACR2 0x50 40 - #define PT_ACR3 0x54 41 - #define PT_ACR4 0x58 42 - #define PT_ACR5 0x5C 43 - #define PT_ACR6 0x60 44 - #define PT_ACR7 0x64 45 - #define PT_ACR8 0x68 46 - #define PT_ACR9 0x6C 47 - #define PT_ACR10 0x70 48 - #define PT_ACR11 0x74 49 - #define PT_ACR12 0x78 50 - #define PT_ACR13 0x7C 51 - #define PT_ACR14 0x80 52 - #define PT_ACR15 0x84 53 - #define PT_ORIGGPR2 0x88 54 - #define PT_FPC 0x90 55 - /* 56 - * A nasty fact of life that the ptrace api 57 - * only supports passing of longs. 
58 - */ 59 - #define PT_FPR0_HI 0x98 60 - #define PT_FPR0_LO 0x9C 61 - #define PT_FPR1_HI 0xA0 62 - #define PT_FPR1_LO 0xA4 63 - #define PT_FPR2_HI 0xA8 64 - #define PT_FPR2_LO 0xAC 65 - #define PT_FPR3_HI 0xB0 66 - #define PT_FPR3_LO 0xB4 67 - #define PT_FPR4_HI 0xB8 68 - #define PT_FPR4_LO 0xBC 69 - #define PT_FPR5_HI 0xC0 70 - #define PT_FPR5_LO 0xC4 71 - #define PT_FPR6_HI 0xC8 72 - #define PT_FPR6_LO 0xCC 73 - #define PT_FPR7_HI 0xD0 74 - #define PT_FPR7_LO 0xD4 75 - #define PT_FPR8_HI 0xD8 76 - #define PT_FPR8_LO 0XDC 77 - #define PT_FPR9_HI 0xE0 78 - #define PT_FPR9_LO 0xE4 79 - #define PT_FPR10_HI 0xE8 80 - #define PT_FPR10_LO 0xEC 81 - #define PT_FPR11_HI 0xF0 82 - #define PT_FPR11_LO 0xF4 83 - #define PT_FPR12_HI 0xF8 84 - #define PT_FPR12_LO 0xFC 85 - #define PT_FPR13_HI 0x100 86 - #define PT_FPR13_LO 0x104 87 - #define PT_FPR14_HI 0x108 88 - #define PT_FPR14_LO 0x10C 89 - #define PT_FPR15_HI 0x110 90 - #define PT_FPR15_LO 0x114 91 - #define PT_CR_9 0x118 92 - #define PT_CR_10 0x11C 93 - #define PT_CR_11 0x120 94 - #define PT_IEEE_IP 0x13C 95 - #define PT_LASTOFF PT_IEEE_IP 96 - #define PT_ENDREGS 0x140-1 97 - 98 - #define GPR_SIZE 4 99 - #define CR_SIZE 4 100 - 101 - #define STACK_FRAME_OVERHEAD 96 /* size of minimum stack frame */ 102 - 103 - #else /* __s390x__ */ 104 - 105 17 #define PT_PSWMASK 0x00 106 18 #define PT_PSWADDR 0x08 107 19 #define PT_GPR0 0x10 ··· 78 166 79 167 #define STACK_FRAME_OVERHEAD 160 /* size of minimum stack frame */ 80 168 81 - #endif /* __s390x__ */ 82 - 83 - #ifndef __s390x__ 84 - 85 - #define PSW_MASK_PER _AC(0x40000000, UL) 86 - #define PSW_MASK_DAT _AC(0x04000000, UL) 87 - #define PSW_MASK_IO _AC(0x02000000, UL) 88 - #define PSW_MASK_EXT _AC(0x01000000, UL) 89 - #define PSW_MASK_KEY _AC(0x00F00000, UL) 90 - #define PSW_MASK_BASE _AC(0x00080000, UL) /* always one */ 91 - #define PSW_MASK_MCHECK _AC(0x00040000, UL) 92 - #define PSW_MASK_WAIT _AC(0x00020000, UL) 93 - #define PSW_MASK_PSTATE _AC(0x00010000, UL) 94 - #define 
PSW_MASK_ASC _AC(0x0000C000, UL) 95 - #define PSW_MASK_CC _AC(0x00003000, UL) 96 - #define PSW_MASK_PM _AC(0x00000F00, UL) 97 - #define PSW_MASK_RI _AC(0x00000000, UL) 98 - #define PSW_MASK_EA _AC(0x00000000, UL) 99 - #define PSW_MASK_BA _AC(0x00000000, UL) 100 - 101 - #define PSW_MASK_USER _AC(0x0000FF00, UL) 102 - 103 - #define PSW_ADDR_AMODE _AC(0x80000000, UL) 104 - #define PSW_ADDR_INSN _AC(0x7FFFFFFF, UL) 105 - 106 - #define PSW_ASC_PRIMARY _AC(0x00000000, UL) 107 - #define PSW_ASC_ACCREG _AC(0x00004000, UL) 108 - #define PSW_ASC_SECONDARY _AC(0x00008000, UL) 109 - #define PSW_ASC_HOME _AC(0x0000C000, UL) 110 - 111 - #else /* __s390x__ */ 112 - 113 169 #define PSW_MASK_PER _AC(0x4000000000000000, UL) 114 170 #define PSW_MASK_DAT _AC(0x0400000000000000, UL) 115 171 #define PSW_MASK_IO _AC(0x0200000000000000, UL) ··· 103 223 #define PSW_ASC_ACCREG _AC(0x0000400000000000, UL) 104 224 #define PSW_ASC_SECONDARY _AC(0x0000800000000000, UL) 105 225 #define PSW_ASC_HOME _AC(0x0000C00000000000, UL) 106 - 107 - #endif /* __s390x__ */ 108 226 109 227 #define NUM_GPRS 16 110 228 #define NUM_FPRS 16 ··· 186 308 #define PER_EM_MASK 0xE8000000UL 187 309 188 310 typedef struct { 189 - #ifdef __s390x__ 190 311 unsigned : 32; 191 - #endif /* __s390x__ */ 192 312 unsigned em_branching : 1; 193 313 unsigned em_instruction_fetch : 1; 194 314 /*
-15
arch/s390/include/uapi/asm/sigcontext.h
··· 17 17 #define __NUM_VXRS_LOW 16 18 18 #define __NUM_VXRS_HIGH 16 19 19 20 - #ifndef __s390x__ 21 - 22 - /* Has to be at least _NSIG_WORDS from asm/signal.h */ 23 - #define _SIGCONTEXT_NSIG 64 24 - #define _SIGCONTEXT_NSIG_BPW 32 25 - /* Size of stack frame allocated when calling signal handler. */ 26 - #define __SIGNAL_FRAMESIZE 96 27 - 28 - #else /* __s390x__ */ 29 - 30 20 /* Has to be at least _NSIG_WORDS from asm/signal.h */ 31 21 #define _SIGCONTEXT_NSIG 64 32 22 #define _SIGCONTEXT_NSIG_BPW 64 33 23 /* Size of stack frame allocated when calling signal handler. */ 34 24 #define __SIGNAL_FRAMESIZE 160 35 - 36 - #endif /* __s390x__ */ 37 25 38 26 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW) 39 27 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS) ··· 54 66 55 67 typedef struct 56 68 { 57 - #ifndef __s390x__ 58 - unsigned long gprs_high[__NUM_GPRS]; 59 - #endif 60 69 unsigned long long vxrs_low[__NUM_VXRS_LOW]; 61 70 __vector128 vxrs_high[__NUM_VXRS_HIGH]; 62 71 unsigned char __reserved[128];
-70
arch/s390/include/uapi/asm/stat.h
··· 8 8 #ifndef _S390_STAT_H 9 9 #define _S390_STAT_H 10 10 11 - #ifndef __s390x__ 12 - struct __old_kernel_stat { 13 - unsigned short st_dev; 14 - unsigned short st_ino; 15 - unsigned short st_mode; 16 - unsigned short st_nlink; 17 - unsigned short st_uid; 18 - unsigned short st_gid; 19 - unsigned short st_rdev; 20 - unsigned long st_size; 21 - unsigned long st_atime; 22 - unsigned long st_mtime; 23 - unsigned long st_ctime; 24 - }; 25 - 26 - struct stat { 27 - unsigned short st_dev; 28 - unsigned short __pad1; 29 - unsigned long st_ino; 30 - unsigned short st_mode; 31 - unsigned short st_nlink; 32 - unsigned short st_uid; 33 - unsigned short st_gid; 34 - unsigned short st_rdev; 35 - unsigned short __pad2; 36 - unsigned long st_size; 37 - unsigned long st_blksize; 38 - unsigned long st_blocks; 39 - unsigned long st_atime; 40 - unsigned long st_atime_nsec; 41 - unsigned long st_mtime; 42 - unsigned long st_mtime_nsec; 43 - unsigned long st_ctime; 44 - unsigned long st_ctime_nsec; 45 - unsigned long __unused4; 46 - unsigned long __unused5; 47 - }; 48 - 49 - /* This matches struct stat64 in glibc2.1, hence the absolutely 50 - * insane amounts of padding around dev_t's. 51 - */ 52 - struct stat64 { 53 - unsigned long long st_dev; 54 - unsigned int __pad1; 55 - #define STAT64_HAS_BROKEN_ST_INO 1 56 - unsigned long __st_ino; 57 - unsigned int st_mode; 58 - unsigned int st_nlink; 59 - unsigned long st_uid; 60 - unsigned long st_gid; 61 - unsigned long long st_rdev; 62 - unsigned int __pad3; 63 - long long st_size; 64 - unsigned long st_blksize; 65 - unsigned char __pad4[4]; 66 - unsigned long __pad5; /* future possible st_blocks high bits */ 67 - unsigned long st_blocks; /* Number 512-byte blocks allocated. 
*/ 68 - unsigned long st_atime; 69 - unsigned long st_atime_nsec; 70 - unsigned long st_mtime; 71 - unsigned long st_mtime_nsec; 72 - unsigned long st_ctime; 73 - unsigned long st_ctime_nsec; /* will be high 32 bits of ctime someday */ 74 - unsigned long long st_ino; 75 - }; 76 - 77 - #else /* __s390x__ */ 78 - 79 11 struct stat { 80 12 unsigned long st_dev; 81 13 unsigned long st_ino; ··· 28 96 long st_blocks; 29 97 unsigned long __unused[3]; 30 98 }; 31 - 32 - #endif /* __s390x__ */ 33 99 34 100 #define STAT_HAVE_NSEC 1 35 101
-4
arch/s390/include/uapi/asm/unistd.h
··· 8 8 #ifndef _UAPI_ASM_S390_UNISTD_H_ 9 9 #define _UAPI_ASM_S390_UNISTD_H_ 10 10 11 - #ifdef __s390x__ 12 11 #include <asm/unistd_64.h> 13 - #else 14 - #include <asm/unistd_32.h> 15 - #endif 16 12 17 13 #endif /* _UAPI_ASM_S390_UNISTD_H_ */
+4 -8
arch/s390/kernel/Makefile
··· 36 36 CFLAGS_dumpstack.o += -fno-optimize-sibling-calls 37 37 CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls 38 38 39 - obj-y := head64.o traps.o time.o process.o early.o setup.o idle.o vtime.o 39 + obj-y := head.o traps.o time.o process.o early.o setup.o idle.o vtime.o 40 40 obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o 41 41 obj-y += debug.o irq.o ipl.o dis.o vdso.o cpufeature.o 42 42 obj-y += sysinfo.o lgr.o os_info.o ctlreg.o ··· 56 56 obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o hiperdispatch.o 57 57 obj-$(CONFIG_NUMA) += numa.o 58 58 obj-$(CONFIG_AUDIT) += audit.o 59 - compat-obj-$(CONFIG_AUDIT) += compat_audit.o 60 - obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o 61 - obj-$(CONFIG_COMPAT) += $(compat-obj-y) 62 59 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 63 60 obj-$(CONFIG_KPROBES) += kprobes.o 64 61 obj-$(CONFIG_KPROBES) += mcount.o ··· 67 70 obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o 68 71 obj-$(CONFIG_UPROBES) += uprobes.o 69 72 obj-$(CONFIG_JUMP_LABEL) += jump_label.o 70 - 73 + obj-$(CONFIG_STACKPROTECTOR) += stackprotector.o 71 74 obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o 72 75 obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o 73 76 obj-$(CONFIG_CERT_STORE) += cert_store.o ··· 76 79 obj-$(CONFIG_PERF_EVENTS) += perf_event.o 77 80 obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf.o perf_cpum_sf.o 78 81 obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o 79 - obj-$(CONFIG_PERF_EVENTS) += perf_pai_crypto.o perf_pai_ext.o 82 + obj-$(CONFIG_PERF_EVENTS) += perf_pai.o 80 83 81 84 obj-$(CONFIG_TRACEPOINTS) += trace.o 82 85 83 86 # vdso 84 - obj-y += vdso64/ 85 - obj-$(CONFIG_COMPAT) += vdso32/ 87 + obj-y += vdso/
+4
arch/s390/kernel/asm-offsets.c
··· 21 21 OFFSET(__TASK_stack, task_struct, stack); 22 22 OFFSET(__TASK_thread, task_struct, thread); 23 23 OFFSET(__TASK_pid, task_struct, pid); 24 + #ifdef CONFIG_STACKPROTECTOR 25 + OFFSET(__TASK_stack_canary, task_struct, stack_canary); 26 + #endif 24 27 BLANK(); 25 28 /* thread struct offsets */ 26 29 OFFSET(__THREAD_ksp, thread_struct, ksp); ··· 142 139 OFFSET(__LC_CURRENT_PID, lowcore, current_pid); 143 140 OFFSET(__LC_LAST_BREAK, lowcore, last_break); 144 141 /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ 142 + OFFSET(__LC_STACK_CANARY, lowcore, stack_canary); 145 143 OFFSET(__LC_DUMP_REIPL, lowcore, ipib); 146 144 OFFSET(__LC_VMCORE_INFO, lowcore, vmcore_info); 147 145 OFFSET(__LC_OS_INFO, lowcore, os_info);
-16
arch/s390/kernel/audit.c
··· 3 3 #include <linux/types.h> 4 4 #include <linux/audit.h> 5 5 #include <asm/unistd.h> 6 - #include "audit.h" 7 6 8 7 static unsigned dir_class[] = { 9 8 #include <asm-generic/audit_dir_write.h> ··· 31 32 32 33 int audit_classify_arch(int arch) 33 34 { 34 - #ifdef CONFIG_COMPAT 35 - if (arch == AUDIT_ARCH_S390) 36 - return 1; 37 - #endif 38 35 return 0; 39 36 } 40 37 41 38 int audit_classify_syscall(int abi, unsigned syscall) 42 39 { 43 - #ifdef CONFIG_COMPAT 44 - if (abi == AUDIT_ARCH_S390) 45 - return s390_classify_syscall(syscall); 46 - #endif 47 40 switch(syscall) { 48 41 case __NR_open: 49 42 return AUDITSC_OPEN; ··· 54 63 55 64 static int __init audit_classes_init(void) 56 65 { 57 - #ifdef CONFIG_COMPAT 58 - audit_register_class(AUDIT_CLASS_WRITE_32, s390_write_class); 59 - audit_register_class(AUDIT_CLASS_READ_32, s390_read_class); 60 - audit_register_class(AUDIT_CLASS_DIR_WRITE_32, s390_dir_class); 61 - audit_register_class(AUDIT_CLASS_CHATTR_32, s390_chattr_class); 62 - audit_register_class(AUDIT_CLASS_SIGNAL_32, s390_signal_class); 63 - #endif 64 66 audit_register_class(AUDIT_CLASS_WRITE, write_class); 65 67 audit_register_class(AUDIT_CLASS_READ, read_class); 66 68 audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
-16
arch/s390/kernel/audit.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef __ARCH_S390_KERNEL_AUDIT_H 3 - #define __ARCH_S390_KERNEL_AUDIT_H 4 - 5 - #include <linux/types.h> 6 - 7 - #ifdef CONFIG_COMPAT 8 - extern int s390_classify_syscall(unsigned); 9 - extern __u32 s390_dir_class[]; 10 - extern __u32 s390_write_class[]; 11 - extern __u32 s390_read_class[]; 12 - extern __u32 s390_chattr_class[]; 13 - extern __u32 s390_signal_class[]; 14 - #endif /* CONFIG_COMPAT */ 15 - 16 - #endif /* __ARCH_S390_KERNEL_AUDIT_H */
-48
arch/s390/kernel/compat_audit.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - #undef __s390x__ 3 - #include <linux/audit_arch.h> 4 - #include <asm/unistd.h> 5 - #include "audit.h" 6 - 7 - unsigned s390_dir_class[] = { 8 - #include <asm-generic/audit_dir_write.h> 9 - ~0U 10 - }; 11 - 12 - unsigned s390_chattr_class[] = { 13 - #include <asm-generic/audit_change_attr.h> 14 - ~0U 15 - }; 16 - 17 - unsigned s390_write_class[] = { 18 - #include <asm-generic/audit_write.h> 19 - ~0U 20 - }; 21 - 22 - unsigned s390_read_class[] = { 23 - #include <asm-generic/audit_read.h> 24 - ~0U 25 - }; 26 - 27 - unsigned s390_signal_class[] = { 28 - #include <asm-generic/audit_signal.h> 29 - ~0U 30 - }; 31 - 32 - int s390_classify_syscall(unsigned syscall) 33 - { 34 - switch(syscall) { 35 - case __NR_open: 36 - return AUDITSC_OPEN; 37 - case __NR_openat: 38 - return AUDITSC_OPENAT; 39 - case __NR_socketcall: 40 - return AUDITSC_SOCKETCALL; 41 - case __NR_execve: 42 - return AUDITSC_EXECVE; 43 - case __NR_openat2: 44 - return AUDITSC_OPENAT2; 45 - default: 46 - return AUDITSC_COMPAT; 47 - } 48 - }
-289
arch/s390/kernel/compat_linux.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * S390 version 4 - * Copyright IBM Corp. 2000 5 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 - * Gerhard Tonn (ton@de.ibm.com) 7 - * Thomas Spatzier (tspat@de.ibm.com) 8 - * 9 - * Conversion between 31bit and 64bit native syscalls. 10 - * 11 - * Heavily inspired by the 32-bit Sparc compat code which is 12 - * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 13 - * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) 14 - * 15 - */ 16 - 17 - 18 - #include <linux/kernel.h> 19 - #include <linux/sched.h> 20 - #include <linux/fs.h> 21 - #include <linux/mm.h> 22 - #include <linux/file.h> 23 - #include <linux/signal.h> 24 - #include <linux/resource.h> 25 - #include <linux/times.h> 26 - #include <linux/smp.h> 27 - #include <linux/sem.h> 28 - #include <linux/msg.h> 29 - #include <linux/shm.h> 30 - #include <linux/uio.h> 31 - #include <linux/quota.h> 32 - #include <linux/poll.h> 33 - #include <linux/personality.h> 34 - #include <linux/stat.h> 35 - #include <linux/filter.h> 36 - #include <linux/highmem.h> 37 - #include <linux/mman.h> 38 - #include <linux/ipv6.h> 39 - #include <linux/in.h> 40 - #include <linux/icmpv6.h> 41 - #include <linux/syscalls.h> 42 - #include <linux/sysctl.h> 43 - #include <linux/binfmts.h> 44 - #include <linux/capability.h> 45 - #include <linux/compat.h> 46 - #include <linux/vfs.h> 47 - #include <linux/ptrace.h> 48 - #include <linux/fadvise.h> 49 - #include <linux/ipc.h> 50 - #include <linux/slab.h> 51 - 52 - #include <asm/types.h> 53 - #include <linux/uaccess.h> 54 - 55 - #include <net/scm.h> 56 - #include <net/sock.h> 57 - 58 - #include "compat_linux.h" 59 - 60 - #ifdef CONFIG_SYSVIPC 61 - COMPAT_SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, compat_ulong_t, second, 62 - compat_ulong_t, third, compat_uptr_t, ptr) 63 - { 64 - if (call >> 16) /* hack for backward compatibility */ 65 - return -EINVAL; 66 - return compat_ksys_ipc(call, first, second, third, 
ptr, third); 67 - } 68 - #endif 69 - 70 - COMPAT_SYSCALL_DEFINE3(s390_truncate64, const char __user *, path, u32, high, u32, low) 71 - { 72 - return ksys_truncate(path, (unsigned long)high << 32 | low); 73 - } 74 - 75 - COMPAT_SYSCALL_DEFINE3(s390_ftruncate64, unsigned int, fd, u32, high, u32, low) 76 - { 77 - return ksys_ftruncate(fd, (unsigned long)high << 32 | low); 78 - } 79 - 80 - COMPAT_SYSCALL_DEFINE5(s390_pread64, unsigned int, fd, char __user *, ubuf, 81 - compat_size_t, count, u32, high, u32, low) 82 - { 83 - if ((compat_ssize_t) count < 0) 84 - return -EINVAL; 85 - return ksys_pread64(fd, ubuf, count, (unsigned long)high << 32 | low); 86 - } 87 - 88 - COMPAT_SYSCALL_DEFINE5(s390_pwrite64, unsigned int, fd, const char __user *, ubuf, 89 - compat_size_t, count, u32, high, u32, low) 90 - { 91 - if ((compat_ssize_t) count < 0) 92 - return -EINVAL; 93 - return ksys_pwrite64(fd, ubuf, count, (unsigned long)high << 32 | low); 94 - } 95 - 96 - COMPAT_SYSCALL_DEFINE4(s390_readahead, int, fd, u32, high, u32, low, s32, count) 97 - { 98 - return ksys_readahead(fd, (unsigned long)high << 32 | low, count); 99 - } 100 - 101 - struct stat64_emu31 { 102 - unsigned long long st_dev; 103 - unsigned int __pad1; 104 - #define STAT64_HAS_BROKEN_ST_INO 1 105 - u32 __st_ino; 106 - unsigned int st_mode; 107 - unsigned int st_nlink; 108 - u32 st_uid; 109 - u32 st_gid; 110 - unsigned long long st_rdev; 111 - unsigned int __pad3; 112 - long st_size; 113 - u32 st_blksize; 114 - unsigned char __pad4[4]; 115 - u32 __pad5; /* future possible st_blocks high bits */ 116 - u32 st_blocks; /* Number 512-byte blocks allocated. 
*/ 117 - u32 st_atime; 118 - u32 __pad6; 119 - u32 st_mtime; 120 - u32 __pad7; 121 - u32 st_ctime; 122 - u32 __pad8; /* will be high 32 bits of ctime someday */ 123 - unsigned long st_ino; 124 - }; 125 - 126 - static int cp_stat64(struct stat64_emu31 __user *ubuf, struct kstat *stat) 127 - { 128 - struct stat64_emu31 tmp; 129 - 130 - memset(&tmp, 0, sizeof(tmp)); 131 - 132 - tmp.st_dev = huge_encode_dev(stat->dev); 133 - tmp.st_ino = stat->ino; 134 - tmp.__st_ino = (u32)stat->ino; 135 - tmp.st_mode = stat->mode; 136 - tmp.st_nlink = (unsigned int)stat->nlink; 137 - tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid); 138 - tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid); 139 - tmp.st_rdev = huge_encode_dev(stat->rdev); 140 - tmp.st_size = stat->size; 141 - tmp.st_blksize = (u32)stat->blksize; 142 - tmp.st_blocks = (u32)stat->blocks; 143 - tmp.st_atime = (u32)stat->atime.tv_sec; 144 - tmp.st_mtime = (u32)stat->mtime.tv_sec; 145 - tmp.st_ctime = (u32)stat->ctime.tv_sec; 146 - 147 - return copy_to_user(ubuf,&tmp,sizeof(tmp)) ? 
-EFAULT : 0; 148 - } 149 - 150 - COMPAT_SYSCALL_DEFINE2(s390_stat64, const char __user *, filename, struct stat64_emu31 __user *, statbuf) 151 - { 152 - struct kstat stat; 153 - int ret = vfs_stat(filename, &stat); 154 - if (!ret) 155 - ret = cp_stat64(statbuf, &stat); 156 - return ret; 157 - } 158 - 159 - COMPAT_SYSCALL_DEFINE2(s390_lstat64, const char __user *, filename, struct stat64_emu31 __user *, statbuf) 160 - { 161 - struct kstat stat; 162 - int ret = vfs_lstat(filename, &stat); 163 - if (!ret) 164 - ret = cp_stat64(statbuf, &stat); 165 - return ret; 166 - } 167 - 168 - COMPAT_SYSCALL_DEFINE2(s390_fstat64, unsigned int, fd, struct stat64_emu31 __user *, statbuf) 169 - { 170 - struct kstat stat; 171 - int ret = vfs_fstat(fd, &stat); 172 - if (!ret) 173 - ret = cp_stat64(statbuf, &stat); 174 - return ret; 175 - } 176 - 177 - COMPAT_SYSCALL_DEFINE4(s390_fstatat64, unsigned int, dfd, const char __user *, filename, 178 - struct stat64_emu31 __user *, statbuf, int, flag) 179 - { 180 - struct kstat stat; 181 - int error; 182 - 183 - error = vfs_fstatat(dfd, filename, &stat, flag); 184 - if (error) 185 - return error; 186 - return cp_stat64(statbuf, &stat); 187 - } 188 - 189 - /* 190 - * Linux/i386 didn't use to be able to handle more than 191 - * 4 system call parameters, so these system calls used a memory 192 - * block for parameter passing.. 
193 - */ 194 - 195 - struct mmap_arg_struct_emu31 { 196 - compat_ulong_t addr; 197 - compat_ulong_t len; 198 - compat_ulong_t prot; 199 - compat_ulong_t flags; 200 - compat_ulong_t fd; 201 - compat_ulong_t offset; 202 - }; 203 - 204 - COMPAT_SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct_emu31 __user *, arg) 205 - { 206 - struct mmap_arg_struct_emu31 a; 207 - 208 - if (copy_from_user(&a, arg, sizeof(a))) 209 - return -EFAULT; 210 - if (a.offset & ~PAGE_MASK) 211 - return -EINVAL; 212 - return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, 213 - a.offset >> PAGE_SHIFT); 214 - } 215 - 216 - COMPAT_SYSCALL_DEFINE1(s390_mmap2, struct mmap_arg_struct_emu31 __user *, arg) 217 - { 218 - struct mmap_arg_struct_emu31 a; 219 - 220 - if (copy_from_user(&a, arg, sizeof(a))) 221 - return -EFAULT; 222 - return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); 223 - } 224 - 225 - COMPAT_SYSCALL_DEFINE3(s390_read, unsigned int, fd, char __user *, buf, compat_size_t, count) 226 - { 227 - if ((compat_ssize_t) count < 0) 228 - return -EINVAL; 229 - 230 - return ksys_read(fd, buf, count); 231 - } 232 - 233 - COMPAT_SYSCALL_DEFINE3(s390_write, unsigned int, fd, const char __user *, buf, compat_size_t, count) 234 - { 235 - if ((compat_ssize_t) count < 0) 236 - return -EINVAL; 237 - 238 - return ksys_write(fd, buf, count); 239 - } 240 - 241 - /* 242 - * 31 bit emulation wrapper functions for sys_fadvise64/fadvise64_64. 243 - * These need to rewrite the advise values for POSIX_FADV_{DONTNEED,NOREUSE} 244 - * because the 31 bit values differ from the 64 bit values. 
245 - */ 246 - 247 - COMPAT_SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, high, u32, low, compat_size_t, len, int, advise) 248 - { 249 - if (advise == 4) 250 - advise = POSIX_FADV_DONTNEED; 251 - else if (advise == 5) 252 - advise = POSIX_FADV_NOREUSE; 253 - return ksys_fadvise64_64(fd, (unsigned long)high << 32 | low, len, 254 - advise); 255 - } 256 - 257 - struct fadvise64_64_args { 258 - int fd; 259 - long long offset; 260 - long long len; 261 - int advice; 262 - }; 263 - 264 - COMPAT_SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args) 265 - { 266 - struct fadvise64_64_args a; 267 - 268 - if ( copy_from_user(&a, args, sizeof(a)) ) 269 - return -EFAULT; 270 - if (a.advice == 4) 271 - a.advice = POSIX_FADV_DONTNEED; 272 - else if (a.advice == 5) 273 - a.advice = POSIX_FADV_NOREUSE; 274 - return ksys_fadvise64_64(a.fd, a.offset, a.len, a.advice); 275 - } 276 - 277 - COMPAT_SYSCALL_DEFINE6(s390_sync_file_range, int, fd, u32, offhigh, u32, offlow, 278 - u32, nhigh, u32, nlow, unsigned int, flags) 279 - { 280 - return ksys_sync_file_range(fd, ((loff_t)offhigh << 32) + offlow, 281 - ((u64)nhigh << 32) + nlow, flags); 282 - } 283 - 284 - COMPAT_SYSCALL_DEFINE6(s390_fallocate, int, fd, int, mode, u32, offhigh, u32, offlow, 285 - u32, lenhigh, u32, lenlow) 286 - { 287 - return ksys_fallocate(fd, mode, ((loff_t)offhigh << 32) + offlow, 288 - ((u64)lenhigh << 32) + lenlow); 289 - }
-101
arch/s390/kernel/compat_linux.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _ASM_S390X_S390_H 3 - #define _ASM_S390X_S390_H 4 - 5 - #include <linux/compat.h> 6 - #include <linux/socket.h> 7 - #include <linux/syscalls.h> 8 - #include <asm/ptrace.h> 9 - 10 - /* 11 - * Macro that masks the high order bit of a 32 bit pointer and 12 - * converts it to a 64 bit pointer. 13 - */ 14 - #define A(__x) ((unsigned long)((__x) & 0x7FFFFFFFUL)) 15 - #define AA(__x) ((unsigned long)(__x)) 16 - 17 - /* Now 32bit compatibility types */ 18 - struct ipc_kludge_32 { 19 - __u32 msgp; /* pointer */ 20 - __s32 msgtyp; 21 - }; 22 - 23 - /* asm/sigcontext.h */ 24 - typedef union { 25 - __u64 d; 26 - __u32 f; 27 - } freg_t32; 28 - 29 - typedef struct { 30 - unsigned int fpc; 31 - unsigned int pad; 32 - freg_t32 fprs[__NUM_FPRS]; 33 - } _s390_fp_regs32; 34 - 35 - typedef struct { 36 - psw_t32 psw; 37 - __u32 gprs[__NUM_GPRS]; 38 - __u32 acrs[__NUM_ACRS]; 39 - } _s390_regs_common32; 40 - 41 - typedef struct { 42 - _s390_regs_common32 regs; 43 - _s390_fp_regs32 fpregs; 44 - } _sigregs32; 45 - 46 - typedef struct { 47 - __u32 gprs_high[__NUM_GPRS]; 48 - __u64 vxrs_low[__NUM_VXRS_LOW]; 49 - __vector128 vxrs_high[__NUM_VXRS_HIGH]; 50 - __u8 __reserved[128]; 51 - } _sigregs_ext32; 52 - 53 - #define _SIGCONTEXT_NSIG32 64 54 - #define _SIGCONTEXT_NSIG_BPW32 32 55 - #define __SIGNAL_FRAMESIZE32 96 56 - #define _SIGMASK_COPY_SIZE32 (sizeof(u32) * 2) 57 - 58 - struct sigcontext32 { 59 - __u32 oldmask[_COMPAT_NSIG_WORDS]; 60 - __u32 sregs; /* pointer */ 61 - }; 62 - 63 - /* asm/signal.h */ 64 - 65 - /* asm/ucontext.h */ 66 - struct ucontext32 { 67 - __u32 uc_flags; 68 - __u32 uc_link; /* pointer */ 69 - compat_stack_t uc_stack; 70 - _sigregs32 uc_mcontext; 71 - compat_sigset_t uc_sigmask; 72 - /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. 
*/ 73 - unsigned char __unused[128 - sizeof(compat_sigset_t)]; 74 - _sigregs_ext32 uc_mcontext_ext; 75 - }; 76 - 77 - struct stat64_emu31; 78 - struct mmap_arg_struct_emu31; 79 - struct fadvise64_64_args; 80 - 81 - long compat_sys_s390_truncate64(const char __user *path, u32 high, u32 low); 82 - long compat_sys_s390_ftruncate64(unsigned int fd, u32 high, u32 low); 83 - long compat_sys_s390_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, u32 high, u32 low); 84 - long compat_sys_s390_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count, u32 high, u32 low); 85 - long compat_sys_s390_readahead(int fd, u32 high, u32 low, s32 count); 86 - long compat_sys_s390_stat64(const char __user *filename, struct stat64_emu31 __user *statbuf); 87 - long compat_sys_s390_lstat64(const char __user *filename, struct stat64_emu31 __user *statbuf); 88 - long compat_sys_s390_fstat64(unsigned int fd, struct stat64_emu31 __user *statbuf); 89 - long compat_sys_s390_fstatat64(unsigned int dfd, const char __user *filename, struct stat64_emu31 __user *statbuf, int flag); 90 - long compat_sys_s390_old_mmap(struct mmap_arg_struct_emu31 __user *arg); 91 - long compat_sys_s390_mmap2(struct mmap_arg_struct_emu31 __user *arg); 92 - long compat_sys_s390_read(unsigned int fd, char __user *buf, compat_size_t count); 93 - long compat_sys_s390_write(unsigned int fd, const char __user *buf, compat_size_t count); 94 - long compat_sys_s390_fadvise64(int fd, u32 high, u32 low, compat_size_t len, int advise); 95 - long compat_sys_s390_fadvise64_64(struct fadvise64_64_args __user *args); 96 - long compat_sys_s390_sync_file_range(int fd, u32 offhigh, u32 offlow, u32 nhigh, u32 nlow, unsigned int flags); 97 - long compat_sys_s390_fallocate(int fd, int mode, u32 offhigh, u32 offlow, u32 lenhigh, u32 lenlow); 98 - long compat_sys_sigreturn(void); 99 - long compat_sys_rt_sigreturn(void); 100 - 101 - #endif /* _ASM_S390X_S390_H */
-64
arch/s390/kernel/compat_ptrace.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _PTRACE32_H 3 - #define _PTRACE32_H 4 - 5 - #include <asm/ptrace.h> /* needed for NUM_CR_WORDS */ 6 - #include "compat_linux.h" /* needed for psw_compat_t */ 7 - 8 - struct compat_per_struct_kernel { 9 - __u32 cr9; /* PER control bits */ 10 - __u32 cr10; /* PER starting address */ 11 - __u32 cr11; /* PER ending address */ 12 - __u32 bits; /* Obsolete software bits */ 13 - __u32 starting_addr; /* User specified start address */ 14 - __u32 ending_addr; /* User specified end address */ 15 - __u16 perc_atmid; /* PER trap ATMID */ 16 - __u32 address; /* PER trap instruction address */ 17 - __u8 access_id; /* PER trap access identification */ 18 - }; 19 - 20 - struct compat_user_regs_struct 21 - { 22 - psw_compat_t psw; 23 - u32 gprs[NUM_GPRS]; 24 - u32 acrs[NUM_ACRS]; 25 - u32 orig_gpr2; 26 - /* nb: there's a 4-byte hole here */ 27 - s390_fp_regs fp_regs; 28 - /* 29 - * These per registers are in here so that gdb can modify them 30 - * itself as there is no "official" ptrace interface for hardware 31 - * watchpoints. This is the way intel does it. 32 - */ 33 - struct compat_per_struct_kernel per_info; 34 - u32 ieee_instruction_pointer; /* obsolete, always 0 */ 35 - }; 36 - 37 - struct compat_user { 38 - /* We start with the registers, to mimic the way that "memory" 39 - is returned from the ptrace(3,...) function. */ 40 - struct compat_user_regs_struct regs; 41 - /* The rest of this junk is to help gdb figure out what goes where */ 42 - u32 u_tsize; /* Text segment size (pages). */ 43 - u32 u_dsize; /* Data segment size (pages). */ 44 - u32 u_ssize; /* Stack segment size (pages). */ 45 - u32 start_code; /* Starting virtual address of text. */ 46 - u32 start_stack; /* Starting virtual address of stack area. 47 - This is actually the bottom of the stack, 48 - the top of the stack is always found in the 49 - esp register. */ 50 - s32 signal; /* Signal that caused the core dump. 
*/ 51 - u32 u_ar0; /* Used by gdb to help find the values for */ 52 - /* the registers. */ 53 - u32 magic; /* To uniquely identify a core file */ 54 - char u_comm[32]; /* User command that was responsible */ 55 - }; 56 - 57 - typedef struct 58 - { 59 - __u32 len; 60 - __u32 kernel_addr; 61 - __u32 process_addr; 62 - } compat_ptrace_area; 63 - 64 - #endif /* _PTRACE32_H */
-420
arch/s390/kernel/compat_signal.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * Copyright IBM Corp. 2000, 2006 4 - * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) 5 - * Gerhard Tonn (ton@de.ibm.com) 6 - * 7 - * Copyright (C) 1991, 1992 Linus Torvalds 8 - * 9 - * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson 10 - */ 11 - 12 - #include <linux/compat.h> 13 - #include <linux/sched.h> 14 - #include <linux/sched/task_stack.h> 15 - #include <linux/mm.h> 16 - #include <linux/smp.h> 17 - #include <linux/kernel.h> 18 - #include <linux/signal.h> 19 - #include <linux/errno.h> 20 - #include <linux/wait.h> 21 - #include <linux/ptrace.h> 22 - #include <linux/unistd.h> 23 - #include <linux/stddef.h> 24 - #include <linux/tty.h> 25 - #include <linux/personality.h> 26 - #include <linux/binfmts.h> 27 - #include <asm/vdso-symbols.h> 28 - #include <asm/access-regs.h> 29 - #include <asm/ucontext.h> 30 - #include <linux/uaccess.h> 31 - #include <asm/lowcore.h> 32 - #include <asm/fpu.h> 33 - #include "compat_linux.h" 34 - #include "compat_ptrace.h" 35 - #include "entry.h" 36 - 37 - typedef struct 38 - { 39 - __u8 callee_used_stack[__SIGNAL_FRAMESIZE32]; 40 - struct sigcontext32 sc; 41 - _sigregs32 sregs; 42 - int signo; 43 - _sigregs_ext32 sregs_ext; 44 - __u16 svc_insn; /* Offset of svc_insn is NOT fixed! 
*/ 45 - } sigframe32; 46 - 47 - typedef struct 48 - { 49 - __u8 callee_used_stack[__SIGNAL_FRAMESIZE32]; 50 - __u16 svc_insn; 51 - compat_siginfo_t info; 52 - struct ucontext32 uc; 53 - } rt_sigframe32; 54 - 55 - /* Store registers needed to create the signal frame */ 56 - static void store_sigregs(void) 57 - { 58 - save_access_regs(current->thread.acrs); 59 - save_user_fpu_regs(); 60 - } 61 - 62 - /* Load registers after signal return */ 63 - static void load_sigregs(void) 64 - { 65 - restore_access_regs(current->thread.acrs); 66 - } 67 - 68 - static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) 69 - { 70 - _sigregs32 user_sregs; 71 - int i; 72 - 73 - user_sregs.regs.psw.mask = (__u32)(regs->psw.mask >> 32); 74 - user_sregs.regs.psw.mask &= PSW32_MASK_USER | PSW32_MASK_RI; 75 - user_sregs.regs.psw.mask |= PSW32_USER_BITS; 76 - user_sregs.regs.psw.addr = (__u32) regs->psw.addr | 77 - (__u32)(regs->psw.mask & PSW_MASK_BA); 78 - for (i = 0; i < NUM_GPRS; i++) 79 - user_sregs.regs.gprs[i] = (__u32) regs->gprs[i]; 80 - memcpy(&user_sregs.regs.acrs, current->thread.acrs, 81 - sizeof(user_sregs.regs.acrs)); 82 - fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.ufpu); 83 - if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32))) 84 - return -EFAULT; 85 - return 0; 86 - } 87 - 88 - static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) 89 - { 90 - _sigregs32 user_sregs; 91 - int i; 92 - 93 - /* Always make any pending restarted system call return -EINTR */ 94 - current->restart_block.fn = do_no_restart_syscall; 95 - 96 - if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs))) 97 - return -EFAULT; 98 - 99 - if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI)) 100 - return -EINVAL; 101 - 102 - /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. 
*/ 103 - regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) | 104 - (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 | 105 - (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 | 106 - (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE); 107 - /* Check for invalid user address space control. */ 108 - if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME) 109 - regs->psw.mask = PSW_ASC_PRIMARY | 110 - (regs->psw.mask & ~PSW_MASK_ASC); 111 - regs->psw.addr = (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_INSN); 112 - for (i = 0; i < NUM_GPRS; i++) 113 - regs->gprs[i] = (__u64) user_sregs.regs.gprs[i]; 114 - memcpy(&current->thread.acrs, &user_sregs.regs.acrs, 115 - sizeof(current->thread.acrs)); 116 - fpregs_load((_s390_fp_regs *)&user_sregs.fpregs, &current->thread.ufpu); 117 - 118 - clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ 119 - return 0; 120 - } 121 - 122 - static int save_sigregs_ext32(struct pt_regs *regs, 123 - _sigregs_ext32 __user *sregs_ext) 124 - { 125 - __u32 gprs_high[NUM_GPRS]; 126 - __u64 vxrs[__NUM_VXRS_LOW]; 127 - int i; 128 - 129 - /* Save high gprs to signal stack */ 130 - for (i = 0; i < NUM_GPRS; i++) 131 - gprs_high[i] = regs->gprs[i] >> 32; 132 - if (__copy_to_user(&sregs_ext->gprs_high, &gprs_high, 133 - sizeof(sregs_ext->gprs_high))) 134 - return -EFAULT; 135 - 136 - /* Save vector registers to signal stack */ 137 - if (cpu_has_vx()) { 138 - for (i = 0; i < __NUM_VXRS_LOW; i++) 139 - vxrs[i] = current->thread.ufpu.vxrs[i].low; 140 - if (__copy_to_user(&sregs_ext->vxrs_low, vxrs, 141 - sizeof(sregs_ext->vxrs_low)) || 142 - __copy_to_user(&sregs_ext->vxrs_high, 143 - current->thread.ufpu.vxrs + __NUM_VXRS_LOW, 144 - sizeof(sregs_ext->vxrs_high))) 145 - return -EFAULT; 146 - } 147 - return 0; 148 - } 149 - 150 - static int restore_sigregs_ext32(struct pt_regs *regs, 151 - _sigregs_ext32 __user *sregs_ext) 152 - { 153 - __u32 gprs_high[NUM_GPRS]; 154 - __u64 vxrs[__NUM_VXRS_LOW]; 155 - int 
i; 156 - 157 - /* Restore high gprs from signal stack */ 158 - if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high, 159 - sizeof(sregs_ext->gprs_high))) 160 - return -EFAULT; 161 - for (i = 0; i < NUM_GPRS; i++) 162 - *(__u32 *)&regs->gprs[i] = gprs_high[i]; 163 - 164 - /* Restore vector registers from signal stack */ 165 - if (cpu_has_vx()) { 166 - if (__copy_from_user(vxrs, &sregs_ext->vxrs_low, 167 - sizeof(sregs_ext->vxrs_low)) || 168 - __copy_from_user(current->thread.ufpu.vxrs + __NUM_VXRS_LOW, 169 - &sregs_ext->vxrs_high, 170 - sizeof(sregs_ext->vxrs_high))) 171 - return -EFAULT; 172 - for (i = 0; i < __NUM_VXRS_LOW; i++) 173 - current->thread.ufpu.vxrs[i].low = vxrs[i]; 174 - } 175 - return 0; 176 - } 177 - 178 - COMPAT_SYSCALL_DEFINE0(sigreturn) 179 - { 180 - struct pt_regs *regs = task_pt_regs(current); 181 - sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15]; 182 - sigset_t set; 183 - 184 - if (get_compat_sigset(&set, (compat_sigset_t __user *)frame->sc.oldmask)) 185 - goto badframe; 186 - set_current_blocked(&set); 187 - save_user_fpu_regs(); 188 - if (restore_sigregs32(regs, &frame->sregs)) 189 - goto badframe; 190 - if (restore_sigregs_ext32(regs, &frame->sregs_ext)) 191 - goto badframe; 192 - load_sigregs(); 193 - return regs->gprs[2]; 194 - badframe: 195 - force_sig(SIGSEGV); 196 - return 0; 197 - } 198 - 199 - COMPAT_SYSCALL_DEFINE0(rt_sigreturn) 200 - { 201 - struct pt_regs *regs = task_pt_regs(current); 202 - rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15]; 203 - sigset_t set; 204 - 205 - if (get_compat_sigset(&set, &frame->uc.uc_sigmask)) 206 - goto badframe; 207 - set_current_blocked(&set); 208 - if (compat_restore_altstack(&frame->uc.uc_stack)) 209 - goto badframe; 210 - save_user_fpu_regs(); 211 - if (restore_sigregs32(regs, &frame->uc.uc_mcontext)) 212 - goto badframe; 213 - if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext)) 214 - goto badframe; 215 - load_sigregs(); 216 - return regs->gprs[2]; 
217 - badframe: 218 - force_sig(SIGSEGV); 219 - return 0; 220 - } 221 - 222 - /* 223 - * Set up a signal frame. 224 - */ 225 - 226 - 227 - /* 228 - * Determine which stack to use.. 229 - */ 230 - static inline void __user * 231 - get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) 232 - { 233 - unsigned long sp; 234 - 235 - /* Default to using normal stack */ 236 - sp = (unsigned long) A(regs->gprs[15]); 237 - 238 - /* Overflow on alternate signal stack gives SIGSEGV. */ 239 - if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL)) 240 - return (void __user *) -1UL; 241 - 242 - /* This is the X/Open sanctioned signal stack switching. */ 243 - if (ka->sa.sa_flags & SA_ONSTACK) { 244 - if (! sas_ss_flags(sp)) 245 - sp = current->sas_ss_sp + current->sas_ss_size; 246 - } 247 - 248 - return (void __user *)((sp - frame_size) & -8ul); 249 - } 250 - 251 - static int setup_frame32(struct ksignal *ksig, sigset_t *set, 252 - struct pt_regs *regs) 253 - { 254 - int sig = ksig->sig; 255 - sigframe32 __user *frame; 256 - unsigned long restorer; 257 - size_t frame_size; 258 - 259 - /* 260 - * gprs_high are always present for 31-bit compat tasks. 261 - * The space for vector registers is only allocated if 262 - * the machine supports it 263 - */ 264 - frame_size = sizeof(*frame) - sizeof(frame->sregs_ext.__reserved); 265 - if (!cpu_has_vx()) 266 - frame_size -= sizeof(frame->sregs_ext.vxrs_low) + 267 - sizeof(frame->sregs_ext.vxrs_high); 268 - frame = get_sigframe(&ksig->ka, regs, frame_size); 269 - if (frame == (void __user *) -1UL) 270 - return -EFAULT; 271 - 272 - /* Set up backchain. 
*/ 273 - if (__put_user(regs->gprs[15], (unsigned int __user *) frame)) 274 - return -EFAULT; 275 - 276 - /* Create struct sigcontext32 on the signal stack */ 277 - if (put_compat_sigset((compat_sigset_t __user *)frame->sc.oldmask, 278 - set, sizeof(compat_sigset_t))) 279 - return -EFAULT; 280 - if (__put_user(ptr_to_compat(&frame->sregs), &frame->sc.sregs)) 281 - return -EFAULT; 282 - 283 - /* Store registers needed to create the signal frame */ 284 - store_sigregs(); 285 - 286 - /* Create _sigregs32 on the signal stack */ 287 - if (save_sigregs32(regs, &frame->sregs)) 288 - return -EFAULT; 289 - 290 - /* Place signal number on stack to allow backtrace from handler. */ 291 - if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo)) 292 - return -EFAULT; 293 - 294 - /* Create _sigregs_ext32 on the signal stack */ 295 - if (save_sigregs_ext32(regs, &frame->sregs_ext)) 296 - return -EFAULT; 297 - 298 - /* Set up to return from userspace. If provided, use a stub 299 - already in userspace. */ 300 - if (ksig->ka.sa.sa_flags & SA_RESTORER) { 301 - restorer = (unsigned long __force) 302 - ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE; 303 - } else { 304 - restorer = VDSO32_SYMBOL(current, sigreturn); 305 - } 306 - 307 - /* Set up registers for signal handler */ 308 - regs->gprs[14] = restorer; 309 - regs->gprs[15] = (__force __u64) frame; 310 - /* Force 31 bit amode and default user address space control. */ 311 - regs->psw.mask = PSW_MASK_BA | 312 - (PSW_USER_BITS & PSW_MASK_ASC) | 313 - (regs->psw.mask & ~PSW_MASK_ASC); 314 - regs->psw.addr = (__force __u64) ksig->ka.sa.sa_handler; 315 - 316 - regs->gprs[2] = sig; 317 - regs->gprs[3] = (__force __u64) &frame->sc; 318 - 319 - /* We forgot to include these in the sigcontext. 320 - To avoid breaking binary compatibility, they are passed as args. 
*/ 321 - if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || 322 - sig == SIGTRAP || sig == SIGFPE) { 323 - /* set extra registers only for synchronous signals */ 324 - regs->gprs[4] = regs->int_code & 127; 325 - regs->gprs[5] = regs->int_parm_long; 326 - regs->gprs[6] = current->thread.last_break; 327 - } 328 - 329 - return 0; 330 - } 331 - 332 - static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set, 333 - struct pt_regs *regs) 334 - { 335 - rt_sigframe32 __user *frame; 336 - unsigned long restorer; 337 - size_t frame_size; 338 - u32 uc_flags; 339 - 340 - frame_size = sizeof(*frame) - 341 - sizeof(frame->uc.uc_mcontext_ext.__reserved); 342 - /* 343 - * gprs_high are always present for 31-bit compat tasks. 344 - * The space for vector registers is only allocated if 345 - * the machine supports it 346 - */ 347 - uc_flags = UC_GPRS_HIGH; 348 - if (cpu_has_vx()) { 349 - uc_flags |= UC_VXRS; 350 - } else { 351 - frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) + 352 - sizeof(frame->uc.uc_mcontext_ext.vxrs_high); 353 - } 354 - frame = get_sigframe(&ksig->ka, regs, frame_size); 355 - if (frame == (void __user *) -1UL) 356 - return -EFAULT; 357 - 358 - /* Set up backchain. */ 359 - if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame)) 360 - return -EFAULT; 361 - 362 - /* Set up to return from userspace. If provided, use a stub 363 - already in userspace. */ 364 - if (ksig->ka.sa.sa_flags & SA_RESTORER) { 365 - restorer = (unsigned long __force) 366 - ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE; 367 - } else { 368 - restorer = VDSO32_SYMBOL(current, rt_sigreturn); 369 - } 370 - 371 - /* Create siginfo on the signal stack */ 372 - if (copy_siginfo_to_user32(&frame->info, &ksig->info)) 373 - return -EFAULT; 374 - 375 - /* Store registers needed to create the signal frame */ 376 - store_sigregs(); 377 - 378 - /* Create ucontext on the signal stack. 
*/ 379 - if (__put_user(uc_flags, &frame->uc.uc_flags) || 380 - __put_user(0, &frame->uc.uc_link) || 381 - __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) || 382 - save_sigregs32(regs, &frame->uc.uc_mcontext) || 383 - put_compat_sigset(&frame->uc.uc_sigmask, set, sizeof(compat_sigset_t)) || 384 - save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext)) 385 - return -EFAULT; 386 - 387 - /* Set up registers for signal handler */ 388 - regs->gprs[14] = restorer; 389 - regs->gprs[15] = (__force __u64) frame; 390 - /* Force 31 bit amode and default user address space control. */ 391 - regs->psw.mask = PSW_MASK_BA | 392 - (PSW_USER_BITS & PSW_MASK_ASC) | 393 - (regs->psw.mask & ~PSW_MASK_ASC); 394 - regs->psw.addr = (__u64 __force) ksig->ka.sa.sa_handler; 395 - 396 - regs->gprs[2] = ksig->sig; 397 - regs->gprs[3] = (__force __u64) &frame->info; 398 - regs->gprs[4] = (__force __u64) &frame->uc; 399 - regs->gprs[5] = current->thread.last_break; 400 - return 0; 401 - } 402 - 403 - /* 404 - * OK, we're invoking a handler 405 - */ 406 - 407 - void handle_signal32(struct ksignal *ksig, sigset_t *oldset, 408 - struct pt_regs *regs) 409 - { 410 - int ret; 411 - 412 - /* Set up the stack frame */ 413 - if (ksig->ka.sa.sa_flags & SA_SIGINFO) 414 - ret = setup_rt_frame32(ksig, oldset, regs); 415 - else 416 - ret = setup_frame32(ksig, oldset, regs); 417 - 418 - signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLE_STEP)); 419 - } 420 -
+1 -2
arch/s390/kernel/cpacf.c
··· 3 3 * Copyright IBM Corp. 2024 4 4 */ 5 5 6 - #define KMSG_COMPONENT "cpacf" 7 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 6 + #define pr_fmt(fmt) "cpacf: " fmt 8 7 9 8 #include <linux/cpu.h> 10 9 #include <linux/device.h>
+1 -2
arch/s390/kernel/cpcmd.c
··· 6 6 * Christian Borntraeger (cborntra@de.ibm.com), 7 7 */ 8 8 9 - #define KMSG_COMPONENT "cpcmd" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "cpcmd: " fmt 11 10 12 11 #include <linux/kernel.h> 13 12 #include <linux/export.h>
+1 -2
arch/s390/kernel/debug.c
··· 10 10 * Bugreports to: <Linux390@de.ibm.com> 11 11 */ 12 12 13 - #define KMSG_COMPONENT "s390dbf" 14 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 + #define pr_fmt(fmt) "s390dbf: " fmt 15 14 16 15 #include <linux/stddef.h> 17 16 #include <linux/kernel.h>
+10 -7
arch/s390/kernel/dis.c
··· 503 503 void show_code(struct pt_regs *regs) 504 504 { 505 505 char *mode = user_mode(regs) ? "User" : "Krnl"; 506 + unsigned long addr, pswaddr; 506 507 unsigned char code[64]; 507 508 char buffer[128], *ptr; 508 - unsigned long addr; 509 509 int start, end, opsize, hops, i; 510 510 511 + pswaddr = regs->psw.addr; 512 + if (test_pt_regs_flag(regs, PIF_PSW_ADDR_ADJUSTED)) 513 + pswaddr = __forward_psw(regs->psw, regs->int_code >> 16); 511 514 /* Get a snapshot of the 64 bytes surrounding the fault address. */ 512 - for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) { 513 - addr = regs->psw.addr - 34 + start; 515 + for (start = 32; start && pswaddr >= 34 - start; start -= 2) { 516 + addr = pswaddr - 34 + start; 514 517 if (copy_from_regs(regs, code + start - 2, (void *)addr, 2)) 515 518 break; 516 519 } 517 520 for (end = 32; end < 64; end += 2) { 518 - addr = regs->psw.addr + end - 32; 521 + addr = pswaddr + end - 32; 519 522 if (copy_from_regs(regs, code + end, (void *)addr, 2)) 520 523 break; 521 524 } 522 525 /* Code snapshot usable ? */ 523 - if ((regs->psw.addr & 1) || start >= end) { 526 + if ((pswaddr & 1) || start >= end) { 524 527 printk("%s Code: Bad PSW.\n", mode); 525 528 return; 526 529 } ··· 546 543 while (start < end && hops < 8) { 547 544 opsize = insn_length(code[start]); 548 545 if (start + opsize == 32) 549 - *ptr++ = '#'; 546 + *ptr++ = '*'; 550 547 else if (start == 32) 551 548 *ptr++ = '>'; 552 549 else 553 550 *ptr++ = ' '; 554 - addr = regs->psw.addr + start - 32; 551 + addr = pswaddr + start - 32; 555 552 ptr += sprintf(ptr, "%px: ", (void *)addr); 556 553 if (start + opsize >= end) 557 554 break;
+6 -2
arch/s390/kernel/dumpstack.c
··· 155 155 void show_registers(struct pt_regs *regs) 156 156 { 157 157 struct psw_bits *psw = &psw_bits(regs->psw); 158 + unsigned long pswaddr; 158 159 char *mode; 159 160 161 + pswaddr = regs->psw.addr; 162 + if (test_pt_regs_flag(regs, PIF_PSW_ADDR_ADJUSTED)) 163 + pswaddr = __forward_psw(regs->psw, regs->int_code >> 16); 160 164 mode = user_mode(regs) ? "User" : "Krnl"; 161 - printk("%s PSW : %px %px", mode, (void *)regs->psw.mask, (void *)regs->psw.addr); 165 + printk("%s PSW : %px %px", mode, (void *)regs->psw.mask, (void *)pswaddr); 162 166 if (!user_mode(regs)) 163 - pr_cont(" (%pSR)", (void *)regs->psw.addr); 167 + pr_cont(" (%pSR)", (void *)pswaddr); 164 168 pr_cont("\n"); 165 169 printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " 166 170 "P:%x AS:%x CC:%x PM:%x", psw->per, psw->dat, psw->io, psw->ext,
+10 -11
arch/s390/kernel/early.c
··· 4 4 * Author(s): Hongjie Yang <hongjie@us.ibm.com>, 5 5 */ 6 6 7 - #define KMSG_COMPONENT "setup" 8 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 7 + #define pr_fmt(fmt) "setup: " fmt 9 8 10 9 #include <linux/sched/debug.h> 11 10 #include <linux/cpufeature.h> ··· 119 120 EBCASC(mach->type, sizeof(mach->type)); 120 121 EBCASC(mach->model, sizeof(mach->model)); 121 122 EBCASC(mach->model_capacity, sizeof(mach->model_capacity)); 122 - sprintf(mstr, "%-16.16s %-4.4s %-16.16s %-16.16s", 123 - mach->manufacturer, mach->type, 124 - mach->model, mach->model_capacity); 123 + scnprintf(mstr, sizeof(mstr), "%-16.16s %-4.4s %-16.16s %-16.16s", 124 + mach->manufacturer, mach->type, 125 + mach->model, mach->model_capacity); 125 126 strim_all(mstr); 126 127 if (stsi(vm, 3, 2, 2) == 0 && vm->count) { 127 128 EBCASC(vm->vm[0].cpi, sizeof(vm->vm[0].cpi)); 128 - sprintf(hvstr, "%-16.16s", vm->vm[0].cpi); 129 + scnprintf(hvstr, sizeof(hvstr), "%-16.16s", vm->vm[0].cpi); 129 130 strim_all(hvstr); 130 131 } else { 131 - sprintf(hvstr, "%s", 132 - machine_is_lpar() ? "LPAR" : 133 - machine_is_vm() ? "z/VM" : 134 - machine_is_kvm() ? "KVM" : "unknown"); 132 + scnprintf(hvstr, sizeof(hvstr), "%s", 133 + machine_is_lpar() ? "LPAR" : 134 + machine_is_vm() ? "z/VM" : 135 + machine_is_kvm() ? "KVM" : "unknown"); 135 136 } 136 - sprintf(arch_hw_string, "HW: %s (%s)", mstr, hvstr); 137 + scnprintf(arch_hw_string, sizeof(arch_hw_string), "HW: %s (%s)", mstr, hvstr); 137 138 dump_stack_set_arch_desc("%s (%s)", mstr, hvstr); 138 139 } 139 140
+6 -19
arch/s390/kernel/entry.S
··· 162 162 stg %r3,__LC_CURRENT(%r13) # store task struct of next 163 163 stg %r15,__LC_KERNEL_STACK(%r13) # store end of kernel stack 164 164 lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next 165 - aghi %r3,__TASK_pid 166 - mvc __LC_CURRENT_PID(4,%r13),0(%r3) # store pid of next 165 + lay %r4,__TASK_pid(%r3) 166 + mvc __LC_CURRENT_PID(4,%r13),0(%r4) # store pid of next 167 167 ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r13)", ALT_FACILITY(40) 168 + #ifdef CONFIG_STACKPROTECTOR 169 + lg %r3,__TASK_stack_canary(%r3) 170 + stg %r3,__LC_STACK_CANARY(%r13) 171 + #endif 168 172 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 169 173 BR_EX %r14 170 174 SYM_FUNC_END(__switch_to_asm) ··· 610 606 .quad PSW_KERNEL_BITS 611 607 .quad .Ldaton 612 608 SYM_DATA_END(daton_psw) 613 - 614 - .section .rodata, "a" 615 - .balign 8 616 - #define SYSCALL(esame,emu) .quad __s390x_ ## esame 617 - SYM_DATA_START(sys_call_table) 618 - #include <asm/syscall_table.h> 619 - SYM_DATA_END(sys_call_table) 620 - #undef SYSCALL 621 - 622 - #ifdef CONFIG_COMPAT 623 - 624 - #define SYSCALL(esame,emu) .quad __s390_ ## emu 625 - SYM_DATA_START(sys_call_table_emu) 626 - #include <asm/syscall_table.h> 627 - SYM_DATA_END(sys_call_table_emu) 628 - #undef SYSCALL 629 - #endif
arch/s390/kernel/head64.S arch/s390/kernel/head.S
+2 -3
arch/s390/kernel/hiperdispatch.c
··· 3 3 * Copyright IBM Corp. 2024 4 4 */ 5 5 6 - #define KMSG_COMPONENT "hd" 7 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 6 + #define pr_fmt(fmt) "hd: " fmt 8 7 9 8 /* 10 9 * Hiperdispatch: ··· 64 65 65 66 #define HD_DELAY_FACTOR (4) 66 67 #define HD_DELAY_INTERVAL (HZ / 4) 67 - #define HD_STEAL_THRESHOLD 30 68 + #define HD_STEAL_THRESHOLD 10 68 69 #define HD_STEAL_AVG_WEIGHT 16 69 70 70 71 static cpumask_t hd_vl_coremask; /* Mask containing all vertical low COREs */
+14 -7
arch/s390/kernel/module.c
··· 22 22 #include <linux/bug.h> 23 23 #include <linux/memory.h> 24 24 #include <linux/execmem.h> 25 + #include <asm/arch-stackprotector.h> 25 26 #include <asm/alternative.h> 26 27 #include <asm/nospec-branch.h> 27 28 #include <asm/facility.h> 28 29 #include <asm/ftrace.lds.h> 29 30 #include <asm/set_memory.h> 30 31 #include <asm/setup.h> 32 + #include <asm/asm-offsets.h> 31 33 32 34 #if 0 33 35 #define DEBUGP printk ··· 497 495 const Elf_Shdr *s; 498 496 char *secstrings, *secname; 499 497 void *aseg; 500 - #ifdef CONFIG_FUNCTION_TRACER 501 - int ret; 502 - #endif 498 + int rc = 0; 503 499 504 500 if (IS_ENABLED(CONFIG_EXPOLINE) && 505 501 !nospec_disable && me->arch.plt_size) { ··· 527 527 (str_has_prefix(secname, ".s390_return"))) 528 528 nospec_revert(aseg, aseg + s->sh_size); 529 529 530 + if (IS_ENABLED(CONFIG_STACKPROTECTOR) && 531 + (str_has_prefix(secname, "__stack_protector_loc"))) { 532 + rc = stack_protector_apply(aseg, aseg + s->sh_size); 533 + if (rc) 534 + break; 535 + } 536 + 530 537 #ifdef CONFIG_FUNCTION_TRACER 531 538 if (!strcmp(FTRACE_CALLSITE_SECTION, secname)) { 532 - ret = module_alloc_ftrace_hotpatch_trampolines(me, s); 533 - if (ret < 0) 534 - return ret; 539 + rc = module_alloc_ftrace_hotpatch_trampolines(me, s); 540 + if (rc) 541 + break; 535 542 } 536 543 #endif /* CONFIG_FUNCTION_TRACER */ 537 544 } 538 545 539 - return 0; 546 + return rc; 540 547 }
+1 -2
arch/s390/kernel/nmi.c
··· 184 184 sclp_emergency_printk(message); 185 185 } 186 186 187 - static notrace void s390_handle_damage(void) 187 + static notrace void __noreturn s390_handle_damage(void) 188 188 { 189 189 struct lowcore *lc = get_lowcore(); 190 190 union ctlreg0 cr0, cr0_new; ··· 214 214 lc->mcck_new_psw = psw_save; 215 215 local_ctl_load(0, &cr0.reg); 216 216 disabled_wait(); 217 - while (1); 218 217 } 219 218 NOKPROBE_SYMBOL(s390_handle_damage); 220 219
+1 -2
arch/s390/kernel/os_info.c
··· 6 6 * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com> 7 7 */ 8 8 9 - #define KMSG_COMPONENT "os_info" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "os_info: " fmt 11 10 12 11 #include <linux/crash_dump.h> 13 12 #include <linux/kernel.h>
+2 -4
arch/s390/kernel/perf_cpum_cf.c
··· 6 6 * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com> 7 7 * Thomas Richter <tmricht@linux.ibm.com> 8 8 */ 9 - #define KMSG_COMPONENT "cpum_cf" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "cpum_cf: " fmt 11 10 12 11 #include <linux/kernel.h> 13 12 #include <linux/kernel_stat.h> ··· 1205 1206 } 1206 1207 1207 1208 /* Setup s390dbf facility */ 1208 - cf_dbg = debug_register(KMSG_COMPONENT, 2, 1, 128); 1209 + cf_dbg = debug_register("cpum_cf", 2, 1, 128); 1209 1210 if (!cf_dbg) { 1210 1211 pr_err("Registration of s390dbf(cpum_cf) failed\n"); 1211 1212 rc = -ENOMEM; ··· 1688 1689 .open = cfset_open, 1689 1690 .release = cfset_release, 1690 1691 .unlocked_ioctl = cfset_ioctl, 1691 - .compat_ioctl = cfset_ioctl, 1692 1692 }; 1693 1693 1694 1694 static struct miscdevice cfset_dev = {
+3 -4
arch/s390/kernel/perf_cpum_sf.c
··· 5 5 * Copyright IBM Corp. 2013, 2018 6 6 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 7 7 */ 8 - #define KMSG_COMPONENT "cpum_sf" 9 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + #define pr_fmt(fmt) "cpum_sf: " fmt 10 9 11 10 #include <linux/kernel.h> 12 11 #include <linux/kernel_stat.h> ··· 1092 1093 * combined-sampling data entry consists of a basic- and a diagnostic-sampling 1093 1094 * data entry. The sampling function is determined by the flags in the perf 1094 1095 * event hardware structure. The function always works with a combined-sampling 1095 - * data entry but ignores the the diagnostic portion if it is not available. 1096 + * data entry but ignores the diagnostic portion if it is not available. 1096 1097 * 1097 1098 * Note that the implementation focuses on basic-sampling data entries and, if 1098 1099 * such an entry is not valid, the entire combined-sampling data entry is ··· 2069 2070 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG); 2070 2071 } 2071 2072 2072 - sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); 2073 + sfdbg = debug_register("cpum_sf", 2, 1, 80); 2073 2074 if (!sfdbg) { 2074 2075 pr_err("Registering for s390dbf failed\n"); 2075 2076 return -ENOMEM;
+1 -3
arch/s390/kernel/perf_event.c
··· 5 5 * Copyright IBM Corp. 2012, 2013 6 6 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 7 7 */ 8 - #define KMSG_COMPONENT "perf" 9 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + #define pr_fmt(fmt) "perf: " fmt 10 9 11 10 #include <linux/kernel.h> 12 11 #include <linux/perf_event.h> ··· 14 15 #include <linux/seq_file.h> 15 16 #include <linux/spinlock.h> 16 17 #include <linux/uaccess.h> 17 - #include <linux/compat.h> 18 18 #include <linux/sysfs.h> 19 19 #include <asm/stacktrace.h> 20 20 #include <asm/irq.h>
+1230
arch/s390/kernel/perf_pai.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Performance event support - Processor Activity Instrumentation Facility 4 + * 5 + * Copyright IBM Corp. 2026 6 + * Author(s): Thomas Richter <tmricht@linux.ibm.com> 7 + */ 8 + #define pr_fmt(fmt) "pai: " fmt 9 + 10 + #include <linux/kernel.h> 11 + #include <linux/kernel_stat.h> 12 + #include <linux/percpu.h> 13 + #include <linux/notifier.h> 14 + #include <linux/init.h> 15 + #include <linux/io.h> 16 + #include <linux/perf_event.h> 17 + #include <asm/ctlreg.h> 18 + #include <asm/pai.h> 19 + #include <asm/debug.h> 20 + 21 + static debug_info_t *paidbg; 22 + 23 + DEFINE_STATIC_KEY_FALSE(pai_key); 24 + 25 + enum { 26 + PAI_PMU_CRYPTO, /* Index of PMU pai_crypto */ 27 + PAI_PMU_EXT, /* Index of PMU pai_ext */ 28 + PAI_PMU_MAX /* # of PAI PMUs */ 29 + }; 30 + 31 + enum { 32 + PAIE1_CB_SZ = 0x200, /* Size of PAIE1 control block */ 33 + PAIE1_CTRBLOCK_SZ = 0x400 /* Size of PAIE1 counter blocks */ 34 + }; 35 + 36 + struct pai_userdata { 37 + u16 num; 38 + u64 value; 39 + } __packed; 40 + 41 + /* Create the PAI extension 1 control block area. 42 + * The PAI extension control block 1 is pointed to by lowcore 43 + * address 0x1508 for each CPU. This control block is 512 bytes in size 44 + * and requires a 512 byte boundary alignment. 
45 + */ 46 + struct paiext_cb { /* PAI extension 1 control block */ 47 + u64 header; /* Not used */ 48 + u64 reserved1; 49 + u64 acc; /* Addr to analytics counter control block */ 50 + u8 reserved2[PAIE1_CTRBLOCK_SZ - 3 * sizeof(u64)]; 51 + } __packed; 52 + 53 + struct pai_map { 54 + unsigned long *area; /* Area for CPU to store counters */ 55 + struct pai_userdata *save; /* Page to store no-zero counters */ 56 + unsigned int active_events; /* # of PAI crypto users */ 57 + refcount_t refcnt; /* Reference count mapped buffers */ 58 + struct perf_event *event; /* Perf event for sampling */ 59 + struct list_head syswide_list; /* List system-wide sampling events */ 60 + struct paiext_cb *paiext_cb; /* PAI extension control block area */ 61 + bool fullpage; /* True: counter area is a full page */ 62 + }; 63 + 64 + struct pai_mapptr { 65 + struct pai_map *mapptr; 66 + }; 67 + 68 + static struct pai_root { /* Anchor to per CPU data */ 69 + refcount_t refcnt; /* Overall active events */ 70 + struct pai_mapptr __percpu *mapptr; 71 + } pai_root[PAI_PMU_MAX]; 72 + 73 + /* This table defines the different parameters of the PAI PMUs. During 74 + * initialization the machine dependent values are extracted and saved. 75 + * However most of the values are static and do not change. 76 + * There is one table entry per PAI PMU. 
77 + */ 78 + struct pai_pmu { /* Define PAI PMU characteristics */ 79 + const char *pmuname; /* Name of PMU */ 80 + const int facility_nr; /* Facility number to check for support */ 81 + unsigned int num_avail; /* # Counters defined by hardware */ 82 + unsigned int num_named; /* # Counters known by name */ 83 + unsigned long base; /* Counter set base number */ 84 + unsigned long kernel_offset; /* Offset to kernel part in counter page */ 85 + unsigned long area_size; /* Size of counter area */ 86 + const char * const *names; /* List of counter names */ 87 + struct pmu *pmu; /* Ptr to supporting PMU */ 88 + int (*init)(struct pai_pmu *p); /* PMU support init function */ 89 + void (*exit)(struct pai_pmu *p); /* PMU support exit function */ 90 + struct attribute_group *event_group; /* Ptr to attribute of events */ 91 + }; 92 + 93 + static struct pai_pmu pai_pmu[]; /* Forward declaration */ 94 + 95 + /* Free per CPU data when the last event is removed. */ 96 + static void pai_root_free(int idx) 97 + { 98 + if (refcount_dec_and_test(&pai_root[idx].refcnt)) { 99 + free_percpu(pai_root[idx].mapptr); 100 + pai_root[idx].mapptr = NULL; 101 + } 102 + debug_sprintf_event(paidbg, 5, "%s root[%d].refcount %d\n", __func__, 103 + idx, refcount_read(&pai_root[idx].refcnt)); 104 + } 105 + 106 + /* 107 + * On initialization of first event also allocate per CPU data dynamically. 108 + * Start with an array of pointers, the array size is the maximum number of 109 + * CPUs possible, which might be larger than the number of CPUs currently 110 + * online. 111 + */ 112 + static int pai_root_alloc(int idx) 113 + { 114 + if (!refcount_inc_not_zero(&pai_root[idx].refcnt)) { 115 + /* The memory is already zeroed. 
*/ 116 + pai_root[idx].mapptr = alloc_percpu(struct pai_mapptr); 117 + if (!pai_root[idx].mapptr) 118 + return -ENOMEM; 119 + refcount_set(&pai_root[idx].refcnt, 1); 120 + } 121 + return 0; 122 + } 123 + 124 + /* Release the PMU if event is the last perf event */ 125 + static DEFINE_MUTEX(pai_reserve_mutex); 126 + 127 + /* Free all memory allocated for event counting/sampling setup */ 128 + static void pai_free(struct pai_mapptr *mp) 129 + { 130 + if (mp->mapptr->fullpage) 131 + free_page((unsigned long)mp->mapptr->area); 132 + else 133 + kfree(mp->mapptr->area); 134 + kfree(mp->mapptr->paiext_cb); 135 + kvfree(mp->mapptr->save); 136 + kfree(mp->mapptr); 137 + mp->mapptr = NULL; 138 + } 139 + 140 + /* Adjust usage counters and remove allocated memory when all users are 141 + * gone. 142 + */ 143 + static void pai_event_destroy_cpu(struct perf_event *event, int cpu) 144 + { 145 + int idx = PAI_PMU_IDX(event); 146 + struct pai_mapptr *mp = per_cpu_ptr(pai_root[idx].mapptr, cpu); 147 + struct pai_map *cpump = mp->mapptr; 148 + 149 + mutex_lock(&pai_reserve_mutex); 150 + debug_sprintf_event(paidbg, 5, "%s event %#llx idx %d cpu %d users %d " 151 + "refcnt %u\n", __func__, event->attr.config, idx, 152 + event->cpu, cpump->active_events, 153 + refcount_read(&cpump->refcnt)); 154 + if (refcount_dec_and_test(&cpump->refcnt)) 155 + pai_free(mp); 156 + pai_root_free(idx); 157 + mutex_unlock(&pai_reserve_mutex); 158 + } 159 + 160 + static void pai_event_destroy(struct perf_event *event) 161 + { 162 + int cpu; 163 + 164 + free_page(PAI_SAVE_AREA(event)); 165 + if (event->cpu == -1) { 166 + struct cpumask *mask = PAI_CPU_MASK(event); 167 + 168 + for_each_cpu(cpu, mask) 169 + pai_event_destroy_cpu(event, cpu); 170 + kfree(mask); 171 + } else { 172 + pai_event_destroy_cpu(event, event->cpu); 173 + } 174 + } 175 + 176 + static void paicrypt_event_destroy(struct perf_event *event) 177 + { 178 + static_branch_dec(&pai_key); 179 + pai_event_destroy(event); 180 + } 181 + 182 + static 
u64 pai_getctr(unsigned long *page, int nr, unsigned long offset) 183 + { 184 + if (offset) 185 + nr += offset / sizeof(*page); 186 + return page[nr]; 187 + } 188 + 189 + /* Read the counter values. Return value from location in CMP. For base 190 + * event xxx_ALL sum up all events. Returns counter value. 191 + */ 192 + static u64 pai_getdata(struct perf_event *event, bool kernel) 193 + { 194 + int idx = PAI_PMU_IDX(event); 195 + struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr); 196 + struct pai_pmu *pp = &pai_pmu[idx]; 197 + struct pai_map *cpump = mp->mapptr; 198 + unsigned int i; 199 + u64 sum = 0; 200 + 201 + if (event->attr.config != pp->base) { 202 + return pai_getctr(cpump->area, 203 + event->attr.config - pp->base, 204 + kernel ? pp->kernel_offset : 0); 205 + } 206 + 207 + for (i = 1; i <= pp->num_avail; i++) { 208 + u64 val = pai_getctr(cpump->area, i, 209 + kernel ? pp->kernel_offset : 0); 210 + 211 + if (!val) 212 + continue; 213 + sum += val; 214 + } 215 + return sum; 216 + } 217 + 218 + static u64 paicrypt_getall(struct perf_event *event) 219 + { 220 + u64 sum = 0; 221 + 222 + if (!event->attr.exclude_kernel) 223 + sum += pai_getdata(event, true); 224 + if (!event->attr.exclude_user) 225 + sum += pai_getdata(event, false); 226 + 227 + return sum; 228 + } 229 + 230 + /* Check concurrent access of counting and sampling for crypto events. 231 + * This function is called in process context and it is save to block. 232 + * When the event initialization functions fails, no other call back will 233 + * be invoked. 234 + * 235 + * Allocate the memory for the event. 
236 + */ 237 + static int pai_alloc_cpu(struct perf_event *event, int cpu) 238 + { 239 + int rc, idx = PAI_PMU_IDX(event); 240 + struct pai_map *cpump = NULL; 241 + bool need_paiext_cb = false; 242 + struct pai_mapptr *mp; 243 + 244 + mutex_lock(&pai_reserve_mutex); 245 + /* Allocate root node */ 246 + rc = pai_root_alloc(idx); 247 + if (rc) 248 + goto unlock; 249 + 250 + /* Allocate node for this event */ 251 + mp = per_cpu_ptr(pai_root[idx].mapptr, cpu); 252 + cpump = mp->mapptr; 253 + if (!cpump) { /* Paicrypt_map allocated? */ 254 + rc = -ENOMEM; 255 + cpump = kzalloc(sizeof(*cpump), GFP_KERNEL); 256 + if (!cpump) 257 + goto undo; 258 + /* Allocate memory for counter page and counter extraction. 259 + * Only the first counting event has to allocate a page. 260 + */ 261 + mp->mapptr = cpump; 262 + if (idx == PAI_PMU_CRYPTO) { 263 + cpump->area = (unsigned long *)get_zeroed_page(GFP_KERNEL); 264 + /* free_page() can handle 0x0 address */ 265 + cpump->fullpage = true; 266 + } else { /* PAI_PMU_EXT */ 267 + /* 268 + * Allocate memory for counter area and counter extraction. 269 + * These are 270 + * - a 512 byte block and requires 512 byte boundary 271 + * alignment. 272 + * - a 1KB byte block and requires 1KB boundary 273 + * alignment. 274 + * Only the first counting event has to allocate the area. 275 + * 276 + * Note: This works with commit 59bb47985c1d by default. 277 + * Backporting this to kernels without this commit might 278 + * needs adjustment. 
279 + */ 280 + cpump->area = kzalloc(pai_pmu[idx].area_size, GFP_KERNEL); 281 + cpump->paiext_cb = kzalloc(PAIE1_CB_SZ, GFP_KERNEL); 282 + need_paiext_cb = true; 283 + } 284 + cpump->save = kvmalloc_array(pai_pmu[idx].num_avail + 1, 285 + sizeof(struct pai_userdata), 286 + GFP_KERNEL); 287 + if (!cpump->area || !cpump->save || 288 + (need_paiext_cb && !cpump->paiext_cb)) { 289 + pai_free(mp); 290 + goto undo; 291 + } 292 + INIT_LIST_HEAD(&cpump->syswide_list); 293 + refcount_set(&cpump->refcnt, 1); 294 + rc = 0; 295 + } else { 296 + refcount_inc(&cpump->refcnt); 297 + } 298 + 299 + undo: 300 + if (rc) { 301 + /* Error in allocation of event, decrement anchor. Since 302 + * the event in not created, its destroy() function is never 303 + * invoked. Adjust the reference counter for the anchor. 304 + */ 305 + pai_root_free(idx); 306 + } 307 + unlock: 308 + mutex_unlock(&pai_reserve_mutex); 309 + /* If rc is non-zero, no increment of counter/sampler was done. */ 310 + return rc; 311 + } 312 + 313 + static int pai_alloc(struct perf_event *event) 314 + { 315 + struct cpumask *maskptr; 316 + int cpu, rc = -ENOMEM; 317 + 318 + maskptr = kzalloc(sizeof(*maskptr), GFP_KERNEL); 319 + if (!maskptr) 320 + goto out; 321 + 322 + for_each_online_cpu(cpu) { 323 + rc = pai_alloc_cpu(event, cpu); 324 + if (rc) { 325 + for_each_cpu(cpu, maskptr) 326 + pai_event_destroy_cpu(event, cpu); 327 + kfree(maskptr); 328 + goto out; 329 + } 330 + cpumask_set_cpu(cpu, maskptr); 331 + } 332 + 333 + /* 334 + * On error all cpumask are freed and all events have been destroyed. 335 + * Save of which CPUs data structures have been allocated for. 336 + * Release them in pai_event_destroy call back function 337 + * for this event. 338 + */ 339 + PAI_CPU_MASK(event) = maskptr; 340 + rc = 0; 341 + out: 342 + return rc; 343 + } 344 + 345 + /* Validate event number and return error if event is not supported. 
346 + * On successful return, PAI_PMU_IDX(event) is set to the index of 347 + * the supporting paing_support[] array element. 348 + */ 349 + static int pai_event_valid(struct perf_event *event, int idx) 350 + { 351 + struct perf_event_attr *a = &event->attr; 352 + struct pai_pmu *pp = &pai_pmu[idx]; 353 + 354 + /* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */ 355 + if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type) 356 + return -ENOENT; 357 + /* Allow only CRYPTO_ALL/NNPA_ALL for sampling */ 358 + if (a->sample_period && a->config != pp->base) 359 + return -EINVAL; 360 + /* PAI crypto event must be in valid range, try others if not */ 361 + if (a->config < pp->base || a->config > pp->base + pp->num_avail) 362 + return -ENOENT; 363 + if (idx == PAI_PMU_EXT && a->exclude_user) 364 + return -EINVAL; 365 + PAI_PMU_IDX(event) = idx; 366 + return 0; 367 + } 368 + 369 + /* Might be called on different CPU than the one the event is intended for. */ 370 + static int pai_event_init(struct perf_event *event, int idx) 371 + { 372 + struct perf_event_attr *a = &event->attr; 373 + int rc; 374 + 375 + /* PAI event must be valid and in supported range */ 376 + rc = pai_event_valid(event, idx); 377 + if (rc) 378 + goto out; 379 + /* Get a page to store last counter values for sampling */ 380 + if (a->sample_period) { 381 + PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL); 382 + if (!PAI_SAVE_AREA(event)) { 383 + rc = -ENOMEM; 384 + goto out; 385 + } 386 + } 387 + 388 + if (event->cpu >= 0) 389 + rc = pai_alloc_cpu(event, event->cpu); 390 + else 391 + rc = pai_alloc(event); 392 + if (rc) { 393 + free_page(PAI_SAVE_AREA(event)); 394 + goto out; 395 + } 396 + 397 + if (a->sample_period) { 398 + a->sample_period = 1; 399 + a->freq = 0; 400 + /* Register for paicrypt_sched_task() to be called */ 401 + event->attach_state |= PERF_ATTACH_SCHED_CB; 402 + /* Add raw data which contain the memory mapped counters */ 403 + a->sample_type |= PERF_SAMPLE_RAW; 404 + 
/* Turn off inheritance */ 405 + a->inherit = 0; 406 + } 407 + out: 408 + return rc; 409 + } 410 + 411 + static int paicrypt_event_init(struct perf_event *event) 412 + { 413 + int rc = pai_event_init(event, PAI_PMU_CRYPTO); 414 + 415 + if (!rc) { 416 + event->destroy = paicrypt_event_destroy; 417 + static_branch_inc(&pai_key); 418 + } 419 + return rc; 420 + } 421 + 422 + static void pai_read(struct perf_event *event, 423 + u64 (*fct)(struct perf_event *event)) 424 + { 425 + u64 prev, new, delta; 426 + 427 + prev = local64_read(&event->hw.prev_count); 428 + new = fct(event); 429 + local64_set(&event->hw.prev_count, new); 430 + delta = (prev <= new) ? new - prev : (-1ULL - prev) + new + 1; 431 + local64_add(delta, &event->count); 432 + } 433 + 434 + static void paicrypt_read(struct perf_event *event) 435 + { 436 + pai_read(event, paicrypt_getall); 437 + } 438 + 439 + static void pai_start(struct perf_event *event, int flags, 440 + u64 (*fct)(struct perf_event *event)) 441 + { 442 + int idx = PAI_PMU_IDX(event); 443 + struct pai_pmu *pp = &pai_pmu[idx]; 444 + struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr); 445 + struct pai_map *cpump = mp->mapptr; 446 + u64 sum; 447 + 448 + if (!event->attr.sample_period) { /* Counting */ 449 + sum = fct(event); /* Get current value */ 450 + local64_set(&event->hw.prev_count, sum); 451 + } else { /* Sampling */ 452 + memcpy((void *)PAI_SAVE_AREA(event), cpump->area, pp->area_size); 453 + /* Enable context switch callback for system-wide sampling */ 454 + if (!(event->attach_state & PERF_ATTACH_TASK)) { 455 + list_add_tail(PAI_SWLIST(event), &cpump->syswide_list); 456 + perf_sched_cb_inc(event->pmu); 457 + } else { 458 + cpump->event = event; 459 + } 460 + } 461 + } 462 + 463 + static void paicrypt_start(struct perf_event *event, int flags) 464 + { 465 + pai_start(event, flags, paicrypt_getall); 466 + } 467 + 468 + static int pai_add(struct perf_event *event, int flags) 469 + { 470 + int idx = PAI_PMU_IDX(event); 471 + 
struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr); 472 + struct pai_map *cpump = mp->mapptr; 473 + struct paiext_cb *pcb = cpump->paiext_cb; 474 + unsigned long ccd; 475 + 476 + if (++cpump->active_events == 1) { 477 + if (!pcb) { /* PAI crypto */ 478 + ccd = virt_to_phys(cpump->area) | PAI_CRYPTO_KERNEL_OFFSET; 479 + WRITE_ONCE(get_lowcore()->ccd, ccd); 480 + local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT); 481 + } else { /* PAI extension 1 */ 482 + ccd = virt_to_phys(pcb); 483 + WRITE_ONCE(get_lowcore()->aicd, ccd); 484 + pcb->acc = virt_to_phys(cpump->area) | 0x1; 485 + /* Enable CPU instruction lookup for PAIE1 control block */ 486 + local_ctl_set_bit(0, CR0_PAI_EXTENSION_BIT); 487 + } 488 + } 489 + if (flags & PERF_EF_START) 490 + pai_pmu[idx].pmu->start(event, PERF_EF_RELOAD); 491 + event->hw.state = 0; 492 + return 0; 493 + } 494 + 495 + static int paicrypt_add(struct perf_event *event, int flags) 496 + { 497 + return pai_add(event, flags); 498 + } 499 + 500 + static void pai_have_sample(struct perf_event *, struct pai_map *); 501 + static void pai_stop(struct perf_event *event, int flags) 502 + { 503 + int idx = PAI_PMU_IDX(event); 504 + struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr); 505 + struct pai_map *cpump = mp->mapptr; 506 + 507 + if (!event->attr.sample_period) { /* Counting */ 508 + pai_pmu[idx].pmu->read(event); 509 + } else { /* Sampling */ 510 + if (!(event->attach_state & PERF_ATTACH_TASK)) { 511 + perf_sched_cb_dec(event->pmu); 512 + list_del(PAI_SWLIST(event)); 513 + } else { 514 + pai_have_sample(event, cpump); 515 + cpump->event = NULL; 516 + } 517 + } 518 + event->hw.state = PERF_HES_STOPPED; 519 + } 520 + 521 + static void paicrypt_stop(struct perf_event *event, int flags) 522 + { 523 + pai_stop(event, flags); 524 + } 525 + 526 + static void pai_del(struct perf_event *event, int flags) 527 + { 528 + int idx = PAI_PMU_IDX(event); 529 + struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr); 530 + struct pai_map 
*cpump = mp->mapptr; 531 + struct paiext_cb *pcb = cpump->paiext_cb; 532 + 533 + pai_pmu[idx].pmu->stop(event, PERF_EF_UPDATE); 534 + if (--cpump->active_events == 0) { 535 + if (!pcb) { /* PAI crypto */ 536 + local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT); 537 + WRITE_ONCE(get_lowcore()->ccd, 0); 538 + } else { /* PAI extension 1 */ 539 + /* Disable CPU instruction lookup for PAIE1 control block */ 540 + local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT); 541 + pcb->acc = 0; 542 + WRITE_ONCE(get_lowcore()->aicd, 0); 543 + } 544 + } 545 + } 546 + 547 + static void paicrypt_del(struct perf_event *event, int flags) 548 + { 549 + pai_del(event, flags); 550 + } 551 + 552 + /* Create raw data and save it in buffer. Calculate the delta for each 553 + * counter between this invocation and the last invocation. 554 + * Returns number of bytes copied. 555 + * Saves only entries with positive counter difference of the form 556 + * 2 bytes: Number of counter 557 + * 8 bytes: Value of counter 558 + */ 559 + static size_t pai_copy(struct pai_userdata *userdata, unsigned long *page, 560 + struct pai_pmu *pp, unsigned long *page_old, 561 + bool exclude_user, bool exclude_kernel) 562 + { 563 + int i, outidx = 0; 564 + 565 + for (i = 1; i <= pp->num_avail; i++) { 566 + u64 val = 0, val_old = 0; 567 + 568 + if (!exclude_kernel) { 569 + val += pai_getctr(page, i, pp->kernel_offset); 570 + val_old += pai_getctr(page_old, i, pp->kernel_offset); 571 + } 572 + if (!exclude_user) { 573 + val += pai_getctr(page, i, 0); 574 + val_old += pai_getctr(page_old, i, 0); 575 + } 576 + if (val >= val_old) 577 + val -= val_old; 578 + else 579 + val = (~0ULL - val_old) + val + 1; 580 + if (val) { 581 + userdata[outidx].num = i; 582 + userdata[outidx].value = val; 583 + outidx++; 584 + } 585 + } 586 + return outidx * sizeof(*userdata); 587 + } 588 + 589 + /* Write sample when one or more counters values are nonzero. 
590 + * 591 + * Note: The function paicrypt_sched_task() and pai_push_sample() are not 592 + * invoked after function paicrypt_del() has been called because of function 593 + * perf_sched_cb_dec(). Both functions are only 594 + * called when sampling is active. Function perf_sched_cb_inc() 595 + * has been invoked to install function paicrypt_sched_task() as call back 596 + * to run at context switch time. 597 + * 598 + * This causes function perf_event_context_sched_out() and 599 + * perf_event_context_sched_in() to check whether the PMU has installed an 600 + * sched_task() callback. That callback is not active after paicrypt_del() 601 + * returns and has deleted the event on that CPU. 602 + */ 603 + static int pai_push_sample(size_t rawsize, struct pai_map *cpump, 604 + struct perf_event *event) 605 + { 606 + int idx = PAI_PMU_IDX(event); 607 + struct pai_pmu *pp = &pai_pmu[idx]; 608 + struct perf_sample_data data; 609 + struct perf_raw_record raw; 610 + struct pt_regs regs; 611 + int overflow; 612 + 613 + /* Setup perf sample */ 614 + memset(&regs, 0, sizeof(regs)); 615 + memset(&raw, 0, sizeof(raw)); 616 + memset(&data, 0, sizeof(data)); 617 + perf_sample_data_init(&data, 0, event->hw.last_period); 618 + if (event->attr.sample_type & PERF_SAMPLE_TID) { 619 + data.tid_entry.pid = task_tgid_nr(current); 620 + data.tid_entry.tid = task_pid_nr(current); 621 + } 622 + if (event->attr.sample_type & PERF_SAMPLE_TIME) 623 + data.time = event->clock(); 624 + if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) 625 + data.id = event->id; 626 + if (event->attr.sample_type & PERF_SAMPLE_CPU) { 627 + data.cpu_entry.cpu = smp_processor_id(); 628 + data.cpu_entry.reserved = 0; 629 + } 630 + if (event->attr.sample_type & PERF_SAMPLE_RAW) { 631 + raw.frag.size = rawsize; 632 + raw.frag.data = cpump->save; 633 + perf_sample_save_raw_data(&data, event, &raw); 634 + } 635 + 636 + overflow = perf_event_overflow(event, &data, &regs); 637 + 
perf_event_update_userpage(event); 638 + /* Save crypto counter lowcore page after reading event data. */ 639 + memcpy((void *)PAI_SAVE_AREA(event), cpump->area, pp->area_size); 640 + return overflow; 641 + } 642 + 643 + /* Check if there is data to be saved on schedule out of a task. */ 644 + static void pai_have_sample(struct perf_event *event, struct pai_map *cpump) 645 + { 646 + struct pai_pmu *pp; 647 + size_t rawsize; 648 + 649 + if (!event) /* No event active */ 650 + return; 651 + pp = &pai_pmu[PAI_PMU_IDX(event)]; 652 + rawsize = pai_copy(cpump->save, cpump->area, pp, 653 + (unsigned long *)PAI_SAVE_AREA(event), 654 + event->attr.exclude_user, 655 + event->attr.exclude_kernel); 656 + if (rawsize) /* No incremented counters */ 657 + pai_push_sample(rawsize, cpump, event); 658 + } 659 + 660 + /* Check if there is data to be saved on schedule out of a task. */ 661 + static void pai_have_samples(int idx) 662 + { 663 + struct pai_mapptr *mp = this_cpu_ptr(pai_root[idx].mapptr); 664 + struct pai_map *cpump = mp->mapptr; 665 + struct perf_event *event; 666 + 667 + list_for_each_entry(event, &cpump->syswide_list, hw.tp_list) 668 + pai_have_sample(event, cpump); 669 + } 670 + 671 + /* Called on schedule-in and schedule-out. No access to event structure, 672 + * but for sampling only event CRYPTO_ALL is allowed. 673 + */ 674 + static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, 675 + struct task_struct *task, bool sched_in) 676 + { 677 + /* We started with a clean page on event installation. So read out 678 + * results on schedule_out and if page was dirty, save old values. 679 + */ 680 + if (!sched_in) 681 + pai_have_samples(PAI_PMU_CRYPTO); 682 + } 683 + 684 + /* ============================= paiext ====================================*/ 685 + 686 + static void paiext_event_destroy(struct perf_event *event) 687 + { 688 + pai_event_destroy(event); 689 + } 690 + 691 + /* Might be called on different CPU than the one the event is intended for. 
*/ 692 + static int paiext_event_init(struct perf_event *event) 693 + { 694 + int rc = pai_event_init(event, PAI_PMU_EXT); 695 + 696 + if (!rc) { 697 + event->attr.exclude_kernel = true; /* No kernel space part */ 698 + event->destroy = paiext_event_destroy; 699 + /* Offset of NNPA in paiext_cb */ 700 + event->hw.config_base = offsetof(struct paiext_cb, acc); 701 + } 702 + return rc; 703 + } 704 + 705 + static u64 paiext_getall(struct perf_event *event) 706 + { 707 + return pai_getdata(event, false); 708 + } 709 + 710 + static void paiext_read(struct perf_event *event) 711 + { 712 + pai_read(event, paiext_getall); 713 + } 714 + 715 + static void paiext_start(struct perf_event *event, int flags) 716 + { 717 + pai_start(event, flags, paiext_getall); 718 + } 719 + 720 + static int paiext_add(struct perf_event *event, int flags) 721 + { 722 + return pai_add(event, flags); 723 + } 724 + 725 + static void paiext_stop(struct perf_event *event, int flags) 726 + { 727 + pai_stop(event, flags); 728 + } 729 + 730 + static void paiext_del(struct perf_event *event, int flags) 731 + { 732 + pai_del(event, flags); 733 + } 734 + 735 + /* Called on schedule-in and schedule-out. No access to event structure, 736 + * but for sampling only event NNPA_ALL is allowed. 737 + */ 738 + static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, 739 + struct task_struct *task, bool sched_in) 740 + { 741 + /* We started with a clean page on event installation. So read out 742 + * results on schedule_out and if page was dirty, save old values. 743 + */ 744 + if (!sched_in) 745 + pai_have_samples(PAI_PMU_EXT); 746 + } 747 + 748 + /* Attribute definitions for paicrypt interface. As with other CPU 749 + * Measurement Facilities, there is one attribute per mapped counter. 750 + * The number of mapped counters may vary per machine generation. Use 751 + * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction 752 + * to determine the number of mapped counters. 
The instructions returns 753 + * a positive number, which is the highest number of supported counters. 754 + * All counters less than this number are also supported, there are no 755 + * holes. A returned number of zero means no support for mapped counters. 756 + * 757 + * The identification of the counter is a unique number. The chosen range 758 + * is 0x1000 + offset in mapped kernel page. 759 + * All CPU Measurement Facility counters identifiers must be unique and 760 + * the numbers from 0 to 496 are already used for the CPU Measurement 761 + * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already 762 + * used for the CPU Measurement Sampling facility. 763 + */ 764 + PMU_FORMAT_ATTR(event, "config:0-63"); 765 + 766 + static struct attribute *paicrypt_format_attr[] = { 767 + &format_attr_event.attr, 768 + NULL, 769 + }; 770 + 771 + static struct attribute_group paicrypt_events_group = { 772 + .name = "events", 773 + .attrs = NULL /* Filled in attr_event_init() */ 774 + }; 775 + 776 + static struct attribute_group paicrypt_format_group = { 777 + .name = "format", 778 + .attrs = paicrypt_format_attr, 779 + }; 780 + 781 + static const struct attribute_group *paicrypt_attr_groups[] = { 782 + &paicrypt_events_group, 783 + &paicrypt_format_group, 784 + NULL, 785 + }; 786 + 787 + /* Performance monitoring unit for mapped counters */ 788 + static struct pmu paicrypt = { 789 + .task_ctx_nr = perf_hw_context, 790 + .event_init = paicrypt_event_init, 791 + .add = paicrypt_add, 792 + .del = paicrypt_del, 793 + .start = paicrypt_start, 794 + .stop = paicrypt_stop, 795 + .read = paicrypt_read, 796 + .sched_task = paicrypt_sched_task, 797 + .attr_groups = paicrypt_attr_groups 798 + }; 799 + 800 + /* List of symbolic PAI counter names. 
*/ 801 + static const char * const paicrypt_ctrnames[] = { 802 + [0] = "CRYPTO_ALL", 803 + [1] = "KM_DEA", 804 + [2] = "KM_TDEA_128", 805 + [3] = "KM_TDEA_192", 806 + [4] = "KM_ENCRYPTED_DEA", 807 + [5] = "KM_ENCRYPTED_TDEA_128", 808 + [6] = "KM_ENCRYPTED_TDEA_192", 809 + [7] = "KM_AES_128", 810 + [8] = "KM_AES_192", 811 + [9] = "KM_AES_256", 812 + [10] = "KM_ENCRYPTED_AES_128", 813 + [11] = "KM_ENCRYPTED_AES_192", 814 + [12] = "KM_ENCRYPTED_AES_256", 815 + [13] = "KM_XTS_AES_128", 816 + [14] = "KM_XTS_AES_256", 817 + [15] = "KM_XTS_ENCRYPTED_AES_128", 818 + [16] = "KM_XTS_ENCRYPTED_AES_256", 819 + [17] = "KMC_DEA", 820 + [18] = "KMC_TDEA_128", 821 + [19] = "KMC_TDEA_192", 822 + [20] = "KMC_ENCRYPTED_DEA", 823 + [21] = "KMC_ENCRYPTED_TDEA_128", 824 + [22] = "KMC_ENCRYPTED_TDEA_192", 825 + [23] = "KMC_AES_128", 826 + [24] = "KMC_AES_192", 827 + [25] = "KMC_AES_256", 828 + [26] = "KMC_ENCRYPTED_AES_128", 829 + [27] = "KMC_ENCRYPTED_AES_192", 830 + [28] = "KMC_ENCRYPTED_AES_256", 831 + [29] = "KMC_PRNG", 832 + [30] = "KMA_GCM_AES_128", 833 + [31] = "KMA_GCM_AES_192", 834 + [32] = "KMA_GCM_AES_256", 835 + [33] = "KMA_GCM_ENCRYPTED_AES_128", 836 + [34] = "KMA_GCM_ENCRYPTED_AES_192", 837 + [35] = "KMA_GCM_ENCRYPTED_AES_256", 838 + [36] = "KMF_DEA", 839 + [37] = "KMF_TDEA_128", 840 + [38] = "KMF_TDEA_192", 841 + [39] = "KMF_ENCRYPTED_DEA", 842 + [40] = "KMF_ENCRYPTED_TDEA_128", 843 + [41] = "KMF_ENCRYPTED_TDEA_192", 844 + [42] = "KMF_AES_128", 845 + [43] = "KMF_AES_192", 846 + [44] = "KMF_AES_256", 847 + [45] = "KMF_ENCRYPTED_AES_128", 848 + [46] = "KMF_ENCRYPTED_AES_192", 849 + [47] = "KMF_ENCRYPTED_AES_256", 850 + [48] = "KMCTR_DEA", 851 + [49] = "KMCTR_TDEA_128", 852 + [50] = "KMCTR_TDEA_192", 853 + [51] = "KMCTR_ENCRYPTED_DEA", 854 + [52] = "KMCTR_ENCRYPTED_TDEA_128", 855 + [53] = "KMCTR_ENCRYPTED_TDEA_192", 856 + [54] = "KMCTR_AES_128", 857 + [55] = "KMCTR_AES_192", 858 + [56] = "KMCTR_AES_256", 859 + [57] = "KMCTR_ENCRYPTED_AES_128", 860 + [58] = 
"KMCTR_ENCRYPTED_AES_192", 861 + [59] = "KMCTR_ENCRYPTED_AES_256", 862 + [60] = "KMO_DEA", 863 + [61] = "KMO_TDEA_128", 864 + [62] = "KMO_TDEA_192", 865 + [63] = "KMO_ENCRYPTED_DEA", 866 + [64] = "KMO_ENCRYPTED_TDEA_128", 867 + [65] = "KMO_ENCRYPTED_TDEA_192", 868 + [66] = "KMO_AES_128", 869 + [67] = "KMO_AES_192", 870 + [68] = "KMO_AES_256", 871 + [69] = "KMO_ENCRYPTED_AES_128", 872 + [70] = "KMO_ENCRYPTED_AES_192", 873 + [71] = "KMO_ENCRYPTED_AES_256", 874 + [72] = "KIMD_SHA_1", 875 + [73] = "KIMD_SHA_256", 876 + [74] = "KIMD_SHA_512", 877 + [75] = "KIMD_SHA3_224", 878 + [76] = "KIMD_SHA3_256", 879 + [77] = "KIMD_SHA3_384", 880 + [78] = "KIMD_SHA3_512", 881 + [79] = "KIMD_SHAKE_128", 882 + [80] = "KIMD_SHAKE_256", 883 + [81] = "KIMD_GHASH", 884 + [82] = "KLMD_SHA_1", 885 + [83] = "KLMD_SHA_256", 886 + [84] = "KLMD_SHA_512", 887 + [85] = "KLMD_SHA3_224", 888 + [86] = "KLMD_SHA3_256", 889 + [87] = "KLMD_SHA3_384", 890 + [88] = "KLMD_SHA3_512", 891 + [89] = "KLMD_SHAKE_128", 892 + [90] = "KLMD_SHAKE_256", 893 + [91] = "KMAC_DEA", 894 + [92] = "KMAC_TDEA_128", 895 + [93] = "KMAC_TDEA_192", 896 + [94] = "KMAC_ENCRYPTED_DEA", 897 + [95] = "KMAC_ENCRYPTED_TDEA_128", 898 + [96] = "KMAC_ENCRYPTED_TDEA_192", 899 + [97] = "KMAC_AES_128", 900 + [98] = "KMAC_AES_192", 901 + [99] = "KMAC_AES_256", 902 + [100] = "KMAC_ENCRYPTED_AES_128", 903 + [101] = "KMAC_ENCRYPTED_AES_192", 904 + [102] = "KMAC_ENCRYPTED_AES_256", 905 + [103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA", 906 + [104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128", 907 + [105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192", 908 + [106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA", 909 + [107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128", 910 + [108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192", 911 + [109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128", 912 + [110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192", 913 + [111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256", 914 + [112] = 
"PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128", 915 + [113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192", 916 + [114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256", 917 + [115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128", 918 + [116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256", 919 + [117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128", 920 + [118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256", 921 + [119] = "PCC_SCALAR_MULTIPLY_P256", 922 + [120] = "PCC_SCALAR_MULTIPLY_P384", 923 + [121] = "PCC_SCALAR_MULTIPLY_P521", 924 + [122] = "PCC_SCALAR_MULTIPLY_ED25519", 925 + [123] = "PCC_SCALAR_MULTIPLY_ED448", 926 + [124] = "PCC_SCALAR_MULTIPLY_X25519", 927 + [125] = "PCC_SCALAR_MULTIPLY_X448", 928 + [126] = "PRNO_SHA_512_DRNG", 929 + [127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO", 930 + [128] = "PRNO_TRNG", 931 + [129] = "KDSA_ECDSA_VERIFY_P256", 932 + [130] = "KDSA_ECDSA_VERIFY_P384", 933 + [131] = "KDSA_ECDSA_VERIFY_P521", 934 + [132] = "KDSA_ECDSA_SIGN_P256", 935 + [133] = "KDSA_ECDSA_SIGN_P384", 936 + [134] = "KDSA_ECDSA_SIGN_P521", 937 + [135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256", 938 + [136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384", 939 + [137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521", 940 + [138] = "KDSA_EDDSA_VERIFY_ED25519", 941 + [139] = "KDSA_EDDSA_VERIFY_ED448", 942 + [140] = "KDSA_EDDSA_SIGN_ED25519", 943 + [141] = "KDSA_EDDSA_SIGN_ED448", 944 + [142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519", 945 + [143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448", 946 + [144] = "PCKMO_ENCRYPT_DEA_KEY", 947 + [145] = "PCKMO_ENCRYPT_TDEA_128_KEY", 948 + [146] = "PCKMO_ENCRYPT_TDEA_192_KEY", 949 + [147] = "PCKMO_ENCRYPT_AES_128_KEY", 950 + [148] = "PCKMO_ENCRYPT_AES_192_KEY", 951 + [149] = "PCKMO_ENCRYPT_AES_256_KEY", 952 + [150] = "PCKMO_ENCRYPT_ECC_P256_KEY", 953 + [151] = "PCKMO_ENCRYPT_ECC_P384_KEY", 954 + [152] = "PCKMO_ENCRYPT_ECC_P521_KEY", 955 + [153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY", 956 + [154] = "PCKMO_ENCRYPT_ECC_ED448_KEY", 957 + [155] = 
"IBM_RESERVED_155", 958 + [156] = "IBM_RESERVED_156", 959 + [157] = "KM_FULL_XTS_AES_128", 960 + [158] = "KM_FULL_XTS_AES_256", 961 + [159] = "KM_FULL_XTS_ENCRYPTED_AES_128", 962 + [160] = "KM_FULL_XTS_ENCRYPTED_AES_256", 963 + [161] = "KMAC_HMAC_SHA_224", 964 + [162] = "KMAC_HMAC_SHA_256", 965 + [163] = "KMAC_HMAC_SHA_384", 966 + [164] = "KMAC_HMAC_SHA_512", 967 + [165] = "KMAC_HMAC_ENCRYPTED_SHA_224", 968 + [166] = "KMAC_HMAC_ENCRYPTED_SHA_256", 969 + [167] = "KMAC_HMAC_ENCRYPTED_SHA_384", 970 + [168] = "KMAC_HMAC_ENCRYPTED_SHA_512", 971 + [169] = "PCKMO_ENCRYPT_HMAC_512_KEY", 972 + [170] = "PCKMO_ENCRYPT_HMAC_1024_KEY", 973 + [171] = "PCKMO_ENCRYPT_AES_XTS_128", 974 + [172] = "PCKMO_ENCRYPT_AES_XTS_256", 975 + }; 976 + 977 + static struct attribute *paiext_format_attr[] = { 978 + &format_attr_event.attr, 979 + NULL, 980 + }; 981 + 982 + static struct attribute_group paiext_events_group = { 983 + .name = "events", 984 + .attrs = NULL, /* Filled in attr_event_init() */ 985 + }; 986 + 987 + static struct attribute_group paiext_format_group = { 988 + .name = "format", 989 + .attrs = paiext_format_attr, 990 + }; 991 + 992 + static const struct attribute_group *paiext_attr_groups[] = { 993 + &paiext_events_group, 994 + &paiext_format_group, 995 + NULL, 996 + }; 997 + 998 + /* Performance monitoring unit for mapped counters */ 999 + static struct pmu paiext = { 1000 + .task_ctx_nr = perf_hw_context, 1001 + .event_init = paiext_event_init, 1002 + .add = paiext_add, 1003 + .del = paiext_del, 1004 + .start = paiext_start, 1005 + .stop = paiext_stop, 1006 + .read = paiext_read, 1007 + .sched_task = paiext_sched_task, 1008 + .attr_groups = paiext_attr_groups, 1009 + }; 1010 + 1011 + /* List of symbolic PAI extension 1 NNPA counter names. 
*/ 1012 + static const char * const paiext_ctrnames[] = { 1013 + [0] = "NNPA_ALL", 1014 + [1] = "NNPA_ADD", 1015 + [2] = "NNPA_SUB", 1016 + [3] = "NNPA_MUL", 1017 + [4] = "NNPA_DIV", 1018 + [5] = "NNPA_MIN", 1019 + [6] = "NNPA_MAX", 1020 + [7] = "NNPA_LOG", 1021 + [8] = "NNPA_EXP", 1022 + [9] = "NNPA_IBM_RESERVED_9", 1023 + [10] = "NNPA_RELU", 1024 + [11] = "NNPA_TANH", 1025 + [12] = "NNPA_SIGMOID", 1026 + [13] = "NNPA_SOFTMAX", 1027 + [14] = "NNPA_BATCHNORM", 1028 + [15] = "NNPA_MAXPOOL2D", 1029 + [16] = "NNPA_AVGPOOL2D", 1030 + [17] = "NNPA_LSTMACT", 1031 + [18] = "NNPA_GRUACT", 1032 + [19] = "NNPA_CONVOLUTION", 1033 + [20] = "NNPA_MATMUL_OP", 1034 + [21] = "NNPA_MATMUL_OP_BCAST23", 1035 + [22] = "NNPA_SMALLBATCH", 1036 + [23] = "NNPA_LARGEDIM", 1037 + [24] = "NNPA_SMALLTENSOR", 1038 + [25] = "NNPA_1MFRAME", 1039 + [26] = "NNPA_2GFRAME", 1040 + [27] = "NNPA_ACCESSEXCEPT", 1041 + [28] = "NNPA_TRANSFORM", 1042 + [29] = "NNPA_GELU", 1043 + [30] = "NNPA_MOMENTS", 1044 + [31] = "NNPA_LAYERNORM", 1045 + [32] = "NNPA_MATMUL_OP_BCAST1", 1046 + [33] = "NNPA_SQRT", 1047 + [34] = "NNPA_INVSQRT", 1048 + [35] = "NNPA_NORM", 1049 + [36] = "NNPA_REDUCE", 1050 + }; 1051 + 1052 + static void __init attr_event_free(struct attribute **attrs) 1053 + { 1054 + struct perf_pmu_events_attr *pa; 1055 + unsigned int i; 1056 + 1057 + for (i = 0; attrs[i]; i++) { 1058 + struct device_attribute *dap; 1059 + 1060 + dap = container_of(attrs[i], struct device_attribute, attr); 1061 + pa = container_of(dap, struct perf_pmu_events_attr, attr); 1062 + kfree(pa); 1063 + } 1064 + kfree(attrs); 1065 + } 1066 + 1067 + static struct attribute * __init attr_event_init_one(int num, 1068 + unsigned long base, 1069 + const char *name) 1070 + { 1071 + struct perf_pmu_events_attr *pa; 1072 + 1073 + pa = kzalloc(sizeof(*pa), GFP_KERNEL); 1074 + if (!pa) 1075 + return NULL; 1076 + 1077 + sysfs_attr_init(&pa->attr.attr); 1078 + pa->id = base + num; 1079 + pa->attr.attr.name = name; 1080 + pa->attr.attr.mode = 
0444; 1081 + pa->attr.show = cpumf_events_sysfs_show; 1082 + pa->attr.store = NULL; 1083 + return &pa->attr.attr; 1084 + } 1085 + 1086 + static struct attribute ** __init attr_event_init(struct pai_pmu *p) 1087 + { 1088 + unsigned int min_attr = min_t(unsigned int, p->num_named, p->num_avail); 1089 + struct attribute **attrs; 1090 + unsigned int i; 1091 + 1092 + attrs = kmalloc_array(min_attr + 1, sizeof(*attrs), GFP_KERNEL | __GFP_ZERO); 1093 + if (!attrs) 1094 + goto out; 1095 + for (i = 0; i < min_attr; i++) { 1096 + attrs[i] = attr_event_init_one(i, p->base, p->names[i]); 1097 + if (!attrs[i]) { 1098 + attr_event_free(attrs); 1099 + attrs = NULL; 1100 + goto out; 1101 + } 1102 + } 1103 + attrs[i] = NULL; 1104 + out: 1105 + return attrs; 1106 + } 1107 + 1108 + static void __init pai_pmu_exit(struct pai_pmu *p) 1109 + { 1110 + attr_event_free(p->event_group->attrs); 1111 + p->event_group->attrs = NULL; 1112 + } 1113 + 1114 + /* Add a PMU. Install its events and register the PMU device driver 1115 + * call back functions. 
1116 + */ 1117 + static int __init pai_pmu_init(struct pai_pmu *p) 1118 + { 1119 + int rc = -ENOMEM; 1120 + 1121 + 1122 + /* Export known PAI events */ 1123 + p->event_group->attrs = attr_event_init(p); 1124 + if (!p->event_group->attrs) { 1125 + pr_err("Creation of PMU %s /sysfs failed\n", p->pmuname); 1126 + goto out; 1127 + } 1128 + 1129 + rc = perf_pmu_register(p->pmu, p->pmuname, -1); 1130 + if (rc) { 1131 + pai_pmu_exit(p); 1132 + pr_err("Registering PMU %s failed with rc=%i\n", p->pmuname, 1133 + rc); 1134 + } 1135 + out: 1136 + return rc; 1137 + } 1138 + 1139 + /* PAI PMU characteristics table */ 1140 + static struct pai_pmu pai_pmu[] __refdata = { 1141 + [PAI_PMU_CRYPTO] = { 1142 + .pmuname = "pai_crypto", 1143 + .facility_nr = 196, 1144 + .num_named = ARRAY_SIZE(paicrypt_ctrnames), 1145 + .names = paicrypt_ctrnames, 1146 + .base = PAI_CRYPTO_BASE, 1147 + .kernel_offset = PAI_CRYPTO_KERNEL_OFFSET, 1148 + .area_size = PAGE_SIZE, 1149 + .init = pai_pmu_init, 1150 + .exit = pai_pmu_exit, 1151 + .pmu = &paicrypt, 1152 + .event_group = &paicrypt_events_group 1153 + }, 1154 + [PAI_PMU_EXT] = { 1155 + .pmuname = "pai_ext", 1156 + .facility_nr = 197, 1157 + .num_named = ARRAY_SIZE(paiext_ctrnames), 1158 + .names = paiext_ctrnames, 1159 + .base = PAI_NNPA_BASE, 1160 + .kernel_offset = 0, 1161 + .area_size = PAIE1_CTRBLOCK_SZ, 1162 + .init = pai_pmu_init, 1163 + .exit = pai_pmu_exit, 1164 + .pmu = &paiext, 1165 + .event_group = &paiext_events_group 1166 + } 1167 + }; 1168 + 1169 + /* 1170 + * Check if the PMU (via facility) is supported by machine. Try all of the 1171 + * supported PAI PMUs. 1172 + * Return number of successfully installed PMUs. 
1173 + */ 1174 + static int __init paipmu_setup(void) 1175 + { 1176 + struct qpaci_info_block ib; 1177 + int install_ok = 0, rc; 1178 + struct pai_pmu *p; 1179 + size_t i; 1180 + 1181 + for (i = 0; i < ARRAY_SIZE(pai_pmu); ++i) { 1182 + p = &pai_pmu[i]; 1183 + 1184 + if (!test_facility(p->facility_nr)) 1185 + continue; 1186 + 1187 + qpaci(&ib); 1188 + switch (i) { 1189 + case PAI_PMU_CRYPTO: 1190 + p->num_avail = ib.num_cc; 1191 + if (p->num_avail >= PAI_CRYPTO_MAXCTR) { 1192 + pr_err("Too many PMU %s counters %d\n", 1193 + p->pmuname, p->num_avail); 1194 + continue; 1195 + } 1196 + break; 1197 + case PAI_PMU_EXT: 1198 + p->num_avail = ib.num_nnpa; 1199 + break; 1200 + } 1201 + p->num_avail += 1; /* Add xxx_ALL event */ 1202 + if (p->init) { 1203 + rc = p->init(p); 1204 + if (!rc) 1205 + ++install_ok; 1206 + } 1207 + } 1208 + return install_ok; 1209 + } 1210 + 1211 + static int __init pai_init(void) 1212 + { 1213 + /* Setup s390dbf facility */ 1214 + paidbg = debug_register("pai", 32, 256, 128); 1215 + if (!paidbg) { 1216 + pr_err("Registration of s390dbf pai failed\n"); 1217 + return -ENOMEM; 1218 + } 1219 + debug_register_view(paidbg, &debug_sprintf_view); 1220 + 1221 + if (!paipmu_setup()) { 1222 + /* No PMU registration, no need for debug buffer */ 1223 + debug_unregister_view(paidbg, &debug_sprintf_view); 1224 + debug_unregister(paidbg); 1225 + return -ENODEV; 1226 + } 1227 + return 0; 1228 + } 1229 + 1230 + device_initcall(pai_init);
-843
arch/s390/kernel/perf_pai_crypto.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * Performance event support - Processor Activity Instrumentation Facility 4 - * 5 - * Copyright IBM Corp. 2022 6 - * Author(s): Thomas Richter <tmricht@linux.ibm.com> 7 - */ 8 - #define KMSG_COMPONENT "pai_crypto" 9 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 - 11 - #include <linux/kernel.h> 12 - #include <linux/kernel_stat.h> 13 - #include <linux/percpu.h> 14 - #include <linux/notifier.h> 15 - #include <linux/init.h> 16 - #include <linux/io.h> 17 - #include <linux/perf_event.h> 18 - #include <asm/ctlreg.h> 19 - #include <asm/pai.h> 20 - #include <asm/debug.h> 21 - 22 - static debug_info_t *cfm_dbg; 23 - static unsigned int paicrypt_cnt; /* Size of the mapped counter sets */ 24 - /* extracted with QPACI instruction */ 25 - 26 - DEFINE_STATIC_KEY_FALSE(pai_key); 27 - 28 - struct pai_userdata { 29 - u16 num; 30 - u64 value; 31 - } __packed; 32 - 33 - struct paicrypt_map { 34 - unsigned long *page; /* Page for CPU to store counters */ 35 - struct pai_userdata *save; /* Page to store no-zero counters */ 36 - unsigned int active_events; /* # of PAI crypto users */ 37 - refcount_t refcnt; /* Reference count mapped buffers */ 38 - struct perf_event *event; /* Perf event for sampling */ 39 - struct list_head syswide_list; /* List system-wide sampling events */ 40 - }; 41 - 42 - struct paicrypt_mapptr { 43 - struct paicrypt_map *mapptr; 44 - }; 45 - 46 - static struct paicrypt_root { /* Anchor to per CPU data */ 47 - refcount_t refcnt; /* Overall active events */ 48 - struct paicrypt_mapptr __percpu *mapptr; 49 - } paicrypt_root; 50 - 51 - /* Free per CPU data when the last event is removed. 
*/ 52 - static void paicrypt_root_free(void) 53 - { 54 - if (refcount_dec_and_test(&paicrypt_root.refcnt)) { 55 - free_percpu(paicrypt_root.mapptr); 56 - paicrypt_root.mapptr = NULL; 57 - } 58 - debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__, 59 - refcount_read(&paicrypt_root.refcnt)); 60 - } 61 - 62 - /* 63 - * On initialization of first event also allocate per CPU data dynamically. 64 - * Start with an array of pointers, the array size is the maximum number of 65 - * CPUs possible, which might be larger than the number of CPUs currently 66 - * online. 67 - */ 68 - static int paicrypt_root_alloc(void) 69 - { 70 - if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) { 71 - /* The memory is already zeroed. */ 72 - paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr); 73 - if (!paicrypt_root.mapptr) 74 - return -ENOMEM; 75 - refcount_set(&paicrypt_root.refcnt, 1); 76 - } 77 - return 0; 78 - } 79 - 80 - /* Release the PMU if event is the last perf event */ 81 - static DEFINE_MUTEX(pai_reserve_mutex); 82 - 83 - /* Free all memory allocated for event counting/sampling setup */ 84 - static void paicrypt_free(struct paicrypt_mapptr *mp) 85 - { 86 - free_page((unsigned long)mp->mapptr->page); 87 - kvfree(mp->mapptr->save); 88 - kfree(mp->mapptr); 89 - mp->mapptr = NULL; 90 - } 91 - 92 - /* Adjust usage counters and remove allocated memory when all users are 93 - * gone. 
94 - */ 95 - static void paicrypt_event_destroy_cpu(struct perf_event *event, int cpu) 96 - { 97 - struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr, cpu); 98 - struct paicrypt_map *cpump = mp->mapptr; 99 - 100 - mutex_lock(&pai_reserve_mutex); 101 - debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d " 102 - "refcnt %u\n", __func__, event->attr.config, 103 - event->cpu, cpump->active_events, 104 - refcount_read(&cpump->refcnt)); 105 - if (refcount_dec_and_test(&cpump->refcnt)) 106 - paicrypt_free(mp); 107 - paicrypt_root_free(); 108 - mutex_unlock(&pai_reserve_mutex); 109 - } 110 - 111 - static void paicrypt_event_destroy(struct perf_event *event) 112 - { 113 - int cpu; 114 - 115 - static_branch_dec(&pai_key); 116 - free_page(PAI_SAVE_AREA(event)); 117 - if (event->cpu == -1) { 118 - struct cpumask *mask = PAI_CPU_MASK(event); 119 - 120 - for_each_cpu(cpu, mask) 121 - paicrypt_event_destroy_cpu(event, cpu); 122 - kfree(mask); 123 - } else { 124 - paicrypt_event_destroy_cpu(event, event->cpu); 125 - } 126 - } 127 - 128 - static u64 paicrypt_getctr(unsigned long *page, int nr, bool kernel) 129 - { 130 - if (kernel) 131 - nr += PAI_CRYPTO_MAXCTR; 132 - return page[nr]; 133 - } 134 - 135 - /* Read the counter values. Return value from location in CMP. For event 136 - * CRYPTO_ALL sum up all events. 
137 - */ 138 - static u64 paicrypt_getdata(struct perf_event *event, bool kernel) 139 - { 140 - struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr); 141 - struct paicrypt_map *cpump = mp->mapptr; 142 - u64 sum = 0; 143 - int i; 144 - 145 - if (event->attr.config != PAI_CRYPTO_BASE) { 146 - return paicrypt_getctr(cpump->page, 147 - event->attr.config - PAI_CRYPTO_BASE, 148 - kernel); 149 - } 150 - 151 - for (i = 1; i <= paicrypt_cnt; i++) { 152 - u64 val = paicrypt_getctr(cpump->page, i, kernel); 153 - 154 - if (!val) 155 - continue; 156 - sum += val; 157 - } 158 - return sum; 159 - } 160 - 161 - static u64 paicrypt_getall(struct perf_event *event) 162 - { 163 - u64 sum = 0; 164 - 165 - if (!event->attr.exclude_kernel) 166 - sum += paicrypt_getdata(event, true); 167 - if (!event->attr.exclude_user) 168 - sum += paicrypt_getdata(event, false); 169 - 170 - return sum; 171 - } 172 - 173 - /* Check concurrent access of counting and sampling for crypto events. 174 - * This function is called in process context and it is save to block. 175 - * When the event initialization functions fails, no other call back will 176 - * be invoked. 177 - * 178 - * Allocate the memory for the event. 179 - */ 180 - static int paicrypt_alloc_cpu(struct perf_event *event, int cpu) 181 - { 182 - struct paicrypt_map *cpump = NULL; 183 - struct paicrypt_mapptr *mp; 184 - int rc; 185 - 186 - mutex_lock(&pai_reserve_mutex); 187 - /* Allocate root node */ 188 - rc = paicrypt_root_alloc(); 189 - if (rc) 190 - goto unlock; 191 - 192 - /* Allocate node for this event */ 193 - mp = per_cpu_ptr(paicrypt_root.mapptr, cpu); 194 - cpump = mp->mapptr; 195 - if (!cpump) { /* Paicrypt_map allocated? */ 196 - rc = -ENOMEM; 197 - cpump = kzalloc(sizeof(*cpump), GFP_KERNEL); 198 - if (!cpump) 199 - goto undo; 200 - /* Allocate memory for counter page and counter extraction. 201 - * Only the first counting event has to allocate a page. 
202 - */ 203 - mp->mapptr = cpump; 204 - cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL); 205 - cpump->save = kvmalloc_array(paicrypt_cnt + 1, 206 - sizeof(struct pai_userdata), 207 - GFP_KERNEL); 208 - if (!cpump->page || !cpump->save) { 209 - paicrypt_free(mp); 210 - goto undo; 211 - } 212 - INIT_LIST_HEAD(&cpump->syswide_list); 213 - refcount_set(&cpump->refcnt, 1); 214 - rc = 0; 215 - } else { 216 - refcount_inc(&cpump->refcnt); 217 - } 218 - 219 - undo: 220 - if (rc) { 221 - /* Error in allocation of event, decrement anchor. Since 222 - * the event in not created, its destroy() function is never 223 - * invoked. Adjust the reference counter for the anchor. 224 - */ 225 - paicrypt_root_free(); 226 - } 227 - unlock: 228 - mutex_unlock(&pai_reserve_mutex); 229 - return rc; 230 - } 231 - 232 - static int paicrypt_alloc(struct perf_event *event) 233 - { 234 - struct cpumask *maskptr; 235 - int cpu, rc = -ENOMEM; 236 - 237 - maskptr = kzalloc(sizeof(*maskptr), GFP_KERNEL); 238 - if (!maskptr) 239 - goto out; 240 - 241 - for_each_online_cpu(cpu) { 242 - rc = paicrypt_alloc_cpu(event, cpu); 243 - if (rc) { 244 - for_each_cpu(cpu, maskptr) 245 - paicrypt_event_destroy_cpu(event, cpu); 246 - kfree(maskptr); 247 - goto out; 248 - } 249 - cpumask_set_cpu(cpu, maskptr); 250 - } 251 - 252 - /* 253 - * On error all cpumask are freed and all events have been destroyed. 254 - * Save of which CPUs data structures have been allocated for. 255 - * Release them in paicrypt_event_destroy call back function 256 - * for this event. 257 - */ 258 - PAI_CPU_MASK(event) = maskptr; 259 - rc = 0; 260 - out: 261 - return rc; 262 - } 263 - 264 - /* Might be called on different CPU than the one the event is intended for. 
*/ 265 - static int paicrypt_event_init(struct perf_event *event) 266 - { 267 - struct perf_event_attr *a = &event->attr; 268 - int rc = 0; 269 - 270 - /* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */ 271 - if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type) 272 - return -ENOENT; 273 - /* PAI crypto event must be in valid range, try others if not */ 274 - if (a->config < PAI_CRYPTO_BASE || 275 - a->config > PAI_CRYPTO_BASE + paicrypt_cnt) 276 - return -ENOENT; 277 - /* Allow only CRYPTO_ALL for sampling */ 278 - if (a->sample_period && a->config != PAI_CRYPTO_BASE) 279 - return -EINVAL; 280 - /* Get a page to store last counter values for sampling */ 281 - if (a->sample_period) { 282 - PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL); 283 - if (!PAI_SAVE_AREA(event)) { 284 - rc = -ENOMEM; 285 - goto out; 286 - } 287 - } 288 - 289 - if (event->cpu >= 0) 290 - rc = paicrypt_alloc_cpu(event, event->cpu); 291 - else 292 - rc = paicrypt_alloc(event); 293 - if (rc) { 294 - free_page(PAI_SAVE_AREA(event)); 295 - goto out; 296 - } 297 - event->destroy = paicrypt_event_destroy; 298 - 299 - if (a->sample_period) { 300 - a->sample_period = 1; 301 - a->freq = 0; 302 - /* Register for paicrypt_sched_task() to be called */ 303 - event->attach_state |= PERF_ATTACH_SCHED_CB; 304 - /* Add raw data which contain the memory mapped counters */ 305 - a->sample_type |= PERF_SAMPLE_RAW; 306 - /* Turn off inheritance */ 307 - a->inherit = 0; 308 - } 309 - 310 - static_branch_inc(&pai_key); 311 - out: 312 - return rc; 313 - } 314 - 315 - static void paicrypt_read(struct perf_event *event) 316 - { 317 - u64 prev, new, delta; 318 - 319 - prev = local64_read(&event->hw.prev_count); 320 - new = paicrypt_getall(event); 321 - local64_set(&event->hw.prev_count, new); 322 - delta = (prev <= new) ? 
new - prev 323 - : (-1ULL - prev) + new + 1; /* overflow */ 324 - local64_add(delta, &event->count); 325 - } 326 - 327 - static void paicrypt_start(struct perf_event *event, int flags) 328 - { 329 - struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr); 330 - struct paicrypt_map *cpump = mp->mapptr; 331 - u64 sum; 332 - 333 - if (!event->attr.sample_period) { /* Counting */ 334 - sum = paicrypt_getall(event); /* Get current value */ 335 - local64_set(&event->hw.prev_count, sum); 336 - } else { /* Sampling */ 337 - memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE); 338 - /* Enable context switch callback for system-wide sampling */ 339 - if (!(event->attach_state & PERF_ATTACH_TASK)) { 340 - list_add_tail(PAI_SWLIST(event), &cpump->syswide_list); 341 - perf_sched_cb_inc(event->pmu); 342 - } else { 343 - cpump->event = event; 344 - } 345 - } 346 - } 347 - 348 - static int paicrypt_add(struct perf_event *event, int flags) 349 - { 350 - struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr); 351 - struct paicrypt_map *cpump = mp->mapptr; 352 - unsigned long ccd; 353 - 354 - if (++cpump->active_events == 1) { 355 - ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET; 356 - WRITE_ONCE(get_lowcore()->ccd, ccd); 357 - local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT); 358 - } 359 - if (flags & PERF_EF_START) 360 - paicrypt_start(event, PERF_EF_RELOAD); 361 - event->hw.state = 0; 362 - return 0; 363 - } 364 - 365 - static void paicrypt_have_sample(struct perf_event *, struct paicrypt_map *); 366 - static void paicrypt_stop(struct perf_event *event, int flags) 367 - { 368 - struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr); 369 - struct paicrypt_map *cpump = mp->mapptr; 370 - 371 - if (!event->attr.sample_period) { /* Counting */ 372 - paicrypt_read(event); 373 - } else { /* Sampling */ 374 - if (!(event->attach_state & PERF_ATTACH_TASK)) { 375 - perf_sched_cb_dec(event->pmu); 376 - list_del(PAI_SWLIST(event)); 377 - } else 
{ 378 - paicrypt_have_sample(event, cpump); 379 - cpump->event = NULL; 380 - } 381 - } 382 - event->hw.state = PERF_HES_STOPPED; 383 - } 384 - 385 - static void paicrypt_del(struct perf_event *event, int flags) 386 - { 387 - struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr); 388 - struct paicrypt_map *cpump = mp->mapptr; 389 - 390 - paicrypt_stop(event, PERF_EF_UPDATE); 391 - if (--cpump->active_events == 0) { 392 - local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT); 393 - WRITE_ONCE(get_lowcore()->ccd, 0); 394 - } 395 - } 396 - 397 - /* Create raw data and save it in buffer. Calculate the delta for each 398 - * counter between this invocation and the last invocation. 399 - * Returns number of bytes copied. 400 - * Saves only entries with positive counter difference of the form 401 - * 2 bytes: Number of counter 402 - * 8 bytes: Value of counter 403 - */ 404 - static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page, 405 - unsigned long *page_old, bool exclude_user, 406 - bool exclude_kernel) 407 - { 408 - int i, outidx = 0; 409 - 410 - for (i = 1; i <= paicrypt_cnt; i++) { 411 - u64 val = 0, val_old = 0; 412 - 413 - if (!exclude_kernel) { 414 - val += paicrypt_getctr(page, i, true); 415 - val_old += paicrypt_getctr(page_old, i, true); 416 - } 417 - if (!exclude_user) { 418 - val += paicrypt_getctr(page, i, false); 419 - val_old += paicrypt_getctr(page_old, i, false); 420 - } 421 - if (val >= val_old) 422 - val -= val_old; 423 - else 424 - val = (~0ULL - val_old) + val + 1; 425 - if (val) { 426 - userdata[outidx].num = i; 427 - userdata[outidx].value = val; 428 - outidx++; 429 - } 430 - } 431 - return outidx * sizeof(struct pai_userdata); 432 - } 433 - 434 - static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump, 435 - struct perf_event *event) 436 - { 437 - struct perf_sample_data data; 438 - struct perf_raw_record raw; 439 - struct pt_regs regs; 440 - int overflow; 441 - 442 - /* Setup perf sample */ 443 - 
memset(&regs, 0, sizeof(regs)); 444 - memset(&raw, 0, sizeof(raw)); 445 - memset(&data, 0, sizeof(data)); 446 - perf_sample_data_init(&data, 0, event->hw.last_period); 447 - if (event->attr.sample_type & PERF_SAMPLE_TID) { 448 - data.tid_entry.pid = task_tgid_nr(current); 449 - data.tid_entry.tid = task_pid_nr(current); 450 - } 451 - if (event->attr.sample_type & PERF_SAMPLE_TIME) 452 - data.time = event->clock(); 453 - if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) 454 - data.id = event->id; 455 - if (event->attr.sample_type & PERF_SAMPLE_CPU) { 456 - data.cpu_entry.cpu = smp_processor_id(); 457 - data.cpu_entry.reserved = 0; 458 - } 459 - if (event->attr.sample_type & PERF_SAMPLE_RAW) { 460 - raw.frag.size = rawsize; 461 - raw.frag.data = cpump->save; 462 - perf_sample_save_raw_data(&data, event, &raw); 463 - } 464 - 465 - overflow = perf_event_overflow(event, &data, &regs); 466 - perf_event_update_userpage(event); 467 - /* Save crypto counter lowcore page after reading event data. */ 468 - memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE); 469 - return overflow; 470 - } 471 - 472 - /* Check if there is data to be saved on schedule out of a task. */ 473 - static void paicrypt_have_sample(struct perf_event *event, 474 - struct paicrypt_map *cpump) 475 - { 476 - size_t rawsize; 477 - 478 - if (!event) /* No event active */ 479 - return; 480 - rawsize = paicrypt_copy(cpump->save, cpump->page, 481 - (unsigned long *)PAI_SAVE_AREA(event), 482 - event->attr.exclude_user, 483 - event->attr.exclude_kernel); 484 - if (rawsize) /* No incremented counters */ 485 - paicrypt_push_sample(rawsize, cpump, event); 486 - } 487 - 488 - /* Check if there is data to be saved on schedule out of a task. 
*/ 489 - static void paicrypt_have_samples(void) 490 - { 491 - struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr); 492 - struct paicrypt_map *cpump = mp->mapptr; 493 - struct perf_event *event; 494 - 495 - list_for_each_entry(event, &cpump->syswide_list, hw.tp_list) 496 - paicrypt_have_sample(event, cpump); 497 - } 498 - 499 - /* Called on schedule-in and schedule-out. No access to event structure, 500 - * but for sampling only event CRYPTO_ALL is allowed. 501 - */ 502 - static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, 503 - struct task_struct *task, bool sched_in) 504 - { 505 - /* We started with a clean page on event installation. So read out 506 - * results on schedule_out and if page was dirty, save old values. 507 - */ 508 - if (!sched_in) 509 - paicrypt_have_samples(); 510 - } 511 - 512 - /* Attribute definitions for paicrypt interface. As with other CPU 513 - * Measurement Facilities, there is one attribute per mapped counter. 514 - * The number of mapped counters may vary per machine generation. Use 515 - * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction 516 - * to determine the number of mapped counters. The instructions returns 517 - * a positive number, which is the highest number of supported counters. 518 - * All counters less than this number are also supported, there are no 519 - * holes. A returned number of zero means no support for mapped counters. 520 - * 521 - * The identification of the counter is a unique number. The chosen range 522 - * is 0x1000 + offset in mapped kernel page. 523 - * All CPU Measurement Facility counters identifiers must be unique and 524 - * the numbers from 0 to 496 are already used for the CPU Measurement 525 - * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already 526 - * used for the CPU Measurement Sampling facility. 
527 - */ 528 - PMU_FORMAT_ATTR(event, "config:0-63"); 529 - 530 - static struct attribute *paicrypt_format_attr[] = { 531 - &format_attr_event.attr, 532 - NULL, 533 - }; 534 - 535 - static struct attribute_group paicrypt_events_group = { 536 - .name = "events", 537 - .attrs = NULL /* Filled in attr_event_init() */ 538 - }; 539 - 540 - static struct attribute_group paicrypt_format_group = { 541 - .name = "format", 542 - .attrs = paicrypt_format_attr, 543 - }; 544 - 545 - static const struct attribute_group *paicrypt_attr_groups[] = { 546 - &paicrypt_events_group, 547 - &paicrypt_format_group, 548 - NULL, 549 - }; 550 - 551 - /* Performance monitoring unit for mapped counters */ 552 - static struct pmu paicrypt = { 553 - .task_ctx_nr = perf_hw_context, 554 - .event_init = paicrypt_event_init, 555 - .add = paicrypt_add, 556 - .del = paicrypt_del, 557 - .start = paicrypt_start, 558 - .stop = paicrypt_stop, 559 - .read = paicrypt_read, 560 - .sched_task = paicrypt_sched_task, 561 - .attr_groups = paicrypt_attr_groups 562 - }; 563 - 564 - /* List of symbolic PAI counter names. 
*/ 565 - static const char * const paicrypt_ctrnames[] = { 566 - [0] = "CRYPTO_ALL", 567 - [1] = "KM_DEA", 568 - [2] = "KM_TDEA_128", 569 - [3] = "KM_TDEA_192", 570 - [4] = "KM_ENCRYPTED_DEA", 571 - [5] = "KM_ENCRYPTED_TDEA_128", 572 - [6] = "KM_ENCRYPTED_TDEA_192", 573 - [7] = "KM_AES_128", 574 - [8] = "KM_AES_192", 575 - [9] = "KM_AES_256", 576 - [10] = "KM_ENCRYPTED_AES_128", 577 - [11] = "KM_ENCRYPTED_AES_192", 578 - [12] = "KM_ENCRYPTED_AES_256", 579 - [13] = "KM_XTS_AES_128", 580 - [14] = "KM_XTS_AES_256", 581 - [15] = "KM_XTS_ENCRYPTED_AES_128", 582 - [16] = "KM_XTS_ENCRYPTED_AES_256", 583 - [17] = "KMC_DEA", 584 - [18] = "KMC_TDEA_128", 585 - [19] = "KMC_TDEA_192", 586 - [20] = "KMC_ENCRYPTED_DEA", 587 - [21] = "KMC_ENCRYPTED_TDEA_128", 588 - [22] = "KMC_ENCRYPTED_TDEA_192", 589 - [23] = "KMC_AES_128", 590 - [24] = "KMC_AES_192", 591 - [25] = "KMC_AES_256", 592 - [26] = "KMC_ENCRYPTED_AES_128", 593 - [27] = "KMC_ENCRYPTED_AES_192", 594 - [28] = "KMC_ENCRYPTED_AES_256", 595 - [29] = "KMC_PRNG", 596 - [30] = "KMA_GCM_AES_128", 597 - [31] = "KMA_GCM_AES_192", 598 - [32] = "KMA_GCM_AES_256", 599 - [33] = "KMA_GCM_ENCRYPTED_AES_128", 600 - [34] = "KMA_GCM_ENCRYPTED_AES_192", 601 - [35] = "KMA_GCM_ENCRYPTED_AES_256", 602 - [36] = "KMF_DEA", 603 - [37] = "KMF_TDEA_128", 604 - [38] = "KMF_TDEA_192", 605 - [39] = "KMF_ENCRYPTED_DEA", 606 - [40] = "KMF_ENCRYPTED_TDEA_128", 607 - [41] = "KMF_ENCRYPTED_TDEA_192", 608 - [42] = "KMF_AES_128", 609 - [43] = "KMF_AES_192", 610 - [44] = "KMF_AES_256", 611 - [45] = "KMF_ENCRYPTED_AES_128", 612 - [46] = "KMF_ENCRYPTED_AES_192", 613 - [47] = "KMF_ENCRYPTED_AES_256", 614 - [48] = "KMCTR_DEA", 615 - [49] = "KMCTR_TDEA_128", 616 - [50] = "KMCTR_TDEA_192", 617 - [51] = "KMCTR_ENCRYPTED_DEA", 618 - [52] = "KMCTR_ENCRYPTED_TDEA_128", 619 - [53] = "KMCTR_ENCRYPTED_TDEA_192", 620 - [54] = "KMCTR_AES_128", 621 - [55] = "KMCTR_AES_192", 622 - [56] = "KMCTR_AES_256", 623 - [57] = "KMCTR_ENCRYPTED_AES_128", 624 - [58] = 
"KMCTR_ENCRYPTED_AES_192", 625 - [59] = "KMCTR_ENCRYPTED_AES_256", 626 - [60] = "KMO_DEA", 627 - [61] = "KMO_TDEA_128", 628 - [62] = "KMO_TDEA_192", 629 - [63] = "KMO_ENCRYPTED_DEA", 630 - [64] = "KMO_ENCRYPTED_TDEA_128", 631 - [65] = "KMO_ENCRYPTED_TDEA_192", 632 - [66] = "KMO_AES_128", 633 - [67] = "KMO_AES_192", 634 - [68] = "KMO_AES_256", 635 - [69] = "KMO_ENCRYPTED_AES_128", 636 - [70] = "KMO_ENCRYPTED_AES_192", 637 - [71] = "KMO_ENCRYPTED_AES_256", 638 - [72] = "KIMD_SHA_1", 639 - [73] = "KIMD_SHA_256", 640 - [74] = "KIMD_SHA_512", 641 - [75] = "KIMD_SHA3_224", 642 - [76] = "KIMD_SHA3_256", 643 - [77] = "KIMD_SHA3_384", 644 - [78] = "KIMD_SHA3_512", 645 - [79] = "KIMD_SHAKE_128", 646 - [80] = "KIMD_SHAKE_256", 647 - [81] = "KIMD_GHASH", 648 - [82] = "KLMD_SHA_1", 649 - [83] = "KLMD_SHA_256", 650 - [84] = "KLMD_SHA_512", 651 - [85] = "KLMD_SHA3_224", 652 - [86] = "KLMD_SHA3_256", 653 - [87] = "KLMD_SHA3_384", 654 - [88] = "KLMD_SHA3_512", 655 - [89] = "KLMD_SHAKE_128", 656 - [90] = "KLMD_SHAKE_256", 657 - [91] = "KMAC_DEA", 658 - [92] = "KMAC_TDEA_128", 659 - [93] = "KMAC_TDEA_192", 660 - [94] = "KMAC_ENCRYPTED_DEA", 661 - [95] = "KMAC_ENCRYPTED_TDEA_128", 662 - [96] = "KMAC_ENCRYPTED_TDEA_192", 663 - [97] = "KMAC_AES_128", 664 - [98] = "KMAC_AES_192", 665 - [99] = "KMAC_AES_256", 666 - [100] = "KMAC_ENCRYPTED_AES_128", 667 - [101] = "KMAC_ENCRYPTED_AES_192", 668 - [102] = "KMAC_ENCRYPTED_AES_256", 669 - [103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA", 670 - [104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128", 671 - [105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192", 672 - [106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA", 673 - [107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128", 674 - [108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192", 675 - [109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128", 676 - [110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192", 677 - [111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256", 678 - [112] = 
"PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128", 679 - [113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192", 680 - [114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256", 681 - [115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128", 682 - [116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256", 683 - [117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128", 684 - [118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256", 685 - [119] = "PCC_SCALAR_MULTIPLY_P256", 686 - [120] = "PCC_SCALAR_MULTIPLY_P384", 687 - [121] = "PCC_SCALAR_MULTIPLY_P521", 688 - [122] = "PCC_SCALAR_MULTIPLY_ED25519", 689 - [123] = "PCC_SCALAR_MULTIPLY_ED448", 690 - [124] = "PCC_SCALAR_MULTIPLY_X25519", 691 - [125] = "PCC_SCALAR_MULTIPLY_X448", 692 - [126] = "PRNO_SHA_512_DRNG", 693 - [127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO", 694 - [128] = "PRNO_TRNG", 695 - [129] = "KDSA_ECDSA_VERIFY_P256", 696 - [130] = "KDSA_ECDSA_VERIFY_P384", 697 - [131] = "KDSA_ECDSA_VERIFY_P521", 698 - [132] = "KDSA_ECDSA_SIGN_P256", 699 - [133] = "KDSA_ECDSA_SIGN_P384", 700 - [134] = "KDSA_ECDSA_SIGN_P521", 701 - [135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256", 702 - [136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384", 703 - [137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521", 704 - [138] = "KDSA_EDDSA_VERIFY_ED25519", 705 - [139] = "KDSA_EDDSA_VERIFY_ED448", 706 - [140] = "KDSA_EDDSA_SIGN_ED25519", 707 - [141] = "KDSA_EDDSA_SIGN_ED448", 708 - [142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519", 709 - [143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448", 710 - [144] = "PCKMO_ENCRYPT_DEA_KEY", 711 - [145] = "PCKMO_ENCRYPT_TDEA_128_KEY", 712 - [146] = "PCKMO_ENCRYPT_TDEA_192_KEY", 713 - [147] = "PCKMO_ENCRYPT_AES_128_KEY", 714 - [148] = "PCKMO_ENCRYPT_AES_192_KEY", 715 - [149] = "PCKMO_ENCRYPT_AES_256_KEY", 716 - [150] = "PCKMO_ENCRYPT_ECC_P256_KEY", 717 - [151] = "PCKMO_ENCRYPT_ECC_P384_KEY", 718 - [152] = "PCKMO_ENCRYPT_ECC_P521_KEY", 719 - [153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY", 720 - [154] = "PCKMO_ENCRYPT_ECC_ED448_KEY", 721 - [155] = 
"IBM_RESERVED_155", 722 - [156] = "IBM_RESERVED_156", 723 - [157] = "KM_FULL_XTS_AES_128", 724 - [158] = "KM_FULL_XTS_AES_256", 725 - [159] = "KM_FULL_XTS_ENCRYPTED_AES_128", 726 - [160] = "KM_FULL_XTS_ENCRYPTED_AES_256", 727 - [161] = "KMAC_HMAC_SHA_224", 728 - [162] = "KMAC_HMAC_SHA_256", 729 - [163] = "KMAC_HMAC_SHA_384", 730 - [164] = "KMAC_HMAC_SHA_512", 731 - [165] = "KMAC_HMAC_ENCRYPTED_SHA_224", 732 - [166] = "KMAC_HMAC_ENCRYPTED_SHA_256", 733 - [167] = "KMAC_HMAC_ENCRYPTED_SHA_384", 734 - [168] = "KMAC_HMAC_ENCRYPTED_SHA_512", 735 - [169] = "PCKMO_ENCRYPT_HMAC_512_KEY", 736 - [170] = "PCKMO_ENCRYPT_HMAC_1024_KEY", 737 - [171] = "PCKMO_ENCRYPT_AES_XTS_128", 738 - [172] = "PCKMO_ENCRYPT_AES_XTS_256", 739 - }; 740 - 741 - static void __init attr_event_free(struct attribute **attrs, int num) 742 - { 743 - struct perf_pmu_events_attr *pa; 744 - int i; 745 - 746 - for (i = 0; i < num; i++) { 747 - struct device_attribute *dap; 748 - 749 - dap = container_of(attrs[i], struct device_attribute, attr); 750 - pa = container_of(dap, struct perf_pmu_events_attr, attr); 751 - kfree(pa); 752 - } 753 - kfree(attrs); 754 - } 755 - 756 - static int __init attr_event_init_one(struct attribute **attrs, int num) 757 - { 758 - struct perf_pmu_events_attr *pa; 759 - 760 - /* Index larger than array_size, no counter name available */ 761 - if (num >= ARRAY_SIZE(paicrypt_ctrnames)) { 762 - attrs[num] = NULL; 763 - return 0; 764 - } 765 - 766 - pa = kzalloc(sizeof(*pa), GFP_KERNEL); 767 - if (!pa) 768 - return -ENOMEM; 769 - 770 - sysfs_attr_init(&pa->attr.attr); 771 - pa->id = PAI_CRYPTO_BASE + num; 772 - pa->attr.attr.name = paicrypt_ctrnames[num]; 773 - pa->attr.attr.mode = 0444; 774 - pa->attr.show = cpumf_events_sysfs_show; 775 - pa->attr.store = NULL; 776 - attrs[num] = &pa->attr.attr; 777 - return 0; 778 - } 779 - 780 - /* Create PMU sysfs event attributes on the fly. 
*/ 781 - static int __init attr_event_init(void) 782 - { 783 - struct attribute **attrs; 784 - int ret, i; 785 - 786 - attrs = kmalloc_array(paicrypt_cnt + 2, sizeof(*attrs), GFP_KERNEL); 787 - if (!attrs) 788 - return -ENOMEM; 789 - for (i = 0; i <= paicrypt_cnt; i++) { 790 - ret = attr_event_init_one(attrs, i); 791 - if (ret) { 792 - attr_event_free(attrs, i); 793 - return ret; 794 - } 795 - } 796 - attrs[i] = NULL; 797 - paicrypt_events_group.attrs = attrs; 798 - return 0; 799 - } 800 - 801 - static int __init paicrypt_init(void) 802 - { 803 - struct qpaci_info_block ib; 804 - int rc; 805 - 806 - if (!test_facility(196)) 807 - return 0; 808 - 809 - qpaci(&ib); 810 - paicrypt_cnt = ib.num_cc; 811 - if (paicrypt_cnt == 0) 812 - return 0; 813 - if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR) { 814 - pr_err("Too many PMU pai_crypto counters %d\n", paicrypt_cnt); 815 - return -E2BIG; 816 - } 817 - 818 - rc = attr_event_init(); /* Export known PAI crypto events */ 819 - if (rc) { 820 - pr_err("Creation of PMU pai_crypto /sysfs failed\n"); 821 - return rc; 822 - } 823 - 824 - /* Setup s390dbf facility */ 825 - cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128); 826 - if (!cfm_dbg) { 827 - pr_err("Registration of s390dbf pai_crypto failed\n"); 828 - return -ENOMEM; 829 - } 830 - debug_register_view(cfm_dbg, &debug_sprintf_view); 831 - 832 - rc = perf_pmu_register(&paicrypt, "pai_crypto", -1); 833 - if (rc) { 834 - pr_err("Registering the pai_crypto PMU failed with rc=%i\n", 835 - rc); 836 - debug_unregister_view(cfm_dbg, &debug_sprintf_view); 837 - debug_unregister(cfm_dbg); 838 - return rc; 839 - } 840 - return 0; 841 - } 842 - 843 - device_initcall(paicrypt_init);
-756
arch/s390/kernel/perf_pai_ext.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * Performance event support - Processor Activity Instrumentation Extension 4 - * Facility 5 - * 6 - * Copyright IBM Corp. 2022 7 - * Author(s): Thomas Richter <tmricht@linux.ibm.com> 8 - */ 9 - #define KMSG_COMPONENT "pai_ext" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11 - 12 - #include <linux/kernel.h> 13 - #include <linux/kernel_stat.h> 14 - #include <linux/percpu.h> 15 - #include <linux/notifier.h> 16 - #include <linux/init.h> 17 - #include <linux/io.h> 18 - #include <linux/perf_event.h> 19 - #include <asm/ctlreg.h> 20 - #include <asm/pai.h> 21 - #include <asm/debug.h> 22 - 23 - #define PAIE1_CB_SZ 0x200 /* Size of PAIE1 control block */ 24 - #define PAIE1_CTRBLOCK_SZ 0x400 /* Size of PAIE1 counter blocks */ 25 - 26 - static debug_info_t *paiext_dbg; 27 - static unsigned int paiext_cnt; /* Extracted with QPACI instruction */ 28 - 29 - struct pai_userdata { 30 - u16 num; 31 - u64 value; 32 - } __packed; 33 - 34 - /* Create the PAI extension 1 control block area. 35 - * The PAI extension control block 1 is pointed to by lowcore 36 - * address 0x1508 for each CPU. This control block is 512 bytes in size 37 - * and requires a 512 byte boundary alignment. 
38 - */ 39 - struct paiext_cb { /* PAI extension 1 control block */ 40 - u64 header; /* Not used */ 41 - u64 reserved1; 42 - u64 acc; /* Addr to analytics counter control block */ 43 - u8 reserved2[488]; 44 - } __packed; 45 - 46 - struct paiext_map { 47 - unsigned long *area; /* Area for CPU to store counters */ 48 - struct pai_userdata *save; /* Area to store non-zero counters */ 49 - unsigned int active_events; /* # of PAI Extension users */ 50 - refcount_t refcnt; 51 - struct perf_event *event; /* Perf event for sampling */ 52 - struct paiext_cb *paiext_cb; /* PAI extension control block area */ 53 - struct list_head syswide_list; /* List system-wide sampling events */ 54 - }; 55 - 56 - struct paiext_mapptr { 57 - struct paiext_map *mapptr; 58 - }; 59 - 60 - static struct paiext_root { /* Anchor to per CPU data */ 61 - refcount_t refcnt; /* Overall active events */ 62 - struct paiext_mapptr __percpu *mapptr; 63 - } paiext_root; 64 - 65 - /* Free per CPU data when the last event is removed. */ 66 - static void paiext_root_free(void) 67 - { 68 - if (refcount_dec_and_test(&paiext_root.refcnt)) { 69 - free_percpu(paiext_root.mapptr); 70 - paiext_root.mapptr = NULL; 71 - } 72 - debug_sprintf_event(paiext_dbg, 5, "%s root.refcount %d\n", __func__, 73 - refcount_read(&paiext_root.refcnt)); 74 - } 75 - 76 - /* On initialization of first event also allocate per CPU data dynamically. 77 - * Start with an array of pointers, the array size is the maximum number of 78 - * CPUs possible, which might be larger than the number of CPUs currently 79 - * online. 80 - */ 81 - static int paiext_root_alloc(void) 82 - { 83 - if (!refcount_inc_not_zero(&paiext_root.refcnt)) { 84 - /* The memory is already zeroed. */ 85 - paiext_root.mapptr = alloc_percpu(struct paiext_mapptr); 86 - if (!paiext_root.mapptr) { 87 - /* Returning without refcnt adjustment is ok. The 88 - * error code is handled by paiext_alloc() which 89 - * decrements refcnt when an event can not be 90 - * created. 
91 - */ 92 - return -ENOMEM; 93 - } 94 - refcount_set(&paiext_root.refcnt, 1); 95 - } 96 - return 0; 97 - } 98 - 99 - /* Protects against concurrent increment of sampler and counter member 100 - * increments at the same time and prohibits concurrent execution of 101 - * counting and sampling events. 102 - * Ensures that analytics counter block is deallocated only when the 103 - * sampling and counting on that cpu is zero. 104 - * For details see paiext_alloc(). 105 - */ 106 - static DEFINE_MUTEX(paiext_reserve_mutex); 107 - 108 - /* Free all memory allocated for event counting/sampling setup */ 109 - static void paiext_free(struct paiext_mapptr *mp) 110 - { 111 - kfree(mp->mapptr->area); 112 - kfree(mp->mapptr->paiext_cb); 113 - kvfree(mp->mapptr->save); 114 - kfree(mp->mapptr); 115 - mp->mapptr = NULL; 116 - } 117 - 118 - /* Release the PMU if event is the last perf event */ 119 - static void paiext_event_destroy_cpu(struct perf_event *event, int cpu) 120 - { 121 - struct paiext_mapptr *mp = per_cpu_ptr(paiext_root.mapptr, cpu); 122 - struct paiext_map *cpump = mp->mapptr; 123 - 124 - mutex_lock(&paiext_reserve_mutex); 125 - if (refcount_dec_and_test(&cpump->refcnt)) /* Last reference gone */ 126 - paiext_free(mp); 127 - paiext_root_free(); 128 - mutex_unlock(&paiext_reserve_mutex); 129 - } 130 - 131 - static void paiext_event_destroy(struct perf_event *event) 132 - { 133 - int cpu; 134 - 135 - free_page(PAI_SAVE_AREA(event)); 136 - if (event->cpu == -1) { 137 - struct cpumask *mask = PAI_CPU_MASK(event); 138 - 139 - for_each_cpu(cpu, mask) 140 - paiext_event_destroy_cpu(event, cpu); 141 - kfree(mask); 142 - } else { 143 - paiext_event_destroy_cpu(event, event->cpu); 144 - } 145 - debug_sprintf_event(paiext_dbg, 4, "%s cpu %d\n", __func__, 146 - event->cpu); 147 - } 148 - 149 - /* Used to avoid races in checking concurrent access of counting and 150 - * sampling for pai_extension events. 
151 - * 152 - * Only one instance of event pai_ext/NNPA_ALL/ for sampling is 153 - * allowed and when this event is running, no counting event is allowed. 154 - * Several counting events are allowed in parallel, but no sampling event 155 - * is allowed while one (or more) counting events are running. 156 - * 157 - * This function is called in process context and it is safe to block. 158 - * When the event initialization functions fails, no other call back will 159 - * be invoked. 160 - * 161 - * Allocate the memory for the event. 162 - */ 163 - static int paiext_alloc_cpu(struct perf_event *event, int cpu) 164 - { 165 - struct paiext_mapptr *mp; 166 - struct paiext_map *cpump; 167 - int rc; 168 - 169 - mutex_lock(&paiext_reserve_mutex); 170 - rc = paiext_root_alloc(); 171 - if (rc) 172 - goto unlock; 173 - 174 - mp = per_cpu_ptr(paiext_root.mapptr, cpu); 175 - cpump = mp->mapptr; 176 - if (!cpump) { /* Paiext_map allocated? */ 177 - rc = -ENOMEM; 178 - cpump = kzalloc(sizeof(*cpump), GFP_KERNEL); 179 - if (!cpump) 180 - goto undo; 181 - 182 - /* Allocate memory for counter area and counter extraction. 183 - * These are 184 - * - a 512 byte block and requires 512 byte boundary alignment. 185 - * - a 1KB byte block and requires 1KB boundary alignment. 186 - * Only the first counting event has to allocate the area. 187 - * 188 - * Note: This works with commit 59bb47985c1d by default. 189 - * Backporting this to kernels without this commit might 190 - * need adjustment. 
191 - */ 192 - mp->mapptr = cpump; 193 - cpump->area = kzalloc(PAIE1_CTRBLOCK_SZ, GFP_KERNEL); 194 - cpump->paiext_cb = kzalloc(PAIE1_CB_SZ, GFP_KERNEL); 195 - cpump->save = kvmalloc_array(paiext_cnt + 1, 196 - sizeof(struct pai_userdata), 197 - GFP_KERNEL); 198 - if (!cpump->save || !cpump->area || !cpump->paiext_cb) { 199 - paiext_free(mp); 200 - goto undo; 201 - } 202 - INIT_LIST_HEAD(&cpump->syswide_list); 203 - refcount_set(&cpump->refcnt, 1); 204 - rc = 0; 205 - } else { 206 - refcount_inc(&cpump->refcnt); 207 - } 208 - 209 - undo: 210 - if (rc) { 211 - /* Error in allocation of event, decrement anchor. Since 212 - * the event in not created, its destroy() function is never 213 - * invoked. Adjust the reference counter for the anchor. 214 - */ 215 - paiext_root_free(); 216 - } 217 - unlock: 218 - mutex_unlock(&paiext_reserve_mutex); 219 - /* If rc is non-zero, no increment of counter/sampler was done. */ 220 - return rc; 221 - } 222 - 223 - static int paiext_alloc(struct perf_event *event) 224 - { 225 - struct cpumask *maskptr; 226 - int cpu, rc = -ENOMEM; 227 - 228 - maskptr = kzalloc(sizeof(*maskptr), GFP_KERNEL); 229 - if (!maskptr) 230 - goto out; 231 - 232 - for_each_online_cpu(cpu) { 233 - rc = paiext_alloc_cpu(event, cpu); 234 - if (rc) { 235 - for_each_cpu(cpu, maskptr) 236 - paiext_event_destroy_cpu(event, cpu); 237 - kfree(maskptr); 238 - goto out; 239 - } 240 - cpumask_set_cpu(cpu, maskptr); 241 - } 242 - 243 - /* 244 - * On error all cpumask are freed and all events have been destroyed. 245 - * Save of which CPUs data structures have been allocated for. 246 - * Release them in paicrypt_event_destroy call back function 247 - * for this event. 248 - */ 249 - PAI_CPU_MASK(event) = maskptr; 250 - rc = 0; 251 - out: 252 - return rc; 253 - } 254 - 255 - /* The PAI extension 1 control block supports up to 128 entries. Return 256 - * the index within PAIE1_CB given the event number. Also validate event 257 - * number. 
258 - */ 259 - static int paiext_event_valid(struct perf_event *event) 260 - { 261 - u64 cfg = event->attr.config; 262 - 263 - if (cfg >= PAI_NNPA_BASE && cfg <= PAI_NNPA_BASE + paiext_cnt) { 264 - /* Offset NNPA in paiext_cb */ 265 - event->hw.config_base = offsetof(struct paiext_cb, acc); 266 - return 0; 267 - } 268 - return -ENOENT; 269 - } 270 - 271 - /* Might be called on different CPU than the one the event is intended for. */ 272 - static int paiext_event_init(struct perf_event *event) 273 - { 274 - struct perf_event_attr *a = &event->attr; 275 - int rc; 276 - 277 - /* PMU pai_ext registered as PERF_TYPE_RAW, check event type */ 278 - if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type) 279 - return -ENOENT; 280 - /* PAI extension event must be valid and in supported range */ 281 - rc = paiext_event_valid(event); 282 - if (rc) 283 - return rc; 284 - /* Allow only event NNPA_ALL for sampling. */ 285 - if (a->sample_period && a->config != PAI_NNPA_BASE) 286 - return -EINVAL; 287 - /* Prohibit exclude_user event selection */ 288 - if (a->exclude_user) 289 - return -EINVAL; 290 - /* Get a page to store last counter values for sampling */ 291 - if (a->sample_period) { 292 - PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL); 293 - if (!PAI_SAVE_AREA(event)) 294 - return -ENOMEM; 295 - } 296 - 297 - if (event->cpu >= 0) 298 - rc = paiext_alloc_cpu(event, event->cpu); 299 - else 300 - rc = paiext_alloc(event); 301 - if (rc) { 302 - free_page(PAI_SAVE_AREA(event)); 303 - return rc; 304 - } 305 - event->destroy = paiext_event_destroy; 306 - 307 - if (a->sample_period) { 308 - a->sample_period = 1; 309 - a->freq = 0; 310 - /* Register for paicrypt_sched_task() to be called */ 311 - event->attach_state |= PERF_ATTACH_SCHED_CB; 312 - /* Add raw data which are the memory mapped counters */ 313 - a->sample_type |= PERF_SAMPLE_RAW; 314 - /* Turn off inheritance */ 315 - a->inherit = 0; 316 - } 317 - 318 - return 0; 319 - } 320 - 321 - static u64 
paiext_getctr(unsigned long *area, int nr) 322 - { 323 - return area[nr]; 324 - } 325 - 326 - /* Read the counter values. Return value from location in buffer. For event 327 - * NNPA_ALL sum up all events. 328 - */ 329 - static u64 paiext_getdata(struct perf_event *event) 330 - { 331 - struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr); 332 - struct paiext_map *cpump = mp->mapptr; 333 - u64 sum = 0; 334 - int i; 335 - 336 - if (event->attr.config != PAI_NNPA_BASE) 337 - return paiext_getctr(cpump->area, 338 - event->attr.config - PAI_NNPA_BASE); 339 - 340 - for (i = 1; i <= paiext_cnt; i++) 341 - sum += paiext_getctr(cpump->area, i); 342 - 343 - return sum; 344 - } 345 - 346 - static u64 paiext_getall(struct perf_event *event) 347 - { 348 - return paiext_getdata(event); 349 - } 350 - 351 - static void paiext_read(struct perf_event *event) 352 - { 353 - u64 prev, new, delta; 354 - 355 - prev = local64_read(&event->hw.prev_count); 356 - new = paiext_getall(event); 357 - local64_set(&event->hw.prev_count, new); 358 - delta = new - prev; 359 - local64_add(delta, &event->count); 360 - } 361 - 362 - static void paiext_start(struct perf_event *event, int flags) 363 - { 364 - struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr); 365 - struct paiext_map *cpump = mp->mapptr; 366 - u64 sum; 367 - 368 - if (!event->attr.sample_period) { /* Counting */ 369 - sum = paiext_getall(event); /* Get current value */ 370 - local64_set(&event->hw.prev_count, sum); 371 - } else { /* Sampling */ 372 - memcpy((void *)PAI_SAVE_AREA(event), cpump->area, 373 - PAIE1_CTRBLOCK_SZ); 374 - /* Enable context switch callback for system-wide sampling */ 375 - if (!(event->attach_state & PERF_ATTACH_TASK)) { 376 - list_add_tail(PAI_SWLIST(event), &cpump->syswide_list); 377 - perf_sched_cb_inc(event->pmu); 378 - } else { 379 - cpump->event = event; 380 - } 381 - } 382 - } 383 - 384 - static int paiext_add(struct perf_event *event, int flags) 385 - { 386 - struct paiext_mapptr *mp = 
this_cpu_ptr(paiext_root.mapptr); 387 - struct paiext_map *cpump = mp->mapptr; 388 - struct paiext_cb *pcb = cpump->paiext_cb; 389 - 390 - if (++cpump->active_events == 1) { 391 - get_lowcore()->aicd = virt_to_phys(cpump->paiext_cb); 392 - pcb->acc = virt_to_phys(cpump->area) | 0x1; 393 - /* Enable CPU instruction lookup for PAIE1 control block */ 394 - local_ctl_set_bit(0, CR0_PAI_EXTENSION_BIT); 395 - } 396 - if (flags & PERF_EF_START) 397 - paiext_start(event, PERF_EF_RELOAD); 398 - event->hw.state = 0; 399 - return 0; 400 - } 401 - 402 - static void paiext_have_sample(struct perf_event *, struct paiext_map *); 403 - static void paiext_stop(struct perf_event *event, int flags) 404 - { 405 - struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr); 406 - struct paiext_map *cpump = mp->mapptr; 407 - 408 - if (!event->attr.sample_period) { /* Counting */ 409 - paiext_read(event); 410 - } else { /* Sampling */ 411 - if (!(event->attach_state & PERF_ATTACH_TASK)) { 412 - list_del(PAI_SWLIST(event)); 413 - perf_sched_cb_dec(event->pmu); 414 - } else { 415 - paiext_have_sample(event, cpump); 416 - cpump->event = NULL; 417 - } 418 - } 419 - event->hw.state = PERF_HES_STOPPED; 420 - } 421 - 422 - static void paiext_del(struct perf_event *event, int flags) 423 - { 424 - struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr); 425 - struct paiext_map *cpump = mp->mapptr; 426 - struct paiext_cb *pcb = cpump->paiext_cb; 427 - 428 - paiext_stop(event, PERF_EF_UPDATE); 429 - if (--cpump->active_events == 0) { 430 - /* Disable CPU instruction lookup for PAIE1 control block */ 431 - local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT); 432 - pcb->acc = 0; 433 - get_lowcore()->aicd = 0; 434 - } 435 - } 436 - 437 - /* Create raw data and save it in buffer. Returns number of bytes copied. 
438 - * Saves only positive counter entries of the form 439 - * 2 bytes: Number of counter 440 - * 8 bytes: Value of counter 441 - */ 442 - static size_t paiext_copy(struct pai_userdata *userdata, unsigned long *area, 443 - unsigned long *area_old) 444 - { 445 - int i, outidx = 0; 446 - 447 - for (i = 1; i <= paiext_cnt; i++) { 448 - u64 val = paiext_getctr(area, i); 449 - u64 val_old = paiext_getctr(area_old, i); 450 - 451 - if (val >= val_old) 452 - val -= val_old; 453 - else 454 - val = (~0ULL - val_old) + val + 1; 455 - if (val) { 456 - userdata[outidx].num = i; 457 - userdata[outidx].value = val; 458 - outidx++; 459 - } 460 - } 461 - return outidx * sizeof(*userdata); 462 - } 463 - 464 - /* Write sample when one or more counters values are nonzero. 465 - * 466 - * Note: The function paiext_sched_task() and paiext_push_sample() are not 467 - * invoked after function paiext_del() has been called because of function 468 - * perf_sched_cb_dec(). 469 - * The function paiext_sched_task() and paiext_push_sample() are only 470 - * called when sampling is active. Function perf_sched_cb_inc() 471 - * has been invoked to install function paiext_sched_task() as call back 472 - * to run at context switch time (see paiext_add()). 473 - * 474 - * This causes function perf_event_context_sched_out() and 475 - * perf_event_context_sched_in() to check whether the PMU has installed an 476 - * sched_task() callback. That callback is not active after paiext_del() 477 - * returns and has deleted the event on that CPU. 
478 - */ 479 - static int paiext_push_sample(size_t rawsize, struct paiext_map *cpump, 480 - struct perf_event *event) 481 - { 482 - struct perf_sample_data data; 483 - struct perf_raw_record raw; 484 - struct pt_regs regs; 485 - int overflow; 486 - 487 - /* Setup perf sample */ 488 - memset(&regs, 0, sizeof(regs)); 489 - memset(&raw, 0, sizeof(raw)); 490 - memset(&data, 0, sizeof(data)); 491 - perf_sample_data_init(&data, 0, event->hw.last_period); 492 - if (event->attr.sample_type & PERF_SAMPLE_TID) { 493 - data.tid_entry.pid = task_tgid_nr(current); 494 - data.tid_entry.tid = task_pid_nr(current); 495 - } 496 - if (event->attr.sample_type & PERF_SAMPLE_TIME) 497 - data.time = event->clock(); 498 - if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) 499 - data.id = event->id; 500 - if (event->attr.sample_type & PERF_SAMPLE_CPU) 501 - data.cpu_entry.cpu = smp_processor_id(); 502 - if (event->attr.sample_type & PERF_SAMPLE_RAW) { 503 - raw.frag.size = rawsize; 504 - raw.frag.data = cpump->save; 505 - perf_sample_save_raw_data(&data, event, &raw); 506 - } 507 - 508 - overflow = perf_event_overflow(event, &data, &regs); 509 - perf_event_update_userpage(event); 510 - /* Save NNPA lowcore area after read in event */ 511 - memcpy((void *)PAI_SAVE_AREA(event), cpump->area, 512 - PAIE1_CTRBLOCK_SZ); 513 - return overflow; 514 - } 515 - 516 - /* Check if there is data to be saved on schedule out of a task. */ 517 - static void paiext_have_sample(struct perf_event *event, 518 - struct paiext_map *cpump) 519 - { 520 - size_t rawsize; 521 - 522 - if (!event) 523 - return; 524 - rawsize = paiext_copy(cpump->save, cpump->area, 525 - (unsigned long *)PAI_SAVE_AREA(event)); 526 - if (rawsize) /* Incremented counters */ 527 - paiext_push_sample(rawsize, cpump, event); 528 - } 529 - 530 - /* Check if there is data to be saved on schedule out of a task. 
*/ 531 - static void paiext_have_samples(void) 532 - { 533 - struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr); 534 - struct paiext_map *cpump = mp->mapptr; 535 - struct perf_event *event; 536 - 537 - list_for_each_entry(event, &cpump->syswide_list, hw.tp_list) 538 - paiext_have_sample(event, cpump); 539 - } 540 - 541 - /* Called on schedule-in and schedule-out. No access to event structure, 542 - * but for sampling only event NNPA_ALL is allowed. 543 - */ 544 - static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, 545 - struct task_struct *task, bool sched_in) 546 - { 547 - /* We started with a clean page on event installation. So read out 548 - * results on schedule_out and if page was dirty, save old values. 549 - */ 550 - if (!sched_in) 551 - paiext_have_samples(); 552 - } 553 - 554 - /* Attribute definitions for pai extension1 interface. As with other CPU 555 - * Measurement Facilities, there is one attribute per mapped counter. 556 - * The number of mapped counters may vary per machine generation. Use 557 - * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction 558 - * to determine the number of mapped counters. The instructions returns 559 - * a positive number, which is the highest number of supported counters. 560 - * All counters less than this number are also supported, there are no 561 - * holes. A returned number of zero means no support for mapped counters. 562 - * 563 - * The identification of the counter is a unique number. The chosen range 564 - * is 0x1800 + offset in mapped kernel page. 565 - * All CPU Measurement Facility counters identifiers must be unique and 566 - * the numbers from 0 to 496 are already used for the CPU Measurement 567 - * Counter facility. Number 0x1000 to 0x103e are used for PAI cryptography 568 - * counters. 569 - * Numbers 0xb0000, 0xbc000 and 0xbd000 are already 570 - * used for the CPU Measurement Sampling facility. 
571 - */ 572 - PMU_FORMAT_ATTR(event, "config:0-63"); 573 - 574 - static struct attribute *paiext_format_attr[] = { 575 - &format_attr_event.attr, 576 - NULL, 577 - }; 578 - 579 - static struct attribute_group paiext_events_group = { 580 - .name = "events", 581 - .attrs = NULL, /* Filled in attr_event_init() */ 582 - }; 583 - 584 - static struct attribute_group paiext_format_group = { 585 - .name = "format", 586 - .attrs = paiext_format_attr, 587 - }; 588 - 589 - static const struct attribute_group *paiext_attr_groups[] = { 590 - &paiext_events_group, 591 - &paiext_format_group, 592 - NULL, 593 - }; 594 - 595 - /* Performance monitoring unit for mapped counters */ 596 - static struct pmu paiext = { 597 - .task_ctx_nr = perf_hw_context, 598 - .event_init = paiext_event_init, 599 - .add = paiext_add, 600 - .del = paiext_del, 601 - .start = paiext_start, 602 - .stop = paiext_stop, 603 - .read = paiext_read, 604 - .sched_task = paiext_sched_task, 605 - .attr_groups = paiext_attr_groups, 606 - }; 607 - 608 - /* List of symbolic PAI extension 1 NNPA counter names. 
*/ 609 - static const char * const paiext_ctrnames[] = { 610 - [0] = "NNPA_ALL", 611 - [1] = "NNPA_ADD", 612 - [2] = "NNPA_SUB", 613 - [3] = "NNPA_MUL", 614 - [4] = "NNPA_DIV", 615 - [5] = "NNPA_MIN", 616 - [6] = "NNPA_MAX", 617 - [7] = "NNPA_LOG", 618 - [8] = "NNPA_EXP", 619 - [9] = "NNPA_IBM_RESERVED_9", 620 - [10] = "NNPA_RELU", 621 - [11] = "NNPA_TANH", 622 - [12] = "NNPA_SIGMOID", 623 - [13] = "NNPA_SOFTMAX", 624 - [14] = "NNPA_BATCHNORM", 625 - [15] = "NNPA_MAXPOOL2D", 626 - [16] = "NNPA_AVGPOOL2D", 627 - [17] = "NNPA_LSTMACT", 628 - [18] = "NNPA_GRUACT", 629 - [19] = "NNPA_CONVOLUTION", 630 - [20] = "NNPA_MATMUL_OP", 631 - [21] = "NNPA_MATMUL_OP_BCAST23", 632 - [22] = "NNPA_SMALLBATCH", 633 - [23] = "NNPA_LARGEDIM", 634 - [24] = "NNPA_SMALLTENSOR", 635 - [25] = "NNPA_1MFRAME", 636 - [26] = "NNPA_2GFRAME", 637 - [27] = "NNPA_ACCESSEXCEPT", 638 - [28] = "NNPA_TRANSFORM", 639 - [29] = "NNPA_GELU", 640 - [30] = "NNPA_MOMENTS", 641 - [31] = "NNPA_LAYERNORM", 642 - [32] = "NNPA_MATMUL_OP_BCAST1", 643 - [33] = "NNPA_SQRT", 644 - [34] = "NNPA_INVSQRT", 645 - [35] = "NNPA_NORM", 646 - [36] = "NNPA_REDUCE", 647 - }; 648 - 649 - static void __init attr_event_free(struct attribute **attrs, int num) 650 - { 651 - struct perf_pmu_events_attr *pa; 652 - struct device_attribute *dap; 653 - int i; 654 - 655 - for (i = 0; i < num; i++) { 656 - dap = container_of(attrs[i], struct device_attribute, attr); 657 - pa = container_of(dap, struct perf_pmu_events_attr, attr); 658 - kfree(pa); 659 - } 660 - kfree(attrs); 661 - } 662 - 663 - static int __init attr_event_init_one(struct attribute **attrs, int num) 664 - { 665 - struct perf_pmu_events_attr *pa; 666 - 667 - /* Index larger than array_size, no counter name available */ 668 - if (num >= ARRAY_SIZE(paiext_ctrnames)) { 669 - attrs[num] = NULL; 670 - return 0; 671 - } 672 - 673 - pa = kzalloc(sizeof(*pa), GFP_KERNEL); 674 - if (!pa) 675 - return -ENOMEM; 676 - 677 - sysfs_attr_init(&pa->attr.attr); 678 - pa->id = PAI_NNPA_BASE 
+ num; 679 - pa->attr.attr.name = paiext_ctrnames[num]; 680 - pa->attr.attr.mode = 0444; 681 - pa->attr.show = cpumf_events_sysfs_show; 682 - pa->attr.store = NULL; 683 - attrs[num] = &pa->attr.attr; 684 - return 0; 685 - } 686 - 687 - /* Create PMU sysfs event attributes on the fly. */ 688 - static int __init attr_event_init(void) 689 - { 690 - struct attribute **attrs; 691 - int ret, i; 692 - 693 - attrs = kmalloc_array(paiext_cnt + 2, sizeof(*attrs), GFP_KERNEL); 694 - if (!attrs) 695 - return -ENOMEM; 696 - for (i = 0; i <= paiext_cnt; i++) { 697 - ret = attr_event_init_one(attrs, i); 698 - if (ret) { 699 - attr_event_free(attrs, i); 700 - return ret; 701 - } 702 - } 703 - attrs[i] = NULL; 704 - paiext_events_group.attrs = attrs; 705 - return 0; 706 - } 707 - 708 - static int __init paiext_init(void) 709 - { 710 - struct qpaci_info_block ib; 711 - int rc = -ENOMEM; 712 - 713 - if (!test_facility(197)) 714 - return 0; 715 - 716 - qpaci(&ib); 717 - paiext_cnt = ib.num_nnpa; 718 - if (paiext_cnt >= PAI_NNPA_MAXCTR) 719 - paiext_cnt = PAI_NNPA_MAXCTR; 720 - if (!paiext_cnt) 721 - return 0; 722 - 723 - rc = attr_event_init(); 724 - if (rc) { 725 - pr_err("Creation of PMU " KMSG_COMPONENT " /sysfs failed\n"); 726 - return rc; 727 - } 728 - 729 - /* Setup s390dbf facility */ 730 - paiext_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128); 731 - if (!paiext_dbg) { 732 - pr_err("Registration of s390dbf " KMSG_COMPONENT " failed\n"); 733 - rc = -ENOMEM; 734 - goto out_init; 735 - } 736 - debug_register_view(paiext_dbg, &debug_sprintf_view); 737 - 738 - rc = perf_pmu_register(&paiext, KMSG_COMPONENT, -1); 739 - if (rc) { 740 - pr_err("Registration of " KMSG_COMPONENT " PMU failed with " 741 - "rc=%i\n", rc); 742 - goto out_pmu; 743 - } 744 - 745 - return 0; 746 - 747 - out_pmu: 748 - debug_unregister_view(paiext_dbg, &debug_sprintf_view); 749 - debug_unregister(paiext_dbg); 750 - out_init: 751 - attr_event_free(paiext_events_group.attrs, 752 - ARRAY_SIZE(paiext_ctrnames) + 
1); 753 - return rc; 754 - } 755 - 756 - device_initcall(paiext_init);
-3
arch/s390/kernel/perf_regs.c
··· 44 44 45 45 u64 perf_reg_abi(struct task_struct *task) 46 46 { 47 - if (test_tsk_thread_flag(task, TIF_31BIT)) 48 - return PERF_SAMPLE_REGS_ABI_32; 49 - 50 47 return PERF_SAMPLE_REGS_ABI_64; 51 48 } 52 49
+2 -7
arch/s390/kernel/process.c
··· 24 24 #include <linux/tick.h> 25 25 #include <linux/personality.h> 26 26 #include <linux/syscalls.h> 27 - #include <linux/compat.h> 28 27 #include <linux/kprobes.h> 29 28 #include <linux/random.h> 30 29 #include <linux/init_task.h> ··· 165 166 166 167 /* Set a new TLS ? */ 167 168 if (clone_flags & CLONE_SETTLS) { 168 - if (is_compat_task()) { 169 - p->thread.acrs[0] = (unsigned int)tls; 170 - } else { 171 - p->thread.acrs[0] = (unsigned int)(tls >> 32); 172 - p->thread.acrs[1] = (unsigned int)tls; 173 - } 169 + p->thread.acrs[0] = (unsigned int)(tls >> 32); 170 + p->thread.acrs[1] = (unsigned int)tls; 174 171 } 175 172 /* 176 173 * s390 stores the svc return address in arch_data when calling
+1 -2
arch/s390/kernel/processor.c
··· 4 4 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) 5 5 */ 6 6 7 - #define KMSG_COMPONENT "cpu" 8 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 7 + #define pr_fmt(fmt) "cpu: " fmt 9 8 10 9 #include <linux/stop_machine.h> 11 10 #include <linux/cpufeature.h>
-524
arch/s390/kernel/ptrace.c
··· 22 22 #include <linux/elf.h> 23 23 #include <linux/regset.h> 24 24 #include <linux/seccomp.h> 25 - #include <linux/compat.h> 26 25 #include <trace/syscall.h> 27 26 #include <asm/guarded_storage.h> 28 27 #include <asm/access-regs.h> ··· 36 37 #include <asm/fpu.h> 37 38 38 39 #include "entry.h" 39 - 40 - #ifdef CONFIG_COMPAT 41 - #include "compat_ptrace.h" 42 - #endif 43 40 44 41 void update_cr_regs(struct task_struct *task) 45 42 { ··· 501 506 return ptrace_request(child, request, addr, data); 502 507 } 503 508 } 504 - 505 - #ifdef CONFIG_COMPAT 506 - /* 507 - * Now the fun part starts... a 31 bit program running in the 508 - * 31 bit emulation tracing another program. PTRACE_PEEKTEXT, 509 - * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy 510 - * to handle, the difference to the 64 bit versions of the requests 511 - * is that the access is done in multiples of 4 byte instead of 512 - * 8 bytes (sizeof(unsigned long) on 31/64 bit). 513 - * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA, 514 - * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program 515 - * is a 31 bit program too, the content of struct user can be 516 - * emulated. A 31 bit program peeking into the struct user of 517 - * a 64 bit program is a no-no. 518 - */ 519 - 520 - /* 521 - * Same as peek_user_per but for a 31 bit program. 522 - */ 523 - static inline __u32 __peek_user_per_compat(struct task_struct *child, 524 - addr_t addr) 525 - { 526 - if (addr == offsetof(struct compat_per_struct_kernel, cr9)) 527 - /* Control bits of the active per set. */ 528 - return (__u32) test_thread_flag(TIF_SINGLE_STEP) ? 529 - PER_EVENT_IFETCH : child->thread.per_user.control; 530 - else if (addr == offsetof(struct compat_per_struct_kernel, cr10)) 531 - /* Start address of the active per set. */ 532 - return (__u32) test_thread_flag(TIF_SINGLE_STEP) ? 
533 - 0 : child->thread.per_user.start; 534 - else if (addr == offsetof(struct compat_per_struct_kernel, cr11)) 535 - /* End address of the active per set. */ 536 - return test_thread_flag(TIF_SINGLE_STEP) ? 537 - PSW32_ADDR_INSN : child->thread.per_user.end; 538 - else if (addr == offsetof(struct compat_per_struct_kernel, bits)) 539 - /* Single-step bit. */ 540 - return (__u32) test_thread_flag(TIF_SINGLE_STEP) ? 541 - 0x80000000 : 0; 542 - else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr)) 543 - /* Start address of the user specified per set. */ 544 - return (__u32) child->thread.per_user.start; 545 - else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr)) 546 - /* End address of the user specified per set. */ 547 - return (__u32) child->thread.per_user.end; 548 - else if (addr == offsetof(struct compat_per_struct_kernel, perc_atmid)) 549 - /* PER code, ATMID and AI of the last PER trap */ 550 - return (__u32) child->thread.per_event.cause << 16; 551 - else if (addr == offsetof(struct compat_per_struct_kernel, address)) 552 - /* Address of the last PER trap */ 553 - return (__u32) child->thread.per_event.address; 554 - else if (addr == offsetof(struct compat_per_struct_kernel, access_id)) 555 - /* Access id of the last PER trap */ 556 - return (__u32) child->thread.per_event.paid << 24; 557 - return 0; 558 - } 559 - 560 - /* 561 - * Same as peek_user but for a 31 bit program. 562 - */ 563 - static u32 __peek_user_compat(struct task_struct *child, addr_t addr) 564 - { 565 - addr_t offset; 566 - __u32 tmp; 567 - 568 - if (addr < offsetof(struct compat_user, regs.acrs)) { 569 - struct pt_regs *regs = task_pt_regs(child); 570 - /* 571 - * psw and gprs are stored on the stack 572 - */ 573 - if (addr == offsetof(struct compat_user, regs.psw.mask)) { 574 - /* Fake a 31 bit psw mask. 
*/ 575 - tmp = (__u32)(regs->psw.mask >> 32); 576 - tmp &= PSW32_MASK_USER | PSW32_MASK_RI; 577 - tmp |= PSW32_USER_BITS; 578 - } else if (addr == offsetof(struct compat_user, regs.psw.addr)) { 579 - /* Fake a 31 bit psw address. */ 580 - tmp = (__u32) regs->psw.addr | 581 - (__u32)(regs->psw.mask & PSW_MASK_BA); 582 - } else { 583 - /* gpr 0-15 */ 584 - tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4); 585 - } 586 - } else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) { 587 - /* 588 - * access registers are stored in the thread structure 589 - */ 590 - offset = addr - offsetof(struct compat_user, regs.acrs); 591 - tmp = *(__u32*)((addr_t) &child->thread.acrs + offset); 592 - 593 - } else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) { 594 - /* 595 - * orig_gpr2 is stored on the kernel stack 596 - */ 597 - tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4); 598 - 599 - } else if (addr < offsetof(struct compat_user, regs.fp_regs)) { 600 - /* 601 - * prevent reads of padding hole between 602 - * orig_gpr2 and fp_regs on s390. 603 - */ 604 - tmp = 0; 605 - 606 - } else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) { 607 - /* 608 - * floating point control reg. is in the thread structure 609 - */ 610 - tmp = child->thread.ufpu.fpc; 611 - 612 - } else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) { 613 - /* 614 - * floating point regs. are in the child->thread.ufpu.vxrs array 615 - */ 616 - offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs); 617 - tmp = *(__u32 *)((addr_t)child->thread.ufpu.vxrs + 2 * offset); 618 - } else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) { 619 - /* 620 - * Handle access to the per_info structure. 
621 - */ 622 - addr -= offsetof(struct compat_user, regs.per_info); 623 - tmp = __peek_user_per_compat(child, addr); 624 - 625 - } else 626 - tmp = 0; 627 - 628 - return tmp; 629 - } 630 - 631 - static int peek_user_compat(struct task_struct *child, 632 - addr_t addr, addr_t data) 633 - { 634 - __u32 tmp; 635 - 636 - if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3) 637 - return -EIO; 638 - 639 - tmp = __peek_user_compat(child, addr); 640 - return put_user(tmp, (__u32 __user *) data); 641 - } 642 - 643 - /* 644 - * Same as poke_user_per but for a 31 bit program. 645 - */ 646 - static inline void __poke_user_per_compat(struct task_struct *child, 647 - addr_t addr, __u32 data) 648 - { 649 - if (addr == offsetof(struct compat_per_struct_kernel, cr9)) 650 - /* PER event mask of the user specified per set. */ 651 - child->thread.per_user.control = 652 - data & (PER_EVENT_MASK | PER_CONTROL_MASK); 653 - else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr)) 654 - /* Starting address of the user specified per set. */ 655 - child->thread.per_user.start = data; 656 - else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr)) 657 - /* Ending address of the user specified per set. */ 658 - child->thread.per_user.end = data; 659 - } 660 - 661 - /* 662 - * Same as poke_user but for a 31 bit program. 663 - */ 664 - static int __poke_user_compat(struct task_struct *child, 665 - addr_t addr, addr_t data) 666 - { 667 - __u32 tmp = (__u32) data; 668 - addr_t offset; 669 - 670 - if (addr < offsetof(struct compat_user, regs.acrs)) { 671 - struct pt_regs *regs = task_pt_regs(child); 672 - /* 673 - * psw, gprs, acrs and orig_gpr2 are stored on the stack 674 - */ 675 - if (addr == offsetof(struct compat_user, regs.psw.mask)) { 676 - __u32 mask = PSW32_MASK_USER; 677 - 678 - mask |= is_ri_task(child) ? PSW32_MASK_RI : 0; 679 - /* Build a 64 bit psw mask from 31 bit mask. 
*/ 680 - if ((tmp ^ PSW32_USER_BITS) & ~mask) 681 - /* Invalid psw mask. */ 682 - return -EINVAL; 683 - if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME) 684 - /* Invalid address-space-control bits */ 685 - return -EINVAL; 686 - regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 687 - (regs->psw.mask & PSW_MASK_BA) | 688 - (__u64)(tmp & mask) << 32; 689 - } else if (addr == offsetof(struct compat_user, regs.psw.addr)) { 690 - /* Build a 64 bit psw address from 31 bit address. */ 691 - regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; 692 - /* Transfer 31 bit amode bit to psw mask. */ 693 - regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | 694 - (__u64)(tmp & PSW32_ADDR_AMODE); 695 - } else { 696 - if (test_pt_regs_flag(regs, PIF_SYSCALL) && 697 - addr == offsetof(struct compat_user, regs.gprs[2])) { 698 - struct pt_regs *regs = task_pt_regs(child); 699 - 700 - regs->int_code = 0x20000 | (data & 0xffff); 701 - } 702 - /* gpr 0-15 */ 703 - *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; 704 - } 705 - } else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) { 706 - /* 707 - * access registers are stored in the thread structure 708 - */ 709 - offset = addr - offsetof(struct compat_user, regs.acrs); 710 - *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp; 711 - 712 - } else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) { 713 - /* 714 - * orig_gpr2 is stored on the kernel stack 715 - */ 716 - *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp; 717 - 718 - } else if (addr < offsetof(struct compat_user, regs.fp_regs)) { 719 - /* 720 - * prevent writess of padding hole between 721 - * orig_gpr2 and fp_regs on s390. 722 - */ 723 - return 0; 724 - 725 - } else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) { 726 - /* 727 - * floating point control reg. 
is in the thread structure 728 - */ 729 - child->thread.ufpu.fpc = data; 730 - 731 - } else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) { 732 - /* 733 - * floating point regs. are in the child->thread.ufpu.vxrs array 734 - */ 735 - offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs); 736 - *(__u32 *)((addr_t)child->thread.ufpu.vxrs + 2 * offset) = tmp; 737 - } else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) { 738 - /* 739 - * Handle access to the per_info structure. 740 - */ 741 - addr -= offsetof(struct compat_user, regs.per_info); 742 - __poke_user_per_compat(child, addr, data); 743 - } 744 - 745 - return 0; 746 - } 747 - 748 - static int poke_user_compat(struct task_struct *child, 749 - addr_t addr, addr_t data) 750 - { 751 - if (!is_compat_task() || (addr & 3) || 752 - addr > sizeof(struct compat_user) - 3) 753 - return -EIO; 754 - 755 - return __poke_user_compat(child, addr, data); 756 - } 757 - 758 - long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 759 - compat_ulong_t caddr, compat_ulong_t cdata) 760 - { 761 - unsigned long addr = caddr; 762 - unsigned long data = cdata; 763 - compat_ptrace_area parea; 764 - int copied, ret; 765 - 766 - switch (request) { 767 - case PTRACE_PEEKUSR: 768 - /* read the word at location addr in the USER area. 
*/ 769 - return peek_user_compat(child, addr, data); 770 - 771 - case PTRACE_POKEUSR: 772 - /* write the word at location addr in the USER area */ 773 - return poke_user_compat(child, addr, data); 774 - 775 - case PTRACE_PEEKUSR_AREA: 776 - case PTRACE_POKEUSR_AREA: 777 - if (copy_from_user(&parea, (void __force __user *) addr, 778 - sizeof(parea))) 779 - return -EFAULT; 780 - addr = parea.kernel_addr; 781 - data = parea.process_addr; 782 - copied = 0; 783 - while (copied < parea.len) { 784 - if (request == PTRACE_PEEKUSR_AREA) 785 - ret = peek_user_compat(child, addr, data); 786 - else { 787 - __u32 utmp; 788 - if (get_user(utmp, 789 - (__u32 __force __user *) data)) 790 - return -EFAULT; 791 - ret = poke_user_compat(child, addr, utmp); 792 - } 793 - if (ret) 794 - return ret; 795 - addr += sizeof(unsigned int); 796 - data += sizeof(unsigned int); 797 - copied += sizeof(unsigned int); 798 - } 799 - return 0; 800 - case PTRACE_GET_LAST_BREAK: 801 - return put_user(child->thread.last_break, (unsigned int __user *)data); 802 - } 803 - return compat_ptrace_request(child, request, addr, data); 804 - } 805 - #endif 806 509 807 510 /* 808 511 * user_regset definitions. 
··· 990 1297 .n = ARRAY_SIZE(s390_regsets) 991 1298 }; 992 1299 993 - #ifdef CONFIG_COMPAT 994 - static int s390_compat_regs_get(struct task_struct *target, 995 - const struct user_regset *regset, 996 - struct membuf to) 997 - { 998 - unsigned n; 999 - 1000 - if (target == current) 1001 - save_access_regs(target->thread.acrs); 1002 - 1003 - for (n = 0; n < sizeof(s390_compat_regs); n += sizeof(compat_ulong_t)) 1004 - membuf_store(&to, __peek_user_compat(target, n)); 1005 - return 0; 1006 - } 1007 - 1008 - static int s390_compat_regs_set(struct task_struct *target, 1009 - const struct user_regset *regset, 1010 - unsigned int pos, unsigned int count, 1011 - const void *kbuf, const void __user *ubuf) 1012 - { 1013 - int rc = 0; 1014 - 1015 - if (target == current) 1016 - save_access_regs(target->thread.acrs); 1017 - 1018 - if (kbuf) { 1019 - const compat_ulong_t *k = kbuf; 1020 - while (count > 0 && !rc) { 1021 - rc = __poke_user_compat(target, pos, *k++); 1022 - count -= sizeof(*k); 1023 - pos += sizeof(*k); 1024 - } 1025 - } else { 1026 - const compat_ulong_t __user *u = ubuf; 1027 - while (count > 0 && !rc) { 1028 - compat_ulong_t word; 1029 - rc = __get_user(word, u++); 1030 - if (rc) 1031 - break; 1032 - rc = __poke_user_compat(target, pos, word); 1033 - count -= sizeof(*u); 1034 - pos += sizeof(*u); 1035 - } 1036 - } 1037 - 1038 - if (rc == 0 && target == current) 1039 - restore_access_regs(target->thread.acrs); 1040 - 1041 - return rc; 1042 - } 1043 - 1044 - static int s390_compat_regs_high_get(struct task_struct *target, 1045 - const struct user_regset *regset, 1046 - struct membuf to) 1047 - { 1048 - compat_ulong_t *gprs_high; 1049 - int i; 1050 - 1051 - gprs_high = (compat_ulong_t *)task_pt_regs(target)->gprs; 1052 - for (i = 0; i < NUM_GPRS; i++, gprs_high += 2) 1053 - membuf_store(&to, *gprs_high); 1054 - return 0; 1055 - } 1056 - 1057 - static int s390_compat_regs_high_set(struct task_struct *target, 1058 - const struct user_regset *regset, 1059 - 
unsigned int pos, unsigned int count, 1060 - const void *kbuf, const void __user *ubuf) 1061 - { 1062 - compat_ulong_t *gprs_high; 1063 - int rc = 0; 1064 - 1065 - gprs_high = (compat_ulong_t *) 1066 - &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)]; 1067 - if (kbuf) { 1068 - const compat_ulong_t *k = kbuf; 1069 - while (count > 0) { 1070 - *gprs_high = *k++; 1071 - *gprs_high += 2; 1072 - count -= sizeof(*k); 1073 - } 1074 - } else { 1075 - const compat_ulong_t __user *u = ubuf; 1076 - while (count > 0 && !rc) { 1077 - unsigned long word; 1078 - rc = __get_user(word, u++); 1079 - if (rc) 1080 - break; 1081 - *gprs_high = word; 1082 - *gprs_high += 2; 1083 - count -= sizeof(*u); 1084 - } 1085 - } 1086 - 1087 - return rc; 1088 - } 1089 - 1090 - static int s390_compat_last_break_get(struct task_struct *target, 1091 - const struct user_regset *regset, 1092 - struct membuf to) 1093 - { 1094 - compat_ulong_t last_break = target->thread.last_break; 1095 - 1096 - return membuf_store(&to, (unsigned long)last_break); 1097 - } 1098 - 1099 - static int s390_compat_last_break_set(struct task_struct *target, 1100 - const struct user_regset *regset, 1101 - unsigned int pos, unsigned int count, 1102 - const void *kbuf, const void __user *ubuf) 1103 - { 1104 - return 0; 1105 - } 1106 - 1107 - static const struct user_regset s390_compat_regsets[] = { 1108 - { 1109 - USER_REGSET_NOTE_TYPE(PRSTATUS), 1110 - .n = sizeof(s390_compat_regs) / sizeof(compat_long_t), 1111 - .size = sizeof(compat_long_t), 1112 - .align = sizeof(compat_long_t), 1113 - .regset_get = s390_compat_regs_get, 1114 - .set = s390_compat_regs_set, 1115 - }, 1116 - { 1117 - USER_REGSET_NOTE_TYPE(PRFPREG), 1118 - .n = sizeof(s390_fp_regs) / sizeof(compat_long_t), 1119 - .size = sizeof(compat_long_t), 1120 - .align = sizeof(compat_long_t), 1121 - .regset_get = s390_fpregs_get, 1122 - .set = s390_fpregs_set, 1123 - }, 1124 - { 1125 - USER_REGSET_NOTE_TYPE(S390_SYSTEM_CALL), 1126 - .n = 1, 1127 - .size = 
sizeof(compat_uint_t), 1128 - .align = sizeof(compat_uint_t), 1129 - .regset_get = s390_system_call_get, 1130 - .set = s390_system_call_set, 1131 - }, 1132 - { 1133 - USER_REGSET_NOTE_TYPE(S390_LAST_BREAK), 1134 - .n = 1, 1135 - .size = sizeof(long), 1136 - .align = sizeof(long), 1137 - .regset_get = s390_compat_last_break_get, 1138 - .set = s390_compat_last_break_set, 1139 - }, 1140 - { 1141 - USER_REGSET_NOTE_TYPE(S390_TDB), 1142 - .n = 1, 1143 - .size = 256, 1144 - .align = 1, 1145 - .regset_get = s390_tdb_get, 1146 - .set = s390_tdb_set, 1147 - }, 1148 - { 1149 - USER_REGSET_NOTE_TYPE(S390_VXRS_LOW), 1150 - .n = __NUM_VXRS_LOW, 1151 - .size = sizeof(__u64), 1152 - .align = sizeof(__u64), 1153 - .regset_get = s390_vxrs_low_get, 1154 - .set = s390_vxrs_low_set, 1155 - }, 1156 - { 1157 - USER_REGSET_NOTE_TYPE(S390_VXRS_HIGH), 1158 - .n = __NUM_VXRS_HIGH, 1159 - .size = sizeof(__vector128), 1160 - .align = sizeof(__vector128), 1161 - .regset_get = s390_vxrs_high_get, 1162 - .set = s390_vxrs_high_set, 1163 - }, 1164 - { 1165 - USER_REGSET_NOTE_TYPE(S390_HIGH_GPRS), 1166 - .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), 1167 - .size = sizeof(compat_long_t), 1168 - .align = sizeof(compat_long_t), 1169 - .regset_get = s390_compat_regs_high_get, 1170 - .set = s390_compat_regs_high_set, 1171 - }, 1172 - { 1173 - USER_REGSET_NOTE_TYPE(S390_GS_CB), 1174 - .n = sizeof(struct gs_cb) / sizeof(__u64), 1175 - .size = sizeof(__u64), 1176 - .align = sizeof(__u64), 1177 - .regset_get = s390_gs_cb_get, 1178 - .set = s390_gs_cb_set, 1179 - }, 1180 - { 1181 - USER_REGSET_NOTE_TYPE(S390_GS_BC), 1182 - .n = sizeof(struct gs_cb) / sizeof(__u64), 1183 - .size = sizeof(__u64), 1184 - .align = sizeof(__u64), 1185 - .regset_get = s390_gs_bc_get, 1186 - .set = s390_gs_bc_set, 1187 - }, 1188 - { 1189 - USER_REGSET_NOTE_TYPE(S390_RI_CB), 1190 - .n = sizeof(struct runtime_instr_cb) / sizeof(__u64), 1191 - .size = sizeof(__u64), 1192 - .align = sizeof(__u64), 1193 - .regset_get = 
s390_runtime_instr_get, 1194 - .set = s390_runtime_instr_set, 1195 - }, 1196 - }; 1197 - 1198 - static const struct user_regset_view user_s390_compat_view = { 1199 - .name = "s390", 1200 - .e_machine = EM_S390, 1201 - .regsets = s390_compat_regsets, 1202 - .n = ARRAY_SIZE(s390_compat_regsets) 1203 - }; 1204 - #endif 1205 - 1206 1300 const struct user_regset_view *task_user_regset_view(struct task_struct *task) 1207 1301 { 1208 - #ifdef CONFIG_COMPAT 1209 - if (test_tsk_thread_flag(task, TIF_31BIT)) 1210 - return &user_s390_compat_view; 1211 - #endif 1212 1302 return &user_s390_view; 1213 1303 } 1214 1304
+2 -4
arch/s390/kernel/setup.c
··· 13 13 * This file handles the architecture-dependent parts of initialization 14 14 */ 15 15 16 - #define KMSG_COMPONENT "setup" 17 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 16 + #define pr_fmt(fmt) "setup: " fmt 18 17 19 18 #include <linux/errno.h> 20 19 #include <linux/export.h> ··· 46 47 #include <linux/kexec.h> 47 48 #include <linux/crash_dump.h> 48 49 #include <linux/memory.h> 49 - #include <linux/compat.h> 50 50 #include <linux/start_kernel.h> 51 51 #include <linux/hugetlb.h> 52 52 #include <linux/kmemleak.h> ··· 110 112 * Because the AMODE31 sections are relocated below 2G at startup, 111 113 * the content of control registers CR2, CR5 and CR15 must be updated 112 114 * with new addresses after the relocation. The initial initialization of 113 - * control registers occurs in head64.S and then gets updated again after AMODE31 115 + * control registers occurs in head.S and then gets updated again after AMODE31 114 116 * relocation. We must access the relevant AMODE31 tables indirectly via 115 117 * pointers placed in the .amode31.refs linker section. Those pointers get 116 118 * updated automatically during AMODE31 relocation and always contain a valid
+4 -23
arch/s390/kernel/signal.c
··· 27 27 #include <linux/personality.h> 28 28 #include <linux/binfmts.h> 29 29 #include <linux/syscalls.h> 30 - #include <linux/compat.h> 31 30 #include <asm/ucontext.h> 32 31 #include <linux/uaccess.h> 33 32 #include <asm/vdso-symbols.h> ··· 289 290 unsigned long restorer; 290 291 size_t frame_size; 291 292 292 - /* 293 - * gprs_high are only present for a 31-bit task running on 294 - * a 64-bit kernel (see compat_signal.c) but the space for 295 - * gprs_high need to be allocated if vector registers are 296 - * included in the signal frame on a 31-bit system. 297 - */ 298 293 frame_size = sizeof(*frame) - sizeof(frame->sregs_ext); 299 294 if (cpu_has_vx()) 300 295 frame_size += sizeof(frame->sregs_ext); ··· 326 333 if (ka->sa.sa_flags & SA_RESTORER) 327 334 restorer = (unsigned long) ka->sa.sa_restorer; 328 335 else 329 - restorer = VDSO64_SYMBOL(current, sigreturn); 336 + restorer = VDSO_SYMBOL(current, sigreturn); 330 337 331 338 /* Set up registers for signal handler */ 332 339 regs->gprs[14] = restorer; ··· 360 367 size_t frame_size; 361 368 362 369 frame_size = sizeof(struct rt_sigframe) - sizeof(_sigregs_ext); 363 - /* 364 - * gprs_high are only present for a 31-bit task running on 365 - * a 64-bit kernel (see compat_signal.c) but the space for 366 - * gprs_high need to be allocated if vector registers are 367 - * included in the signal frame on a 31-bit system. 
368 - */ 369 370 uc_flags = 0; 370 371 if (cpu_has_vx()) { 371 372 frame_size += sizeof(_sigregs_ext); ··· 378 391 if (ksig->ka.sa.sa_flags & SA_RESTORER) 379 392 restorer = (unsigned long) ksig->ka.sa.sa_restorer; 380 393 else 381 - restorer = VDSO64_SYMBOL(current, rt_sigreturn); 394 + restorer = VDSO_SYMBOL(current, rt_sigreturn); 382 395 383 396 /* Create siginfo on the signal stack */ 384 397 if (copy_siginfo_to_user(&frame->info, &ksig->info)) ··· 477 490 clear_pt_regs_flag(regs, PIF_SYSCALL); 478 491 479 492 rseq_signal_deliver(&ksig, regs); 480 - if (is_compat_task()) 481 - handle_signal32(&ksig, oldset, regs); 482 - else 483 - handle_signal(&ksig, oldset, regs); 493 + handle_signal(&ksig, oldset, regs); 484 494 return; 485 495 } 486 496 ··· 490 506 /* Restart with sys_restart_syscall */ 491 507 regs->gprs[2] = regs->orig_gpr2; 492 508 current->restart_block.arch_data = regs->psw.addr; 493 - if (is_compat_task()) 494 - regs->psw.addr = VDSO32_SYMBOL(current, restart_syscall); 495 - else 496 - regs->psw.addr = VDSO64_SYMBOL(current, restart_syscall); 509 + regs->psw.addr = VDSO_SYMBOL(current, restart_syscall); 497 510 if (test_thread_flag(TIF_SINGLE_STEP)) 498 511 clear_thread_flag(TIF_PER_TRAP); 499 512 break;
+9 -6
arch/s390/kernel/smp.c
··· 15 15 * operates on physical cpu numbers needs to go into smp.c. 16 16 */ 17 17 18 - #define KMSG_COMPONENT "cpu" 19 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 18 + #define pr_fmt(fmt) "cpu: " fmt 20 19 21 20 #include <linux/cpufeature.h> 22 21 #include <linux/workqueue.h> ··· 280 281 lc->hardirq_timer = tsk->thread.hardirq_timer; 281 282 lc->softirq_timer = tsk->thread.softirq_timer; 282 283 lc->steal_timer = 0; 284 + #ifdef CONFIG_STACKPROTECTOR 285 + lc->stack_canary = tsk->stack_canary; 286 + #endif 283 287 } 284 288 285 289 static void pcpu_start_fn(int cpu, void (*func)(void *), void *data) ··· 307 305 func(data); /* should not return */ 308 306 } 309 307 310 - static void pcpu_delegate(struct pcpu *pcpu, int cpu, 311 - pcpu_delegate_fn *func, 312 - void *data, unsigned long stack) 308 + static void __noreturn pcpu_delegate(struct pcpu *pcpu, int cpu, 309 + pcpu_delegate_fn *func, 310 + void *data, unsigned long stack) 313 311 { 314 312 struct lowcore *lc, *abs_lc; 315 313 unsigned int source_cpu; ··· 372 370 /* 373 371 * Call function on the ipl CPU. 374 372 */ 375 - void smp_call_ipl_cpu(void (*func)(void *), void *data) 373 + void __noreturn smp_call_ipl_cpu(void (*func)(void *), void *data) 376 374 { 377 375 struct lowcore *lc = lowcore_ptr[0]; 378 376 ··· 699 697 continue; 700 698 info->core[info->configured].core_id = 701 699 address >> smp_cpu_mt_shift; 700 + info->core[info->configured].type = boot_core_type; 702 701 info->configured++; 703 702 } 704 703 info->combined = info->configured;
+156
arch/s390/kernel/stackprotector.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #ifndef pr_fmt 4 + #define pr_fmt(fmt) "stackprot: " fmt 5 + #endif 6 + 7 + #include <linux/export.h> 8 + #include <linux/uaccess.h> 9 + #include <linux/printk.h> 10 + #include <asm/abs_lowcore.h> 11 + #include <asm/sections.h> 12 + #include <asm/machine.h> 13 + #include <asm/asm-offsets.h> 14 + #include <asm/arch-stackprotector.h> 15 + 16 + #ifdef __DECOMPRESSOR 17 + 18 + #define DEBUGP boot_debug 19 + #define EMERGP boot_emerg 20 + #define PANIC boot_panic 21 + 22 + #else /* __DECOMPRESSOR */ 23 + 24 + #define DEBUGP pr_debug 25 + #define EMERGP pr_emerg 26 + #define PANIC panic 27 + 28 + #endif /* __DECOMPRESSOR */ 29 + 30 + int __bootdata_preserved(stack_protector_debug); 31 + 32 + unsigned long __stack_chk_guard; 33 + EXPORT_SYMBOL(__stack_chk_guard); 34 + 35 + struct insn_ril { 36 + u8 opc1 : 8; 37 + u8 r1 : 4; 38 + u8 opc2 : 4; 39 + u32 imm; 40 + } __packed; 41 + 42 + /* 43 + * Convert a virtual instruction address to a real instruction address. The 44 + * decompressor needs to patch instructions within the kernel image based on 45 + * their virtual addresses, while dynamic address translation is still 46 + * disabled. Therefore a translation from virtual kernel image addresses to 47 + * the corresponding physical addresses is required. 48 + * 49 + * After dynamic address translation is enabled and when the kernel needs to 50 + * patch instructions such a translation is not required since the addresses 51 + * are identical. 
52 + */ 53 + static struct insn_ril *vaddress_to_insn(unsigned long vaddress) 54 + { 55 + #ifdef __DECOMPRESSOR 56 + return (struct insn_ril *)__kernel_pa(vaddress); 57 + #else 58 + return (struct insn_ril *)vaddress; 59 + #endif 60 + } 61 + 62 + static unsigned long insn_to_vaddress(struct insn_ril *insn) 63 + { 64 + #ifdef __DECOMPRESSOR 65 + return (unsigned long)__kernel_va(insn); 66 + #else 67 + return (unsigned long)insn; 68 + #endif 69 + } 70 + 71 + #define INSN_RIL_STRING_SIZE (sizeof(struct insn_ril) * 2 + 1) 72 + 73 + static void insn_ril_to_string(char *str, struct insn_ril *insn) 74 + { 75 + u8 *ptr = (u8 *)insn; 76 + int i; 77 + 78 + for (i = 0; i < sizeof(*insn); i++) 79 + hex_byte_pack(&str[2 * i], ptr[i]); 80 + str[2 * i] = 0; 81 + } 82 + 83 + static void stack_protector_dump(struct insn_ril *old, struct insn_ril *new) 84 + { 85 + char ostr[INSN_RIL_STRING_SIZE]; 86 + char nstr[INSN_RIL_STRING_SIZE]; 87 + 88 + insn_ril_to_string(ostr, old); 89 + insn_ril_to_string(nstr, new); 90 + DEBUGP("%016lx: %s -> %s\n", insn_to_vaddress(old), ostr, nstr); 91 + } 92 + 93 + static int stack_protector_verify(struct insn_ril *insn, unsigned long kernel_start) 94 + { 95 + char istr[INSN_RIL_STRING_SIZE]; 96 + unsigned long vaddress, offset; 97 + 98 + /* larl */ 99 + if (insn->opc1 == 0xc0 && insn->opc2 == 0x0) 100 + return 0; 101 + /* lgrl */ 102 + if (insn->opc1 == 0xc4 && insn->opc2 == 0x8) 103 + return 0; 104 + insn_ril_to_string(istr, insn); 105 + vaddress = insn_to_vaddress(insn); 106 + if (__is_defined(__DECOMPRESSOR)) { 107 + offset = (unsigned long)insn - kernel_start + TEXT_OFFSET; 108 + EMERGP("Unexpected instruction at %016lx/%016lx: %s\n", vaddress, offset, istr); 109 + PANIC("Stackprotector error\n"); 110 + } else { 111 + EMERGP("Unexpected instruction at %016lx: %s\n", vaddress, istr); 112 + } 113 + return -EINVAL; 114 + } 115 + 116 + int __stack_protector_apply(unsigned long *start, unsigned long *end, unsigned long kernel_start) 117 + { 118 + 
unsigned long canary, *loc; 119 + struct insn_ril *insn, new; 120 + int rc; 121 + 122 + /* 123 + * Convert LARL/LGRL instructions to LLILF so register R1 contains the 124 + * address of the per-cpu / per-process stack canary: 125 + * 126 + * LARL/LGRL R1,__stack_chk_guard => LLILF R1,__lc_stack_canary 127 + */ 128 + canary = __LC_STACK_CANARY; 129 + if (machine_has_relocated_lowcore()) 130 + canary += LOWCORE_ALT_ADDRESS; 131 + for (loc = start; loc < end; loc++) { 132 + insn = vaddress_to_insn(*loc); 133 + rc = stack_protector_verify(insn, kernel_start); 134 + if (rc) 135 + return rc; 136 + new = *insn; 137 + new.opc1 = 0xc0; 138 + new.opc2 = 0xf; 139 + new.imm = canary; 140 + if (stack_protector_debug) 141 + stack_protector_dump(insn, &new); 142 + s390_kernel_write(insn, &new, sizeof(*insn)); 143 + } 144 + return 0; 145 + } 146 + 147 + #ifdef __DECOMPRESSOR 148 + void __stack_protector_apply_early(unsigned long kernel_start) 149 + { 150 + unsigned long *start, *end; 151 + 152 + start = (unsigned long *)vmlinux.stack_prot_start; 153 + end = (unsigned long *)vmlinux.stack_prot_end; 154 + __stack_protector_apply(start, end, kernel_start); 155 + } 156 + #endif
-3
arch/s390/kernel/stacktrace.c
··· 8 8 #include <linux/perf_event.h> 9 9 #include <linux/stacktrace.h> 10 10 #include <linux/uaccess.h> 11 - #include <linux/compat.h> 12 11 #include <asm/asm-offsets.h> 13 12 #include <asm/stacktrace.h> 14 13 #include <asm/unwind.h> ··· 106 107 unsigned long ip, sp; 107 108 bool first = true; 108 109 109 - if (is_compat_task()) 110 - return; 111 110 if (!current->mm) 112 111 return; 113 112 ip = instruction_pointer(regs);
+1 -1
arch/s390/kernel/sthyi.c
··· 253 253 sctns->mac.infmval1 |= MAC_CNT_VLD; 254 254 } 255 255 256 - /* Returns a pointer to the the next partition block. */ 256 + /* Returns a pointer to the next partition block. */ 257 257 static struct diag204_x_part_block *lpar_cpu_inf(struct lpar_cpu_inf *part_inf, 258 258 bool this_lpar, 259 259 void *diag224_buf,
+11 -1
arch/s390/kernel/syscall.c
··· 39 39 40 40 #include "entry.h" 41 41 42 + #define __SYSCALL(nr, sym) long __s390x_##sym(struct pt_regs *); 43 + #include <asm/syscall_table.h> 44 + #undef __SYSCALL 45 + 46 + #define __SYSCALL(nr, sym) [nr] = (__s390x_##sym), 47 + const sys_call_ptr_t sys_call_table[__NR_syscalls] = { 48 + #include <asm/syscall_table.h> 49 + }; 50 + #undef __SYSCALL 51 + 42 52 #ifdef CONFIG_SYSVIPC 43 53 /* 44 54 * sys_ipc() is the de-multiplexer for the SysV IPC calls. ··· 132 122 goto out; 133 123 regs->gprs[2] = -ENOSYS; 134 124 if (likely(nr < NR_syscalls)) 135 - regs->gprs[2] = current->thread.sys_call_table[nr](regs); 125 + regs->gprs[2] = sys_call_table[nr](regs); 136 126 out: 137 127 syscall_exit_to_user_mode(regs); 138 128 }
+21 -37
arch/s390/kernel/syscalls/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 + kapi := arch/$(SRCARCH)/include/generated/asm 3 + uapi := arch/$(SRCARCH)/include/generated/uapi/asm 2 4 3 - gen := arch/$(ARCH)/include/generated 4 - kapi := $(gen)/asm 5 - uapi := $(gen)/uapi/asm 6 - 7 - syscall := $(src)/syscall.tbl 8 - systbl := $(src)/syscalltbl 9 - 10 - gen-y := $(kapi)/syscall_table.h 11 - kapi-hdrs-y := $(kapi)/unistd_nr.h 12 - uapi-hdrs-y := $(uapi)/unistd_32.h 13 - uapi-hdrs-y += $(uapi)/unistd_64.h 14 - 15 - targets += $(addprefix ../../../../,$(gen-y) $(kapi-hdrs-y) $(uapi-hdrs-y)) 16 - 17 - PHONY += kapi uapi 18 - 19 - kapi: $(gen-y) $(kapi-hdrs-y) 20 - uapi: $(uapi-hdrs-y) 21 - 22 - 23 - # Create output directory if not already present 24 5 $(shell mkdir -p $(uapi) $(kapi)) 25 6 7 + syscall := $(src)/syscall.tbl 8 + syshdr := $(srctree)/scripts/syscallhdr.sh 9 + systbl := $(srctree)/scripts/syscalltbl.sh 10 + 26 11 quiet_cmd_syshdr = SYSHDR $@ 27 - cmd_syshdr = $(CONFIG_SHELL) '$(systbl)' -H -a $(syshdr_abi_$(basetarget)) -f "$@" < $< > $@ 12 + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis common,$* $< $@ 28 13 29 - quiet_cmd_sysnr = SYSNR $@ 30 - cmd_sysnr = $(CONFIG_SHELL) '$(systbl)' -N -a $(sysnr_abi_$(basetarget)) < $< > $@ 14 + quiet_cmd_systbl = SYSTBL $@ 15 + cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis common,$* $< $@ 31 16 32 - quiet_cmd_syscalls = SYSTBL $@ 33 - cmd_syscalls = $(CONFIG_SHELL) '$(systbl)' -S < $< > $@ 34 - 35 - syshdr_abi_unistd_32 := common,32 36 - $(uapi)/unistd_32.h: $(syscall) $(systbl) FORCE 37 - $(call if_changed,syshdr) 38 - 39 - syshdr_abi_unistd_64 := common,64 40 - $(uapi)/unistd_64.h: $(syscall) $(systbl) FORCE 17 + $(uapi)/unistd_%.h: $(syscall) $(syshdr) FORCE 41 18 $(call if_changed,syshdr) 42 19 43 20 $(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE 44 - $(call if_changed,syscalls) 21 + $(call if_changed,systbl) 45 22 46 - sysnr_abi_unistd_nr := common,32,64 47 - $(kapi)/unistd_nr.h: $(syscall) $(systbl) FORCE 48 - $(call 
if_changed,sysnr) 23 + uapisyshdr-y += unistd_64.h 24 + kapisyshdr-y += syscall_table.h 25 + 26 + uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) 27 + kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) 28 + targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) 29 + 30 + PHONY += all 31 + all: $(uapisyshdr-y) $(kapisyshdr-y) 32 + @:
+391 -467
arch/s390/kernel/syscalls/syscall.tbl
··· 3 3 # System call table for s390 4 4 # 5 5 # Format: 6 + # <nr> <abi> <syscall> <entry> 6 7 # 7 - # <nr> <abi> <syscall> <entry-64bit> <compat-entry> 8 - # 9 - # where <abi> can be common, 64, or 32 8 + # <abi> is always common. 10 9 11 - 1 common exit sys_exit sys_exit 12 - 2 common fork sys_fork sys_fork 13 - 3 common read sys_read compat_sys_s390_read 14 - 4 common write sys_write compat_sys_s390_write 15 - 5 common open sys_open compat_sys_open 16 - 6 common close sys_close sys_close 17 - 7 common restart_syscall sys_restart_syscall sys_restart_syscall 18 - 8 common creat sys_creat sys_creat 19 - 9 common link sys_link sys_link 20 - 10 common unlink sys_unlink sys_unlink 21 - 11 common execve sys_execve compat_sys_execve 22 - 12 common chdir sys_chdir sys_chdir 23 - 13 32 time - sys_time32 24 - 14 common mknod sys_mknod sys_mknod 25 - 15 common chmod sys_chmod sys_chmod 26 - 16 32 lchown - sys_lchown16 27 - 19 common lseek sys_lseek compat_sys_lseek 28 - 20 common getpid sys_getpid sys_getpid 29 - 21 common mount sys_mount sys_mount 30 - 22 common umount sys_oldumount sys_oldumount 31 - 23 32 setuid - sys_setuid16 32 - 24 32 getuid - sys_getuid16 33 - 25 32 stime - sys_stime32 34 - 26 common ptrace sys_ptrace compat_sys_ptrace 35 - 27 common alarm sys_alarm sys_alarm 36 - 29 common pause sys_pause sys_pause 37 - 30 common utime sys_utime sys_utime32 38 - 33 common access sys_access sys_access 39 - 34 common nice sys_nice sys_nice 40 - 36 common sync sys_sync sys_sync 41 - 37 common kill sys_kill sys_kill 42 - 38 common rename sys_rename sys_rename 43 - 39 common mkdir sys_mkdir sys_mkdir 44 - 40 common rmdir sys_rmdir sys_rmdir 45 - 41 common dup sys_dup sys_dup 46 - 42 common pipe sys_pipe sys_pipe 47 - 43 common times sys_times compat_sys_times 48 - 45 common brk sys_brk sys_brk 49 - 46 32 setgid - sys_setgid16 50 - 47 32 getgid - sys_getgid16 51 - 48 common signal sys_signal sys_signal 52 - 49 32 geteuid - sys_geteuid16 53 - 50 32 getegid - sys_getegid16 
54 - 51 common acct sys_acct sys_acct 55 - 52 common umount2 sys_umount sys_umount 56 - 54 common ioctl sys_ioctl compat_sys_ioctl 57 - 55 common fcntl sys_fcntl compat_sys_fcntl 58 - 57 common setpgid sys_setpgid sys_setpgid 59 - 60 common umask sys_umask sys_umask 60 - 61 common chroot sys_chroot sys_chroot 61 - 62 common ustat sys_ustat compat_sys_ustat 62 - 63 common dup2 sys_dup2 sys_dup2 63 - 64 common getppid sys_getppid sys_getppid 64 - 65 common getpgrp sys_getpgrp sys_getpgrp 65 - 66 common setsid sys_setsid sys_setsid 66 - 67 common sigaction sys_sigaction compat_sys_sigaction 67 - 70 32 setreuid - sys_setreuid16 68 - 71 32 setregid - sys_setregid16 69 - 72 common sigsuspend sys_sigsuspend sys_sigsuspend 70 - 73 common sigpending sys_sigpending compat_sys_sigpending 71 - 74 common sethostname sys_sethostname sys_sethostname 72 - 75 common setrlimit sys_setrlimit compat_sys_setrlimit 73 - 76 32 getrlimit - compat_sys_old_getrlimit 74 - 77 common getrusage sys_getrusage compat_sys_getrusage 75 - 78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday 76 - 79 common settimeofday sys_settimeofday compat_sys_settimeofday 77 - 80 32 getgroups - sys_getgroups16 78 - 81 32 setgroups - sys_setgroups16 79 - 83 common symlink sys_symlink sys_symlink 80 - 85 common readlink sys_readlink sys_readlink 81 - 86 common uselib sys_uselib sys_uselib 82 - 87 common swapon sys_swapon sys_swapon 83 - 88 common reboot sys_reboot sys_reboot 84 - 89 common readdir - compat_sys_old_readdir 85 - 90 common mmap sys_old_mmap compat_sys_s390_old_mmap 86 - 91 common munmap sys_munmap sys_munmap 87 - 92 common truncate sys_truncate compat_sys_truncate 88 - 93 common ftruncate sys_ftruncate compat_sys_ftruncate 89 - 94 common fchmod sys_fchmod sys_fchmod 90 - 95 32 fchown - sys_fchown16 91 - 96 common getpriority sys_getpriority sys_getpriority 92 - 97 common setpriority sys_setpriority sys_setpriority 93 - 99 common statfs sys_statfs compat_sys_statfs 94 - 100 common fstatfs 
sys_fstatfs compat_sys_fstatfs 95 - 101 32 ioperm - - 96 - 102 common socketcall sys_socketcall compat_sys_socketcall 97 - 103 common syslog sys_syslog sys_syslog 98 - 104 common setitimer sys_setitimer compat_sys_setitimer 99 - 105 common getitimer sys_getitimer compat_sys_getitimer 100 - 106 common stat sys_newstat compat_sys_newstat 101 - 107 common lstat sys_newlstat compat_sys_newlstat 102 - 108 common fstat sys_newfstat compat_sys_newfstat 103 - 110 common lookup_dcookie - - 104 - 111 common vhangup sys_vhangup sys_vhangup 105 - 112 common idle - - 106 - 114 common wait4 sys_wait4 compat_sys_wait4 107 - 115 common swapoff sys_swapoff sys_swapoff 108 - 116 common sysinfo sys_sysinfo compat_sys_sysinfo 109 - 117 common ipc sys_s390_ipc compat_sys_s390_ipc 110 - 118 common fsync sys_fsync sys_fsync 111 - 119 common sigreturn sys_sigreturn compat_sys_sigreturn 112 - 120 common clone sys_clone sys_clone 113 - 121 common setdomainname sys_setdomainname sys_setdomainname 114 - 122 common uname sys_newuname sys_newuname 115 - 124 common adjtimex sys_adjtimex sys_adjtimex_time32 116 - 125 common mprotect sys_mprotect sys_mprotect 117 - 126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask 118 - 127 common create_module - - 119 - 128 common init_module sys_init_module sys_init_module 120 - 129 common delete_module sys_delete_module sys_delete_module 121 - 130 common get_kernel_syms - - 122 - 131 common quotactl sys_quotactl sys_quotactl 123 - 132 common getpgid sys_getpgid sys_getpgid 124 - 133 common fchdir sys_fchdir sys_fchdir 125 - 134 common bdflush sys_ni_syscall sys_ni_syscall 126 - 135 common sysfs sys_sysfs sys_sysfs 127 - 136 common personality sys_s390_personality sys_s390_personality 128 - 137 common afs_syscall - - 129 - 138 32 setfsuid - sys_setfsuid16 130 - 139 32 setfsgid - sys_setfsgid16 131 - 140 32 _llseek - sys_llseek 132 - 141 common getdents sys_getdents compat_sys_getdents 133 - 142 32 _newselect - compat_sys_select 134 - 142 64 select 
sys_select - 135 - 143 common flock sys_flock sys_flock 136 - 144 common msync sys_msync sys_msync 137 - 145 common readv sys_readv sys_readv 138 - 146 common writev sys_writev sys_writev 139 - 147 common getsid sys_getsid sys_getsid 140 - 148 common fdatasync sys_fdatasync sys_fdatasync 141 - 149 common _sysctl - - 142 - 150 common mlock sys_mlock sys_mlock 143 - 151 common munlock sys_munlock sys_munlock 144 - 152 common mlockall sys_mlockall sys_mlockall 145 - 153 common munlockall sys_munlockall sys_munlockall 146 - 154 common sched_setparam sys_sched_setparam sys_sched_setparam 147 - 155 common sched_getparam sys_sched_getparam sys_sched_getparam 148 - 156 common sched_setscheduler sys_sched_setscheduler sys_sched_setscheduler 149 - 157 common sched_getscheduler sys_sched_getscheduler sys_sched_getscheduler 150 - 158 common sched_yield sys_sched_yield sys_sched_yield 151 - 159 common sched_get_priority_max sys_sched_get_priority_max sys_sched_get_priority_max 152 - 160 common sched_get_priority_min sys_sched_get_priority_min sys_sched_get_priority_min 153 - 161 common sched_rr_get_interval sys_sched_rr_get_interval sys_sched_rr_get_interval_time32 154 - 162 common nanosleep sys_nanosleep sys_nanosleep_time32 155 - 163 common mremap sys_mremap sys_mremap 156 - 164 32 setresuid - sys_setresuid16 157 - 165 32 getresuid - sys_getresuid16 158 - 167 common query_module - - 159 - 168 common poll sys_poll sys_poll 160 - 169 common nfsservctl - - 161 - 170 32 setresgid - sys_setresgid16 162 - 171 32 getresgid - sys_getresgid16 163 - 172 common prctl sys_prctl sys_prctl 164 - 173 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn 165 - 174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction 166 - 175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask 167 - 176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending 168 - 177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time32 169 - 178 common 
rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 170 - 179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend 171 - 180 common pread64 sys_pread64 compat_sys_s390_pread64 172 - 181 common pwrite64 sys_pwrite64 compat_sys_s390_pwrite64 173 - 182 32 chown - sys_chown16 174 - 183 common getcwd sys_getcwd sys_getcwd 175 - 184 common capget sys_capget sys_capget 176 - 185 common capset sys_capset sys_capset 177 - 186 common sigaltstack sys_sigaltstack compat_sys_sigaltstack 178 - 187 common sendfile sys_sendfile64 compat_sys_sendfile 179 - 188 common getpmsg - - 180 - 189 common putpmsg - - 181 - 190 common vfork sys_vfork sys_vfork 182 - 191 32 ugetrlimit - compat_sys_getrlimit 183 - 191 64 getrlimit sys_getrlimit - 184 - 192 32 mmap2 - compat_sys_s390_mmap2 185 - 193 32 truncate64 - compat_sys_s390_truncate64 186 - 194 32 ftruncate64 - compat_sys_s390_ftruncate64 187 - 195 32 stat64 - compat_sys_s390_stat64 188 - 196 32 lstat64 - compat_sys_s390_lstat64 189 - 197 32 fstat64 - compat_sys_s390_fstat64 190 - 198 32 lchown32 - sys_lchown 191 - 198 64 lchown sys_lchown - 192 - 199 32 getuid32 - sys_getuid 193 - 199 64 getuid sys_getuid - 194 - 200 32 getgid32 - sys_getgid 195 - 200 64 getgid sys_getgid - 196 - 201 32 geteuid32 - sys_geteuid 197 - 201 64 geteuid sys_geteuid - 198 - 202 32 getegid32 - sys_getegid 199 - 202 64 getegid sys_getegid - 200 - 203 32 setreuid32 - sys_setreuid 201 - 203 64 setreuid sys_setreuid - 202 - 204 32 setregid32 - sys_setregid 203 - 204 64 setregid sys_setregid - 204 - 205 32 getgroups32 - sys_getgroups 205 - 205 64 getgroups sys_getgroups - 206 - 206 32 setgroups32 - sys_setgroups 207 - 206 64 setgroups sys_setgroups - 208 - 207 32 fchown32 - sys_fchown 209 - 207 64 fchown sys_fchown - 210 - 208 32 setresuid32 - sys_setresuid 211 - 208 64 setresuid sys_setresuid - 212 - 209 32 getresuid32 - sys_getresuid 213 - 209 64 getresuid sys_getresuid - 214 - 210 32 setresgid32 - sys_setresgid 215 - 210 64 setresgid 
sys_setresgid - 216 - 211 32 getresgid32 - sys_getresgid 217 - 211 64 getresgid sys_getresgid - 218 - 212 32 chown32 - sys_chown 219 - 212 64 chown sys_chown - 220 - 213 32 setuid32 - sys_setuid 221 - 213 64 setuid sys_setuid - 222 - 214 32 setgid32 - sys_setgid 223 - 214 64 setgid sys_setgid - 224 - 215 32 setfsuid32 - sys_setfsuid 225 - 215 64 setfsuid sys_setfsuid - 226 - 216 32 setfsgid32 - sys_setfsgid 227 - 216 64 setfsgid sys_setfsgid - 228 - 217 common pivot_root sys_pivot_root sys_pivot_root 229 - 218 common mincore sys_mincore sys_mincore 230 - 219 common madvise sys_madvise sys_madvise 231 - 220 common getdents64 sys_getdents64 sys_getdents64 232 - 221 32 fcntl64 - compat_sys_fcntl64 233 - 222 common readahead sys_readahead compat_sys_s390_readahead 234 - 223 32 sendfile64 - compat_sys_sendfile64 235 - 224 common setxattr sys_setxattr sys_setxattr 236 - 225 common lsetxattr sys_lsetxattr sys_lsetxattr 237 - 226 common fsetxattr sys_fsetxattr sys_fsetxattr 238 - 227 common getxattr sys_getxattr sys_getxattr 239 - 228 common lgetxattr sys_lgetxattr sys_lgetxattr 240 - 229 common fgetxattr sys_fgetxattr sys_fgetxattr 241 - 230 common listxattr sys_listxattr sys_listxattr 242 - 231 common llistxattr sys_llistxattr sys_llistxattr 243 - 232 common flistxattr sys_flistxattr sys_flistxattr 244 - 233 common removexattr sys_removexattr sys_removexattr 245 - 234 common lremovexattr sys_lremovexattr sys_lremovexattr 246 - 235 common fremovexattr sys_fremovexattr sys_fremovexattr 247 - 236 common gettid sys_gettid sys_gettid 248 - 237 common tkill sys_tkill sys_tkill 249 - 238 common futex sys_futex sys_futex_time32 250 - 239 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity 251 - 240 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity 252 - 241 common tgkill sys_tgkill sys_tgkill 253 - 243 common io_setup sys_io_setup compat_sys_io_setup 254 - 244 common io_destroy sys_io_destroy sys_io_destroy 255 - 245 common 
io_getevents sys_io_getevents sys_io_getevents_time32 256 - 246 common io_submit sys_io_submit compat_sys_io_submit 257 - 247 common io_cancel sys_io_cancel sys_io_cancel 258 - 248 common exit_group sys_exit_group sys_exit_group 259 - 249 common epoll_create sys_epoll_create sys_epoll_create 260 - 250 common epoll_ctl sys_epoll_ctl sys_epoll_ctl 261 - 251 common epoll_wait sys_epoll_wait sys_epoll_wait 262 - 252 common set_tid_address sys_set_tid_address sys_set_tid_address 263 - 253 common fadvise64 sys_fadvise64_64 compat_sys_s390_fadvise64 264 - 254 common timer_create sys_timer_create compat_sys_timer_create 265 - 255 common timer_settime sys_timer_settime sys_timer_settime32 266 - 256 common timer_gettime sys_timer_gettime sys_timer_gettime32 267 - 257 common timer_getoverrun sys_timer_getoverrun sys_timer_getoverrun 268 - 258 common timer_delete sys_timer_delete sys_timer_delete 269 - 259 common clock_settime sys_clock_settime sys_clock_settime32 270 - 260 common clock_gettime sys_clock_gettime sys_clock_gettime32 271 - 261 common clock_getres sys_clock_getres sys_clock_getres_time32 272 - 262 common clock_nanosleep sys_clock_nanosleep sys_clock_nanosleep_time32 273 - 264 32 fadvise64_64 - compat_sys_s390_fadvise64_64 274 - 265 common statfs64 sys_statfs64 compat_sys_statfs64 275 - 266 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 276 - 267 common remap_file_pages sys_remap_file_pages sys_remap_file_pages 277 - 268 common mbind sys_mbind sys_mbind 278 - 269 common get_mempolicy sys_get_mempolicy sys_get_mempolicy 279 - 270 common set_mempolicy sys_set_mempolicy sys_set_mempolicy 280 - 271 common mq_open sys_mq_open compat_sys_mq_open 281 - 272 common mq_unlink sys_mq_unlink sys_mq_unlink 282 - 273 common mq_timedsend sys_mq_timedsend sys_mq_timedsend_time32 283 - 274 common mq_timedreceive sys_mq_timedreceive sys_mq_timedreceive_time32 284 - 275 common mq_notify sys_mq_notify compat_sys_mq_notify 285 - 276 common mq_getsetattr sys_mq_getsetattr 
compat_sys_mq_getsetattr 286 - 277 common kexec_load sys_kexec_load compat_sys_kexec_load 287 - 278 common add_key sys_add_key sys_add_key 288 - 279 common request_key sys_request_key sys_request_key 289 - 280 common keyctl sys_keyctl compat_sys_keyctl 290 - 281 common waitid sys_waitid compat_sys_waitid 291 - 282 common ioprio_set sys_ioprio_set sys_ioprio_set 292 - 283 common ioprio_get sys_ioprio_get sys_ioprio_get 293 - 284 common inotify_init sys_inotify_init sys_inotify_init 294 - 285 common inotify_add_watch sys_inotify_add_watch sys_inotify_add_watch 295 - 286 common inotify_rm_watch sys_inotify_rm_watch sys_inotify_rm_watch 296 - 287 common migrate_pages sys_migrate_pages sys_migrate_pages 297 - 288 common openat sys_openat compat_sys_openat 298 - 289 common mkdirat sys_mkdirat sys_mkdirat 299 - 290 common mknodat sys_mknodat sys_mknodat 300 - 291 common fchownat sys_fchownat sys_fchownat 301 - 292 common futimesat sys_futimesat sys_futimesat_time32 302 - 293 32 fstatat64 - compat_sys_s390_fstatat64 303 - 293 64 newfstatat sys_newfstatat - 304 - 294 common unlinkat sys_unlinkat sys_unlinkat 305 - 295 common renameat sys_renameat sys_renameat 306 - 296 common linkat sys_linkat sys_linkat 307 - 297 common symlinkat sys_symlinkat sys_symlinkat 308 - 298 common readlinkat sys_readlinkat sys_readlinkat 309 - 299 common fchmodat sys_fchmodat sys_fchmodat 310 - 300 common faccessat sys_faccessat sys_faccessat 311 - 301 common pselect6 sys_pselect6 compat_sys_pselect6_time32 312 - 302 common ppoll sys_ppoll compat_sys_ppoll_time32 313 - 303 common unshare sys_unshare sys_unshare 314 - 304 common set_robust_list sys_set_robust_list compat_sys_set_robust_list 315 - 305 common get_robust_list sys_get_robust_list compat_sys_get_robust_list 316 - 306 common splice sys_splice sys_splice 317 - 307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range 318 - 308 common tee sys_tee sys_tee 319 - 309 common vmsplice sys_vmsplice sys_vmsplice 320 - 310 
common move_pages sys_move_pages sys_move_pages 321 - 311 common getcpu sys_getcpu sys_getcpu 322 - 312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait 323 - 313 common utimes sys_utimes sys_utimes_time32 324 - 314 common fallocate sys_fallocate compat_sys_s390_fallocate 325 - 315 common utimensat sys_utimensat sys_utimensat_time32 326 - 316 common signalfd sys_signalfd compat_sys_signalfd 327 - 317 common timerfd - - 328 - 318 common eventfd sys_eventfd sys_eventfd 329 - 319 common timerfd_create sys_timerfd_create sys_timerfd_create 330 - 320 common timerfd_settime sys_timerfd_settime sys_timerfd_settime32 331 - 321 common timerfd_gettime sys_timerfd_gettime sys_timerfd_gettime32 332 - 322 common signalfd4 sys_signalfd4 compat_sys_signalfd4 333 - 323 common eventfd2 sys_eventfd2 sys_eventfd2 334 - 324 common inotify_init1 sys_inotify_init1 sys_inotify_init1 335 - 325 common pipe2 sys_pipe2 sys_pipe2 336 - 326 common dup3 sys_dup3 sys_dup3 337 - 327 common epoll_create1 sys_epoll_create1 sys_epoll_create1 338 - 328 common preadv sys_preadv compat_sys_preadv 339 - 329 common pwritev sys_pwritev compat_sys_pwritev 340 - 330 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo 341 - 331 common perf_event_open sys_perf_event_open sys_perf_event_open 342 - 332 common fanotify_init sys_fanotify_init sys_fanotify_init 343 - 333 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark 344 - 334 common prlimit64 sys_prlimit64 sys_prlimit64 345 - 335 common name_to_handle_at sys_name_to_handle_at sys_name_to_handle_at 346 - 336 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at 347 - 337 common clock_adjtime sys_clock_adjtime sys_clock_adjtime32 348 - 338 common syncfs sys_syncfs sys_syncfs 349 - 339 common setns sys_setns sys_setns 350 - 340 common process_vm_readv sys_process_vm_readv sys_process_vm_readv 351 - 341 common process_vm_writev sys_process_vm_writev sys_process_vm_writev 352 - 342 common 
s390_runtime_instr sys_s390_runtime_instr sys_s390_runtime_instr 353 - 343 common kcmp sys_kcmp sys_kcmp 354 - 344 common finit_module sys_finit_module sys_finit_module 355 - 345 common sched_setattr sys_sched_setattr sys_sched_setattr 356 - 346 common sched_getattr sys_sched_getattr sys_sched_getattr 357 - 347 common renameat2 sys_renameat2 sys_renameat2 358 - 348 common seccomp sys_seccomp sys_seccomp 359 - 349 common getrandom sys_getrandom sys_getrandom 360 - 350 common memfd_create sys_memfd_create sys_memfd_create 361 - 351 common bpf sys_bpf sys_bpf 362 - 352 common s390_pci_mmio_write sys_s390_pci_mmio_write sys_s390_pci_mmio_write 363 - 353 common s390_pci_mmio_read sys_s390_pci_mmio_read sys_s390_pci_mmio_read 364 - 354 common execveat sys_execveat compat_sys_execveat 365 - 355 common userfaultfd sys_userfaultfd sys_userfaultfd 366 - 356 common membarrier sys_membarrier sys_membarrier 367 - 357 common recvmmsg sys_recvmmsg compat_sys_recvmmsg_time32 368 - 358 common sendmmsg sys_sendmmsg compat_sys_sendmmsg 369 - 359 common socket sys_socket sys_socket 370 - 360 common socketpair sys_socketpair sys_socketpair 371 - 361 common bind sys_bind sys_bind 372 - 362 common connect sys_connect sys_connect 373 - 363 common listen sys_listen sys_listen 374 - 364 common accept4 sys_accept4 sys_accept4 375 - 365 common getsockopt sys_getsockopt sys_getsockopt 376 - 366 common setsockopt sys_setsockopt sys_setsockopt 377 - 367 common getsockname sys_getsockname sys_getsockname 378 - 368 common getpeername sys_getpeername sys_getpeername 379 - 369 common sendto sys_sendto sys_sendto 380 - 370 common sendmsg sys_sendmsg compat_sys_sendmsg 381 - 371 common recvfrom sys_recvfrom compat_sys_recvfrom 382 - 372 common recvmsg sys_recvmsg compat_sys_recvmsg 383 - 373 common shutdown sys_shutdown sys_shutdown 384 - 374 common mlock2 sys_mlock2 sys_mlock2 385 - 375 common copy_file_range sys_copy_file_range sys_copy_file_range 386 - 376 common preadv2 sys_preadv2 
compat_sys_preadv2 387 - 377 common pwritev2 sys_pwritev2 compat_sys_pwritev2 388 - 378 common s390_guarded_storage sys_s390_guarded_storage sys_s390_guarded_storage 389 - 379 common statx sys_statx sys_statx 390 - 380 common s390_sthyi sys_s390_sthyi sys_s390_sthyi 391 - 381 common kexec_file_load sys_kexec_file_load sys_kexec_file_load 392 - 382 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents 393 - 383 common rseq sys_rseq sys_rseq 394 - 384 common pkey_mprotect sys_pkey_mprotect sys_pkey_mprotect 395 - 385 common pkey_alloc sys_pkey_alloc sys_pkey_alloc 396 - 386 common pkey_free sys_pkey_free sys_pkey_free 10 + 1 common exit sys_exit 11 + 2 common fork sys_fork 12 + 3 common read sys_read 13 + 4 common write sys_write 14 + 5 common open sys_open 15 + 6 common close sys_close 16 + 7 common restart_syscall sys_restart_syscall 17 + 8 common creat sys_creat 18 + 9 common link sys_link 19 + 10 common unlink sys_unlink 20 + 11 common execve sys_execve 21 + 12 common chdir sys_chdir 22 + 14 common mknod sys_mknod 23 + 15 common chmod sys_chmod 24 + 19 common lseek sys_lseek 25 + 20 common getpid sys_getpid 26 + 21 common mount sys_mount 27 + 22 common umount sys_oldumount 28 + 26 common ptrace sys_ptrace 29 + 27 common alarm sys_alarm 30 + 29 common pause sys_pause 31 + 30 common utime sys_utime 32 + 33 common access sys_access 33 + 34 common nice sys_nice 34 + 36 common sync sys_sync 35 + 37 common kill sys_kill 36 + 38 common rename sys_rename 37 + 39 common mkdir sys_mkdir 38 + 40 common rmdir sys_rmdir 39 + 41 common dup sys_dup 40 + 42 common pipe sys_pipe 41 + 43 common times sys_times 42 + 45 common brk sys_brk 43 + 48 common signal sys_signal 44 + 51 common acct sys_acct 45 + 52 common umount2 sys_umount 46 + 54 common ioctl sys_ioctl 47 + 55 common fcntl sys_fcntl 48 + 57 common setpgid sys_setpgid 49 + 60 common umask sys_umask 50 + 61 common chroot sys_chroot 51 + 62 common ustat sys_ustat 52 + 63 common dup2 sys_dup2 53 + 64 common getppid 
sys_getppid 54 + 65 common getpgrp sys_getpgrp 55 + 66 common setsid sys_setsid 56 + 67 common sigaction sys_sigaction 57 + 72 common sigsuspend sys_sigsuspend 58 + 73 common sigpending sys_sigpending 59 + 74 common sethostname sys_sethostname 60 + 75 common setrlimit sys_setrlimit 61 + 77 common getrusage sys_getrusage 62 + 78 common gettimeofday sys_gettimeofday 63 + 79 common settimeofday sys_settimeofday 64 + 83 common symlink sys_symlink 65 + 85 common readlink sys_readlink 66 + 86 common uselib sys_uselib 67 + 87 common swapon sys_swapon 68 + 88 common reboot sys_reboot 69 + 89 common readdir sys_ni_syscall 70 + 90 common mmap sys_old_mmap 71 + 91 common munmap sys_munmap 72 + 92 common truncate sys_truncate 73 + 93 common ftruncate sys_ftruncate 74 + 94 common fchmod sys_fchmod 75 + 96 common getpriority sys_getpriority 76 + 97 common setpriority sys_setpriority 77 + 99 common statfs sys_statfs 78 + 100 common fstatfs sys_fstatfs 79 + 102 common socketcall sys_socketcall 80 + 103 common syslog sys_syslog 81 + 104 common setitimer sys_setitimer 82 + 105 common getitimer sys_getitimer 83 + 106 common stat sys_newstat 84 + 107 common lstat sys_newlstat 85 + 108 common fstat sys_newfstat 86 + 110 common lookup_dcookie sys_ni_syscall 87 + 111 common vhangup sys_vhangup 88 + 112 common idle sys_ni_syscall 89 + 114 common wait4 sys_wait4 90 + 115 common swapoff sys_swapoff 91 + 116 common sysinfo sys_sysinfo 92 + 117 common ipc sys_s390_ipc 93 + 118 common fsync sys_fsync 94 + 119 common sigreturn sys_sigreturn 95 + 120 common clone sys_clone 96 + 121 common setdomainname sys_setdomainname 97 + 122 common uname sys_newuname 98 + 124 common adjtimex sys_adjtimex 99 + 125 common mprotect sys_mprotect 100 + 126 common sigprocmask sys_sigprocmask 101 + 127 common create_module sys_ni_syscall 102 + 128 common init_module sys_init_module 103 + 129 common delete_module sys_delete_module 104 + 130 common get_kernel_syms sys_ni_syscall 105 + 131 common quotactl sys_quotactl 
106 + 132 common getpgid sys_getpgid 107 + 133 common fchdir sys_fchdir 108 + 134 common bdflush sys_ni_syscall 109 + 135 common sysfs sys_sysfs 110 + 136 common personality sys_s390_personality 111 + 137 common afs_syscall sys_ni_syscall 112 + 141 common getdents sys_getdents 113 + 142 common select sys_select 114 + 143 common flock sys_flock 115 + 144 common msync sys_msync 116 + 145 common readv sys_readv 117 + 146 common writev sys_writev 118 + 147 common getsid sys_getsid 119 + 148 common fdatasync sys_fdatasync 120 + 149 common _sysctl sys_ni_syscall 121 + 150 common mlock sys_mlock 122 + 151 common munlock sys_munlock 123 + 152 common mlockall sys_mlockall 124 + 153 common munlockall sys_munlockall 125 + 154 common sched_setparam sys_sched_setparam 126 + 155 common sched_getparam sys_sched_getparam 127 + 156 common sched_setscheduler sys_sched_setscheduler 128 + 157 common sched_getscheduler sys_sched_getscheduler 129 + 158 common sched_yield sys_sched_yield 130 + 159 common sched_get_priority_max sys_sched_get_priority_max 131 + 160 common sched_get_priority_min sys_sched_get_priority_min 132 + 161 common sched_rr_get_interval sys_sched_rr_get_interval 133 + 162 common nanosleep sys_nanosleep 134 + 163 common mremap sys_mremap 135 + 167 common query_module sys_ni_syscall 136 + 168 common poll sys_poll 137 + 169 common nfsservctl sys_ni_syscall 138 + 172 common prctl sys_prctl 139 + 173 common rt_sigreturn sys_rt_sigreturn 140 + 174 common rt_sigaction sys_rt_sigaction 141 + 175 common rt_sigprocmask sys_rt_sigprocmask 142 + 176 common rt_sigpending sys_rt_sigpending 143 + 177 common rt_sigtimedwait sys_rt_sigtimedwait 144 + 178 common rt_sigqueueinfo sys_rt_sigqueueinfo 145 + 179 common rt_sigsuspend sys_rt_sigsuspend 146 + 180 common pread64 sys_pread64 147 + 181 common pwrite64 sys_pwrite64 148 + 183 common getcwd sys_getcwd 149 + 184 common capget sys_capget 150 + 185 common capset sys_capset 151 + 186 common sigaltstack sys_sigaltstack 152 + 187 common 
sendfile sys_sendfile64 153 + 188 common getpmsg sys_ni_syscall 154 + 189 common putpmsg sys_ni_syscall 155 + 190 common vfork sys_vfork 156 + 191 common getrlimit sys_getrlimit 157 + 198 common lchown sys_lchown 158 + 199 common getuid sys_getuid 159 + 200 common getgid sys_getgid 160 + 201 common geteuid sys_geteuid 161 + 202 common getegid sys_getegid 162 + 203 common setreuid sys_setreuid 163 + 204 common setregid sys_setregid 164 + 205 common getgroups sys_getgroups 165 + 206 common setgroups sys_setgroups 166 + 207 common fchown sys_fchown 167 + 208 common setresuid sys_setresuid 168 + 209 common getresuid sys_getresuid 169 + 210 common setresgid sys_setresgid 170 + 211 common getresgid sys_getresgid 171 + 212 common chown sys_chown 172 + 213 common setuid sys_setuid 173 + 214 common setgid sys_setgid 174 + 215 common setfsuid sys_setfsuid 175 + 216 common setfsgid sys_setfsgid 176 + 217 common pivot_root sys_pivot_root 177 + 218 common mincore sys_mincore 178 + 219 common madvise sys_madvise 179 + 220 common getdents64 sys_getdents64 180 + 222 common readahead sys_readahead 181 + 224 common setxattr sys_setxattr 182 + 225 common lsetxattr sys_lsetxattr 183 + 226 common fsetxattr sys_fsetxattr 184 + 227 common getxattr sys_getxattr 185 + 228 common lgetxattr sys_lgetxattr 186 + 229 common fgetxattr sys_fgetxattr 187 + 230 common listxattr sys_listxattr 188 + 231 common llistxattr sys_llistxattr 189 + 232 common flistxattr sys_flistxattr 190 + 233 common removexattr sys_removexattr 191 + 234 common lremovexattr sys_lremovexattr 192 + 235 common fremovexattr sys_fremovexattr 193 + 236 common gettid sys_gettid 194 + 237 common tkill sys_tkill 195 + 238 common futex sys_futex 196 + 239 common sched_setaffinity sys_sched_setaffinity 197 + 240 common sched_getaffinity sys_sched_getaffinity 198 + 241 common tgkill sys_tgkill 199 + 243 common io_setup sys_io_setup 200 + 244 common io_destroy sys_io_destroy 201 + 245 common io_getevents sys_io_getevents 202 + 246 
common io_submit sys_io_submit 203 + 247 common io_cancel sys_io_cancel 204 + 248 common exit_group sys_exit_group 205 + 249 common epoll_create sys_epoll_create 206 + 250 common epoll_ctl sys_epoll_ctl 207 + 251 common epoll_wait sys_epoll_wait 208 + 252 common set_tid_address sys_set_tid_address 209 + 253 common fadvise64 sys_fadvise64_64 210 + 254 common timer_create sys_timer_create 211 + 255 common timer_settime sys_timer_settime 212 + 256 common timer_gettime sys_timer_gettime 213 + 257 common timer_getoverrun sys_timer_getoverrun 214 + 258 common timer_delete sys_timer_delete 215 + 259 common clock_settime sys_clock_settime 216 + 260 common clock_gettime sys_clock_gettime 217 + 261 common clock_getres sys_clock_getres 218 + 262 common clock_nanosleep sys_clock_nanosleep 219 + 265 common statfs64 sys_statfs64 220 + 266 common fstatfs64 sys_fstatfs64 221 + 267 common remap_file_pages sys_remap_file_pages 222 + 268 common mbind sys_mbind 223 + 269 common get_mempolicy sys_get_mempolicy 224 + 270 common set_mempolicy sys_set_mempolicy 225 + 271 common mq_open sys_mq_open 226 + 272 common mq_unlink sys_mq_unlink 227 + 273 common mq_timedsend sys_mq_timedsend 228 + 274 common mq_timedreceive sys_mq_timedreceive 229 + 275 common mq_notify sys_mq_notify 230 + 276 common mq_getsetattr sys_mq_getsetattr 231 + 277 common kexec_load sys_kexec_load 232 + 278 common add_key sys_add_key 233 + 279 common request_key sys_request_key 234 + 280 common keyctl sys_keyctl 235 + 281 common waitid sys_waitid 236 + 282 common ioprio_set sys_ioprio_set 237 + 283 common ioprio_get sys_ioprio_get 238 + 284 common inotify_init sys_inotify_init 239 + 285 common inotify_add_watch sys_inotify_add_watch 240 + 286 common inotify_rm_watch sys_inotify_rm_watch 241 + 287 common migrate_pages sys_migrate_pages 242 + 288 common openat sys_openat 243 + 289 common mkdirat sys_mkdirat 244 + 290 common mknodat sys_mknodat 245 + 291 common fchownat sys_fchownat 246 + 292 common futimesat sys_futimesat 
247 + 293 common newfstatat sys_newfstatat 248 + 294 common unlinkat sys_unlinkat 249 + 295 common renameat sys_renameat 250 + 296 common linkat sys_linkat 251 + 297 common symlinkat sys_symlinkat 252 + 298 common readlinkat sys_readlinkat 253 + 299 common fchmodat sys_fchmodat 254 + 300 common faccessat sys_faccessat 255 + 301 common pselect6 sys_pselect6 256 + 302 common ppoll sys_ppoll 257 + 303 common unshare sys_unshare 258 + 304 common set_robust_list sys_set_robust_list 259 + 305 common get_robust_list sys_get_robust_list 260 + 306 common splice sys_splice 261 + 307 common sync_file_range sys_sync_file_range 262 + 308 common tee sys_tee 263 + 309 common vmsplice sys_vmsplice 264 + 310 common move_pages sys_move_pages 265 + 311 common getcpu sys_getcpu 266 + 312 common epoll_pwait sys_epoll_pwait 267 + 313 common utimes sys_utimes 268 + 314 common fallocate sys_fallocate 269 + 315 common utimensat sys_utimensat 270 + 316 common signalfd sys_signalfd 271 + 317 common timerfd sys_ni_syscall 272 + 318 common eventfd sys_eventfd 273 + 319 common timerfd_create sys_timerfd_create 274 + 320 common timerfd_settime sys_timerfd_settime 275 + 321 common timerfd_gettime sys_timerfd_gettime 276 + 322 common signalfd4 sys_signalfd4 277 + 323 common eventfd2 sys_eventfd2 278 + 324 common inotify_init1 sys_inotify_init1 279 + 325 common pipe2 sys_pipe2 280 + 326 common dup3 sys_dup3 281 + 327 common epoll_create1 sys_epoll_create1 282 + 328 common preadv sys_preadv 283 + 329 common pwritev sys_pwritev 284 + 330 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo 285 + 331 common perf_event_open sys_perf_event_open 286 + 332 common fanotify_init sys_fanotify_init 287 + 333 common fanotify_mark sys_fanotify_mark 288 + 334 common prlimit64 sys_prlimit64 289 + 335 common name_to_handle_at sys_name_to_handle_at 290 + 336 common open_by_handle_at sys_open_by_handle_at 291 + 337 common clock_adjtime sys_clock_adjtime 292 + 338 common syncfs sys_syncfs 293 + 339 common setns sys_setns 
294 + 340 common process_vm_readv sys_process_vm_readv 295 + 341 common process_vm_writev sys_process_vm_writev 296 + 342 common s390_runtime_instr sys_s390_runtime_instr 297 + 343 common kcmp sys_kcmp 298 + 344 common finit_module sys_finit_module 299 + 345 common sched_setattr sys_sched_setattr 300 + 346 common sched_getattr sys_sched_getattr 301 + 347 common renameat2 sys_renameat2 302 + 348 common seccomp sys_seccomp 303 + 349 common getrandom sys_getrandom 304 + 350 common memfd_create sys_memfd_create 305 + 351 common bpf sys_bpf 306 + 352 common s390_pci_mmio_write sys_s390_pci_mmio_write 307 + 353 common s390_pci_mmio_read sys_s390_pci_mmio_read 308 + 354 common execveat sys_execveat 309 + 355 common userfaultfd sys_userfaultfd 310 + 356 common membarrier sys_membarrier 311 + 357 common recvmmsg sys_recvmmsg 312 + 358 common sendmmsg sys_sendmmsg 313 + 359 common socket sys_socket 314 + 360 common socketpair sys_socketpair 315 + 361 common bind sys_bind 316 + 362 common connect sys_connect 317 + 363 common listen sys_listen 318 + 364 common accept4 sys_accept4 319 + 365 common getsockopt sys_getsockopt 320 + 366 common setsockopt sys_setsockopt 321 + 367 common getsockname sys_getsockname 322 + 368 common getpeername sys_getpeername 323 + 369 common sendto sys_sendto 324 + 370 common sendmsg sys_sendmsg 325 + 371 common recvfrom sys_recvfrom 326 + 372 common recvmsg sys_recvmsg 327 + 373 common shutdown sys_shutdown 328 + 374 common mlock2 sys_mlock2 329 + 375 common copy_file_range sys_copy_file_range 330 + 376 common preadv2 sys_preadv2 331 + 377 common pwritev2 sys_pwritev2 332 + 378 common s390_guarded_storage sys_s390_guarded_storage 333 + 379 common statx sys_statx 334 + 380 common s390_sthyi sys_s390_sthyi 335 + 381 common kexec_file_load sys_kexec_file_load 336 + 382 common io_pgetevents sys_io_pgetevents 337 + 383 common rseq sys_rseq 338 + 384 common pkey_mprotect sys_pkey_mprotect 339 + 385 common pkey_alloc sys_pkey_alloc 340 + 386 common 
pkey_free sys_pkey_free 397 341 # room for arch specific syscalls 398 - 392 64 semtimedop sys_semtimedop - 399 - 393 common semget sys_semget sys_semget 400 - 394 common semctl sys_semctl compat_sys_semctl 401 - 395 common shmget sys_shmget sys_shmget 402 - 396 common shmctl sys_shmctl compat_sys_shmctl 403 - 397 common shmat sys_shmat compat_sys_shmat 404 - 398 common shmdt sys_shmdt sys_shmdt 405 - 399 common msgget sys_msgget sys_msgget 406 - 400 common msgsnd sys_msgsnd compat_sys_msgsnd 407 - 401 common msgrcv sys_msgrcv compat_sys_msgrcv 408 - 402 common msgctl sys_msgctl compat_sys_msgctl 409 - 403 32 clock_gettime64 - sys_clock_gettime 410 - 404 32 clock_settime64 - sys_clock_settime 411 - 405 32 clock_adjtime64 - sys_clock_adjtime 412 - 406 32 clock_getres_time64 - sys_clock_getres 413 - 407 32 clock_nanosleep_time64 - sys_clock_nanosleep 414 - 408 32 timer_gettime64 - sys_timer_gettime 415 - 409 32 timer_settime64 - sys_timer_settime 416 - 410 32 timerfd_gettime64 - sys_timerfd_gettime 417 - 411 32 timerfd_settime64 - sys_timerfd_settime 418 - 412 32 utimensat_time64 - sys_utimensat 419 - 413 32 pselect6_time64 - compat_sys_pselect6_time64 420 - 414 32 ppoll_time64 - compat_sys_ppoll_time64 421 - 416 32 io_pgetevents_time64 - compat_sys_io_pgetevents_time64 422 - 417 32 recvmmsg_time64 - compat_sys_recvmmsg_time64 423 - 418 32 mq_timedsend_time64 - sys_mq_timedsend 424 - 419 32 mq_timedreceive_time64 - sys_mq_timedreceive 425 - 420 32 semtimedop_time64 - sys_semtimedop 426 - 421 32 rt_sigtimedwait_time64 - compat_sys_rt_sigtimedwait_time64 427 - 422 32 futex_time64 - sys_futex 428 - 423 32 sched_rr_get_interval_time64 - sys_sched_rr_get_interval 429 - 424 common pidfd_send_signal sys_pidfd_send_signal sys_pidfd_send_signal 430 - 425 common io_uring_setup sys_io_uring_setup sys_io_uring_setup 431 - 426 common io_uring_enter sys_io_uring_enter sys_io_uring_enter 432 - 427 common io_uring_register sys_io_uring_register sys_io_uring_register 433 - 428 common 
open_tree sys_open_tree sys_open_tree 434 - 429 common move_mount sys_move_mount sys_move_mount 435 - 430 common fsopen sys_fsopen sys_fsopen 436 - 431 common fsconfig sys_fsconfig sys_fsconfig 437 - 432 common fsmount sys_fsmount sys_fsmount 438 - 433 common fspick sys_fspick sys_fspick 439 - 434 common pidfd_open sys_pidfd_open sys_pidfd_open 440 - 435 common clone3 sys_clone3 sys_clone3 441 - 436 common close_range sys_close_range sys_close_range 442 - 437 common openat2 sys_openat2 sys_openat2 443 - 438 common pidfd_getfd sys_pidfd_getfd sys_pidfd_getfd 444 - 439 common faccessat2 sys_faccessat2 sys_faccessat2 445 - 440 common process_madvise sys_process_madvise sys_process_madvise 446 - 441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2 447 - 442 common mount_setattr sys_mount_setattr sys_mount_setattr 448 - 443 common quotactl_fd sys_quotactl_fd sys_quotactl_fd 449 - 444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset 450 - 445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule 451 - 446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self 452 - 447 common memfd_secret sys_memfd_secret sys_memfd_secret 453 - 448 common process_mrelease sys_process_mrelease sys_process_mrelease 454 - 449 common futex_waitv sys_futex_waitv sys_futex_waitv 455 - 450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node 456 - 451 common cachestat sys_cachestat sys_cachestat 457 - 452 common fchmodat2 sys_fchmodat2 sys_fchmodat2 458 - 453 common map_shadow_stack sys_map_shadow_stack sys_map_shadow_stack 459 - 454 common futex_wake sys_futex_wake sys_futex_wake 460 - 455 common futex_wait sys_futex_wait sys_futex_wait 461 - 456 common futex_requeue sys_futex_requeue sys_futex_requeue 462 - 457 common statmount sys_statmount sys_statmount 463 - 458 common listmount sys_listmount sys_listmount 464 - 459 common lsm_get_self_attr sys_lsm_get_self_attr 
sys_lsm_get_self_attr 465 - 460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr 466 - 461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules 467 - 462 common mseal sys_mseal sys_mseal 468 - 463 common setxattrat sys_setxattrat sys_setxattrat 469 - 464 common getxattrat sys_getxattrat sys_getxattrat 470 - 465 common listxattrat sys_listxattrat sys_listxattrat 471 - 466 common removexattrat sys_removexattrat sys_removexattrat 472 - 467 common open_tree_attr sys_open_tree_attr sys_open_tree_attr 473 - 468 common file_getattr sys_file_getattr sys_file_getattr 474 - 469 common file_setattr sys_file_setattr sys_file_setattr 475 - 470 common listns sys_listns sys_listns 342 + 392 common semtimedop sys_semtimedop 343 + 393 common semget sys_semget 344 + 394 common semctl sys_semctl 345 + 395 common shmget sys_shmget 346 + 396 common shmctl sys_shmctl 347 + 397 common shmat sys_shmat 348 + 398 common shmdt sys_shmdt 349 + 399 common msgget sys_msgget 350 + 400 common msgsnd sys_msgsnd 351 + 401 common msgrcv sys_msgrcv 352 + 402 common msgctl sys_msgctl 353 + 424 common pidfd_send_signal sys_pidfd_send_signal 354 + 425 common io_uring_setup sys_io_uring_setup 355 + 426 common io_uring_enter sys_io_uring_enter 356 + 427 common io_uring_register sys_io_uring_register 357 + 428 common open_tree sys_open_tree 358 + 429 common move_mount sys_move_mount 359 + 430 common fsopen sys_fsopen 360 + 431 common fsconfig sys_fsconfig 361 + 432 common fsmount sys_fsmount 362 + 433 common fspick sys_fspick 363 + 434 common pidfd_open sys_pidfd_open 364 + 435 common clone3 sys_clone3 365 + 436 common close_range sys_close_range 366 + 437 common openat2 sys_openat2 367 + 438 common pidfd_getfd sys_pidfd_getfd 368 + 439 common faccessat2 sys_faccessat2 369 + 440 common process_madvise sys_process_madvise 370 + 441 common epoll_pwait2 sys_epoll_pwait2 371 + 442 common mount_setattr sys_mount_setattr 372 + 443 common quotactl_fd sys_quotactl_fd 373 + 444 
common landlock_create_ruleset sys_landlock_create_ruleset 374 + 445 common landlock_add_rule sys_landlock_add_rule 375 + 446 common landlock_restrict_self sys_landlock_restrict_self 376 + 447 common memfd_secret sys_memfd_secret 377 + 448 common process_mrelease sys_process_mrelease 378 + 449 common futex_waitv sys_futex_waitv 379 + 450 common set_mempolicy_home_node sys_set_mempolicy_home_node 380 + 451 common cachestat sys_cachestat 381 + 452 common fchmodat2 sys_fchmodat2 382 + 453 common map_shadow_stack sys_map_shadow_stack 383 + 454 common futex_wake sys_futex_wake 384 + 455 common futex_wait sys_futex_wait 385 + 456 common futex_requeue sys_futex_requeue 386 + 457 common statmount sys_statmount 387 + 458 common listmount sys_listmount 388 + 459 common lsm_get_self_attr sys_lsm_get_self_attr 389 + 460 common lsm_set_self_attr sys_lsm_set_self_attr 390 + 461 common lsm_list_modules sys_lsm_list_modules 391 + 462 common mseal sys_mseal 392 + 463 common setxattrat sys_setxattrat 393 + 464 common getxattrat sys_getxattrat 394 + 465 common listxattrat sys_listxattrat 395 + 466 common removexattrat sys_removexattrat 396 + 467 common open_tree_attr sys_open_tree_attr 397 + 468 common file_getattr sys_file_getattr 398 + 469 common file_setattr sys_file_setattr 399 + 470 common listns sys_listns
-232
arch/s390/kernel/syscalls/syscalltbl
··· 1 - #!/bin/sh 2 - # SPDX-License-Identifier: GPL-2.0 3 - # 4 - # Generate system call table and header files 5 - # 6 - # Copyright IBM Corp. 2018 7 - # Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 8 - 9 - # 10 - # File path to the system call table definition. 11 - # You can set the path with the -i option. If omitted, 12 - # system call table definitions are read from standard input. 13 - # 14 - SYSCALL_TBL="" 15 - 16 - 17 - create_syscall_table_entries() 18 - { 19 - local nr abi name entry64 entry32 _ignore 20 - local temp=$(mktemp ${TMPDIR:-/tmp}/syscalltbl-common.XXXXXXXXX) 21 - 22 - ( 23 - # 24 - # Initialize with 0 to create an NI_SYSCALL for 0 25 - # 26 - local prev_nr=0 prev_32=sys_ni_syscall prev_64=sys_ni_syscall 27 - while read nr abi name entry64 entry32 _ignore; do 28 - test x$entry32 = x- && entry32=sys_ni_syscall 29 - test x$entry64 = x- && entry64=sys_ni_syscall 30 - 31 - if test $prev_nr -eq $nr; then 32 - # 33 - # Same syscall but different ABI, just update 34 - # the respective entry point 35 - # 36 - case $abi in 37 - 32) 38 - prev_32=$entry32 39 - ;; 40 - 64) 41 - prev_64=$entry64 42 - ;; 43 - esac 44 - continue; 45 - else 46 - printf "%d\t%s\t%s\n" $prev_nr $prev_64 $prev_32 47 - fi 48 - 49 - prev_nr=$nr 50 - prev_64=$entry64 51 - prev_32=$entry32 52 - done 53 - printf "%d\t%s\t%s\n" $prev_nr $prev_64 $prev_32 54 - ) >> $temp 55 - 56 - # 57 - # Check for duplicate syscall numbers 58 - # 59 - if ! 
cat $temp |cut -f1 |uniq -d 2>&1; then 60 - echo "Error: generated system call table contains duplicate entries: $temp" >&2 61 - exit 1 62 - fi 63 - 64 - # 65 - # Generate syscall table 66 - # 67 - prev_nr=0 68 - while read nr entry64 entry32; do 69 - while test $prev_nr -lt $((nr - 1)); do 70 - printf "NI_SYSCALL\n" 71 - prev_nr=$((prev_nr + 1)) 72 - done 73 - if test x$entry64 = xsys_ni_syscall && 74 - test x$entry32 = xsys_ni_syscall; then 75 - printf "NI_SYSCALL\n" 76 - else 77 - printf "SYSCALL(%s,%s)\n" $entry64 $entry32 78 - fi 79 - prev_nr=$nr 80 - done < $temp 81 - rm $temp 82 - } 83 - 84 - generate_syscall_table() 85 - { 86 - cat <<-EoHEADER 87 - /* SPDX-License-Identifier: GPL-2.0 */ 88 - /* 89 - * Definitions for sys_call_table, each line represents an 90 - * entry in the table in the form 91 - * SYSCALL(64 bit syscall, 31 bit emulated syscall) 92 - * 93 - * This file is meant to be included from entry.S. 94 - */ 95 - 96 - #define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall) 97 - 98 - EoHEADER 99 - grep -Ev '^(#|[[:blank:]]*$)' $SYSCALL_TBL \ 100 - |sort -k1 -n \ 101 - |create_syscall_table_entries 102 - } 103 - 104 - create_header_defines() 105 - { 106 - local nr abi name _ignore 107 - 108 - while read nr abi name _ignore; do 109 - printf "#define __NR_%s %d\n" $name $nr 110 - done 111 - } 112 - 113 - normalize_fileguard() 114 - { 115 - local fileguard="$1" 116 - 117 - echo "$1" |tr '[[:lower:]]' '[[:upper:]]' \ 118 - |sed -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g' 119 - } 120 - 121 - generate_syscall_header() 122 - { 123 - local abis=$(echo "($1)" | tr ',' '|') 124 - local filename="$2" 125 - local fileguard suffix 126 - 127 - if test "$filename"; then 128 - fileguard=$(normalize_fileguard "__UAPI_ASM_S390_$2") 129 - else 130 - case "$abis" in 131 - *64*) suffix=64 ;; 132 - *32*) suffix=32 ;; 133 - esac 134 - fileguard=$(normalize_fileguard "__UAPI_ASM_S390_SYSCALLS_$suffix") 135 - fi 136 - 137 - cat <<-EoHEADER 138 - /* SPDX-License-Identifier: GPL-2.0 
WITH Linux-syscall-note */ 139 - #ifndef ${fileguard} 140 - #define ${fileguard} 141 - 142 - EoHEADER 143 - 144 - grep -E "^[[:digit:]]+[[:space:]]+${abis}" $SYSCALL_TBL \ 145 - |sort -k1 -n \ 146 - |create_header_defines 147 - 148 - cat <<-EoFOOTER 149 - 150 - #endif /* ${fileguard} */ 151 - EoFOOTER 152 - } 153 - 154 - __max_syscall_nr() 155 - { 156 - local abis=$(echo "($1)" | tr ',' '|') 157 - 158 - grep -E "^[[:digit:]]+[[:space:]]+${abis}" $SYSCALL_TBL \ 159 - |sed -ne 's/^\([[:digit:]]*\)[[:space:]].*/\1/p' \ 160 - |sort -n \ 161 - |tail -1 162 - } 163 - 164 - 165 - generate_syscall_nr() 166 - { 167 - local abis="$1" 168 - local max_syscall_nr num_syscalls 169 - 170 - max_syscall_nr=$(__max_syscall_nr "$abis") 171 - num_syscalls=$((max_syscall_nr + 1)) 172 - 173 - cat <<-EoHEADER 174 - /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 175 - #ifndef __ASM_S390_SYSCALLS_NR 176 - #define __ASM_S390_SYSCALLS_NR 177 - 178 - #define NR_syscalls ${num_syscalls} 179 - 180 - #endif /* __ASM_S390_SYSCALLS_NR */ 181 - EoHEADER 182 - } 183 - 184 - 185 - # 186 - # Parse command line arguments 187 - # 188 - do_syscall_header="" 189 - do_syscall_table="" 190 - do_syscall_nr="" 191 - output_file="" 192 - abi_list="common,64" 193 - filename="" 194 - while getopts ":HNSXi:a:f:" arg; do 195 - case $arg in 196 - a) 197 - abi_list="$OPTARG" 198 - ;; 199 - i) 200 - SYSCALL_TBL="$OPTARG" 201 - ;; 202 - f) 203 - filename=${OPTARG##*/} 204 - ;; 205 - H) 206 - do_syscall_header=1 207 - ;; 208 - N) 209 - do_syscall_nr=1 210 - ;; 211 - S) 212 - do_syscall_table=1 213 - ;; 214 - X) 215 - set -x 216 - ;; 217 - :) 218 - echo "Missing argument for -$OPTARG" >&2 219 - exit 1 220 - ;; 221 - \?) 
222 - echo "Invalid option specified" >&2 223 - exit 1 224 - ;; 225 - esac 226 - done 227 - 228 - test "$do_syscall_header" && generate_syscall_header "$abi_list" "$filename" 229 - test "$do_syscall_table" && generate_syscall_table 230 - test "$do_syscall_nr" && generate_syscall_nr "$abi_list" 231 - 232 - exit 0
+1 -1
arch/s390/kernel/sysinfo.c
··· 526 526 if (IS_ENABLED(CONFIG_SCHED_TOPOLOGY) && cpu_has_topology()) { 527 527 char link_to[10]; 528 528 529 - sprintf(link_to, "15_1_%d", topology_mnest_limit()); 529 + snprintf(link_to, sizeof(link_to), "15_1_%d", topology_mnest_limit()); 530 530 debugfs_create_symlink("topology", stsi_root, link_to); 531 531 } 532 532 return 0;
+1 -2
arch/s390/kernel/time.c
··· 12 12 * Copyright (C) 1991, 1992, 1995 Linus Torvalds 13 13 */ 14 14 15 - #define KMSG_COMPONENT "time" 16 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 15 + #define pr_fmt(fmt) "time: " fmt 17 16 18 17 #include <linux/kernel_stat.h> 19 18 #include <linux/errno.h>
+1 -2
arch/s390/kernel/topology.c
··· 3 3 * Copyright IBM Corp. 2007, 2011 4 4 */ 5 5 6 - #define KMSG_COMPONENT "cpu" 7 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 6 + #define pr_fmt(fmt) "cpu: " fmt 8 7 9 8 #include <linux/cpufeature.h> 10 9 #include <linux/workqueue.h>
+3 -10
arch/s390/kernel/uprobes.c
··· 8 8 9 9 #include <linux/uaccess.h> 10 10 #include <linux/uprobes.h> 11 - #include <linux/compat.h> 12 11 #include <linux/kdebug.h> 13 12 #include <linux/sched/task_stack.h> 14 13 ··· 28 29 { 29 30 if (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT) 30 31 return -EINVAL; 31 - if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) 32 + if (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) 32 33 return -EINVAL; 33 34 clear_thread_flag(TIF_PER_TRAP); 34 35 auprobe->saved_per = psw_bits(regs->psw).per; ··· 159 160 } 160 161 161 162 /* Instruction Emulation */ 162 - 163 - static void adjust_psw_addr(psw_t *psw, unsigned long len) 164 - { 165 - psw->addr = __rewind_psw(*psw, -len); 166 - } 167 163 168 164 #define EMU_ILLEGAL_OP 1 169 165 #define EMU_SPECIFICATION 2 ··· 347 353 } 348 354 break; 349 355 } 350 - adjust_psw_addr(&regs->psw, ilen); 356 + regs->psw.addr = __forward_psw(regs->psw, ilen); 351 357 switch (rc) { 352 358 case EMU_ILLEGAL_OP: 353 359 regs->int_code = ilen << 16 | 0x0001; ··· 367 373 bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) 368 374 { 369 375 if ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT) || 370 - ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) && 371 - !is_compat_task())) { 376 + (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT)) { 372 377 regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE); 373 378 do_report_trap(regs, SIGILL, ILL_ILLADR, NULL); 374 379 return true;
+1 -2
arch/s390/kernel/uv.c
··· 4 4 * 5 5 * Copyright IBM Corp. 2019, 2024 6 6 */ 7 - #define KMSG_COMPONENT "prot_virt" 8 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 7 + #define pr_fmt(fmt) "prot_virt: " fmt 9 8 10 9 #include <linux/export.h> 11 10 #include <linux/kernel.h>
+7 -29
arch/s390/kernel/vdso.c
··· 7 7 */ 8 8 9 9 #include <linux/binfmts.h> 10 - #include <linux/compat.h> 11 10 #include <linux/elf.h> 12 11 #include <linux/errno.h> 13 12 #include <linux/init.h> ··· 22 23 #include <asm/alternative.h> 23 24 #include <asm/vdso.h> 24 25 25 - extern char vdso64_start[], vdso64_end[]; 26 - extern char vdso32_start[], vdso32_end[]; 26 + extern char vdso_start[], vdso_end[]; 27 27 28 28 static int vdso_mremap(const struct vm_special_mapping *sm, 29 29 struct vm_area_struct *vma) ··· 31 33 return 0; 32 34 } 33 35 34 - static struct vm_special_mapping vdso64_mapping = { 35 - .name = "[vdso]", 36 - .mremap = vdso_mremap, 37 - }; 38 - 39 - static struct vm_special_mapping vdso32_mapping = { 36 + static struct vm_special_mapping vdso_mapping = { 40 37 .name = "[vdso]", 41 38 .mremap = vdso_mremap, 42 39 }; ··· 46 53 static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len) 47 54 { 48 55 unsigned long vvar_start, vdso_text_start, vdso_text_len; 49 - struct vm_special_mapping *vdso_mapping; 50 56 struct mm_struct *mm = current->mm; 51 57 struct vm_area_struct *vma; 52 58 int rc; ··· 54 62 if (mmap_write_lock_killable(mm)) 55 63 return -EINTR; 56 64 57 - if (is_compat_task()) { 58 - vdso_text_len = vdso32_end - vdso32_start; 59 - vdso_mapping = &vdso32_mapping; 60 - } else { 61 - vdso_text_len = vdso64_end - vdso64_start; 62 - vdso_mapping = &vdso64_mapping; 63 - } 65 + vdso_text_len = vdso_end - vdso_start; 64 66 vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0); 65 67 rc = vvar_start; 66 68 if (IS_ERR_VALUE(vvar_start)) ··· 68 82 vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len, 69 83 VM_READ|VM_EXEC|VM_SEALED_SYSMAP| 70 84 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 71 - vdso_mapping); 85 + &vdso_mapping); 72 86 if (IS_ERR(vma)) { 73 87 do_munmap(mm, vvar_start, PAGE_SIZE, NULL); 74 88 rc = PTR_ERR(vma); ··· 108 122 109 123 unsigned long vdso_text_size(void) 110 124 { 111 - unsigned long size; 112 - 113 - if (is_compat_task()) 114 - 
size = vdso32_end - vdso32_start; 115 - else 116 - size = vdso64_end - vdso64_start; 117 - return PAGE_ALIGN(size); 125 + return PAGE_ALIGN(vdso_end - vdso_start); 118 126 } 119 127 120 128 unsigned long vdso_size(void) ··· 146 166 struct alt_instr *start, *end; 147 167 const struct elf64_hdr *hdr; 148 168 149 - hdr = (struct elf64_hdr *)vdso64_start; 169 + hdr = (struct elf64_hdr *)vdso_start; 150 170 shdr = (void *)hdr + hdr->e_shoff; 151 171 alt = find_section(hdr, shdr, ".altinstructions"); 152 172 if (!alt) ··· 159 179 static int __init vdso_init(void) 160 180 { 161 181 vdso_apply_alternatives(); 162 - vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end); 163 - if (IS_ENABLED(CONFIG_COMPAT)) 164 - vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end); 182 + vdso_mapping.pages = vdso_setup_pages(vdso_start, vdso_end); 165 183 return 0; 166 184 } 167 185 arch_initcall(vdso_init);
+76
arch/s390/kernel/vdso/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + # List of files in the vdso 3 + 4 + # Include the generic Makefile to check the built vdso. 5 + include $(srctree)/lib/vdso/Makefile.include 6 + obj-vdso = vdso_user_wrapper.o note.o vgetrandom-chacha.o 7 + obj-cvdso = vdso_generic.o getcpu.o vgetrandom.o 8 + VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE) 9 + CFLAGS_REMOVE_getcpu.o = $(VDSO_CFLAGS_REMOVE) 10 + CFLAGS_REMOVE_vgetrandom.o = $(VDSO_CFLAGS_REMOVE) 11 + CFLAGS_REMOVE_vdso_generic.o = $(VDSO_CFLAGS_REMOVE) 12 + 13 + ifneq ($(c-getrandom-y),) 14 + CFLAGS_vgetrandom.o += -include $(c-getrandom-y) 15 + endif 16 + 17 + # Build rules 18 + 19 + targets := $(obj-vdso) $(obj-cvdso) vdso.so vdso.so.dbg 20 + obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) 21 + obj-cvdso := $(addprefix $(obj)/, $(obj-cvdso)) 22 + 23 + KBUILD_AFLAGS_VDSO := $(KBUILD_AFLAGS) -DBUILD_VDSO 24 + 25 + KBUILD_CFLAGS_VDSO := $(KBUILD_CFLAGS) -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING 26 + KBUILD_CFLAGS_VDSO := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS_VDSO)) 27 + KBUILD_CFLAGS_VDSO := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_VDSO)) 28 + KBUILD_CFLAGS_VDSO := $(filter-out -munaligned-symbols,$(KBUILD_CFLAGS_VDSO)) 29 + KBUILD_CFLAGS_VDSO := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_VDSO)) 30 + KBUILD_CFLAGS_VDSO += -fPIC -fno-common -fno-builtin -fasynchronous-unwind-tables 31 + KBUILD_CFLAGS_VDSO += -fno-stack-protector 32 + ldflags-y := -shared -soname=linux-vdso.so.1 \ 33 + --hash-style=both --build-id=sha1 -T 34 + 35 + $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_VDSO) 36 + $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_VDSO) 37 + 38 + obj-y += vdso_wrapper.o 39 + targets += vdso.lds 40 + CPPFLAGS_vdso.lds += -P -C -U$(ARCH) 41 + 42 + # Force dependency (incbin is bad) 43 + $(obj)/vdso_wrapper.o : $(obj)/vdso.so 44 + 45 + quiet_cmd_vdso_and_check = VDSO $@ 46 + cmd_vdso_and_check = $(cmd_ld); $(cmd_vdso_check) 
47 + 48 + # link rule for the .so file, .lds has to be first 49 + $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) $(obj-cvdso) FORCE 50 + $(call if_changed,vdso_and_check) 51 + 52 + # strip rule for the .so file 53 + $(obj)/%.so: OBJCOPYFLAGS := -S 54 + $(obj)/%.so: $(obj)/%.so.dbg FORCE 55 + $(call if_changed,objcopy) 56 + 57 + # assembly rules for the .S files 58 + $(obj-vdso): %.o: %.S FORCE 59 + $(call if_changed_dep,vdsoas) 60 + 61 + $(obj-cvdso): %.o: %.c FORCE 62 + $(call if_changed_dep,vdsocc) 63 + 64 + # actual build commands 65 + quiet_cmd_vdsoas = VDSOA $@ 66 + cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $< 67 + quiet_cmd_vdsocc = VDSOC $@ 68 + cmd_vdsocc = $(CC) $(c_flags) -c -o $@ $< 69 + 70 + # Generate VDSO offsets using helper script 71 + gen-vdsosym := $(src)/gen_vdso_offsets.sh 72 + quiet_cmd_vdsosym = VDSOSYM $@ 73 + cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ 74 + 75 + include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE 76 + $(call if_changed,vdsosym)
-2
arch/s390/kernel/vdso32/.gitignore
··· 1 - # SPDX-License-Identifier: GPL-2.0-only 2 - vdso32.lds
-64
arch/s390/kernel/vdso32/Makefile
··· 1 - # SPDX-License-Identifier: GPL-2.0 2 - # List of files in the vdso 3 - 4 - # Include the generic Makefile to check the built vdso. 5 - include $(srctree)/lib/vdso/Makefile.include 6 - obj-vdso32 = vdso_user_wrapper-32.o note-32.o 7 - 8 - # Build rules 9 - 10 - targets := $(obj-vdso32) vdso32.so vdso32.so.dbg 11 - obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) 12 - 13 - KBUILD_AFLAGS += -DBUILD_VDSO 14 - KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING 15 - 16 - KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS)) 17 - KBUILD_AFLAGS_32 += -m31 -s 18 - 19 - KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS)) 20 - KBUILD_CFLAGS_32 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS)) 21 - KBUILD_CFLAGS_32 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_32)) 22 - KBUILD_CFLAGS_32 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_32)) 23 - KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin -fasynchronous-unwind-tables 24 - 25 - LDFLAGS_vdso32.so.dbg += -shared -soname=linux-vdso32.so.1 \ 26 - --hash-style=both --build-id=sha1 -melf_s390 -T 27 - 28 - $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32) 29 - $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32) 30 - 31 - obj-y += vdso32_wrapper.o 32 - targets += vdso32.lds 33 - CPPFLAGS_vdso32.lds += -P -C -U$(ARCH) 34 - 35 - # Force dependency (incbin is bad) 36 - $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so 37 - 38 - quiet_cmd_vdso_and_check = VDSO $@ 39 - cmd_vdso_and_check = $(cmd_ld); $(cmd_vdso_check) 40 - 41 - $(obj)/vdso32.so.dbg: $(obj)/vdso32.lds $(obj-vdso32) FORCE 42 - $(call if_changed,vdso_and_check) 43 - 44 - # strip rule for the .so file 45 - $(obj)/%.so: OBJCOPYFLAGS := -S 46 - $(obj)/%.so: $(obj)/%.so.dbg FORCE 47 - $(call if_changed,objcopy) 48 - 49 - $(obj-vdso32): %-32.o: %.S FORCE 50 - $(call if_changed_dep,vdso32as) 51 - 52 - # actual build commands 53 - quiet_cmd_vdso32as = VDSO32A $@ 54 - cmd_vdso32as = $(CC) 
$(a_flags) -c -o $@ $< 55 - quiet_cmd_vdso32cc = VDSO32C $@ 56 - cmd_vdso32cc = $(CC) $(c_flags) -c -o $@ $< 57 - 58 - # Generate VDSO offsets using helper script 59 - gen-vdsosym := $(src)/gen_vdso_offsets.sh 60 - quiet_cmd_vdsosym = VDSOSYM $@ 61 - cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ 62 - 63 - include/generated/vdso32-offsets.h: $(obj)/vdso32.so.dbg FORCE 64 - $(call if_changed,vdsosym)
-15
arch/s390/kernel/vdso32/gen_vdso_offsets.sh
··· 1 - #!/bin/sh 2 - # SPDX-License-Identifier: GPL-2.0 3 - 4 - # 5 - # Match symbols in the DSO that look like VDSO_*; produce a header file 6 - # of constant offsets into the shared object. 7 - # 8 - # Doing this inside the Makefile will break the $(filter-out) function, 9 - # causing Kbuild to rebuild the vdso-offsets header file every time. 10 - # 11 - # Inspired by arm64 version. 12 - # 13 - 14 - LC_ALL=C 15 - sed -n 's/\([0-9a-f]*\) . __kernel_compat_\(.*\)/\#define vdso32_offset_\2\t0x\1/p'
arch/s390/kernel/vdso32/note.S arch/s390/kernel/vdso/note.S
+18 -45
arch/s390/kernel/vdso32/vdso32.lds.S arch/s390/kernel/vdso/vdso.lds.S
··· 4 4 * library 5 5 */ 6 6 7 + #include <asm/vdso/vsyscall.h> 7 8 #include <asm/page.h> 8 9 #include <asm/vdso.h> 10 + #include <asm-generic/vmlinux.lds.h> 9 11 #include <vdso/datapage.h> 10 12 11 - OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") 12 - OUTPUT_ARCH(s390:31-bit) 13 + OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") 14 + OUTPUT_ARCH(s390:64-bit) 13 15 14 16 SECTIONS 15 17 { ··· 43 41 .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } 44 42 .rodata1 : { *(.rodata1) } 45 43 44 + . = ALIGN(8); 45 + .altinstructions : { *(.altinstructions) } 46 + .altinstr_replacement : { *(.altinstr_replacement) } 47 + 46 48 .dynamic : { *(.dynamic) } :text :dynamic 47 49 48 50 .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr ··· 60 54 _end = .; 61 55 PROVIDE(end = .); 62 56 63 - /* 64 - * Stabs debugging sections are here too. 65 - */ 66 - .stab 0 : { *(.stab) } 67 - .stabstr 0 : { *(.stabstr) } 68 - .stab.excl 0 : { *(.stab.excl) } 69 - .stab.exclstr 0 : { *(.stab.exclstr) } 70 - .stab.index 0 : { *(.stab.index) } 71 - .stab.indexstr 0 : { *(.stab.indexstr) } 57 + STABS_DEBUG 58 + DWARF_DEBUG 72 59 .comment 0 : { *(.comment) } 73 - 74 - /* 75 - * DWARF debug sections. 76 - * Symbols in the DWARF debugging sections are relative to the 77 - * beginning of the section so we begin them at 0. 
78 - */ 79 - /* DWARF 1 */ 80 - .debug 0 : { *(.debug) } 81 - .line 0 : { *(.line) } 82 - /* GNU DWARF 1 extensions */ 83 - .debug_srcinfo 0 : { *(.debug_srcinfo) } 84 - .debug_sfnames 0 : { *(.debug_sfnames) } 85 - /* DWARF 1.1 and DWARF 2 */ 86 - .debug_aranges 0 : { *(.debug_aranges) } 87 - .debug_pubnames 0 : { *(.debug_pubnames) } 88 - /* DWARF 2 */ 89 - .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } 90 - .debug_abbrev 0 : { *(.debug_abbrev) } 91 - .debug_line 0 : { *(.debug_line) } 92 - .debug_frame 0 : { *(.debug_frame) } 93 - .debug_str 0 : { *(.debug_str) } 94 - .debug_loc 0 : { *(.debug_loc) } 95 - .debug_macinfo 0 : { *(.debug_macinfo) } 96 - /* SGI/MIPS DWARF 2 extensions */ 97 - .debug_weaknames 0 : { *(.debug_weaknames) } 98 - .debug_funcnames 0 : { *(.debug_funcnames) } 99 - .debug_typenames 0 : { *(.debug_typenames) } 100 - .debug_varnames 0 : { *(.debug_varnames) } 101 - /* DWARF 3 */ 102 - .debug_pubtypes 0 : { *(.debug_pubtypes) } 103 - .debug_ranges 0 : { *(.debug_ranges) } 104 60 .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } 105 61 106 62 /DISCARD/ : { ··· 100 132 /* 101 133 * Has to be there for the kernel to find 102 134 */ 103 - __kernel_compat_restart_syscall; 104 - __kernel_compat_rt_sigreturn; 105 - __kernel_compat_sigreturn; 135 + __kernel_gettimeofday; 136 + __kernel_clock_gettime; 137 + __kernel_clock_getres; 138 + __kernel_getcpu; 139 + __kernel_restart_syscall; 140 + __kernel_rt_sigreturn; 141 + __kernel_sigreturn; 142 + __kernel_getrandom; 106 143 local: *; 107 144 }; 108 145 }
-15
arch/s390/kernel/vdso32/vdso32_wrapper.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #include <linux/init.h> 3 - #include <linux/linkage.h> 4 - #include <asm/page.h> 5 - 6 - __PAGE_ALIGNED_DATA 7 - 8 - .globl vdso32_start, vdso32_end 9 - .balign PAGE_SIZE 10 - vdso32_start: 11 - .incbin "arch/s390/kernel/vdso32/vdso32.so" 12 - .balign PAGE_SIZE 13 - vdso32_end: 14 - 15 - .previous
-22
arch/s390/kernel/vdso32/vdso_user_wrapper.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - 3 - #include <linux/linkage.h> 4 - #include <asm/unistd.h> 5 - #include <asm/dwarf.h> 6 - 7 - .macro vdso_syscall func,syscall 8 - .globl __kernel_compat_\func 9 - .type __kernel_compat_\func,@function 10 - __ALIGN 11 - __kernel_compat_\func: 12 - CFI_STARTPROC 13 - svc \syscall 14 - /* Make sure we notice when a syscall returns, which shouldn't happen */ 15 - .word 0 16 - CFI_ENDPROC 17 - .size __kernel_compat_\func,.-__kernel_compat_\func 18 - .endm 19 - 20 - vdso_syscall restart_syscall,__NR_restart_syscall 21 - vdso_syscall sigreturn,__NR_sigreturn 22 - vdso_syscall rt_sigreturn,__NR_rt_sigreturn
+1 -1
arch/s390/kernel/vdso64/.gitignore arch/s390/kernel/vdso/.gitignore
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 - vdso64.lds 2 + vdso.lds
-79
arch/s390/kernel/vdso64/Makefile
··· 1 - # SPDX-License-Identifier: GPL-2.0 2 - # List of files in the vdso 3 - 4 - # Include the generic Makefile to check the built vdso. 5 - include $(srctree)/lib/vdso/Makefile.include 6 - obj-vdso64 = vdso_user_wrapper.o note.o vgetrandom-chacha.o 7 - obj-cvdso64 = vdso64_generic.o getcpu.o vgetrandom.o 8 - VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE) 9 - CFLAGS_REMOVE_getcpu.o = $(VDSO_CFLAGS_REMOVE) 10 - CFLAGS_REMOVE_vgetrandom.o = $(VDSO_CFLAGS_REMOVE) 11 - CFLAGS_REMOVE_vdso64_generic.o = $(VDSO_CFLAGS_REMOVE) 12 - 13 - ifneq ($(c-getrandom-y),) 14 - CFLAGS_vgetrandom.o += -include $(c-getrandom-y) 15 - endif 16 - 17 - # Build rules 18 - 19 - targets := $(obj-vdso64) $(obj-cvdso64) vdso64.so vdso64.so.dbg 20 - obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) 21 - obj-cvdso64 := $(addprefix $(obj)/, $(obj-cvdso64)) 22 - 23 - KBUILD_AFLAGS += -DBUILD_VDSO 24 - KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING 25 - 26 - KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS)) 27 - KBUILD_AFLAGS_64 += -m64 28 - 29 - KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS)) 30 - KBUILD_CFLAGS_64 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS_64)) 31 - KBUILD_CFLAGS_64 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_64)) 32 - KBUILD_CFLAGS_64 := $(filter-out -munaligned-symbols,$(KBUILD_CFLAGS_64)) 33 - KBUILD_CFLAGS_64 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_64)) 34 - KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin -fasynchronous-unwind-tables 35 - ldflags-y := -shared -soname=linux-vdso64.so.1 \ 36 - --hash-style=both --build-id=sha1 -T 37 - 38 - $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64) 39 - $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64) 40 - 41 - obj-y += vdso64_wrapper.o 42 - targets += vdso64.lds 43 - CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) 44 - 45 - # Force dependency (incbin is bad) 46 - $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so 47 - 48 - 
quiet_cmd_vdso_and_check = VDSO $@ 49 - cmd_vdso_and_check = $(cmd_ld); $(cmd_vdso_check) 50 - 51 - # link rule for the .so file, .lds has to be first 52 - $(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) FORCE 53 - $(call if_changed,vdso_and_check) 54 - 55 - # strip rule for the .so file 56 - $(obj)/%.so: OBJCOPYFLAGS := -S 57 - $(obj)/%.so: $(obj)/%.so.dbg FORCE 58 - $(call if_changed,objcopy) 59 - 60 - # assembly rules for the .S files 61 - $(obj-vdso64): %.o: %.S FORCE 62 - $(call if_changed_dep,vdso64as) 63 - 64 - $(obj-cvdso64): %.o: %.c FORCE 65 - $(call if_changed_dep,vdso64cc) 66 - 67 - # actual build commands 68 - quiet_cmd_vdso64as = VDSO64A $@ 69 - cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< 70 - quiet_cmd_vdso64cc = VDSO64C $@ 71 - cmd_vdso64cc = $(CC) $(c_flags) -c -o $@ $< 72 - 73 - # Generate VDSO offsets using helper script 74 - gen-vdsosym := $(src)/gen_vdso_offsets.sh 75 - quiet_cmd_vdsosym = VDSOSYM $@ 76 - cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ 77 - 78 - include/generated/vdso64-offsets.h: $(obj)/vdso64.so.dbg FORCE 79 - $(call if_changed,vdsosym)
+1 -1
arch/s390/kernel/vdso64/gen_vdso_offsets.sh arch/s390/kernel/vdso/gen_vdso_offsets.sh
··· 12 12 # 13 13 14 14 LC_ALL=C 15 - sed -n 's/\([0-9a-f]*\) . __kernel_\(.*\)/\#define vdso64_offset_\2\t0x\1/p' 15 + sed -n 's/\([0-9a-f]*\) . __kernel_\(.*\)/\#define vdso_offset_\2\t0x\1/p'
arch/s390/kernel/vdso64/getcpu.c arch/s390/kernel/vdso/getcpu.c
-13
arch/s390/kernel/vdso64/note.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. 4 - * Here we can supply some information useful to userland. 5 - */ 6 - 7 - #include <linux/uts.h> 8 - #include <linux/version.h> 9 - #include <linux/elfnote.h> 10 - 11 - ELFNOTE_START(Linux, 0, "a") 12 - .long LINUX_VERSION_CODE 13 - ELFNOTE_END
+3 -3
arch/s390/kernel/vdso64/vdso.h arch/s390/kernel/vdso/vdso.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef __ARCH_S390_KERNEL_VDSO64_VDSO_H 3 - #define __ARCH_S390_KERNEL_VDSO64_VDSO_H 2 + #ifndef __ARCH_S390_KERNEL_VDSO_VDSO_H 3 + #define __ARCH_S390_KERNEL_VDSO_VDSO_H 4 4 5 5 #include <vdso/datapage.h> 6 6 ··· 12 12 int __s390_vdso_clock_getres(clockid_t clock, struct __kernel_timespec *ts); 13 13 ssize_t __kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len); 14 14 15 - #endif /* __ARCH_S390_KERNEL_VDSO64_VDSO_H */ 15 + #endif /* __ARCH_S390_KERNEL_VDSO_VDSO_H */
-150
arch/s390/kernel/vdso64/vdso64.lds.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * This is the infamous ld script for the 64 bits vdso 4 - * library 5 - */ 6 - 7 - #include <asm/vdso/vsyscall.h> 8 - #include <asm/page.h> 9 - #include <asm/vdso.h> 10 - #include <vdso/datapage.h> 11 - 12 - OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") 13 - OUTPUT_ARCH(s390:64-bit) 14 - 15 - SECTIONS 16 - { 17 - VDSO_VVAR_SYMS 18 - 19 - . = SIZEOF_HEADERS; 20 - 21 - .hash : { *(.hash) } :text 22 - .gnu.hash : { *(.gnu.hash) } 23 - .dynsym : { *(.dynsym) } 24 - .dynstr : { *(.dynstr) } 25 - .gnu.version : { *(.gnu.version) } 26 - .gnu.version_d : { *(.gnu.version_d) } 27 - .gnu.version_r : { *(.gnu.version_r) } 28 - 29 - .note : { *(.note.*) } :text :note 30 - 31 - . = ALIGN(16); 32 - .text : { 33 - *(.text .stub .text.* .gnu.linkonce.t.*) 34 - } :text 35 - PROVIDE(__etext = .); 36 - PROVIDE(_etext = .); 37 - PROVIDE(etext = .); 38 - 39 - /* 40 - * Other stuff is appended to the text segment: 41 - */ 42 - .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } 43 - .rodata1 : { *(.rodata1) } 44 - 45 - . = ALIGN(8); 46 - .altinstructions : { *(.altinstructions) } 47 - .altinstr_replacement : { *(.altinstr_replacement) } 48 - 49 - .dynamic : { *(.dynamic) } :text :dynamic 50 - 51 - .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr 52 - .eh_frame : { KEEP (*(.eh_frame)) } :text 53 - .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) } 54 - 55 - .rela.dyn ALIGN(8) : { *(.rela.dyn) } 56 - .got ALIGN(8) : { *(.got .toc) } 57 - .got.plt ALIGN(8) : { *(.got.plt) } 58 - 59 - _end = .; 60 - PROVIDE(end = .); 61 - 62 - /* 63 - * Stabs debugging sections are here too. 64 - */ 65 - .stab 0 : { *(.stab) } 66 - .stabstr 0 : { *(.stabstr) } 67 - .stab.excl 0 : { *(.stab.excl) } 68 - .stab.exclstr 0 : { *(.stab.exclstr) } 69 - .stab.index 0 : { *(.stab.index) } 70 - .stab.indexstr 0 : { *(.stab.indexstr) } 71 - .comment 0 : { *(.comment) } 72 - 73 - /* 74 - * DWARF debug sections. 
75 - * Symbols in the DWARF debugging sections are relative to the 76 - * beginning of the section so we begin them at 0. 77 - */ 78 - /* DWARF 1 */ 79 - .debug 0 : { *(.debug) } 80 - .line 0 : { *(.line) } 81 - /* GNU DWARF 1 extensions */ 82 - .debug_srcinfo 0 : { *(.debug_srcinfo) } 83 - .debug_sfnames 0 : { *(.debug_sfnames) } 84 - /* DWARF 1.1 and DWARF 2 */ 85 - .debug_aranges 0 : { *(.debug_aranges) } 86 - .debug_pubnames 0 : { *(.debug_pubnames) } 87 - /* DWARF 2 */ 88 - .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } 89 - .debug_abbrev 0 : { *(.debug_abbrev) } 90 - .debug_line 0 : { *(.debug_line) } 91 - .debug_frame 0 : { *(.debug_frame) } 92 - .debug_str 0 : { *(.debug_str) } 93 - .debug_loc 0 : { *(.debug_loc) } 94 - .debug_macinfo 0 : { *(.debug_macinfo) } 95 - /* SGI/MIPS DWARF 2 extensions */ 96 - .debug_weaknames 0 : { *(.debug_weaknames) } 97 - .debug_funcnames 0 : { *(.debug_funcnames) } 98 - .debug_typenames 0 : { *(.debug_typenames) } 99 - .debug_varnames 0 : { *(.debug_varnames) } 100 - /* DWARF 3 */ 101 - .debug_pubtypes 0 : { *(.debug_pubtypes) } 102 - .debug_ranges 0 : { *(.debug_ranges) } 103 - .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } 104 - 105 - /DISCARD/ : { 106 - *(.note.GNU-stack) 107 - *(.branch_lt) 108 - *(.data .data.* .gnu.linkonce.d.* .sdata*) 109 - *(.bss .sbss .dynbss .dynsbss) 110 - } 111 - } 112 - 113 - /* 114 - * Very old versions of ld do not recognize this name token; use the constant. 115 - */ 116 - #define PT_GNU_EH_FRAME 0x6474e550 117 - 118 - /* 119 - * We must supply the ELF program headers explicitly to get just one 120 - * PT_LOAD segment, and set the flags explicitly to make segments read-only. 121 - */ 122 - PHDRS 123 - { 124 - text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ 125 - dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ 126 - note PT_NOTE FLAGS(4); /* PF_R */ 127 - eh_frame_hdr PT_GNU_EH_FRAME; 128 - } 129 - 130 - /* 131 - * This controls what symbols we export from the DSO. 
132 - */ 133 - VERSION 134 - { 135 - VDSO_VERSION_STRING { 136 - global: 137 - /* 138 - * Has to be there for the kernel to find 139 - */ 140 - __kernel_gettimeofday; 141 - __kernel_clock_gettime; 142 - __kernel_clock_getres; 143 - __kernel_getcpu; 144 - __kernel_restart_syscall; 145 - __kernel_rt_sigreturn; 146 - __kernel_sigreturn; 147 - __kernel_getrandom; 148 - local: *; 149 - }; 150 - }
arch/s390/kernel/vdso64/vdso64_generic.c arch/s390/kernel/vdso/vdso_generic.c
+4 -4
arch/s390/kernel/vdso64/vdso64_wrapper.S arch/s390/kernel/vdso/vdso_wrapper.S
··· 5 5 6 6 __PAGE_ALIGNED_DATA 7 7 8 - .globl vdso64_start, vdso64_end 8 + .globl vdso_start, vdso_end 9 9 .balign PAGE_SIZE 10 - vdso64_start: 11 - .incbin "arch/s390/kernel/vdso64/vdso64.so" 10 + vdso_start: 11 + .incbin "arch/s390/kernel/vdso/vdso.so" 12 12 .balign PAGE_SIZE 13 - vdso64_end: 13 + vdso_end: 14 14 15 15 .previous
arch/s390/kernel/vdso64/vdso_user_wrapper.S arch/s390/kernel/vdso/vdso_user_wrapper.S
arch/s390/kernel/vdso64/vgetrandom-chacha.S arch/s390/kernel/vdso/vgetrandom-chacha.S
arch/s390/kernel/vdso64/vgetrandom.c arch/s390/kernel/vdso/vgetrandom.c
+13
arch/s390/kernel/vmlinux.lds.S
··· 150 150 *(.altinstr_replacement) 151 151 } 152 152 153 + #ifdef CONFIG_STACKPROTECTOR 154 + . = ALIGN(8); 155 + .stack_prot_table : { 156 + __stack_prot_start = .; 157 + KEEP(*(__stack_protector_loc)) 158 + __stack_prot_end = .; 159 + } 160 + #endif 161 + 153 162 /* 154 163 * Table with the patch locations to undo expolines 155 164 */ ··· 266 257 QUAD(invalid_pg_dir) 267 258 QUAD(__alt_instructions) 268 259 QUAD(__alt_instructions_end) 260 + #ifdef CONFIG_STACKPROTECTOR 261 + QUAD(__stack_prot_start) 262 + QUAD(__stack_prot_end) 263 + #endif 269 264 #ifdef CONFIG_KASAN 270 265 QUAD(kasan_early_shadow_page) 271 266 QUAD(kasan_early_shadow_pte)
+1 -2
arch/s390/kvm/interrupt.c
··· 7 7 * Author(s): Carsten Otte <cotte@de.ibm.com> 8 8 */ 9 9 10 - #define KMSG_COMPONENT "kvm-s390" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "kvm-s390: " fmt 12 11 13 12 #include <linux/cpufeature.h> 14 13 #include <linux/interrupt.h>
+1 -2
arch/s390/kvm/kvm-s390.c
··· 10 10 * Jason J. Herne <jjherne@us.ibm.com> 11 11 */ 12 12 13 - #define KMSG_COMPONENT "kvm-s390" 14 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 + #define pr_fmt(fmt) "kvm-s390: " fmt 15 14 16 15 #include <linux/compiler.h> 17 16 #include <linux/export.h>
+1 -1
arch/s390/kvm/priv.c
··· 754 754 int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) 755 755 { 756 756 psw_t *gpsw = &vcpu->arch.sie_block->gpsw; 757 - psw_compat_t new_psw; 757 + psw32_t new_psw; 758 758 u64 addr; 759 759 int rc; 760 760 u8 ar;
+2 -2
arch/s390/mm/cmm.c
··· 321 321 cmm_set_timeout(nr, seconds); 322 322 *ppos += *lenp; 323 323 } else { 324 - len = sprintf(buf, "%ld %ld\n", 325 - cmm_timeout_pages, cmm_timeout_seconds); 324 + len = scnprintf(buf, sizeof(buf), "%ld %ld\n", 325 + cmm_timeout_pages, cmm_timeout_seconds); 326 326 if (len > *lenp) 327 327 len = *lenp; 328 328 memcpy(buffer, buf, len);
+1 -1
arch/s390/mm/dump_pagetables.c
··· 51 51 struct seq_file *__m = (m); \ 52 52 \ 53 53 if (__m) \ 54 - seq_printf(__m, fmt); \ 54 + seq_puts(__m, fmt); \ 55 55 }) 56 56 57 57 static void print_prot(struct seq_file *m, unsigned int pr, int level)
+9 -8
arch/s390/mm/extmem.c
··· 7 7 * Copyright IBM Corp. 2002, 2004 8 8 */ 9 9 10 - #define KMSG_COMPONENT "extmem" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "extmem: " fmt 12 11 13 12 #include <linux/kernel.h> 14 13 #include <linux/string.h> ··· 597 598 goto out; 598 599 } 599 600 600 - sprintf(cmd1, "DEFSEG %s", name); 601 + snprintf(cmd1, sizeof(cmd1), "DEFSEG %s", name); 601 602 for (i=0; i<seg->segcnt; i++) { 602 - sprintf(cmd1+strlen(cmd1), " %lX-%lX %s", 603 - seg->range[i].start >> PAGE_SHIFT, 604 - seg->range[i].end >> PAGE_SHIFT, 605 - segtype_string[seg->range[i].start & 0xff]); 603 + size_t len = strlen(cmd1); 604 + 605 + snprintf(cmd1 + len, sizeof(cmd1) - len, " %lX-%lX %s", 606 + seg->range[i].start >> PAGE_SHIFT, 607 + seg->range[i].end >> PAGE_SHIFT, 608 + segtype_string[seg->range[i].start & 0xff]); 606 609 } 607 - sprintf(cmd2, "SAVESEG %s", name); 610 + snprintf(cmd2, sizeof(cmd2), "SAVESEG %s", name); 608 611 response = 0; 609 612 cpcmd(cmd1, NULL, 0, &response); 610 613 if (response) {
+17 -12
arch/s390/mm/fault.c
··· 23 23 #include <linux/ptrace.h> 24 24 #include <linux/mman.h> 25 25 #include <linux/mm.h> 26 - #include <linux/compat.h> 27 26 #include <linux/smp.h> 28 27 #include <linux/kdebug.h> 29 28 #include <linux/init.h> ··· 132 133 union teid teid = { .val = regs->int_parm_long }; 133 134 unsigned long asce; 134 135 135 - pr_alert("Failing address: %016lx TEID: %016lx\n", 136 + pr_alert("Failing address: %016lx TEID: %016lx", 136 137 get_fault_address(regs), teid.val); 138 + if (test_facility(131)) 139 + pr_cont(" ESOP-2"); 140 + else if (machine_has_esop()) 141 + pr_cont(" ESOP-1"); 142 + else 143 + pr_cont(" SOP"); 144 + if (test_facility(75)) 145 + pr_cont(" FSI"); 146 + pr_cont("\n"); 137 147 pr_alert("Fault in "); 138 148 switch (teid.as) { 139 149 case PSW_BITS_AS_HOME: ··· 373 365 * The exception to this rule are aborted transactions, for these 374 366 * the PSW already points to the correct location. 375 367 */ 376 - if (!(regs->int_code & 0x200)) 368 + if (!(regs->int_code & 0x200)) { 377 369 regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); 370 + set_pt_regs_flag(regs, PIF_PSW_ADDR_ADJUSTED); 371 + } 378 372 /* 379 - * Check for low-address protection. This needs to be treated 380 - * as a special case because the translation exception code 381 - * field is not guaranteed to contain valid data in this case. 373 + * If bit 61 if the TEID is not set, the remainder of the 374 + * TEID is unpredictable. Special handling is required. 382 375 */ 383 376 if (unlikely(!teid.b61)) { 384 377 if (user_mode(regs)) { 385 - /* Low-address protection in user mode: cannot happen */ 386 378 dump_fault_info(regs); 387 - die(regs, "Low-address protection"); 379 + die(regs, "Unexpected TEID"); 388 380 } 389 - /* 390 - * Low-address protection in kernel mode means 391 - * NULL pointer write access in kernel mode. 392 - */ 381 + /* Assume low-address protection in kernel mode. 
*/ 393 382 return handle_fault_error_nolock(regs, 0); 394 383 } 395 384 if (unlikely(cpu_has_nx() && teid.b56)) {
+5 -23
arch/s390/mm/gmap.c
··· 138 138 139 139 static void gmap_flush_tlb(struct gmap *gmap) 140 140 { 141 - if (cpu_has_idte()) 142 - __tlb_flush_idte(gmap->asce); 143 - else 144 - __tlb_flush_global(); 141 + __tlb_flush_idte(gmap->asce); 145 142 } 146 143 147 144 static void gmap_radix_tree_free(struct radix_tree_root *root) ··· 1985 1988 if (machine_has_tlb_guest()) 1986 1989 __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce, 1987 1990 IDTE_GLOBAL); 1988 - else if (cpu_has_idte()) 1989 - __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL); 1990 1991 else 1991 - __pmdp_csp(pmdp); 1992 + __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL); 1992 1993 set_pmd(pmdp, new); 1993 1994 } 1994 1995 ··· 2007 2012 _SEGMENT_ENTRY_GMAP_UC | 2008 2013 _SEGMENT_ENTRY)); 2009 2014 if (purge) 2010 - __pmdp_csp(pmdp); 2015 + __pmdp_cspg(pmdp); 2011 2016 set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); 2012 2017 } 2013 2018 spin_unlock(&gmap->guest_table_lock); ··· 2026 2031 gmap_pmdp_clear(mm, vmaddr, 0); 2027 2032 } 2028 2033 EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate); 2029 - 2030 - /** 2031 - * gmap_pmdp_csp - csp all affected guest pmd entries 2032 - * @mm: pointer to the process mm_struct 2033 - * @vmaddr: virtual address in the process address space 2034 - */ 2035 - void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr) 2036 - { 2037 - gmap_pmdp_clear(mm, vmaddr, 1); 2038 - } 2039 - EXPORT_SYMBOL_GPL(gmap_pmdp_csp); 2040 2034 2041 2035 /** 2042 2036 * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry ··· 2050 2066 if (machine_has_tlb_guest()) 2051 2067 __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE, 2052 2068 gmap->asce, IDTE_LOCAL); 2053 - else if (cpu_has_idte()) 2069 + else 2054 2070 __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL); 2055 2071 *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY); 2056 2072 } ··· 2083 2099 if (machine_has_tlb_guest()) 2084 2100 __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE, 2085 2101 gmap->asce, IDTE_GLOBAL); 2086 - else if (cpu_has_idte()) 2087 - __pmdp_idte(gaddr, pmdp, 0, 
0, IDTE_GLOBAL); 2088 2102 else 2089 - __pmdp_csp(pmdp); 2103 + __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL); 2090 2104 *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY); 2091 2105 } 2092 2106 spin_unlock(&gmap->guest_table_lock);
+1 -2
arch/s390/mm/hugetlbpage.c
··· 6 6 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com> 7 7 */ 8 8 9 - #define KMSG_COMPONENT "hugetlb" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "hugetlb: " fmt 11 10 12 11 #include <linux/cpufeature.h> 13 12 #include <linux/mm.h>
-1
arch/s390/mm/mmap.c
··· 15 15 #include <linux/sched/signal.h> 16 16 #include <linux/sched/mm.h> 17 17 #include <linux/random.h> 18 - #include <linux/compat.h> 19 18 #include <linux/security.h> 20 19 #include <linux/hugetlb.h> 21 20 #include <asm/elf.h>
+1 -3
arch/s390/mm/pageattr.c
··· 78 78 } 79 79 table = (unsigned long *)((unsigned long)old & mask); 80 80 crdte(*old, new, table, dtt, addr, get_lowcore()->kernel_asce.val); 81 - } else if (cpu_has_idte()) { 82 - cspg(old, *old, new); 83 81 } else { 84 - csp((unsigned int *)old + 1, *old, new); 82 + cspg(old, *old, new); 85 83 } 86 84 } 87 85
+2
arch/s390/mm/pgalloc.c
··· 164 164 { 165 165 struct ptdesc *ptdesc = virt_to_ptdesc(table); 166 166 167 + if (pagetable_is_reserved(ptdesc)) 168 + return free_reserved_ptdesc(ptdesc); 167 169 pagetable_dtor_free(ptdesc); 168 170 } 169 171
+2 -12
arch/s390/mm/pgtable.c
··· 360 360 mm->context.asce, IDTE_GLOBAL); 361 361 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) 362 362 gmap_pmdp_idte_global(mm, addr); 363 - } else if (cpu_has_idte()) { 363 + } else { 364 364 __pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL); 365 365 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) 366 366 gmap_pmdp_idte_global(mm, addr); 367 - } else { 368 - __pmdp_csp(pmdp); 369 - if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) 370 - gmap_pmdp_csp(mm, addr); 371 367 } 372 368 } 373 369 ··· 483 487 if (machine_has_tlb_guest()) 484 488 __pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE, 485 489 mm->context.asce, IDTE_GLOBAL); 486 - else if (cpu_has_idte()) 487 - __pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL); 488 490 else 489 - /* 490 - * Invalid bit position is the same for pmd and pud, so we can 491 - * reuse _pmd_csp() here 492 - */ 493 - __pmdp_csp((pmd_t *) pudp); 491 + __pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL); 494 492 } 495 493 496 494 static inline pud_t pudp_flush_direct(struct mm_struct *mm,
+12 -9
arch/s390/mm/vmem.c
··· 4 4 */ 5 5 6 6 #include <linux/memory_hotplug.h> 7 + #include <linux/bootmem_info.h> 7 8 #include <linux/cpufeature.h> 8 9 #include <linux/memblock.h> 9 10 #include <linux/pfn.h> ··· 40 39 41 40 static void vmem_free_pages(unsigned long addr, int order, struct vmem_altmap *altmap) 42 41 { 42 + unsigned int nr_pages = 1 << order; 43 + struct page *page; 44 + 43 45 if (altmap) { 44 46 vmem_altmap_free(altmap, 1 << order); 45 47 return; 46 48 } 47 - /* We don't expect boot memory to be removed ever. */ 48 - if (!slab_is_available() || 49 - WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr)))) 50 - return; 51 - free_pages(addr, order); 49 + page = virt_to_page((void *)addr); 50 + if (PageReserved(page)) { 51 + /* allocated from memblock */ 52 + while (nr_pages--) 53 + free_bootmem_page(page++); 54 + } else { 55 + free_pages(addr, order); 56 + } 52 57 } 53 58 54 59 void *vmem_crst_alloc(unsigned long val) ··· 86 79 87 80 static void vmem_pte_free(unsigned long *table) 88 81 { 89 - /* We don't expect boot memory to be removed ever. */ 90 - if (!slab_is_available() || 91 - WARN_ON_ONCE(PageReserved(virt_to_page(table)))) 92 - return; 93 82 page_table_free(&init_mm, table); 94 83 } 95 84
+1 -2
arch/s390/net/bpf_jit_comp.c
··· 15 15 * Michael Holzheu <holzheu@linux.vnet.ibm.com> 16 16 */ 17 17 18 - #define KMSG_COMPONENT "bpf_jit" 19 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 18 + #define pr_fmt(fmt) "bpf_jit: " fmt 20 19 21 20 #include <linux/netdevice.h> 22 21 #include <linux/filter.h>
+1 -2
arch/s390/pci/pci.c
··· 16 16 * Thomas Klein 17 17 */ 18 18 19 - #define KMSG_COMPONENT "zpci" 20 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 19 + #define pr_fmt(fmt) "zpci: " fmt 21 20 22 21 #include <linux/kernel.h> 23 22 #include <linux/slab.h>
+4 -3
arch/s390/pci/pci_bus.c
··· 7 7 * 8 8 */ 9 9 10 - #define KMSG_COMPONENT "zpci" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "zpci: " fmt 12 11 13 12 #include <linux/kernel.h> 14 13 #include <linux/slab.h> ··· 44 45 45 46 if (!zdev_enabled(zdev)) { 46 47 rc = zpci_enable_device(zdev); 47 - if (rc) 48 + if (rc) { 49 + pr_err("Enabling PCI function %08x failed\n", zdev->fid); 48 50 return rc; 51 + } 49 52 } 50 53 51 54 if (!zdev->has_resources) {
+2 -5
arch/s390/pci/pci_clp.c
··· 6 6 * Jan Glauber <jang@linux.vnet.ibm.com> 7 7 */ 8 8 9 - #define KMSG_COMPONENT "zpci" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "zpci: " fmt 11 10 12 - #include <linux/compat.h> 13 11 #include <linux/kernel.h> 14 12 #include <linux/miscdevice.h> 15 13 #include <linux/slab.h> ··· 649 651 if (cmd != CLP_SYNC) 650 652 return -EINVAL; 651 653 652 - argp = is_compat_task() ? compat_ptr(arg) : (void __user *) arg; 654 + argp = (void __user *)arg; 653 655 if (copy_from_user(&req, argp, sizeof(req))) 654 656 return -EFAULT; 655 657 if (req.r != 0) ··· 667 669 .open = nonseekable_open, 668 670 .release = clp_misc_release, 669 671 .unlocked_ioctl = clp_misc_ioctl, 670 - .compat_ioctl = clp_misc_ioctl, 671 672 }; 672 673 673 674 static struct miscdevice clp_misc_device = {
+1 -2
arch/s390/pci/pci_debug.c
··· 6 6 * Jan Glauber <jang@linux.vnet.ibm.com> 7 7 */ 8 8 9 - #define KMSG_COMPONENT "zpci" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "zpci: " fmt 11 10 12 11 #include <linux/kernel.h> 13 12 #include <linux/seq_file.h>
+1 -2
arch/s390/pci/pci_event.c
··· 6 6 * Jan Glauber <jang@linux.vnet.ibm.com> 7 7 */ 8 8 9 - #define KMSG_COMPONENT "zpci" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "zpci: " fmt 11 10 12 11 #include <linux/kernel.h> 13 12 #include <linux/pci.h>
+1 -2
arch/s390/pci/pci_iov.c
··· 7 7 * 8 8 */ 9 9 10 - #define KMSG_COMPONENT "zpci" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "zpci: " fmt 12 11 13 12 #include <linux/kernel.h> 14 13 #include <linux/pci.h>
+1 -2
arch/s390/pci/pci_irq.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - #define KMSG_COMPONENT "zpci" 3 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 2 + #define pr_fmt(fmt) "zpci: " fmt 4 3 5 4 #include <linux/kernel.h> 6 5 #include <linux/irq.h>
+1 -2
arch/s390/pci/pci_report.c
··· 7 7 * 8 8 */ 9 9 10 - #define KMSG_COMPONENT "zpci" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "zpci: " fmt 12 11 13 12 #include <linux/kernel.h> 14 13 #include <linux/sprintf.h>
+1 -2
arch/s390/pci/pci_sysfs.c
··· 6 6 * Jan Glauber <jang@linux.vnet.ibm.com> 7 7 */ 8 8 9 - #define KMSG_COMPONENT "zpci" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "zpci: " fmt 11 10 12 11 #include <linux/kernel.h> 13 12 #include <linux/stat.h>
+1
arch/s390/tools/gen_facilities.c
··· 29 29 .bits = (int[]){ 30 30 0, /* N3 instructions */ 31 31 1, /* z/Arch mode installed */ 32 + 3, /* dat-enhancement 1 */ 32 33 18, /* long displacement facility */ 33 34 21, /* extended-immediate facility */ 34 35 25, /* store clock fast */
+1 -22
drivers/base/memory.c
··· 226 226 unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); 227 227 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; 228 228 unsigned long nr_vmemmap_pages = 0; 229 - struct memory_notify arg; 230 229 struct zone *zone; 231 230 int ret; 232 231 ··· 245 246 if (mem->altmap) 246 247 nr_vmemmap_pages = mem->altmap->free; 247 248 248 - arg.altmap_start_pfn = start_pfn; 249 - arg.altmap_nr_pages = nr_vmemmap_pages; 250 - arg.start_pfn = start_pfn + nr_vmemmap_pages; 251 - arg.nr_pages = nr_pages - nr_vmemmap_pages; 252 249 mem_hotplug_begin(); 253 - ret = memory_notify(MEM_PREPARE_ONLINE, &arg); 254 - ret = notifier_to_errno(ret); 255 - if (ret) 256 - goto out_notifier; 257 - 258 250 if (nr_vmemmap_pages) { 259 - ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, 260 - zone, mem->altmap->inaccessible); 251 + ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone); 261 252 if (ret) 262 253 goto out; 263 254 } ··· 269 280 nr_vmemmap_pages); 270 281 271 282 mem->zone = zone; 272 - mem_hotplug_done(); 273 - return ret; 274 283 out: 275 - memory_notify(MEM_FINISH_OFFLINE, &arg); 276 - out_notifier: 277 284 mem_hotplug_done(); 278 285 return ret; 279 286 } ··· 282 297 unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); 283 298 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; 284 299 unsigned long nr_vmemmap_pages = 0; 285 - struct memory_notify arg; 286 300 int ret; 287 301 288 302 if (!mem->zone) ··· 313 329 mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages); 314 330 315 331 mem->zone = NULL; 316 - arg.altmap_start_pfn = start_pfn; 317 - arg.altmap_nr_pages = nr_vmemmap_pages; 318 - arg.start_pfn = start_pfn + nr_vmemmap_pages; 319 - arg.nr_pages = nr_pages - nr_vmemmap_pages; 320 - memory_notify(MEM_FINISH_OFFLINE, &arg); 321 332 out: 322 333 mem_hotplug_done(); 323 334 return ret;
+1 -2
drivers/char/hw_random/s390-trng.c
··· 9 9 * Author(s): Harald Freudenberger <freude@de.ibm.com> 10 10 */ 11 11 12 - #define KMSG_COMPONENT "trng" 13 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 + #define pr_fmt(fmt) "trng: " fmt 14 13 15 14 #include <linux/hw_random.h> 16 15 #include <linux/kernel.h>
+1 -2
drivers/pci/hotplug/s390_pci_hpc.c
··· 8 8 * Jan Glauber <jang@linux.vnet.ibm.com> 9 9 */ 10 10 11 - #define KMSG_COMPONENT "zpci" 12 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11 + #define pr_fmt(fmt) "zpci: " fmt 13 12 14 13 #include <linux/kernel.h> 15 14 #include <linux/slab.h>
-1
drivers/s390/block/dasd.c
··· 3350 3350 .open = dasd_open, 3351 3351 .release = dasd_release, 3352 3352 .ioctl = dasd_ioctl, 3353 - .compat_ioctl = dasd_ioctl, 3354 3353 .getgeo = dasd_getgeo, 3355 3354 .set_read_only = dasd_set_read_only, 3356 3355 };
-11
drivers/s390/block/dasd_eckd.c
··· 16 16 #include <linux/hdreg.h> /* HDIO_GETGEO */ 17 17 #include <linux/bio.h> 18 18 #include <linux/module.h> 19 - #include <linux/compat.h> 20 19 #include <linux/init.h> 21 20 #include <linux/seq_file.h> 22 21 #include <linux/uaccess.h> ··· 5388 5389 rc = -EFAULT; 5389 5390 if (copy_from_user(&usrparm, argp, sizeof(usrparm))) 5390 5391 goto out; 5391 - if (is_compat_task()) { 5392 - /* Make sure pointers are sane even on 31 bit. */ 5393 - rc = -EINVAL; 5394 - if ((usrparm.psf_data >> 32) != 0) 5395 - goto out; 5396 - if ((usrparm.rssd_result >> 32) != 0) 5397 - goto out; 5398 - usrparm.psf_data &= 0x7fffffffULL; 5399 - usrparm.rssd_result &= 0x7fffffffULL; 5400 - } 5401 5392 /* at least 2 bytes are accessed and should be allocated */ 5402 5393 if (usrparm.psf_data_len < 2) { 5403 5394 DBF_DEV_EVENT(DBF_WARNING, device,
-1
drivers/s390/block/dasd_fba.c
··· 5 5 * Copyright IBM Corp. 1999, 2009 6 6 */ 7 7 8 - #define KMSG_COMPONENT "dasd-fba" 9 8 10 9 #include <linux/stddef.h> 11 10 #include <linux/kernel.h>
+1 -5
drivers/s390/block/dasd_ioctl.c
··· 11 11 */ 12 12 13 13 #include <linux/interrupt.h> 14 - #include <linux/compat.h> 15 14 #include <linux/export.h> 16 15 #include <linux/major.h> 17 16 #include <linux/fs.h> ··· 615 616 void __user *argp; 616 617 int rc; 617 618 618 - if (is_compat_task()) 619 - argp = compat_ptr(arg); 620 - else 621 - argp = (void __user *)arg; 619 + argp = (void __user *)arg; 622 620 623 621 if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) 624 622 return -EINVAL;
+3 -4
drivers/s390/block/dcssblk.c
··· 5 5 * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer 6 6 */ 7 7 8 - #define KMSG_COMPONENT "dcssblk" 9 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + #define pr_fmt(fmt) "dcssblk: " fmt 10 9 11 10 #include <linux/module.h> 12 11 #include <linux/moduleparam.h> ··· 673 674 rc = dcssblk_assign_free_minor(dev_info); 674 675 if (rc) 675 676 goto release_gd; 676 - sprintf(dev_info->gd->disk_name, "dcssblk%d", 677 - dev_info->gd->first_minor); 677 + scnprintf(dev_info->gd->disk_name, sizeof(dev_info->gd->disk_name), 678 + "dcssblk%d", dev_info->gd->first_minor); 678 679 list_add_tail(&dev_info->lh, &dcssblk_devices); 679 680 680 681 if (!try_module_get(THIS_MODULE)) {
+1 -2
drivers/s390/block/scm_blk.c
··· 6 6 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com> 7 7 */ 8 8 9 - #define KMSG_COMPONENT "scm_block" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "scm_block: " fmt 11 10 12 11 #include <linux/interrupt.h> 13 12 #include <linux/spinlock.h>
+1 -2
drivers/s390/block/scm_drv.c
··· 6 6 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com> 7 7 */ 8 8 9 - #define KMSG_COMPONENT "scm_block" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "scm_block: " fmt 11 10 12 11 #include <linux/module.h> 13 12 #include <linux/slab.h>
+1 -20
drivers/s390/char/con3270.c
··· 21 21 #include <linux/reboot.h> 22 22 #include <linux/slab.h> 23 23 #include <linux/memblock.h> 24 - #include <linux/compat.h> 25 24 26 25 #include <asm/machine.h> 27 26 #include <asm/ccwdev.h> ··· 1661 1662 else if (tp->esc_par[0] == 6) { /* Cursor report. */ 1662 1663 char buf[40]; 1663 1664 1664 - sprintf(buf, "\033[%d;%dR", tp->cy + 1, tp->cx + 1); 1665 + scnprintf(buf, sizeof(buf), "\033[%d;%dR", tp->cy + 1, tp->cx + 1); 1665 1666 kbd_puts_queue(&tp->port, buf); 1666 1667 } 1667 1668 return; ··· 1946 1947 return kbd_ioctl(tp->kbd, cmd, arg); 1947 1948 } 1948 1949 1949 - #ifdef CONFIG_COMPAT 1950 - static long tty3270_compat_ioctl(struct tty_struct *tty, 1951 - unsigned int cmd, unsigned long arg) 1952 - { 1953 - struct tty3270 *tp; 1954 - 1955 - tp = tty->driver_data; 1956 - if (!tp) 1957 - return -ENODEV; 1958 - if (tty_io_error(tty)) 1959 - return -EIO; 1960 - return kbd_ioctl(tp->kbd, cmd, (unsigned long)compat_ptr(arg)); 1961 - } 1962 - #endif 1963 - 1964 1950 static const struct tty_operations tty3270_ops = { 1965 1951 .install = tty3270_install, 1966 1952 .cleanup = tty3270_cleanup, ··· 1960 1976 .hangup = tty3270_hangup, 1961 1977 .wait_until_sent = tty3270_wait_until_sent, 1962 1978 .ioctl = tty3270_ioctl, 1963 - #ifdef CONFIG_COMPAT 1964 - .compat_ioctl = tty3270_compat_ioctl, 1965 - #endif 1966 1979 .set_termios = tty3270_set_termios 1967 1980 }; 1968 1981
+1 -2
drivers/s390/char/diag_ftp.c
··· 7 7 * 8 8 */ 9 9 10 - #define KMSG_COMPONENT "hmcdrv" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "hmcdrv: " fmt 12 11 13 12 #include <linux/kernel.h> 14 13 #include <linux/mm.h>
+1 -6
drivers/s390/char/fs3270.c
··· 12 12 #include <linux/console.h> 13 13 #include <linux/init.h> 14 14 #include <linux/interrupt.h> 15 - #include <linux/compat.h> 16 15 #include <linux/sched/signal.h> 17 16 #include <linux/module.h> 18 17 #include <linux/list.h> ··· 329 330 fp = filp->private_data; 330 331 if (!fp) 331 332 return -ENODEV; 332 - if (is_compat_task()) 333 - argp = compat_ptr(arg); 334 - else 335 - argp = (char __user *)arg; 333 + argp = (char __user *)arg; 336 334 rc = 0; 337 335 mutex_lock(&fs3270_mutex); 338 336 switch (cmd) { ··· 508 512 .read = fs3270_read, /* read */ 509 513 .write = fs3270_write, /* write */ 510 514 .unlocked_ioctl = fs3270_ioctl, /* ioctl */ 511 - .compat_ioctl = fs3270_ioctl, /* ioctl */ 512 515 .open = fs3270_open, /* open */ 513 516 .release = fs3270_close, /* release */ 514 517 };
+1 -2
drivers/s390/char/hmcdrv_cache.c
··· 7 7 * 8 8 */ 9 9 10 - #define KMSG_COMPONENT "hmcdrv" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "hmcdrv: " fmt 12 11 13 12 #include <linux/kernel.h> 14 13 #include <linux/mm.h>
+1 -2
drivers/s390/char/hmcdrv_dev.c
··· 14 14 * end read() the response. 15 15 */ 16 16 17 - #define KMSG_COMPONENT "hmcdrv" 18 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 17 + #define pr_fmt(fmt) "hmcdrv: " fmt 19 18 20 19 #include <linux/kernel.h> 21 20 #include <linux/module.h>
+1 -2
drivers/s390/char/hmcdrv_ftp.c
··· 6 6 * Author(s): Ralf Hoppe (rhoppe@de.ibm.com) 7 7 */ 8 8 9 - #define KMSG_COMPONENT "hmcdrv" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "hmcdrv: " fmt 11 10 12 11 #include <linux/kernel.h> 13 12 #include <linux/slab.h>
+1 -2
drivers/s390/char/hmcdrv_mod.c
··· 6 6 * Author(s): Ralf Hoppe (rhoppe@de.ibm.com) 7 7 */ 8 8 9 - #define KMSG_COMPONENT "hmcdrv" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "hmcdrv: " fmt 11 10 12 11 #include <linux/kernel.h> 13 12 #include <linux/module.h>
+1 -2
drivers/s390/char/monreader.c
··· 7 7 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> 8 8 */ 9 9 10 - #define KMSG_COMPONENT "monreader" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "monreader: " fmt 12 11 13 12 #include <linux/module.h> 14 13 #include <linux/moduleparam.h>
+1 -2
drivers/s390/char/monwriter.c
··· 7 7 * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com> 8 8 */ 9 9 10 - #define KMSG_COMPONENT "monwriter" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "monwriter: " fmt 12 11 13 12 #include <linux/module.h> 14 13 #include <linux/moduleparam.h>
+1 -2
drivers/s390/char/sclp_ap.c
··· 4 4 * 5 5 * Copyright IBM Corp. 2020 6 6 */ 7 - #define KMSG_COMPONENT "sclp_cmd" 8 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 7 + #define pr_fmt(fmt) "sclp_cmd: " fmt 9 8 10 9 #include <linux/export.h> 11 10 #include <linux/slab.h>
+1 -2
drivers/s390/char/sclp_cmd.c
··· 5 5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 6 6 */ 7 7 8 - #define KMSG_COMPONENT "sclp_cmd" 9 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + #define pr_fmt(fmt) "sclp_cmd: " fmt 10 9 11 10 #include <linux/completion.h> 12 11 #include <linux/err.h>
+1 -2
drivers/s390/char/sclp_config.c
··· 3 3 * Copyright IBM Corp. 2007 4 4 */ 5 5 6 - #define KMSG_COMPONENT "sclp_config" 7 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 6 + #define pr_fmt(fmt) "sclp_config: " fmt 8 7 9 8 #include <linux/init.h> 10 9 #include <linux/errno.h>
+1 -2
drivers/s390/char/sclp_cpi_sys.c
··· 7 7 * Michael Ernst <mernst@de.ibm.com> 8 8 */ 9 9 10 - #define KMSG_COMPONENT "sclp_cpi" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "sclp_cpi: " fmt 12 11 13 12 #include <linux/kernel.h> 14 13 #include <linux/init.h>
+2 -10
drivers/s390/char/sclp_ctl.c
··· 7 7 * Author: Michael Holzheu <holzheu@linux.vnet.ibm.com> 8 8 */ 9 9 10 - #include <linux/compat.h> 11 10 #include <linux/uaccess.h> 12 11 #include <linux/miscdevice.h> 13 12 #include <linux/gfp.h> ··· 42 43 43 44 static void __user *u64_to_uptr(u64 value) 44 45 { 45 - if (is_compat_task()) 46 - return compat_ptr(value); 47 - else 48 - return (void __user *)(unsigned long)value; 46 + return (void __user *)(unsigned long)value; 49 47 } 50 48 51 49 /* ··· 91 95 { 92 96 void __user *argp; 93 97 94 - if (is_compat_task()) 95 - argp = compat_ptr(arg); 96 - else 97 - argp = (void __user *) arg; 98 + argp = (void __user *)arg; 98 99 switch (cmd) { 99 100 case SCLP_CTL_SCCB: 100 101 return sclp_ctl_ioctl_sccb(argp); ··· 107 114 .owner = THIS_MODULE, 108 115 .open = nonseekable_open, 109 116 .unlocked_ioctl = sclp_ctl_ioctl, 110 - .compat_ioctl = sclp_ctl_ioctl, 111 117 }; 112 118 113 119 /*
+1 -2
drivers/s390/char/sclp_early.c
··· 5 5 * Copyright IBM Corp. 2013 6 6 */ 7 7 8 - #define KMSG_COMPONENT "sclp_early" 9 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + #define pr_fmt(fmt) "sclp_early: " fmt 10 9 11 10 #include <linux/export.h> 12 11 #include <linux/errno.h>
+1 -2
drivers/s390/char/sclp_ftp.c
··· 7 7 * 8 8 */ 9 9 10 - #define KMSG_COMPONENT "hmcdrv" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "hmcdrv: " fmt 12 11 13 12 #include <linux/kernel.h> 14 13 #include <linux/mm.h>
+207 -85
drivers/s390/char/sclp_mem.c
··· 5 5 * Copyright IBM Corp. 2025 6 6 */ 7 7 8 - #define KMSG_COMPONENT "sclp_mem" 9 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + #define pr_fmt(fmt) "sclp_mem: " fmt 10 9 11 10 #include <linux/cpufeature.h> 11 + #include <linux/container_of.h> 12 12 #include <linux/err.h> 13 13 #include <linux/errno.h> 14 14 #include <linux/init.h> 15 + #include <linux/kobject.h> 16 + #include <linux/kstrtox.h> 15 17 #include <linux/memory.h> 16 18 #include <linux/memory_hotplug.h> 17 19 #include <linux/mm.h> ··· 29 27 #define SCLP_CMDW_ASSIGN_STORAGE 0x000d0001 30 28 #define SCLP_CMDW_UNASSIGN_STORAGE 0x000c0001 31 29 32 - static DEFINE_MUTEX(sclp_mem_mutex); 33 30 static LIST_HEAD(sclp_mem_list); 34 31 static u8 sclp_max_storage_id; 35 32 static DECLARE_BITMAP(sclp_storage_ids, 256); ··· 37 36 struct list_head list; 38 37 u16 rn; 39 38 int standby; 39 + }; 40 + 41 + struct sclp_mem { 42 + struct kobject kobj; 43 + unsigned int id; 44 + unsigned int memmap_on_memory; 45 + unsigned int config; 46 + }; 47 + 48 + struct sclp_mem_arg { 49 + struct sclp_mem *sclp_mems; 50 + struct kset *kset; 40 51 }; 41 52 42 53 struct assign_storage_sccb { ··· 176 163 return rc ? 
-EIO : 0; 177 164 } 178 165 179 - static bool contains_standby_increment(unsigned long start, unsigned long end) 166 + static ssize_t sclp_config_mem_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 180 167 { 181 - struct memory_increment *incr; 182 - unsigned long istart; 168 + struct sclp_mem *sclp_mem = container_of(kobj, struct sclp_mem, kobj); 183 169 184 - list_for_each_entry(incr, &sclp_mem_list, list) { 185 - istart = rn2addr(incr->rn); 186 - if (end - 1 < istart) 187 - continue; 188 - if (start > istart + sclp.rzm - 1) 189 - continue; 190 - if (incr->standby) 191 - return true; 192 - } 193 - return false; 170 + return sysfs_emit(buf, "%u\n", READ_ONCE(sclp_mem->config)); 194 171 } 195 172 196 - static int sclp_mem_notifier(struct notifier_block *nb, 197 - unsigned long action, void *data) 173 + static ssize_t sclp_config_mem_store(struct kobject *kobj, struct kobj_attribute *attr, 174 + const char *buf, size_t count) 198 175 { 199 - unsigned long start, size; 200 - struct memory_notify *arg; 176 + unsigned long addr, block_size; 177 + struct sclp_mem *sclp_mem; 178 + struct memory_block *mem; 201 179 unsigned char id; 202 - int rc = 0; 180 + bool value; 181 + int rc; 203 182 204 - arg = data; 205 - start = arg->start_pfn << PAGE_SHIFT; 206 - size = arg->nr_pages << PAGE_SHIFT; 207 - mutex_lock(&sclp_mem_mutex); 183 + rc = kstrtobool(buf, &value); 184 + if (rc) 185 + return rc; 186 + sclp_mem = container_of(kobj, struct sclp_mem, kobj); 187 + block_size = memory_block_size_bytes(); 188 + addr = sclp_mem->id * block_size; 189 + /* 190 + * Hold device_hotplug_lock when adding/removing memory blocks. 191 + * Additionally, also protect calls to find_memory_block() and 192 + * sclp_attach_storage(). 
193 + */ 194 + rc = lock_device_hotplug_sysfs(); 195 + if (rc) 196 + goto out; 208 197 for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1) 209 198 sclp_attach_storage(id); 210 - switch (action) { 211 - case MEM_GOING_OFFLINE: 199 + if (value) { 200 + if (sclp_mem->config) 201 + goto out_unlock; 202 + rc = sclp_mem_change_state(addr, block_size, 1); 203 + if (rc) 204 + goto out_unlock; 212 205 /* 213 - * Do not allow to set memory blocks offline that contain 214 - * standby memory. This is done to simplify the "memory online" 215 - * case. 206 + * Set entire memory block CMMA state to nodat. Later, when 207 + * page tables pages are allocated via __add_memory(), those 208 + * regions are marked __arch_set_page_dat(). 216 209 */ 217 - if (contains_standby_increment(start, start + size)) 218 - rc = -EPERM; 219 - break; 220 - case MEM_PREPARE_ONLINE: 221 - /* 222 - * Access the altmap_start_pfn and altmap_nr_pages fields 223 - * within the struct memory_notify specifically when dealing 224 - * with only MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers. 225 - * 226 - * When altmap is in use, take the specified memory range 227 - * online, which includes the altmap. 228 - */ 229 - if (arg->altmap_nr_pages) { 230 - start = PFN_PHYS(arg->altmap_start_pfn); 231 - size += PFN_PHYS(arg->altmap_nr_pages); 210 + __arch_set_page_nodat((void *)__va(addr), block_size >> PAGE_SHIFT); 211 + rc = __add_memory(0, addr, block_size, 212 + sclp_mem->memmap_on_memory ? 213 + MHP_MEMMAP_ON_MEMORY : MHP_NONE); 214 + if (rc) { 215 + sclp_mem_change_state(addr, block_size, 0); 216 + goto out_unlock; 232 217 } 233 - rc = sclp_mem_change_state(start, size, 1); 234 - if (rc || !arg->altmap_nr_pages) 235 - break; 236 - /* 237 - * Set CMMA state to nodat here, since the struct page memory 238 - * at the beginning of the memory block will not go through the 239 - * buddy allocator later. 
240 - */ 241 - __arch_set_page_nodat((void *)__va(start), arg->altmap_nr_pages); 242 - break; 243 - case MEM_FINISH_OFFLINE: 244 - /* 245 - * When altmap is in use, take the specified memory range 246 - * offline, which includes the altmap. 247 - */ 248 - if (arg->altmap_nr_pages) { 249 - start = PFN_PHYS(arg->altmap_start_pfn); 250 - size += PFN_PHYS(arg->altmap_nr_pages); 218 + mem = find_memory_block(pfn_to_section_nr(PFN_DOWN(addr))); 219 + put_device(&mem->dev); 220 + WRITE_ONCE(sclp_mem->config, 1); 221 + } else { 222 + if (!sclp_mem->config) 223 + goto out_unlock; 224 + mem = find_memory_block(pfn_to_section_nr(PFN_DOWN(addr))); 225 + if (mem->state != MEM_OFFLINE) { 226 + put_device(&mem->dev); 227 + rc = -EBUSY; 228 + goto out_unlock; 251 229 } 252 - sclp_mem_change_state(start, size, 0); 253 - break; 254 - default: 255 - break; 230 + /* drop the ref just got via find_memory_block() */ 231 + put_device(&mem->dev); 232 + sclp_mem_change_state(addr, block_size, 0); 233 + __remove_memory(addr, block_size); 234 + WRITE_ONCE(sclp_mem->config, 0); 256 235 } 257 - mutex_unlock(&sclp_mem_mutex); 258 - return rc ? NOTIFY_BAD : NOTIFY_OK; 236 + out_unlock: 237 + unlock_device_hotplug(); 238 + out: 239 + return rc ? 
rc : count; 259 240 } 260 241 261 - static struct notifier_block sclp_mem_nb = { 262 - .notifier_call = sclp_mem_notifier, 242 + static struct kobj_attribute sclp_config_mem_attr = 243 + __ATTR(config, 0644, sclp_config_mem_show, sclp_config_mem_store); 244 + 245 + static ssize_t sclp_memmap_on_memory_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) 246 + { 247 + struct sclp_mem *sclp_mem = container_of(kobj, struct sclp_mem, kobj); 248 + 249 + return sysfs_emit(buf, "%u\n", READ_ONCE(sclp_mem->memmap_on_memory)); 250 + } 251 + 252 + static ssize_t sclp_memmap_on_memory_store(struct kobject *kobj, struct kobj_attribute *attr, 253 + const char *buf, size_t count) 254 + { 255 + struct sclp_mem *sclp_mem; 256 + unsigned long block_size; 257 + struct memory_block *mem; 258 + bool value; 259 + int rc; 260 + 261 + rc = kstrtobool(buf, &value); 262 + if (rc) 263 + return rc; 264 + if (value && !mhp_supports_memmap_on_memory()) 265 + return -EOPNOTSUPP; 266 + rc = lock_device_hotplug_sysfs(); 267 + if (rc) 268 + return rc; 269 + block_size = memory_block_size_bytes(); 270 + sclp_mem = container_of(kobj, struct sclp_mem, kobj); 271 + mem = find_memory_block(pfn_to_section_nr(PFN_DOWN(sclp_mem->id * block_size))); 272 + if (!mem) { 273 + WRITE_ONCE(sclp_mem->memmap_on_memory, value); 274 + } else { 275 + put_device(&mem->dev); 276 + rc = -EBUSY; 277 + } 278 + unlock_device_hotplug(); 279 + return rc ? 
rc : count; 280 + } 281 + 282 + static const struct kobj_type ktype = { 283 + .sysfs_ops = &kobj_sysfs_ops, 263 284 }; 285 + 286 + static struct kobj_attribute sclp_memmap_attr = 287 + __ATTR(memmap_on_memory, 0644, sclp_memmap_on_memory_show, sclp_memmap_on_memory_store); 288 + 289 + static struct attribute *sclp_mem_attrs[] = { 290 + &sclp_config_mem_attr.attr, 291 + &sclp_memmap_attr.attr, 292 + NULL, 293 + }; 294 + 295 + static struct attribute_group sclp_mem_attr_group = { 296 + .attrs = sclp_mem_attrs, 297 + }; 298 + 299 + static int sclp_create_mem(struct sclp_mem *sclp_mem, struct kset *kset, 300 + unsigned int id, bool config, bool memmap_on_memory) 301 + { 302 + int rc; 303 + 304 + sclp_mem->memmap_on_memory = memmap_on_memory; 305 + sclp_mem->config = config; 306 + sclp_mem->id = id; 307 + kobject_init(&sclp_mem->kobj, &ktype); 308 + rc = kobject_add(&sclp_mem->kobj, &kset->kobj, "memory%d", id); 309 + if (rc) 310 + return rc; 311 + return sysfs_create_group(&sclp_mem->kobj, &sclp_mem_attr_group); 312 + } 313 + 314 + static int sclp_create_configured_mem(struct memory_block *mem, void *argument) 315 + { 316 + struct sclp_mem *sclp_mems; 317 + struct sclp_mem_arg *arg; 318 + struct kset *kset; 319 + unsigned int id; 320 + 321 + id = mem->dev.id; 322 + arg = (struct sclp_mem_arg *)argument; 323 + sclp_mems = arg->sclp_mems; 324 + kset = arg->kset; 325 + return sclp_create_mem(&sclp_mems[id], kset, id, true, false); 326 + } 264 327 265 328 static void __init align_to_block_size(unsigned long *start, 266 329 unsigned long *size, ··· 353 264 *size = size_align; 354 265 } 355 266 356 - static void __init add_memory_merged(u16 rn) 267 + static int __init sclp_create_standby_mems_merged(struct sclp_mem *sclp_mems, 268 + struct kset *kset, u16 rn) 357 269 { 358 270 unsigned long start, size, addr, block_size; 359 271 static u16 first_rn, num; 272 + unsigned int id; 273 + int rc = 0; 360 274 361 275 if (rn && first_rn && (first_rn + num == rn)) { 362 276 num++; 
363 - return; 277 + return rc; 364 278 } 365 279 if (!first_rn) 366 280 goto skip_add; ··· 378 286 if (!size) 379 287 goto skip_add; 380 288 for (addr = start; addr < start + size; addr += block_size) { 381 - add_memory(0, addr, block_size, 382 - cpu_has_edat1() ? 383 - MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE); 289 + id = addr / block_size; 290 + rc = sclp_create_mem(&sclp_mems[id], kset, id, false, 291 + mhp_supports_memmap_on_memory()); 292 + if (rc) 293 + break; 384 294 } 385 295 skip_add: 386 296 first_rn = rn; 387 297 num = 1; 298 + return rc; 388 299 } 389 300 390 - static void __init sclp_add_standby_memory(void) 301 + static int __init sclp_create_standby_mems(struct sclp_mem *sclp_mems, struct kset *kset) 391 302 { 392 303 struct memory_increment *incr; 304 + int rc = 0; 393 305 394 306 list_for_each_entry(incr, &sclp_mem_list, list) { 395 307 if (incr->standby) 396 - add_memory_merged(incr->rn); 308 + rc = sclp_create_standby_mems_merged(sclp_mems, kset, incr->rn); 309 + if (rc) 310 + return rc; 397 311 } 398 - add_memory_merged(0); 312 + return sclp_create_standby_mems_merged(sclp_mems, kset, 0); 313 + } 314 + 315 + static int __init sclp_init_mem(void) 316 + { 317 + const unsigned long block_size = memory_block_size_bytes(); 318 + unsigned int max_sclp_mems; 319 + struct sclp_mem *sclp_mems; 320 + struct sclp_mem_arg arg; 321 + struct kset *kset; 322 + int rc; 323 + 324 + max_sclp_mems = roundup(sclp.rnmax * sclp.rzm, block_size) / block_size; 325 + /* Allocate memory for all blocks ahead of time. */ 326 + sclp_mems = kcalloc(max_sclp_mems, sizeof(struct sclp_mem), GFP_KERNEL); 327 + if (!sclp_mems) 328 + return -ENOMEM; 329 + kset = kset_create_and_add("memory", NULL, firmware_kobj); 330 + if (!kset) 331 + return -ENOMEM; 332 + /* Initial memory is in the "configured" state already. 
*/ 333 + arg.sclp_mems = sclp_mems; 334 + arg.kset = kset; 335 + rc = for_each_memory_block(&arg, sclp_create_configured_mem); 336 + if (rc) 337 + return rc; 338 + /* Standby memory is "deconfigured". */ 339 + return sclp_create_standby_mems(sclp_mems, kset); 399 340 } 400 341 401 342 static void __init insert_increment(u16 rn, int standby, int assigned) ··· 461 336 list_add(&new_incr->list, prev); 462 337 } 463 338 464 - static int __init sclp_detect_standby_memory(void) 339 + static int __init sclp_setup_memory(void) 465 340 { 466 341 struct read_storage_sccb *sccb; 467 342 int i, id, assigned, rc; ··· 513 388 goto out; 514 389 for (i = 1; i <= sclp.rnmax - assigned; i++) 515 390 insert_increment(0, 1, 0); 516 - rc = register_memory_notifier(&sclp_mem_nb); 517 - if (rc) 518 - goto out; 519 - sclp_add_standby_memory(); 391 + rc = sclp_init_mem(); 520 392 out: 521 393 free_page((unsigned long)sccb); 522 394 return rc; 523 395 } 524 - __initcall(sclp_detect_standby_memory); 396 + __initcall(sclp_setup_memory);
+1 -2
drivers/s390/char/sclp_ocf.c
··· 6 6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 7 7 */ 8 8 9 - #define KMSG_COMPONENT "sclp_ocf" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "sclp_ocf: " fmt 11 10 12 11 #include <linux/export.h> 13 12 #include <linux/kernel.h>
+1 -2
drivers/s390/char/sclp_pci.c
··· 4 4 * 5 5 * Copyright IBM Corp. 2016 6 6 */ 7 - #define KMSG_COMPONENT "sclp_cmd" 8 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 7 + #define pr_fmt(fmt) "sclp_cmd: " fmt 9 8 10 9 #include <linux/completion.h> 11 10 #include <linux/export.h>
+1 -2
drivers/s390/char/sclp_sd.c
··· 5 5 * Copyright IBM Corp. 2017 6 6 */ 7 7 8 - #define KMSG_COMPONENT "sclp_sd" 9 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + #define pr_fmt(fmt) "sclp_sd: " fmt 10 9 11 10 #include <linux/completion.h> 12 11 #include <linux/jiffies.h>
+1 -2
drivers/s390/char/sclp_sdias.c
··· 6 6 * Author(s): Michael Holzheu 7 7 */ 8 8 9 - #define KMSG_COMPONENT "sclp_sdias" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "sclp_sdias: " fmt 11 10 12 11 #include <linux/completion.h> 13 12 #include <linux/sched.h>
+16 -5
drivers/s390/char/tape.h
··· 130 130 int retries; /* retry counter for error recovery. */ 131 131 int rescnt; /* residual count from devstat. */ 132 132 struct timer_list timer; /* timer for std_assign_timeout(). */ 133 + struct irb irb; /* device status */ 133 134 134 135 /* Callback for delivering final status. */ 135 136 void (*callback)(struct tape_request *, void *); ··· 152 151 int (*setup_device)(struct tape_device *); 153 152 void (*cleanup_device)(struct tape_device *); 154 153 int (*irq)(struct tape_device *, struct tape_request *, struct irb *); 155 - struct tape_request *(*read_block)(struct tape_device *, size_t); 156 - struct tape_request *(*write_block)(struct tape_device *, size_t); 154 + struct tape_request *(*read_block)(struct tape_device *); 155 + struct tape_request *(*write_block)(struct tape_device *); 157 156 void (*process_eov)(struct tape_device*); 158 157 /* ioctl function for additional ioctls. */ 159 158 int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long); ··· 173 172 174 173 /* Char Frontend Data */ 175 174 struct tape_char_data { 176 - struct idal_buffer *idal_buf; /* idal buffer for user char data */ 175 + struct idal_buffer **ibs; /* idal buffer array for user char data */ 177 176 int block_size; /* of size block_size. 
*/ 178 177 }; 179 178 ··· 235 234 /* Externals from tape_core.c */ 236 235 extern struct tape_request *tape_alloc_request(int cplength, int datasize); 237 236 extern void tape_free_request(struct tape_request *); 237 + extern int tape_check_idalbuffer(struct tape_device *device, size_t size); 238 238 extern int tape_do_io(struct tape_device *, struct tape_request *); 239 239 extern int tape_do_io_async(struct tape_device *, struct tape_request *); 240 240 extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *); ··· 349 347 } 350 348 351 349 static inline struct ccw1 * 350 + tape_ccw_dc_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal) 351 + { 352 + ccw->cmd_code = cmd_code; 353 + ccw->flags = CCW_FLAG_DC; 354 + idal_buffer_set_cda(idal, ccw); 355 + return ccw + 1; 356 + } 357 + 358 + static inline struct ccw1 * 352 359 tape_ccw_cc_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal) 353 360 { 354 361 ccw->cmd_code = cmd_code; 355 362 ccw->flags = CCW_FLAG_CC; 356 363 idal_buffer_set_cda(idal, ccw); 357 - return ccw++; 364 + return ccw + 1; 358 365 } 359 366 360 367 static inline struct ccw1 * ··· 372 361 ccw->cmd_code = cmd_code; 373 362 ccw->flags = 0; 374 363 idal_buffer_set_cda(idal, ccw); 375 - return ccw++; 364 + return ccw + 1; 376 365 } 377 366 378 367 /* Global vars */
+1 -30
drivers/s390/char/tape_34xx.c
··· 8 8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 9 9 */ 10 10 11 - #define KMSG_COMPONENT "tape_34xx" 12 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11 + #define pr_fmt(fmt) "tape_34xx: " fmt 13 12 14 13 #include <linux/export.h> 15 14 #include <linux/module.h> ··· 233 234 return TAPE_IO_SUCCESS; 234 235 } 235 236 236 - /* 237 - * Read Opposite Error Recovery Function: 238 - * Used, when Read Forward does not work 239 - */ 240 - static int 241 - tape_34xx_erp_read_opposite(struct tape_device *device, 242 - struct tape_request *request) 243 - { 244 - if (request->op == TO_RFO) { 245 - /* 246 - * We did read forward, but the data could not be read 247 - * *correctly*. We transform the request to a read backward 248 - * and try again. 249 - */ 250 - tape_std_read_backward(device, request); 251 - return tape_34xx_erp_retry(request); 252 - } 253 - 254 - /* 255 - * We tried to read forward and backward, but hat no 256 - * success -> failed. 257 - */ 258 - return tape_34xx_erp_failed(request, -EIO); 259 - } 260 - 261 237 static int 262 238 tape_34xx_erp_bug(struct tape_device *device, struct tape_request *request, 263 239 struct irb *irb, int no) ··· 414 440 dev_warn (&device->cdev->dev, "A write error on the " 415 441 "tape cannot be recovered\n"); 416 442 return tape_34xx_erp_failed(request, -EIO); 417 - case 0x26: 418 - /* Data Check (read opposite) occurred. */ 419 - return tape_34xx_erp_read_opposite(device, request); 420 443 case 0x28: 421 444 /* ID-Mark at tape start couldn't be written */ 422 445 dev_warn (&device->cdev->dev, "Writing the ID-mark "
+1 -91
drivers/s390/char/tape_3590.c
··· 8 8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 9 9 */ 10 10 11 - #define KMSG_COMPONENT "tape_3590" 12 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11 + #define pr_fmt(fmt) "tape_3590: " fmt 13 12 14 13 #include <linux/export.h> 15 14 #include <linux/module.h> ··· 550 551 } 551 552 552 553 /* 553 - * Read Opposite Error Recovery Function: 554 - * Used, when Read Forward does not work 555 - */ 556 - static void 557 - tape_3590_read_opposite(struct tape_device *device, 558 - struct tape_request *request) 559 - { 560 - struct tape_3590_disc_data *data; 561 - 562 - /* 563 - * We have allocated 4 ccws in tape_std_read, so we can now 564 - * transform the request to a read backward, followed by a 565 - * forward space block. 566 - */ 567 - request->op = TO_RBA; 568 - tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); 569 - data = device->discdata; 570 - tape_ccw_cc_idal(request->cpaddr + 1, data->read_back_op, 571 - device->char_data.idal_buf); 572 - tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL); 573 - tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL); 574 - DBF_EVENT(6, "xrop ccwg\n"); 575 - } 576 - 577 - /* 578 554 * Read Attention Msg 579 555 * This should be done after an interrupt with attention bit (0x80) 580 556 * in device state. 
··· 868 894 struct tape_request *request, struct irb *irb) 869 895 { 870 896 return tape_3590_erp_basic(device, request, irb, -EIO); 871 - } 872 - 873 - /* 874 - * RDA: Read Alternate 875 - */ 876 - static int 877 - tape_3590_erp_read_alternate(struct tape_device *device, 878 - struct tape_request *request, struct irb *irb) 879 - { 880 - struct tape_3590_disc_data *data; 881 - 882 - /* 883 - * The issued Read Backward or Read Previous command is not 884 - * supported by the device 885 - * The recovery action should be to issue another command: 886 - * Read Revious: if Read Backward is not supported 887 - * Read Backward: if Read Previous is not supported 888 - */ 889 - data = device->discdata; 890 - if (data->read_back_op == READ_PREVIOUS) { 891 - DBF_EVENT(2, "(%08x): No support for READ_PREVIOUS command\n", 892 - device->cdev_id); 893 - data->read_back_op = READ_BACKWARD; 894 - } else { 895 - DBF_EVENT(2, "(%08x): No support for READ_BACKWARD command\n", 896 - device->cdev_id); 897 - data->read_back_op = READ_PREVIOUS; 898 - } 899 - tape_3590_read_opposite(device, request); 900 - return tape_3590_erp_retry(device, request, irb); 901 - } 902 - 903 - /* 904 - * Error Recovery read opposite 905 - */ 906 - static int 907 - tape_3590_erp_read_opposite(struct tape_device *device, 908 - struct tape_request *request, struct irb *irb) 909 - { 910 - switch (request->op) { 911 - case TO_RFO: 912 - /* 913 - * We did read forward, but the data could not be read. 914 - * We will read backward and then skip forward again. 
915 - */ 916 - tape_3590_read_opposite(device, request); 917 - return tape_3590_erp_retry(device, request, irb); 918 - case TO_RBA: 919 - /* We tried to read forward and backward, but hat no success */ 920 - return tape_3590_erp_failed(device, request, irb, -EIO); 921 - break; 922 - default: 923 - return tape_3590_erp_failed(device, request, irb, -EIO); 924 - } 925 897 } 926 898 927 899 /* ··· 1268 1348 tape_3590_print_era_msg(device, irb); 1269 1349 return tape_3590_erp_read_buf_log(device, request, irb); 1270 1350 1271 - case 0x2011: 1272 - tape_3590_print_era_msg(device, irb); 1273 - return tape_3590_erp_read_alternate(device, request, irb); 1274 - 1275 1351 case 0x2230: 1276 1352 case 0x2231: 1277 1353 tape_3590_print_era_msg(device, irb); ··· 1320 1404 /* Swap */ 1321 1405 tape_3590_print_era_msg(device, irb); 1322 1406 return tape_3590_erp_swap(device, request, irb); 1323 - } 1324 - if (sense->rac == 0x26) { 1325 - /* Read Opposite */ 1326 - tape_3590_print_era_msg(device, irb); 1327 - return tape_3590_erp_read_opposite(device, request, 1328 - irb); 1329 1407 } 1330 1408 return tape_3590_erp_basic(device, request, irb, -EIO); 1331 1409 case 0x5020:
+57 -85
drivers/s390/char/tape_char.c
··· 10 10 * Martin Schwidefsky <schwidefsky@de.ibm.com> 11 11 */ 12 12 13 - #define KMSG_COMPONENT "tape" 14 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 + #define pr_fmt(fmt) "tape: " fmt 15 14 16 15 #include <linux/module.h> 17 16 #include <linux/types.h> 18 17 #include <linux/proc_fs.h> 19 18 #include <linux/mtio.h> 20 - #include <linux/compat.h> 21 19 22 20 #include <linux/uaccess.h> 23 21 ··· 35 37 static int tapechar_open(struct inode *,struct file *); 36 38 static int tapechar_release(struct inode *,struct file *); 37 39 static long tapechar_ioctl(struct file *, unsigned int, unsigned long); 38 - #ifdef CONFIG_COMPAT 39 - static long tapechar_compat_ioctl(struct file *, unsigned int, unsigned long); 40 - #endif 41 40 42 41 static const struct file_operations tape_fops = 43 42 { ··· 42 47 .read = tapechar_read, 43 48 .write = tapechar_write, 44 49 .unlocked_ioctl = tapechar_ioctl, 45 - #ifdef CONFIG_COMPAT 46 - .compat_ioctl = tapechar_compat_ioctl, 47 - #endif 48 50 .open = tapechar_open, 49 51 .release = tapechar_release, 50 52 }; ··· 56 64 { 57 65 char device_name[20]; 58 66 59 - sprintf(device_name, "ntibm%i", device->first_minor / 2); 67 + scnprintf(device_name, sizeof(device_name), "ntibm%i", device->first_minor / 2); 60 68 device->nt = register_tape_dev( 61 69 &device->cdev->dev, 62 70 MKDEV(tapechar_major, device->first_minor), ··· 85 93 device->nt = NULL; 86 94 } 87 95 88 - static int 89 - tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) 90 - { 91 - struct idal_buffer *new; 92 - 93 - if (device->char_data.idal_buf != NULL && 94 - device->char_data.idal_buf->size == block_size) 95 - return 0; 96 - 97 - if (block_size > MAX_BLOCKSIZE) { 98 - DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n", 99 - block_size, MAX_BLOCKSIZE); 100 - return -EINVAL; 101 - } 102 - 103 - /* The current idal buffer is not correct. Allocate a new one. 
*/ 104 - new = idal_buffer_alloc(block_size, 0); 105 - if (IS_ERR(new)) 106 - return -ENOMEM; 107 - 108 - if (device->char_data.idal_buf != NULL) 109 - idal_buffer_free(device->char_data.idal_buf); 110 - 111 - device->char_data.idal_buf = new; 112 - 113 - return 0; 114 - } 115 96 116 97 /* 117 98 * Tape device read function ··· 92 127 static ssize_t 93 128 tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) 94 129 { 95 - struct tape_device *device; 96 130 struct tape_request *request; 131 + struct ccw1 *ccw, *last_ccw; 132 + struct tape_device *device; 133 + struct idal_buffer **ibs; 97 134 size_t block_size; 135 + size_t read = 0; 98 136 int rc; 99 137 100 138 DBF_EVENT(6, "TCHAR:read\n"); ··· 124 156 block_size = count; 125 157 } 126 158 127 - rc = tapechar_check_idalbuffer(device, block_size); 159 + rc = tape_check_idalbuffer(device, block_size); 128 160 if (rc) 129 161 return rc; 130 162 131 163 DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size); 132 164 /* Let the discipline build the ccw chain. */ 133 - request = device->discipline->read_block(device, block_size); 165 + request = device->discipline->read_block(device); 134 166 if (IS_ERR(request)) 135 167 return PTR_ERR(request); 136 168 /* Execute it. */ 137 169 rc = tape_do_io(device, request); 138 170 if (rc == 0) { 139 - rc = block_size - request->rescnt; 140 171 DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc); 141 - /* Copy data from idal buffer to user space. */ 142 - if (idal_buffer_to_user(device->char_data.idal_buf, 143 - data, rc) != 0) 144 - rc = -EFAULT; 172 + /* Channel Program Address (cpa) points to last CCW + 8 */ 173 + last_ccw = dma32_to_virt(request->irb.scsw.cmd.cpa); 174 + ccw = request->cpaddr; 175 + ibs = device->char_data.ibs; 176 + while (++ccw < last_ccw) { 177 + /* Copy data from idal buffer to user space. 
*/ 178 + if (idal_buffer_to_user(*ibs++, data, ccw->count) != 0) { 179 + rc = -EFAULT; 180 + break; 181 + } 182 + read += ccw->count; 183 + data += ccw->count; 184 + } 185 + if (&last_ccw[-1] == &request->cpaddr[1] && 186 + request->rescnt == last_ccw[-1].count) 187 + rc = 0; 188 + else 189 + rc = read - request->rescnt; 145 190 } 146 191 tape_free_request(request); 147 192 return rc; ··· 166 185 static ssize_t 167 186 tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos) 168 187 { 169 - struct tape_device *device; 170 188 struct tape_request *request; 189 + struct ccw1 *ccw, *last_ccw; 190 + struct tape_device *device; 191 + struct idal_buffer **ibs; 192 + size_t written = 0; 171 193 size_t block_size; 172 - size_t written; 173 194 int nblocks; 174 195 int i, rc; 175 196 ··· 191 208 nblocks = 1; 192 209 } 193 210 194 - rc = tapechar_check_idalbuffer(device, block_size); 211 + rc = tape_check_idalbuffer(device, block_size); 195 212 if (rc) 196 213 return rc; 197 214 198 - DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size); 215 + DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size); 199 216 DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks); 200 217 /* Let the discipline build the ccw chain. */ 201 - request = device->discipline->write_block(device, block_size); 218 + request = device->discipline->write_block(device); 202 219 if (IS_ERR(request)) 203 220 return PTR_ERR(request); 204 - rc = 0; 205 - written = 0; 221 + 206 222 for (i = 0; i < nblocks; i++) { 207 - /* Copy data from user space to idal buffer. 
*/ 208 - if (idal_buffer_from_user(device->char_data.idal_buf, 209 - data, block_size)) { 210 - rc = -EFAULT; 211 - break; 223 + size_t wbytes = 0; /* Used to trace written data in dbf */ 224 + 225 + ibs = device->char_data.ibs; 226 + while (ibs && *ibs) { 227 + if (idal_buffer_from_user(*ibs, data, (*ibs)->size)) { 228 + rc = -EFAULT; 229 + goto out; 230 + } 231 + data += (*ibs)->size; 232 + ibs++; 212 233 } 213 234 rc = tape_do_io(device, request); 214 235 if (rc) 215 - break; 216 - DBF_EVENT(6, "TCHAR:wbytes: %lx\n", 217 - block_size - request->rescnt); 218 - written += block_size - request->rescnt; 236 + goto out; 237 + 238 + /* Channel Program Address (cpa) points to last CCW + 8 */ 239 + last_ccw = dma32_to_virt(request->irb.scsw.cmd.cpa); 240 + ccw = request->cpaddr; 241 + while (++ccw < last_ccw) 242 + wbytes += ccw->count; 243 + DBF_EVENT(6, "TCHAR:wbytes: %lx\n", wbytes - request->rescnt); 244 + written += wbytes - request->rescnt; 219 245 if (request->rescnt != 0) 220 246 break; 221 - data += block_size; 222 247 } 248 + 249 + out: 223 250 tape_free_request(request); 224 251 if (rc == -ENOSPC) { 225 252 /* ··· 317 324 } 318 325 } 319 326 320 - if (device->char_data.idal_buf != NULL) { 321 - idal_buffer_free(device->char_data.idal_buf); 322 - device->char_data.idal_buf = NULL; 323 - } 327 + if (device->char_data.ibs) 328 + idal_buffer_array_free(&device->char_data.ibs); 324 329 tape_release(device); 325 330 filp->private_data = NULL; 326 331 tape_put_device(device); ··· 432 441 mutex_unlock(&device->mutex); 433 442 return rc; 434 443 } 435 - 436 - #ifdef CONFIG_COMPAT 437 - static long 438 - tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data) 439 - { 440 - struct tape_device *device = filp->private_data; 441 - long rc; 442 - 443 - if (no == MTIOCPOS32) 444 - no = MTIOCPOS; 445 - else if (no == MTIOCGET32) 446 - no = MTIOCGET; 447 - 448 - mutex_lock(&device->mutex); 449 - rc = __tapechar_ioctl(device, no, compat_ptr(data)); 450 - 
mutex_unlock(&device->mutex); 451 - return rc; 452 - } 453 - #endif /* CONFIG_COMPAT */ 454 444 455 445 /* 456 446 * Initialize character device frontend.
+1 -2
drivers/s390/char/tape_class.c
··· 8 8 * Based on simple class device code by Greg K-H 9 9 */ 10 10 11 - #define KMSG_COMPONENT "tape" 12 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11 + #define pr_fmt(fmt) "tape: " fmt 13 12 14 13 #include <linux/export.h> 15 14 #include <linux/slab.h>
+34 -4
drivers/s390/char/tape_core.c
··· 11 11 * Stefan Bader <shbader@de.ibm.com> 12 12 */ 13 13 14 - #define KMSG_COMPONENT "tape" 15 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 14 + #define pr_fmt(fmt) "tape: " fmt 16 15 17 16 #include <linux/export.h> 18 17 #include <linux/module.h> ··· 725 726 kfree(request); 726 727 } 727 728 729 + int 730 + tape_check_idalbuffer(struct tape_device *device, size_t size) 731 + { 732 + struct idal_buffer **new; 733 + size_t old_size = 0; 734 + 735 + old_size = idal_buffer_array_datasize(device->char_data.ibs); 736 + if (old_size == size) 737 + return 0; 738 + 739 + if (size > MAX_BLOCKSIZE) { 740 + DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n", 741 + size, MAX_BLOCKSIZE); 742 + return -EINVAL; 743 + } 744 + 745 + /* The current idal buffer is not correct. Allocate a new one. */ 746 + new = idal_buffer_array_alloc(size, 0); 747 + if (IS_ERR(new)) 748 + return -ENOMEM; 749 + 750 + /* Free old idal buffer array */ 751 + if (device->char_data.ibs) 752 + idal_buffer_array_free(&device->char_data.ibs); 753 + 754 + device->char_data.ibs = new; 755 + 756 + return 0; 757 + } 758 + 728 759 static int 729 760 __tape_start_io(struct tape_device *device, struct tape_request *request) 730 761 { ··· 1128 1099 } 1129 1100 1130 1101 /* May be an unsolicited irq */ 1131 - if(request != NULL) 1102 + if (request != NULL) { 1132 1103 request->rescnt = irb->scsw.cmd.count; 1133 - else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) && 1104 + memcpy(&request->irb, irb, sizeof(*irb)); 1105 + } else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) && 1134 1106 !list_empty(&device->req_queue)) { 1135 1107 /* Not Ready to Ready after long busy ? */ 1136 1108 struct tape_request *req;
+1 -2
drivers/s390/char/tape_proc.c
··· 11 11 * PROCFS Functions 12 12 */ 13 13 14 - #define KMSG_COMPONENT "tape" 15 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 14 + #define pr_fmt(fmt) "tape: " fmt 16 15 17 16 #include <linux/module.h> 18 17 #include <linux/vmalloc.h>
+29 -54
drivers/s390/char/tape_std.c
··· 11 11 * Stefan Bader <shbader@de.ibm.com> 12 12 */ 13 13 14 - #define KMSG_COMPONENT "tape" 15 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 14 + #define pr_fmt(fmt) "tape: " fmt 16 15 17 16 #include <linux/export.h> 18 17 #include <linux/stddef.h> ··· 211 212 int 212 213 tape_std_mtsetblk(struct tape_device *device, int count) 213 214 { 214 - struct idal_buffer *new; 215 + int rc; 215 216 216 217 DBF_LH(6, "tape_std_mtsetblk(%d)\n", count); 217 218 if (count <= 0) { ··· 223 224 device->char_data.block_size = 0; 224 225 return 0; 225 226 } 226 - if (device->char_data.idal_buf != NULL && 227 - device->char_data.idal_buf->size == count) 228 - /* We already have a idal buffer of that size. */ 229 - return 0; 230 227 231 - if (count > MAX_BLOCKSIZE) { 232 - DBF_EVENT(3, "Invalid block size (%d > %d) given.\n", 233 - count, MAX_BLOCKSIZE); 234 - return -EINVAL; 235 - } 228 + rc = tape_check_idalbuffer(device, count); 229 + if (rc) 230 + return rc; 236 231 237 - /* Allocate a new idal buffer. */ 238 - new = idal_buffer_alloc(count, 0); 239 - if (IS_ERR(new)) 240 - return -ENOMEM; 241 - if (device->char_data.idal_buf != NULL) 242 - idal_buffer_free(device->char_data.idal_buf); 243 - device->char_data.idal_buf = new; 244 232 device->char_data.block_size = count; 245 - 246 233 DBF_LH(6, "new blocksize is %d\n", device->char_data.block_size); 247 234 248 235 return 0; ··· 626 641 * Read Block 627 642 */ 628 643 struct tape_request * 629 - tape_std_read_block(struct tape_device *device, size_t count) 644 + tape_std_read_block(struct tape_device *device) 630 645 { 631 646 struct tape_request *request; 647 + struct idal_buffer **ibs; 648 + struct ccw1 *ccw; 649 + size_t count; 632 650 633 - /* 634 - * We have to alloc 4 ccws in order to be able to transform request 635 - * into a read backward request in error case. 
636 - */ 637 - request = tape_alloc_request(4, 0); 651 + ibs = device->char_data.ibs; 652 + count = idal_buffer_array_size(ibs); 653 + request = tape_alloc_request(count + 1 /* MODE_SET_DB */, 0); 638 654 if (IS_ERR(request)) { 639 655 DBF_EXCEPTION(6, "xrbl fail"); 640 656 return request; 641 657 } 642 658 request->op = TO_RFO; 643 - tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); 644 - tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD, 645 - device->char_data.idal_buf); 659 + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); 660 + while (count-- > 1) 661 + ccw = tape_ccw_dc_idal(ccw, READ_FORWARD, *ibs++); 662 + tape_ccw_end_idal(ccw, READ_FORWARD, *ibs); 663 + 646 664 DBF_EVENT(6, "xrbl ccwg\n"); 647 665 return request; 648 666 } 649 667 650 668 /* 651 - * Read Block backward transformation function. 652 - */ 653 - void 654 - tape_std_read_backward(struct tape_device *device, struct tape_request *request) 655 - { 656 - /* 657 - * We have allocated 4 ccws in tape_std_read, so we can now 658 - * transform the request to a read backward, followed by a 659 - * forward space block. 
660 - */ 661 - request->op = TO_RBA; 662 - tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); 663 - tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD, 664 - device->char_data.idal_buf); 665 - tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL); 666 - tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL); 667 - DBF_EVENT(6, "xrop ccwg");} 668 - 669 - /* 670 669 * Write Block 671 670 */ 672 671 struct tape_request * 673 - tape_std_write_block(struct tape_device *device, size_t count) 672 + tape_std_write_block(struct tape_device *device) 674 673 { 675 674 struct tape_request *request; 675 + struct idal_buffer **ibs; 676 + struct ccw1 *ccw; 677 + size_t count; 676 678 677 - request = tape_alloc_request(2, 0); 679 + count = idal_buffer_array_size(device->char_data.ibs); 680 + request = tape_alloc_request(count + 1 /* MODE_SET_DB */, 0); 678 681 if (IS_ERR(request)) { 679 682 DBF_EXCEPTION(6, "xwbl fail\n"); 680 683 return request; 681 684 } 682 685 request->op = TO_WRI; 683 - tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); 684 - tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD, 685 - device->char_data.idal_buf); 686 + ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); 687 + ibs = device->char_data.ibs; 688 + while (count-- > 1) 689 + ccw = tape_ccw_dc_idal(ccw, WRITE_CMD, *ibs++); 690 + tape_ccw_end_idal(ccw, WRITE_CMD, *ibs); 691 + 686 692 DBF_EVENT(6, "xwbl ccwg\n"); 687 693 return request; 688 694 } ··· 717 741 EXPORT_SYMBOL(tape_std_mtunload); 718 742 EXPORT_SYMBOL(tape_std_mtcompression); 719 743 EXPORT_SYMBOL(tape_std_read_block); 720 - EXPORT_SYMBOL(tape_std_read_backward); 721 744 EXPORT_SYMBOL(tape_std_write_block); 722 745 EXPORT_SYMBOL(tape_std_process_eov);
+4 -5
drivers/s390/char/tape_std.h
··· 14 14 #include <asm/tape390.h> 15 15 16 16 /* 17 - * Biggest block size to handle. Currently 64K because we only build 18 - * channel programs without data chaining. 17 + * Biggest block size of 256K to handle. 19 18 */ 20 - #define MAX_BLOCKSIZE 65535 19 + #define MAX_BLOCKSIZE 262144 21 20 22 21 /* 23 22 * The CCW commands for the Tape type of command. ··· 96 97 #define SENSE_TAPE_POSITIONING 0x01 97 98 98 99 /* discipline functions */ 99 - struct tape_request *tape_std_read_block(struct tape_device *, size_t); 100 + struct tape_request *tape_std_read_block(struct tape_device *); 100 101 void tape_std_read_backward(struct tape_device *device, 101 102 struct tape_request *request); 102 - struct tape_request *tape_std_write_block(struct tape_device *, size_t); 103 + struct tape_request *tape_std_write_block(struct tape_device *); 103 104 104 105 /* Some non-mtop commands. */ 105 106 int tape_std_assign(struct tape_device *);
+1 -6
drivers/s390/char/vmcp.c
··· 14 14 15 15 #include <linux/fs.h> 16 16 #include <linux/init.h> 17 - #include <linux/compat.h> 18 17 #include <linux/kernel.h> 19 18 #include <linux/miscdevice.h> 20 19 #include <linux/slab.h> ··· 203 204 int __user *argp; 204 205 205 206 session = file->private_data; 206 - if (is_compat_task()) 207 - argp = compat_ptr(arg); 208 - else 209 - argp = (int __user *)arg; 207 + argp = (int __user *)arg; 210 208 if (mutex_lock_interruptible(&session->mutex)) 211 209 return -ERESTARTSYS; 212 210 switch (cmd) { ··· 237 241 .read = vmcp_read, 238 242 .write = vmcp_write, 239 243 .unlocked_ioctl = vmcp_ioctl, 240 - .compat_ioctl = vmcp_ioctl, 241 244 }; 242 245 243 246 static struct miscdevice vmcp_dev = {
+1 -2
drivers/s390/char/vmlogrdr.c
··· 11 11 * 12 12 */ 13 13 14 - #define KMSG_COMPONENT "vmlogrdr" 15 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 14 + #define pr_fmt(fmt) "vmlogrdr: " fmt 16 15 17 16 #include <linux/module.h> 18 17 #include <linux/init.h>
+1 -2
drivers/s390/char/vmur.c
··· 9 9 * Frank Munzert <munzert@de.ibm.com> 10 10 */ 11 11 12 - #define KMSG_COMPONENT "vmur" 13 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 + #define pr_fmt(fmt) "vmur: " fmt 14 13 15 14 #include <linux/cdev.h> 16 15 #include <linux/slab.h>
+1 -2
drivers/s390/char/zcore.c
··· 9 9 * Author(s): Michael Holzheu 10 10 */ 11 11 12 - #define KMSG_COMPONENT "zdump" 13 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 + #define pr_fmt(fmt) "zdump: " fmt 14 13 15 14 #include <linux/init.h> 16 15 #include <linux/slab.h>
+1 -2
drivers/s390/cio/blacklist.c
··· 8 8 * Arnd Bergmann (arndb@de.ibm.com) 9 9 */ 10 10 11 - #define KMSG_COMPONENT "cio" 12 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11 + #define pr_fmt(fmt) "cio: " fmt 13 12 14 13 #include <linux/init.h> 15 14 #include <linux/vmalloc.h>
+3 -3
drivers/s390/cio/ccwgroup.c
··· 41 41 char str[16]; 42 42 43 43 for (i = 0; i < gdev->count; i++) { 44 - sprintf(str, "cdev%d", i); 44 + scnprintf(str, sizeof(str), "cdev%d", i); 45 45 sysfs_remove_link(&gdev->dev.kobj, str); 46 46 sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device"); 47 47 } ··· 249 249 } 250 250 } 251 251 for (i = 0; i < gdev->count; i++) { 252 - sprintf(str, "cdev%d", i); 252 + scnprintf(str, sizeof(str), "cdev%d", i); 253 253 rc = sysfs_create_link(&gdev->dev.kobj, 254 254 &gdev->cdev[i]->dev.kobj, str); 255 255 if (rc) { 256 256 while (i--) { 257 - sprintf(str, "cdev%d", i); 257 + scnprintf(str, sizeof(str), "cdev%d", i); 258 258 sysfs_remove_link(&gdev->dev.kobj, str); 259 259 } 260 260 for (i = 0; i < gdev->count; i++)
+1 -2
drivers/s390/cio/ccwreq.c
··· 6 6 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 7 7 */ 8 8 9 - #define KMSG_COMPONENT "cio" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "cio: " fmt 11 10 12 11 #include <linux/types.h> 13 12 #include <linux/err.h>
+3 -2
drivers/s390/cio/chp.c
··· 111 111 char dbf_text[15]; 112 112 int status; 113 113 114 - sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid, 115 - chpid.id); 114 + scnprintf(dbf_text, sizeof(dbf_text), 115 + on ? "varyon%x.%02x" : "varyoff%x.%02x", 116 + chpid.cssid, chpid.id); 116 117 CIO_TRACE_EVENT(2, dbf_text); 117 118 118 119 status = chp_get_status(chpid);
+6 -7
drivers/s390/cio/chsc.c
··· 8 8 * Arnd Bergmann (arndb@de.ibm.com) 9 9 */ 10 10 11 - #define KMSG_COMPONENT "cio" 12 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11 + #define pr_fmt(fmt) "cio: " fmt 13 12 14 13 #include <linux/export.h> 15 14 #include <linux/module.h> ··· 252 253 struct chp_link link; 253 254 char dbf_txt[15]; 254 255 255 - sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); 256 + scnprintf(dbf_txt, sizeof(dbf_txt), "chpr%x.%02x", chpid.cssid, chpid.id); 256 257 CIO_TRACE_EVENT(2, dbf_txt); 257 258 258 259 if (chp_get_status(chpid) <= 0) ··· 283 284 { 284 285 char dbf_txt[15]; 285 286 286 - sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid, 287 - link->chpid.id); 287 + scnprintf(dbf_txt, sizeof(dbf_txt), "accpr%x.%02x", link->chpid.cssid, 288 + link->chpid.id); 288 289 CIO_TRACE_EVENT( 2, dbf_txt); 289 290 if (link->fla != 0) { 290 - sprintf(dbf_txt, "fla%x", link->fla); 291 + scnprintf(dbf_txt, sizeof(dbf_txt), "fla%x", link->fla); 291 292 CIO_TRACE_EVENT( 2, dbf_txt); 292 293 } 293 294 /* Wait until previous actions have settled. */ ··· 756 757 struct chp_link link; 757 758 char dbf_txt[15]; 758 759 759 - sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); 760 + scnprintf(dbf_txt, sizeof(dbf_txt), "cadd%x.%02x", chpid.cssid, chpid.id); 760 761 CIO_TRACE_EVENT(2, dbf_txt); 761 762 762 763 if (chp_get_status(chpid) != 0) {
+1 -6
drivers/s390/cio/chsc_sch.c
··· 9 9 */ 10 10 11 11 #include <linux/slab.h> 12 - #include <linux/compat.h> 13 12 #include <linux/device.h> 14 13 #include <linux/io.h> 15 14 #include <linux/module.h> ··· 844 845 void __user *argp; 845 846 846 847 CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd); 847 - if (is_compat_task()) 848 - argp = compat_ptr(arg); 849 - else 850 - argp = (void __user *)arg; 848 + argp = (void __user *)arg; 851 849 switch (cmd) { 852 850 case CHSC_START: 853 851 return chsc_ioctl_start(argp); ··· 919 923 .open = chsc_open, 920 924 .release = chsc_release, 921 925 .unlocked_ioctl = chsc_ioctl, 922 - .compat_ioctl = chsc_ioctl, 923 926 }; 924 927 925 928 static struct miscdevice chsc_misc_device = {
+2 -3
drivers/s390/cio/cio.c
··· 9 9 * Martin Schwidefsky (schwidefsky@de.ibm.com) 10 10 */ 11 11 12 - #define KMSG_COMPONENT "cio" 13 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 + #define pr_fmt(fmt) "cio: " fmt 14 13 15 14 #include <linux/export.h> 16 15 #include <linux/ftrace.h> ··· 112 113 if (cio_update_schib(sch)) 113 114 return -ENODEV; 114 115 115 - sprintf(dbf_text, "no%s", dev_name(&sch->dev)); 116 + scnprintf(dbf_text, sizeof(dbf_text), "no%s", dev_name(&sch->dev)); 116 117 CIO_TRACE_EVENT(0, dbf_text); 117 118 CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); 118 119
+1 -2
drivers/s390/cio/cio_inject.c
··· 6 6 * Author(s): Vineeth Vijayan <vneethv@linux.ibm.com> 7 7 */ 8 8 9 - #define KMSG_COMPONENT "cio" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "cio: " fmt 11 10 12 11 #include <linux/slab.h> 13 12 #include <linux/spinlock.h>
+1 -2
drivers/s390/cio/cmf.c
··· 10 10 * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com> 11 11 */ 12 12 13 - #define KMSG_COMPONENT "cio" 14 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 + #define pr_fmt(fmt) "cio: " fmt 15 14 16 15 #include <linux/memblock.h> 17 16 #include <linux/device.h>
+1 -2
drivers/s390/cio/css.c
··· 8 8 * Cornelia Huck (cornelia.huck@de.ibm.com) 9 9 */ 10 10 11 - #define KMSG_COMPONENT "cio" 12 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11 + #define pr_fmt(fmt) "cio: " fmt 13 12 14 13 #include <linux/export.h> 15 14 #include <linux/init.h>
+1 -2
drivers/s390/cio/device.c
··· 8 8 * Martin Schwidefsky (schwidefsky@de.ibm.com) 9 9 */ 10 10 11 - #define KMSG_COMPONENT "cio" 12 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 11 + #define pr_fmt(fmt) "cio: " fmt 13 12 14 13 #include <linux/export.h> 15 14 #include <linux/init.h>
+1 -1
drivers/s390/cio/device_status.c
··· 42 42 cdev->private->dev_id.devno, sch->schid.ssid, 43 43 sch->schid.sch_no, 44 44 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); 45 - sprintf(dbf_text, "chk%x", sch->schid.sch_no); 45 + scnprintf(dbf_text, sizeof(dbf_text), "chk%x", sch->schid.sch_no); 46 46 CIO_TRACE_EVENT(0, dbf_text); 47 47 CIO_HEX_EVENT(0, irb, sizeof(struct irb)); 48 48 }
+145 -51
drivers/s390/crypto/ap_bus.c
··· 11 11 * Adjunct processor bus. 12 12 */ 13 13 14 - #define KMSG_COMPONENT "ap" 15 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 14 + #define pr_fmt(fmt) "ap: " fmt 16 15 17 16 #include <linux/kernel_stat.h> 18 17 #include <linux/moduleparam.h> ··· 85 86 /* Default permissions (ioctl, card and domain masking) */ 86 87 struct ap_perms ap_perms; 87 88 EXPORT_SYMBOL(ap_perms); 88 - DEFINE_MUTEX(ap_perms_mutex); 89 - EXPORT_SYMBOL(ap_perms_mutex); 89 + /* true if apmask and/or aqmask are NOT default */ 90 + bool ap_apmask_aqmask_in_use; 91 + /* counter for how many driver_overrides are currently active */ 92 + int ap_driver_override_ctr; 93 + /* 94 + * Mutex for consistent read and write of the ap_perms struct, 95 + * ap_apmask_aqmask_in_use, ap_driver_override_ctr 96 + * and the ap bus sysfs attributes apmask and aqmask. 97 + */ 98 + DEFINE_MUTEX(ap_attr_mutex); 99 + EXPORT_SYMBOL(ap_attr_mutex); 90 100 91 101 /* # of bindings complete since init */ 92 102 static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0); ··· 861 853 int rc, card, queue, devres, drvres; 862 854 863 855 if (is_queue_dev(dev)) { 864 - card = AP_QID_CARD(to_ap_queue(dev)->qid); 865 - queue = AP_QID_QUEUE(to_ap_queue(dev)->qid); 866 - mutex_lock(&ap_perms_mutex); 867 - devres = test_bit_inv(card, ap_perms.apm) && 868 - test_bit_inv(queue, ap_perms.aqm); 869 - mutex_unlock(&ap_perms_mutex); 870 - drvres = to_ap_drv(dev->driver)->flags 871 - & AP_DRIVER_FLAG_DEFAULT; 872 - if (!!devres != !!drvres) { 873 - pr_debug("reprobing queue=%02x.%04x\n", card, queue); 874 - rc = device_reprobe(dev); 875 - if (rc) 876 - AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n", 877 - __func__, card, queue); 856 + struct ap_driver *ap_drv = to_ap_drv(dev->driver); 857 + struct ap_queue *aq = to_ap_queue(dev); 858 + struct ap_device *ap_dev = &aq->ap_dev; 859 + 860 + card = AP_QID_CARD(aq->qid); 861 + queue = AP_QID_QUEUE(aq->qid); 862 + 863 + if (ap_dev->driver_override) { 864 + if 
(strcmp(ap_dev->driver_override, 865 + ap_drv->driver.name)) { 866 + pr_debug("reprobing queue=%02x.%04x\n", card, queue); 867 + rc = device_reprobe(dev); 868 + if (rc) { 869 + AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n", 870 + __func__, card, queue); 871 + } 872 + } 873 + } else { 874 + mutex_lock(&ap_attr_mutex); 875 + devres = test_bit_inv(card, ap_perms.apm) && 876 + test_bit_inv(queue, ap_perms.aqm); 877 + mutex_unlock(&ap_attr_mutex); 878 + drvres = to_ap_drv(dev->driver)->flags 879 + & AP_DRIVER_FLAG_DEFAULT; 880 + if (!!devres != !!drvres) { 881 + pr_debug("reprobing queue=%02x.%04x\n", card, queue); 882 + rc = device_reprobe(dev); 883 + if (rc) { 884 + AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n", 885 + __func__, card, queue); 886 + } 887 + } 878 888 } 879 889 } 880 890 ··· 910 884 * @card: the APID of the adapter card to check 911 885 * @queue: the APQI of the queue to check 912 886 * 913 - * Note: the ap_perms_mutex must be locked by the caller of this function. 887 + * Note: the ap_attr_mutex must be locked by the caller of this function. 914 888 * 915 889 * Return: an int specifying whether the AP adapter is reserved for the host (1) 916 890 * or not (0). 
917 891 */ 918 892 int ap_owned_by_def_drv(int card, int queue) 919 893 { 894 + struct ap_queue *aq; 920 895 int rc = 0; 921 896 922 897 if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS) 923 898 return -EINVAL; 924 899 900 + aq = ap_get_qdev(AP_MKQID(card, queue)); 901 + if (aq) { 902 + const struct device_driver *drv = aq->ap_dev.device.driver; 903 + const struct ap_driver *ap_drv = to_ap_drv(drv); 904 + bool override = !!aq->ap_dev.driver_override; 905 + 906 + if (override && drv && ap_drv->flags & AP_DRIVER_FLAG_DEFAULT) 907 + rc = 1; 908 + put_device(&aq->ap_dev.device); 909 + if (override) 910 + goto out; 911 + } 912 + 925 913 if (test_bit_inv(card, ap_perms.apm) && 926 914 test_bit_inv(queue, ap_perms.aqm)) 927 915 rc = 1; 928 916 917 + out: 929 918 return rc; 930 919 } 931 920 EXPORT_SYMBOL(ap_owned_by_def_drv); ··· 952 911 * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check 953 912 * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check 954 913 * 955 - * Note: the ap_perms_mutex must be locked by the caller of this function. 914 + * Note: the ap_attr_mutex must be locked by the caller of this function. 
956 915 * 957 916 * Return: an int specifying whether each APQN is reserved for the host (1) or 958 917 * not (0) ··· 963 922 int card, queue, rc = 0; 964 923 965 924 for (card = 0; !rc && card < AP_DEVICES; card++) 966 - if (test_bit_inv(card, apm) && 967 - test_bit_inv(card, ap_perms.apm)) 925 + if (test_bit_inv(card, apm)) 968 926 for (queue = 0; !rc && queue < AP_DOMAINS; queue++) 969 - if (test_bit_inv(queue, aqm) && 970 - test_bit_inv(queue, ap_perms.aqm)) 971 - rc = 1; 927 + if (test_bit_inv(queue, aqm)) 928 + rc = ap_owned_by_def_drv(card, queue); 972 929 973 930 return rc; 974 931 } ··· 990 951 */ 991 952 card = AP_QID_CARD(to_ap_queue(dev)->qid); 992 953 queue = AP_QID_QUEUE(to_ap_queue(dev)->qid); 993 - mutex_lock(&ap_perms_mutex); 994 - devres = test_bit_inv(card, ap_perms.apm) && 995 - test_bit_inv(queue, ap_perms.aqm); 996 - mutex_unlock(&ap_perms_mutex); 997 - drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; 998 - if (!!devres != !!drvres) 999 - goto out; 954 + if (ap_dev->driver_override) { 955 + if (strcmp(ap_dev->driver_override, 956 + ap_drv->driver.name)) 957 + goto out; 958 + } else { 959 + mutex_lock(&ap_attr_mutex); 960 + devres = test_bit_inv(card, ap_perms.apm) && 961 + test_bit_inv(queue, ap_perms.aqm); 962 + mutex_unlock(&ap_attr_mutex); 963 + drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; 964 + if (!!devres != !!drvres) 965 + goto out; 966 + } 1000 967 } 1001 968 1002 969 /* ··· 1028 983 } 1029 984 1030 985 out: 1031 - if (rc) 986 + if (rc) { 1032 987 put_device(dev); 988 + } else { 989 + if (is_queue_dev(dev)) { 990 + pr_debug("queue=%02x.%04x new driver=%s\n", 991 + card, queue, ap_drv->driver.name); 992 + } else { 993 + pr_debug("card=%02x new driver=%s\n", 994 + to_ap_card(dev)->id, ap_drv->driver.name); 995 + } 996 + } 1033 997 return rc; 1034 998 } 1035 999 ··· 1491 1437 { 1492 1438 int rc; 1493 1439 1494 - if (mutex_lock_interruptible(&ap_perms_mutex)) 1440 + if (mutex_lock_interruptible(&ap_attr_mutex)) 1495 1441 return 
-ERESTARTSYS; 1496 1442 rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n", 1497 1443 ap_perms.apm[0], ap_perms.apm[1], 1498 1444 ap_perms.apm[2], ap_perms.apm[3]); 1499 - mutex_unlock(&ap_perms_mutex); 1445 + mutex_unlock(&ap_attr_mutex); 1500 1446 1501 1447 return rc; 1502 1448 } ··· 1506 1452 int rc = 0; 1507 1453 struct ap_driver *ap_drv = to_ap_drv(drv); 1508 1454 unsigned long *newapm = (unsigned long *)data; 1455 + unsigned long aqm_any[BITS_TO_LONGS(AP_DOMAINS)]; 1509 1456 1510 1457 /* 1511 1458 * increase the driver's module refcounter to be sure it is not ··· 1516 1461 return 0; 1517 1462 1518 1463 if (ap_drv->in_use) { 1519 - rc = ap_drv->in_use(newapm, ap_perms.aqm); 1464 + bitmap_fill(aqm_any, AP_DOMAINS); 1465 + rc = ap_drv->in_use(newapm, aqm_any); 1520 1466 if (rc) 1521 1467 rc = -EBUSY; 1522 1468 } ··· 1546 1490 1547 1491 memcpy(ap_perms.apm, newapm, APMASKSIZE); 1548 1492 1493 + /* 1494 + * Update ap_apmask_aqmask_in_use. Note that the 1495 + * ap_attr_mutex has to be obtained here. 1496 + */ 1497 + ap_apmask_aqmask_in_use = 1498 + bitmap_full(ap_perms.apm, AP_DEVICES) && 1499 + bitmap_full(ap_perms.aqm, AP_DOMAINS) ? 
1500 + false : true; 1501 + 1549 1502 return 0; 1550 1503 } 1551 1504 1552 1505 static ssize_t apmask_store(const struct bus_type *bus, const char *buf, 1553 1506 size_t count) 1554 1507 { 1555 - int rc, changes = 0; 1556 1508 DECLARE_BITMAP(newapm, AP_DEVICES); 1509 + int rc = -EINVAL, changes = 0; 1557 1510 1558 - if (mutex_lock_interruptible(&ap_perms_mutex)) 1511 + if (mutex_lock_interruptible(&ap_attr_mutex)) 1559 1512 return -ERESTARTSYS; 1513 + 1514 + /* Do not allow apmask/aqmask if driver override is active */ 1515 + if (ap_driver_override_ctr) 1516 + goto done; 1560 1517 1561 1518 rc = ap_parse_bitmap_str(buf, ap_perms.apm, AP_DEVICES, newapm); 1562 1519 if (rc) ··· 1580 1511 rc = apmask_commit(newapm); 1581 1512 1582 1513 done: 1583 - mutex_unlock(&ap_perms_mutex); 1514 + mutex_unlock(&ap_attr_mutex); 1584 1515 if (rc) 1585 1516 return rc; 1586 1517 ··· 1598 1529 { 1599 1530 int rc; 1600 1531 1601 - if (mutex_lock_interruptible(&ap_perms_mutex)) 1532 + if (mutex_lock_interruptible(&ap_attr_mutex)) 1602 1533 return -ERESTARTSYS; 1603 1534 rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n", 1604 1535 ap_perms.aqm[0], ap_perms.aqm[1], 1605 1536 ap_perms.aqm[2], ap_perms.aqm[3]); 1606 - mutex_unlock(&ap_perms_mutex); 1537 + mutex_unlock(&ap_attr_mutex); 1607 1538 1608 1539 return rc; 1609 1540 } ··· 1613 1544 int rc = 0; 1614 1545 struct ap_driver *ap_drv = to_ap_drv(drv); 1615 1546 unsigned long *newaqm = (unsigned long *)data; 1547 + unsigned long apm_any[BITS_TO_LONGS(AP_DEVICES)]; 1616 1548 1617 1549 /* 1618 1550 * increase the driver's module refcounter to be sure it is not ··· 1623 1553 return 0; 1624 1554 1625 1555 if (ap_drv->in_use) { 1626 - rc = ap_drv->in_use(ap_perms.apm, newaqm); 1556 + bitmap_fill(apm_any, AP_DEVICES); 1557 + rc = ap_drv->in_use(apm_any, newaqm); 1627 1558 if (rc) 1628 1559 rc = -EBUSY; 1629 1560 } ··· 1653 1582 1654 1583 memcpy(ap_perms.aqm, newaqm, AQMASKSIZE); 1655 1584 1585 + /* 1586 + * Update ap_apmask_aqmask_in_use. 
Note that the 1587 + * ap_attr_mutex has to be obtained here. 1588 + */ 1589 + ap_apmask_aqmask_in_use = 1590 + bitmap_full(ap_perms.apm, AP_DEVICES) && 1591 + bitmap_full(ap_perms.aqm, AP_DOMAINS) ? 1592 + false : true; 1593 + 1656 1594 return 0; 1657 1595 } 1658 1596 1659 1597 static ssize_t aqmask_store(const struct bus_type *bus, const char *buf, 1660 1598 size_t count) 1661 1599 { 1662 - int rc, changes = 0; 1663 1600 DECLARE_BITMAP(newaqm, AP_DOMAINS); 1601 + int rc = -EINVAL, changes = 0; 1664 1602 1665 - if (mutex_lock_interruptible(&ap_perms_mutex)) 1603 + if (mutex_lock_interruptible(&ap_attr_mutex)) 1666 1604 return -ERESTARTSYS; 1605 + 1606 + /* Do not allow apmask/aqmask if driver override is active */ 1607 + if (ap_driver_override_ctr) 1608 + goto done; 1667 1609 1668 1610 rc = ap_parse_bitmap_str(buf, ap_perms.aqm, AP_DOMAINS, newaqm); 1669 1611 if (rc) ··· 1687 1603 rc = aqmask_commit(newaqm); 1688 1604 1689 1605 done: 1690 - mutex_unlock(&ap_perms_mutex); 1606 + mutex_unlock(&ap_attr_mutex); 1691 1607 if (rc) 1692 1608 return rc; 1693 1609 ··· 1734 1650 1735 1651 static BUS_ATTR_RO(bindings); 1736 1652 1653 + static ssize_t bindings_complete_count_show(const struct bus_type *bus, 1654 + char *buf) 1655 + { 1656 + return sysfs_emit(buf, "%llu\n", 1657 + atomic64_read(&ap_bindings_complete_count)); 1658 + } 1659 + 1660 + static BUS_ATTR_RO(bindings_complete_count); 1661 + 1737 1662 static ssize_t features_show(const struct bus_type *bus, char *buf) 1738 1663 { 1739 1664 int n = 0; ··· 1783 1690 &bus_attr_aqmask.attr, 1784 1691 &bus_attr_scans.attr, 1785 1692 &bus_attr_bindings.attr, 1693 + &bus_attr_bindings_complete_count.attr, 1786 1694 &bus_attr_features.attr, 1787 1695 NULL, 1788 1696 }; ··· 2558 2464 if (apm_str) { 2559 2465 memset(&ap_perms.apm, 0, sizeof(ap_perms.apm)); 2560 2466 ap_parse_mask_str(apm_str, ap_perms.apm, AP_DEVICES, 2561 - &ap_perms_mutex); 2467 + &ap_attr_mutex); 2562 2468 } 2563 2469 2564 2470 /* aqm kernel parameter string 
*/ 2565 2471 if (aqm_str) { 2566 2472 memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm)); 2567 2473 ap_parse_mask_str(aqm_str, ap_perms.aqm, AP_DOMAINS, 2568 - &ap_perms_mutex); 2474 + &ap_attr_mutex); 2569 2475 } 2570 2476 } 2571 2477 ··· 2578 2484 { 2579 2485 int rc; 2580 2486 2581 - rc = ap_debug_init(); 2582 - if (rc) 2583 - return rc; 2584 - 2585 2487 if (!ap_instructions_available()) { 2586 2488 pr_warn("The hardware system does not support AP instructions\n"); 2587 2489 return -ENODEV; 2588 2490 } 2491 + 2492 + rc = ap_debug_init(); 2493 + if (rc) 2494 + return rc; 2589 2495 2590 2496 /* init ap_queue hashtable */ 2591 2497 hash_init(ap_queues);
+4 -1
drivers/s390/crypto/ap_bus.h
··· 166 166 struct ap_device { 167 167 struct device device; 168 168 int device_type; /* AP device type. */ 169 + const char *driver_override; 169 170 }; 170 171 171 172 #define to_ap_dev(x) container_of((x), struct ap_device, device) ··· 281 280 }; 282 281 283 282 extern struct ap_perms ap_perms; 284 - extern struct mutex ap_perms_mutex; 283 + extern bool ap_apmask_aqmask_in_use; 284 + extern int ap_driver_override_ctr; 285 + extern struct mutex ap_attr_mutex; 285 286 286 287 /* 287 288 * Get ap_queue device for this qid.
+1 -2
drivers/s390/crypto/ap_card.c
··· 6 6 * Adjunct processor bus, card related code. 7 7 */ 8 8 9 - #define KMSG_COMPONENT "ap" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "ap: " fmt 11 10 12 11 #include <linux/init.h> 13 12 #include <linux/slab.h>
+72 -3
drivers/s390/crypto/ap_queue.c
··· 6 6 * Adjunct processor bus, queue related code. 7 7 */ 8 8 9 - #define KMSG_COMPONENT "ap" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "ap: " fmt 11 10 12 11 #include <linux/export.h> 13 12 #include <linux/init.h> 14 13 #include <linux/slab.h> 15 14 #include <asm/facility.h> 16 15 16 + #define CREATE_TRACE_POINTS 17 + #include <asm/trace/ap.h> 18 + 17 19 #include "ap_bus.h" 18 20 #include "ap_debug.h" 21 + 22 + EXPORT_TRACEPOINT_SYMBOL(s390_ap_nqap); 23 + EXPORT_TRACEPOINT_SYMBOL(s390_ap_dqap); 19 24 20 25 static void __ap_flush_queue(struct ap_queue *aq); 21 26 ··· 103 98 __ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen, 104 99 int special) 105 100 { 101 + struct ap_queue_status status; 102 + 106 103 if (special) 107 104 qid |= 0x400000UL; 108 - return ap_nqap(qid, psmid, msg, msglen); 105 + 106 + status = ap_nqap(qid, psmid, msg, msglen); 107 + 108 + trace_s390_ap_nqap(AP_QID_CARD(qid), AP_QID_QUEUE(qid), 109 + status.value, psmid); 110 + 111 + return status; 109 112 } 110 113 111 114 /* State machine definitions and helpers */ ··· 152 139 &aq->reply->len, &reslen, &resgr0); 153 140 parts++; 154 141 } while (status.response_code == 0xFF && resgr0 != 0); 142 + 143 + trace_s390_ap_dqap(AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid), 144 + status.value, aq->reply->psmid); 155 145 156 146 switch (status.response_code) { 157 147 case AP_RESPONSE_NORMAL: ··· 730 714 731 715 static DEVICE_ATTR_RO(ap_functions); 732 716 717 + static ssize_t driver_override_show(struct device *dev, 718 + struct device_attribute *attr, 719 + char *buf) 720 + { 721 + struct ap_queue *aq = to_ap_queue(dev); 722 + struct ap_device *ap_dev = &aq->ap_dev; 723 + int rc; 724 + 725 + device_lock(dev); 726 + if (ap_dev->driver_override) 727 + rc = sysfs_emit(buf, "%s\n", ap_dev->driver_override); 728 + else 729 + rc = sysfs_emit(buf, "\n"); 730 + device_unlock(dev); 731 + 732 + return rc; 733 + } 734 + 735 + static ssize_t 
driver_override_store(struct device *dev, 736 + struct device_attribute *attr, 737 + const char *buf, size_t count) 738 + { 739 + struct ap_queue *aq = to_ap_queue(dev); 740 + struct ap_device *ap_dev = &aq->ap_dev; 741 + int rc = -EINVAL; 742 + bool old_value; 743 + 744 + if (mutex_lock_interruptible(&ap_attr_mutex)) 745 + return -ERESTARTSYS; 746 + 747 + /* Do not allow driver override if apmask/aqmask is in use */ 748 + if (ap_apmask_aqmask_in_use) 749 + goto out; 750 + 751 + old_value = ap_dev->driver_override ? true : false; 752 + rc = driver_set_override(dev, &ap_dev->driver_override, buf, count); 753 + if (rc) 754 + goto out; 755 + if (old_value && !ap_dev->driver_override) 756 + --ap_driver_override_ctr; 757 + else if (!old_value && ap_dev->driver_override) 758 + ++ap_driver_override_ctr; 759 + 760 + rc = count; 761 + 762 + out: 763 + mutex_unlock(&ap_attr_mutex); 764 + return rc; 765 + } 766 + 767 + static DEVICE_ATTR_RW(driver_override); 768 + 733 769 #ifdef CONFIG_AP_DEBUG 734 770 static ssize_t states_show(struct device *dev, 735 771 struct device_attribute *attr, char *buf) ··· 894 826 &dev_attr_config.attr, 895 827 &dev_attr_chkstop.attr, 896 828 &dev_attr_ap_functions.attr, 829 + &dev_attr_driver_override.attr, 897 830 #ifdef CONFIG_AP_DEBUG 898 831 &dev_attr_states.attr, 899 832 &dev_attr_last_err_rc.attr,
+1 -2
drivers/s390/crypto/pkey_api.c
··· 7 7 * Author(s): Harald Freudenberger 8 8 */ 9 9 10 - #define KMSG_COMPONENT "pkey" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "pkey: " fmt 12 11 13 12 #include <linux/init.h> 14 13 #include <linux/miscdevice.h>
+1 -2
drivers/s390/crypto/pkey_base.c
··· 5 5 * Copyright IBM Corp. 2024 6 6 */ 7 7 8 - #define KMSG_COMPONENT "pkey" 9 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + #define pr_fmt(fmt) "pkey: " fmt 10 9 11 10 #include <linux/cpufeature.h> 12 11 #include <linux/export.h>
+1 -2
drivers/s390/crypto/pkey_cca.c
··· 5 5 * Copyright IBM Corp. 2024 6 6 */ 7 7 8 - #define KMSG_COMPONENT "pkey" 9 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + #define pr_fmt(fmt) "pkey: " fmt 10 9 11 10 #include <linux/init.h> 12 11 #include <linux/module.h>
+1 -2
drivers/s390/crypto/pkey_ep11.c
··· 5 5 * Copyright IBM Corp. 2024 6 6 */ 7 7 8 - #define KMSG_COMPONENT "pkey" 9 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + #define pr_fmt(fmt) "pkey: " fmt 10 9 11 10 #include <linux/init.h> 12 11 #include <linux/module.h>
+1 -2
drivers/s390/crypto/pkey_pckmo.c
··· 5 5 * Copyright IBM Corp. 2024 6 6 */ 7 7 8 - #define KMSG_COMPONENT "pkey" 9 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + #define pr_fmt(fmt) "pkey: " fmt 10 9 11 10 #include <linux/init.h> 12 11 #include <linux/module.h>
+1 -2
drivers/s390/crypto/pkey_sysfs.c
··· 5 5 * Copyright IBM Corp. 2024 6 6 */ 7 7 8 - #define KMSG_COMPONENT "pkey" 9 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + #define pr_fmt(fmt) "pkey: " fmt 10 9 11 10 #include <linux/sysfs.h> 12 11
+1 -2
drivers/s390/crypto/pkey_uv.c
··· 5 5 * Copyright IBM Corp. 2024 6 6 */ 7 7 8 - #define KMSG_COMPONENT "pkey" 9 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8 + #define pr_fmt(fmt) "pkey: " fmt 10 9 11 10 #include <linux/cpufeature.h> 12 11 #include <linux/init.h>
+7 -7
drivers/s390/crypto/vfio_ap_ops.c
··· 968 968 * 969 969 * Return: One of the following values: 970 970 * o the error returned from the ap_apqn_in_matrix_owned_by_def_drv() function, 971 - * most likely -EBUSY indicating the ap_perms_mutex lock is already held. 971 + * most likely -EBUSY indicating the ap_attr_mutex lock is already held. 972 972 * o EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the 973 973 * zcrypt default driver. 974 974 * o EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another mdev ··· 1079 1079 DECLARE_BITMAP(apm_filtered, AP_DEVICES); 1080 1080 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); 1081 1081 1082 - mutex_lock(&ap_perms_mutex); 1082 + mutex_lock(&ap_attr_mutex); 1083 1083 get_update_locks_for_mdev(matrix_mdev); 1084 1084 1085 1085 ret = kstrtoul(buf, 0, &apid); ··· 1114 1114 ret = count; 1115 1115 done: 1116 1116 release_update_locks_for_mdev(matrix_mdev); 1117 - mutex_unlock(&ap_perms_mutex); 1117 + mutex_unlock(&ap_attr_mutex); 1118 1118 1119 1119 return ret; 1120 1120 } ··· 1303 1303 DECLARE_BITMAP(apm_filtered, AP_DEVICES); 1304 1304 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); 1305 1305 1306 - mutex_lock(&ap_perms_mutex); 1306 + mutex_lock(&ap_attr_mutex); 1307 1307 get_update_locks_for_mdev(matrix_mdev); 1308 1308 1309 1309 ret = kstrtoul(buf, 0, &apqi); ··· 1338 1338 ret = count; 1339 1339 done: 1340 1340 release_update_locks_for_mdev(matrix_mdev); 1341 - mutex_unlock(&ap_perms_mutex); 1341 + mutex_unlock(&ap_attr_mutex); 1342 1342 1343 1343 return ret; 1344 1344 } ··· 1718 1718 return -ENOMEM; 1719 1719 rest = newbuf; 1720 1720 1721 - mutex_lock(&ap_perms_mutex); 1721 + mutex_lock(&ap_attr_mutex); 1722 1722 get_update_locks_for_mdev(matrix_mdev); 1723 1723 1724 1724 /* Save old state */ ··· 1779 1779 } 1780 1780 out: 1781 1781 release_update_locks_for_mdev(matrix_mdev); 1782 - mutex_unlock(&ap_perms_mutex); 1782 + mutex_unlock(&ap_attr_mutex); 1783 1783 kfree(newbuf); 1784 1784 return rc; 1785 1785 }
+33 -224
drivers/s390/crypto/zcrypt_api.c
··· 12 12 * Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com> 13 13 */ 14 14 15 - #define KMSG_COMPONENT "zcrypt" 16 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 15 + #define pr_fmt(fmt) "zcrypt: " fmt 17 16 18 17 #include <linux/export.h> 19 18 #include <linux/module.h> ··· 20 21 #include <linux/interrupt.h> 21 22 #include <linux/miscdevice.h> 22 23 #include <linux/fs.h> 23 - #include <linux/compat.h> 24 24 #include <linux/slab.h> 25 25 #include <linux/atomic.h> 26 26 #include <linux/uaccess.h> ··· 161 163 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 162 164 int i, n; 163 165 164 - if (mutex_lock_interruptible(&ap_perms_mutex)) 166 + if (mutex_lock_interruptible(&ap_attr_mutex)) 165 167 return -ERESTARTSYS; 166 168 167 169 n = sysfs_emit(buf, "0x"); ··· 169 171 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]); 170 172 n += sysfs_emit_at(buf, n, "\n"); 171 173 172 - mutex_unlock(&ap_perms_mutex); 174 + mutex_unlock(&ap_attr_mutex); 173 175 174 176 return n; 175 177 } ··· 182 184 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 183 185 184 186 rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm, 185 - AP_IOCTLS, &ap_perms_mutex); 187 + AP_IOCTLS, &ap_attr_mutex); 186 188 if (rc) 187 189 return rc; 188 190 ··· 198 200 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 199 201 int i, n; 200 202 201 - if (mutex_lock_interruptible(&ap_perms_mutex)) 203 + if (mutex_lock_interruptible(&ap_attr_mutex)) 202 204 return -ERESTARTSYS; 203 205 204 206 n = sysfs_emit(buf, "0x"); ··· 206 208 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]); 207 209 n += sysfs_emit_at(buf, n, "\n"); 208 210 209 - mutex_unlock(&ap_perms_mutex); 211 + mutex_unlock(&ap_attr_mutex); 210 212 211 213 return n; 212 214 } ··· 219 221 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 220 222 221 223 rc = ap_parse_mask_str(buf, zcdndev->perms.apm, 222 - AP_DEVICES, &ap_perms_mutex); 224 + AP_DEVICES, &ap_attr_mutex); 223 225 if (rc) 224 226 return rc; 225 227 ··· 235 
237 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 236 238 int i, n; 237 239 238 - if (mutex_lock_interruptible(&ap_perms_mutex)) 240 + if (mutex_lock_interruptible(&ap_attr_mutex)) 239 241 return -ERESTARTSYS; 240 242 241 243 n = sysfs_emit(buf, "0x"); ··· 243 245 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]); 244 246 n += sysfs_emit_at(buf, n, "\n"); 245 247 246 - mutex_unlock(&ap_perms_mutex); 248 + mutex_unlock(&ap_attr_mutex); 247 249 248 250 return n; 249 251 } ··· 256 258 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 257 259 258 260 rc = ap_parse_mask_str(buf, zcdndev->perms.aqm, 259 - AP_DOMAINS, &ap_perms_mutex); 261 + AP_DOMAINS, &ap_attr_mutex); 260 262 if (rc) 261 263 return rc; 262 264 ··· 272 274 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 273 275 int i, n; 274 276 275 - if (mutex_lock_interruptible(&ap_perms_mutex)) 277 + if (mutex_lock_interruptible(&ap_attr_mutex)) 276 278 return -ERESTARTSYS; 277 279 278 280 n = sysfs_emit(buf, "0x"); ··· 280 282 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]); 281 283 n += sysfs_emit_at(buf, n, "\n"); 282 284 283 - mutex_unlock(&ap_perms_mutex); 285 + mutex_unlock(&ap_attr_mutex); 284 286 285 287 return n; 286 288 } ··· 293 295 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 294 296 295 297 rc = ap_parse_mask_str(buf, zcdndev->perms.adm, 296 - AP_DOMAINS, &ap_perms_mutex); 298 + AP_DOMAINS, &ap_attr_mutex); 297 299 if (rc) 298 300 return rc; 299 301 ··· 369 371 int i, rc = 0; 370 372 struct zcdn_device *zcdndev; 371 373 372 - if (mutex_lock_interruptible(&ap_perms_mutex)) 374 + if (mutex_lock_interruptible(&ap_attr_mutex)) 373 375 return -ERESTARTSYS; 374 376 375 377 /* check if device node with this name already exists */ ··· 424 426 __func__, MAJOR(devt), MINOR(devt)); 425 427 426 428 unlockout: 427 - mutex_unlock(&ap_perms_mutex); 429 + mutex_unlock(&ap_attr_mutex); 428 430 return rc; 429 431 } 430 432 ··· 433 435 int rc = 0; 434 436 struct zcdn_device *zcdndev; 435 437 436 - 
if (mutex_lock_interruptible(&ap_perms_mutex)) 438 + if (mutex_lock_interruptible(&ap_attr_mutex)) 437 439 return -ERESTARTSYS; 438 440 439 441 /* try to find this zcdn device */ ··· 451 453 device_unregister(&zcdndev->device); 452 454 453 455 unlockout: 454 - mutex_unlock(&ap_perms_mutex); 456 + mutex_unlock(&ap_attr_mutex); 455 457 return rc; 456 458 } 457 459 ··· 461 463 dev_t devt; 462 464 struct zcdn_device *zcdndev; 463 465 464 - mutex_lock(&ap_perms_mutex); 466 + mutex_lock(&ap_attr_mutex); 465 467 for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) { 466 468 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i); 467 469 zcdndev = find_zcdndev_by_devt(devt); ··· 470 472 device_unregister(&zcdndev->device); 471 473 } 472 474 } 473 - mutex_unlock(&ap_perms_mutex); 475 + mutex_unlock(&ap_attr_mutex); 474 476 } 475 477 476 478 /* ··· 507 509 if (filp->f_inode->i_cdev == &zcrypt_cdev) { 508 510 struct zcdn_device *zcdndev; 509 511 510 - if (mutex_lock_interruptible(&ap_perms_mutex)) 512 + if (mutex_lock_interruptible(&ap_attr_mutex)) 511 513 return -ERESTARTSYS; 512 514 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev); 513 515 /* find returns a reference, no get_device() needed */ 514 - mutex_unlock(&ap_perms_mutex); 516 + mutex_unlock(&ap_attr_mutex); 515 517 if (zcdndev) 516 518 perms = &zcdndev->perms; 517 519 } ··· 531 533 if (filp->f_inode->i_cdev == &zcrypt_cdev) { 532 534 struct zcdn_device *zcdndev; 533 535 534 - mutex_lock(&ap_perms_mutex); 536 + mutex_lock(&ap_attr_mutex); 535 537 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev); 536 - mutex_unlock(&ap_perms_mutex); 538 + mutex_unlock(&ap_attr_mutex); 537 539 if (zcdndev) { 538 540 /* 2 puts here: one for find, one for open */ 539 541 put_device(&zcdndev->device); ··· 738 740 tr->last_qid = qid; 739 741 } 740 742 trace_s390_zcrypt_rep(mex, func_code, rc, 741 - AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 743 + AP_QID_CARD(qid), AP_QID_QUEUE(qid), 744 + ap_msg.psmid); 742 745 return rc; 743 746 } 744 747 
··· 844 845 tr->last_qid = qid; 845 846 } 846 847 trace_s390_zcrypt_rep(crt, func_code, rc, 847 - AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 848 + AP_QID_CARD(qid), AP_QID_QUEUE(qid), 849 + ap_msg.psmid); 848 850 return rc; 849 851 } 850 852 ··· 980 980 tr->last_qid = qid; 981 981 } 982 982 trace_s390_zcrypt_rep(xcrb, func_code, rc, 983 - AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 983 + AP_QID_CARD(qid), AP_QID_QUEUE(qid), 984 + ap_msg.psmid); 984 985 return rc; 985 986 } 986 987 ··· 1183 1182 tr->last_qid = qid; 1184 1183 } 1185 1184 trace_s390_zcrypt_rep(xcrb, func_code, rc, 1186 - AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 1185 + AP_QID_CARD(qid), AP_QID_QUEUE(qid), 1186 + ap_msg.psmid); 1187 1187 return rc; 1188 1188 } 1189 1189 ··· 1276 1274 out: 1277 1275 ap_release_apmsg(&ap_msg); 1278 1276 trace_s390_zcrypt_rep(buffer, func_code, rc, 1279 - AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 1277 + AP_QID_CARD(qid), AP_QID_QUEUE(qid), 1278 + ap_msg.psmid); 1280 1279 return rc; 1281 1280 } 1282 1281 ··· 1732 1729 } 1733 1730 } 1734 1731 1735 - #ifdef CONFIG_COMPAT 1736 - /* 1737 - * ioctl32 conversion routines 1738 - */ 1739 - struct compat_ica_rsa_modexpo { 1740 - compat_uptr_t inputdata; 1741 - unsigned int inputdatalength; 1742 - compat_uptr_t outputdata; 1743 - unsigned int outputdatalength; 1744 - compat_uptr_t b_key; 1745 - compat_uptr_t n_modulus; 1746 - }; 1747 - 1748 - static long trans_modexpo32(struct ap_perms *perms, struct file *filp, 1749 - unsigned int cmd, unsigned long arg) 1750 - { 1751 - struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg); 1752 - struct compat_ica_rsa_modexpo mex32; 1753 - struct ica_rsa_modexpo mex64; 1754 - struct zcrypt_track tr; 1755 - long rc; 1756 - 1757 - memset(&tr, 0, sizeof(tr)); 1758 - if (copy_from_user(&mex32, umex32, sizeof(mex32))) 1759 - return -EFAULT; 1760 - mex64.inputdata = compat_ptr(mex32.inputdata); 1761 - mex64.inputdatalength = mex32.inputdatalength; 1762 - mex64.outputdata = compat_ptr(mex32.outputdata); 1763 - 
mex64.outputdatalength = mex32.outputdatalength; 1764 - mex64.b_key = compat_ptr(mex32.b_key); 1765 - mex64.n_modulus = compat_ptr(mex32.n_modulus); 1766 - do { 1767 - rc = zcrypt_rsa_modexpo(perms, &tr, &mex64); 1768 - } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 1769 - 1770 - /* on ENODEV failure: retry once again after a requested rescan */ 1771 - if (rc == -ENODEV && zcrypt_process_rescan()) 1772 - do { 1773 - rc = zcrypt_rsa_modexpo(perms, &tr, &mex64); 1774 - } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 1775 - if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1776 - rc = -EIO; 1777 - if (rc) 1778 - return rc; 1779 - return put_user(mex64.outputdatalength, 1780 - &umex32->outputdatalength); 1781 - } 1782 - 1783 - struct compat_ica_rsa_modexpo_crt { 1784 - compat_uptr_t inputdata; 1785 - unsigned int inputdatalength; 1786 - compat_uptr_t outputdata; 1787 - unsigned int outputdatalength; 1788 - compat_uptr_t bp_key; 1789 - compat_uptr_t bq_key; 1790 - compat_uptr_t np_prime; 1791 - compat_uptr_t nq_prime; 1792 - compat_uptr_t u_mult_inv; 1793 - }; 1794 - 1795 - static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp, 1796 - unsigned int cmd, unsigned long arg) 1797 - { 1798 - struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg); 1799 - struct compat_ica_rsa_modexpo_crt crt32; 1800 - struct ica_rsa_modexpo_crt crt64; 1801 - struct zcrypt_track tr; 1802 - long rc; 1803 - 1804 - memset(&tr, 0, sizeof(tr)); 1805 - if (copy_from_user(&crt32, ucrt32, sizeof(crt32))) 1806 - return -EFAULT; 1807 - crt64.inputdata = compat_ptr(crt32.inputdata); 1808 - crt64.inputdatalength = crt32.inputdatalength; 1809 - crt64.outputdata = compat_ptr(crt32.outputdata); 1810 - crt64.outputdatalength = crt32.outputdatalength; 1811 - crt64.bp_key = compat_ptr(crt32.bp_key); 1812 - crt64.bq_key = compat_ptr(crt32.bq_key); 1813 - crt64.np_prime = compat_ptr(crt32.np_prime); 1814 - crt64.nq_prime = 
compat_ptr(crt32.nq_prime); 1815 - crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); 1816 - do { 1817 - rc = zcrypt_rsa_crt(perms, &tr, &crt64); 1818 - } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 1819 - 1820 - /* on ENODEV failure: retry once again after a requested rescan */ 1821 - if (rc == -ENODEV && zcrypt_process_rescan()) 1822 - do { 1823 - rc = zcrypt_rsa_crt(perms, &tr, &crt64); 1824 - } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 1825 - if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1826 - rc = -EIO; 1827 - if (rc) 1828 - return rc; 1829 - return put_user(crt64.outputdatalength, 1830 - &ucrt32->outputdatalength); 1831 - } 1832 - 1833 - struct compat_ica_xcrb { 1834 - unsigned short agent_ID; 1835 - unsigned int user_defined; 1836 - unsigned short request_ID; 1837 - unsigned int request_control_blk_length; 1838 - unsigned char padding1[16 - sizeof(compat_uptr_t)]; 1839 - compat_uptr_t request_control_blk_addr; 1840 - unsigned int request_data_length; 1841 - char padding2[16 - sizeof(compat_uptr_t)]; 1842 - compat_uptr_t request_data_address; 1843 - unsigned int reply_control_blk_length; 1844 - char padding3[16 - sizeof(compat_uptr_t)]; 1845 - compat_uptr_t reply_control_blk_addr; 1846 - unsigned int reply_data_length; 1847 - char padding4[16 - sizeof(compat_uptr_t)]; 1848 - compat_uptr_t reply_data_addr; 1849 - unsigned short priority_window; 1850 - unsigned int status; 1851 - } __packed; 1852 - 1853 - static long trans_xcrb32(struct ap_perms *perms, struct file *filp, 1854 - unsigned int cmd, unsigned long arg) 1855 - { 1856 - struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg); 1857 - u32 xflags = ZCRYPT_XFLAG_USERSPACE; 1858 - struct compat_ica_xcrb xcrb32; 1859 - struct zcrypt_track tr; 1860 - struct ica_xcRB xcrb64; 1861 - long rc; 1862 - 1863 - memset(&tr, 0, sizeof(tr)); 1864 - if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32))) 1865 - return -EFAULT; 1866 - xcrb64.agent_ID = xcrb32.agent_ID; 
1867 - xcrb64.user_defined = xcrb32.user_defined; 1868 - xcrb64.request_ID = xcrb32.request_ID; 1869 - xcrb64.request_control_blk_length = 1870 - xcrb32.request_control_blk_length; 1871 - xcrb64.request_control_blk_addr = 1872 - compat_ptr(xcrb32.request_control_blk_addr); 1873 - xcrb64.request_data_length = 1874 - xcrb32.request_data_length; 1875 - xcrb64.request_data_address = 1876 - compat_ptr(xcrb32.request_data_address); 1877 - xcrb64.reply_control_blk_length = 1878 - xcrb32.reply_control_blk_length; 1879 - xcrb64.reply_control_blk_addr = 1880 - compat_ptr(xcrb32.reply_control_blk_addr); 1881 - xcrb64.reply_data_length = xcrb32.reply_data_length; 1882 - xcrb64.reply_data_addr = 1883 - compat_ptr(xcrb32.reply_data_addr); 1884 - xcrb64.priority_window = xcrb32.priority_window; 1885 - xcrb64.status = xcrb32.status; 1886 - do { 1887 - rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64); 1888 - } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 1889 - 1890 - /* on ENODEV failure: retry once again after a requested rescan */ 1891 - if (rc == -ENODEV && zcrypt_process_rescan()) 1892 - do { 1893 - rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64); 1894 - } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); 1895 - if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1896 - rc = -EIO; 1897 - xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length; 1898 - xcrb32.reply_data_length = xcrb64.reply_data_length; 1899 - xcrb32.status = xcrb64.status; 1900 - if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32))) 1901 - return -EFAULT; 1902 - return rc; 1903 - } 1904 - 1905 - static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, 1906 - unsigned long arg) 1907 - { 1908 - int rc; 1909 - struct ap_perms *perms = 1910 - (struct ap_perms *)filp->private_data; 1911 - 1912 - rc = zcrypt_check_ioctl(perms, cmd); 1913 - if (rc) 1914 - return rc; 1915 - 1916 - if (cmd == ICARSAMODEXPO) 1917 - return trans_modexpo32(perms, filp, cmd, 
arg); 1918 - if (cmd == ICARSACRT) 1919 - return trans_modexpo_crt32(perms, filp, cmd, arg); 1920 - if (cmd == ZSECSENDCPRB) 1921 - return trans_xcrb32(perms, filp, cmd, arg); 1922 - return zcrypt_unlocked_ioctl(filp, cmd, arg); 1923 - } 1924 - #endif 1925 - 1926 1732 /* 1927 1733 * Misc device file operations. 1928 1734 */ ··· 1740 1928 .read = zcrypt_read, 1741 1929 .write = zcrypt_write, 1742 1930 .unlocked_ioctl = zcrypt_unlocked_ioctl, 1743 - #ifdef CONFIG_COMPAT 1744 - .compat_ioctl = zcrypt_compat_ioctl, 1745 - #endif 1746 1931 .open = zcrypt_open, 1747 1932 .release = zcrypt_release, 1748 1933 };
-1
drivers/s390/crypto/zcrypt_card.c
··· 19 19 #include <linux/fs.h> 20 20 #include <linux/proc_fs.h> 21 21 #include <linux/seq_file.h> 22 - #include <linux/compat.h> 23 22 #include <linux/slab.h> 24 23 #include <linux/atomic.h> 25 24 #include <linux/uaccess.h>
+1 -2
drivers/s390/crypto/zcrypt_ccamisc.c
··· 7 7 * Collection of CCA misc functions used by zcrypt and pkey 8 8 */ 9 9 10 - #define KMSG_COMPONENT "zcrypt" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "zcrypt: " fmt 12 11 13 12 #include <linux/export.h> 14 13 #include <linux/init.h>
+1 -2
drivers/s390/crypto/zcrypt_ep11misc.c
··· 6 6 * Collection of EP11 misc functions used by zcrypt and pkey 7 7 */ 8 8 9 - #define KMSG_COMPONENT "zcrypt" 10 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9 + #define pr_fmt(fmt) "zcrypt: " fmt 11 10 12 11 #include <linux/export.h> 13 12 #include <linux/init.h>
+1 -2
drivers/s390/crypto/zcrypt_msgtype50.c
··· 10 10 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 11 11 */ 12 12 13 - #define KMSG_COMPONENT "zcrypt" 14 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 + #define pr_fmt(fmt) "zcrypt: " fmt 15 14 16 15 #include <linux/module.h> 17 16 #include <linux/slab.h>
+1 -2
drivers/s390/crypto/zcrypt_msgtype6.c
··· 10 10 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 11 11 */ 12 12 13 - #define KMSG_COMPONENT "zcrypt" 14 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 + #define pr_fmt(fmt) "zcrypt: " fmt 15 14 16 15 #include <linux/module.h> 17 16 #include <linux/init.h>
-1
drivers/s390/crypto/zcrypt_queue.c
··· 19 19 #include <linux/fs.h> 20 20 #include <linux/proc_fs.h> 21 21 #include <linux/seq_file.h> 22 - #include <linux/compat.h> 23 22 #include <linux/slab.h> 24 23 #include <linux/atomic.h> 25 24 #include <linux/uaccess.h>
+1 -2
drivers/s390/net/ctcm_fsms.c
··· 12 12 #undef DEBUGDATA 13 13 #undef DEBUGCCW 14 14 15 - #define KMSG_COMPONENT "ctcm" 16 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 15 + #define pr_fmt(fmt) "ctcm: " fmt 17 16 18 17 #include <linux/module.h> 19 18 #include <linux/init.h>
+1 -2
drivers/s390/net/ctcm_main.c
··· 20 20 #undef DEBUGDATA 21 21 #undef DEBUGCCW 22 22 23 - #define KMSG_COMPONENT "ctcm" 24 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 23 + #define pr_fmt(fmt) "ctcm: " fmt 25 24 26 25 #include <linux/module.h> 27 26 #include <linux/init.h>
+1 -2
drivers/s390/net/ctcm_mpc.c
··· 18 18 #undef DEBUGDATA 19 19 #undef DEBUGCCW 20 20 21 - #define KMSG_COMPONENT "ctcm" 22 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 21 + #define pr_fmt(fmt) "ctcm: " fmt 23 22 24 23 #include <linux/export.h> 25 24 #include <linux/module.h>
+1 -2
drivers/s390/net/ctcm_sysfs.c
··· 9 9 #undef DEBUGDATA 10 10 #undef DEBUGCCW 11 11 12 - #define KMSG_COMPONENT "ctcm" 13 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 + #define pr_fmt(fmt) "ctcm: " fmt 14 13 15 14 #include <linux/device.h> 16 15 #include <linux/sysfs.h>
+1 -2
drivers/s390/net/ism_drv.c
··· 4 4 * 5 5 * Copyright IBM Corp. 2018 6 6 */ 7 - #define KMSG_COMPONENT "ism" 8 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 7 + #define pr_fmt(fmt) "ism: " fmt 9 8 10 9 #include <linux/export.h> 11 10 #include <linux/module.h>
+2 -5
drivers/s390/net/qeth_core_main.c
··· 7 7 * Frank Blaschka <frank.blaschka@de.ibm.com> 8 8 */ 9 9 10 - #define KMSG_COMPONENT "qeth" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "qeth: " fmt 12 11 13 - #include <linux/compat.h> 14 12 #include <linux/export.h> 15 13 #include <linux/module.h> 16 14 #include <linux/moduleparam.h> ··· 4803 4805 4804 4806 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv); 4805 4807 if (!rc) { 4806 - tmp = is_compat_task() ? compat_ptr(oat_data.ptr) : 4807 - u64_to_user_ptr(oat_data.ptr); 4808 + tmp = u64_to_user_ptr(oat_data.ptr); 4808 4809 oat_data.response_len = priv.response_len; 4809 4810 4810 4811 if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
+1 -2
drivers/s390/net/qeth_core_sys.c
··· 7 7 * Frank Blaschka <frank.blaschka@de.ibm.com> 8 8 */ 9 9 10 - #define KMSG_COMPONENT "qeth" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "qeth: " fmt 12 11 13 12 #include <linux/list.h> 14 13 #include <linux/rwsem.h>
+1 -2
drivers/s390/net/qeth_ethtool.c
··· 3 3 * Copyright IBM Corp. 2018 4 4 */ 5 5 6 - #define KMSG_COMPONENT "qeth" 7 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 6 + #define pr_fmt(fmt) "qeth: " fmt 8 7 9 8 #include <linux/ethtool.h> 10 9 #include "qeth_core.h"
+1 -2
drivers/s390/net/qeth_l2_main.c
··· 7 7 * Frank Blaschka <frank.blaschka@de.ibm.com> 8 8 */ 9 9 10 - #define KMSG_COMPONENT "qeth" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "qeth: " fmt 12 11 13 12 #include <linux/export.h> 14 13 #include <linux/module.h>
+1 -2
drivers/s390/net/qeth_l3_main.c
··· 7 7 * Frank Blaschka <frank.blaschka@de.ibm.com> 8 8 */ 9 9 10 - #define KMSG_COMPONENT "qeth" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "qeth: " fmt 12 11 13 12 #include <linux/export.h> 14 13 #include <linux/module.h>
+2 -3
drivers/s390/net/smsgiucv_app.c
··· 10 10 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 11 11 * 12 12 */ 13 - #define KMSG_COMPONENT "smsgiucv_app" 14 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 + #define pr_fmt(fmt) "smsgiucv_app: " fmt 15 14 16 15 #include <linux/ctype.h> 17 16 #include <linux/err.h> ··· 160 161 if (!smsgiucv_drv) 161 162 return -ENODEV; 162 163 163 - smsg_app_dev = iucv_alloc_device(NULL, smsgiucv_drv, NULL, KMSG_COMPONENT); 164 + smsg_app_dev = iucv_alloc_device(NULL, smsgiucv_drv, NULL, "smsgiucv_app"); 164 165 if (!smsg_app_dev) 165 166 return -ENOMEM; 166 167
+1 -2
drivers/s390/scsi/zfcp_aux.c
··· 28 28 * Benjamin Block 29 29 */ 30 30 31 - #define KMSG_COMPONENT "zfcp" 32 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 31 + #define pr_fmt(fmt) "zfcp: " fmt 33 32 34 33 #include <linux/seq_file.h> 35 34 #include <linux/slab.h>
+1 -2
drivers/s390/scsi/zfcp_ccw.c
··· 7 7 * Copyright IBM Corp. 2002, 2010 8 8 */ 9 9 10 - #define KMSG_COMPONENT "zfcp" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "zfcp: " fmt 12 11 13 12 #include <linux/module.h> 14 13 #include "zfcp_ext.h"
+1 -2
drivers/s390/scsi/zfcp_dbf.c
··· 7 7 * Copyright IBM Corp. 2002, 2023 8 8 */ 9 9 10 - #define KMSG_COMPONENT "zfcp" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "zfcp: " fmt 12 11 13 12 #include <linux/module.h> 14 13 #include <linux/ctype.h>
+1 -2
drivers/s390/scsi/zfcp_erp.c
··· 7 7 * Copyright IBM Corp. 2002, 2020 8 8 */ 9 9 10 - #define KMSG_COMPONENT "zfcp" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "zfcp: " fmt 12 11 13 12 #include <linux/kthread.h> 14 13 #include <linux/bug.h>
+1 -2
drivers/s390/scsi/zfcp_fc.c
··· 7 7 * Copyright IBM Corp. 2008, 2017 8 8 */ 9 9 10 - #define KMSG_COMPONENT "zfcp" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "zfcp: " fmt 12 11 13 12 #include <linux/types.h> 14 13 #include <linux/slab.h>
+1 -2
drivers/s390/scsi/zfcp_fsf.c
··· 7 7 * Copyright IBM Corp. 2002, 2023 8 8 */ 9 9 10 - #define KMSG_COMPONENT "zfcp" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "zfcp: " fmt 12 11 13 12 #include <linux/blktrace_api.h> 14 13 #include <linux/jiffies.h>
+1 -2
drivers/s390/scsi/zfcp_qdio.c
··· 7 7 * Copyright IBM Corp. 2002, 2020 8 8 */ 9 9 10 - #define KMSG_COMPONENT "zfcp" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "zfcp: " fmt 12 11 13 12 #include <linux/lockdep.h> 14 13 #include <linux/slab.h>
+1 -2
drivers/s390/scsi/zfcp_scsi.c
··· 7 7 * Copyright IBM Corp. 2002, 2020 8 8 */ 9 9 10 - #define KMSG_COMPONENT "zfcp" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "zfcp: " fmt 12 11 13 12 #include <linux/module.h> 14 13 #include <linux/types.h>
+1 -2
drivers/s390/scsi/zfcp_sysfs.c
··· 7 7 * Copyright IBM Corp. 2008, 2020 8 8 */ 9 9 10 - #define KMSG_COMPONENT "zfcp" 11 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + #define pr_fmt(fmt) "zfcp: " fmt 12 11 13 12 #include <linux/slab.h> 14 13 #include "zfcp_diag.h"
+1 -2
drivers/watchdog/diag288_wdt.c
··· 18 18 * 19 19 */ 20 20 21 - #define KMSG_COMPONENT "diag288_wdt" 22 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 21 + #define pr_fmt(fmt) "diag288_wdt: " fmt 23 22 24 23 #include <linux/init.h> 25 24 #include <linux/kernel.h>
-9
include/linux/memory.h
··· 96 96 #define MEM_GOING_ONLINE (1<<3) 97 97 #define MEM_CANCEL_ONLINE (1<<4) 98 98 #define MEM_CANCEL_OFFLINE (1<<5) 99 - #define MEM_PREPARE_ONLINE (1<<6) 100 - #define MEM_FINISH_OFFLINE (1<<7) 101 99 102 100 struct memory_notify { 103 - /* 104 - * The altmap_start_pfn and altmap_nr_pages fields are designated for 105 - * specifying the altmap range and are exclusively intended for use in 106 - * MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers. 107 - */ 108 - unsigned long altmap_start_pfn; 109 - unsigned long altmap_nr_pages; 110 101 unsigned long start_pfn; 111 102 unsigned long nr_pages; 112 103 };
+1 -17
include/linux/memory_hotplug.h
··· 58 58 * implies the node id (nid). 59 59 */ 60 60 #define MHP_NID_IS_MGID ((__force mhp_t)BIT(2)) 61 - /* 62 - * The hotplugged memory is completely inaccessible while the memory is 63 - * offline. The memory provider will handle MEM_PREPARE_ONLINE / 64 - * MEM_FINISH_OFFLINE notifications and make the memory accessible. 65 - * 66 - * This flag is only relevant when used along with MHP_MEMMAP_ON_MEMORY, 67 - * because the altmap cannot be written (e.g., poisoned) when adding 68 - * memory -- before it is set online. 69 - * 70 - * This allows for adding memory with an altmap that is not currently 71 - * made available by a hypervisor. When onlining that memory, the 72 - * hypervisor can be instructed to make that memory available, and 73 - * the onlining phase will not require any memory allocations, which is 74 - * helpful in low-memory situations. 75 - */ 76 - #define MHP_OFFLINE_INACCESSIBLE ((__force mhp_t)BIT(3)) 77 61 78 62 /* 79 63 * Extended parameters for memory hotplug: ··· 107 123 long nr_pages); 108 124 /* VM interface that may be used by firmware interface */ 109 125 extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, 110 - struct zone *zone, bool mhp_off_inaccessible); 126 + struct zone *zone); 111 127 extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages); 112 128 extern int online_pages(unsigned long pfn, unsigned long nr_pages, 113 129 struct zone *zone, struct memory_group *group);
-1
include/linux/memremap.h
··· 25 25 unsigned long free; 26 26 unsigned long align; 27 27 unsigned long alloc; 28 - bool inaccessible; 29 28 }; 30 29 31 30 /*
+1 -1
include/linux/percpu-defs.h
··· 52 52 __section(".discard") __attribute__((unused)) 53 53 54 54 /* 55 - * s390 and alpha modules require percpu variables to be defined as 55 + * alpha modules require percpu variables to be defined as 56 56 * weak to force the compiler to generate GOT based external 57 57 * references for them. This is necessary because percpu sections 58 58 * will be located outside of the usually addressable area.
+3 -14
mm/memory_hotplug.c
··· 1088 1088 } 1089 1089 1090 1090 int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, 1091 - struct zone *zone, bool mhp_off_inaccessible) 1091 + struct zone *zone) 1092 1092 { 1093 1093 unsigned long end_pfn = pfn + nr_pages; 1094 1094 int ret, i; ··· 1096 1096 ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages)); 1097 1097 if (ret) 1098 1098 return ret; 1099 - 1100 - /* 1101 - * Memory block is accessible at this stage and hence poison the struct 1102 - * pages now. If the memory block is accessible during memory hotplug 1103 - * addition phase, then page poisining is already performed in 1104 - * sparse_add_section(). 1105 - */ 1106 - if (mhp_off_inaccessible) 1107 - page_init_poison(pfn_to_page(pfn), sizeof(struct page) * nr_pages); 1108 1099 1109 1100 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE, 1110 1101 false); ··· 1435 1444 } 1436 1445 1437 1446 static int create_altmaps_and_memory_blocks(int nid, struct memory_group *group, 1438 - u64 start, u64 size, mhp_t mhp_flags) 1447 + u64 start, u64 size) 1439 1448 { 1440 1449 unsigned long memblock_size = memory_block_size_bytes(); 1441 1450 u64 cur_start; ··· 1451 1460 }; 1452 1461 1453 1462 mhp_altmap.free = memory_block_memmap_on_memory_pages(); 1454 - if (mhp_flags & MHP_OFFLINE_INACCESSIBLE) 1455 - mhp_altmap.inaccessible = true; 1456 1463 params.altmap = kmemdup(&mhp_altmap, sizeof(struct vmem_altmap), 1457 1464 GFP_KERNEL); 1458 1465 if (!params.altmap) { ··· 1544 1555 */ 1545 1556 if ((mhp_flags & MHP_MEMMAP_ON_MEMORY) && 1546 1557 mhp_supports_memmap_on_memory()) { 1547 - ret = create_altmaps_and_memory_blocks(nid, group, start, size, mhp_flags); 1558 + ret = create_altmaps_and_memory_blocks(nid, group, start, size); 1548 1559 if (ret) 1549 1560 goto error; 1550 1561 } else {
+1 -2
mm/sparse.c
··· 951 951 * Poison uninitialized struct pages in order to catch invalid flags 952 952 * combinations. 953 953 */ 954 - if (!altmap || !altmap->inaccessible) 955 - page_init_poison(memmap, sizeof(struct page) * nr_pages); 954 + page_init_poison(memmap, sizeof(struct page) * nr_pages); 956 955 957 956 ms = __nr_to_section(section_nr); 958 957 set_section_nid(section_nr, nid);
-4
tools/arch/s390/include/uapi/asm/bitsperlong.h
··· 2 2 #ifndef __ASM_S390_BITSPERLONG_H 3 3 #define __ASM_S390_BITSPERLONG_H 4 4 5 - #ifndef __s390x__ 6 - #define __BITS_PER_LONG 32 7 - #else 8 5 #define __BITS_PER_LONG 64 9 - #endif 10 6 11 7 #include <asm-generic/bitsperlong.h> 12 8
-5
tools/include/nolibc/arch-s390.h
··· 143 143 void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void) 144 144 { 145 145 __asm__ volatile ( 146 - #ifdef __s390x__ 147 146 "lgr %r2, %r15\n" /* save stack pointer to %r2, as arg1 of _start_c */ 148 147 "aghi %r15, -160\n" /* allocate new stackframe */ 149 - #else 150 - "lr %r2, %r15\n" 151 - "ahi %r15, -96\n" 152 - #endif 153 148 "xc 0(8,%r15), 0(%r15)\n" /* clear backchain */ 154 149 "brasl %r14, _start_c\n" /* transfer to c runtime */ 155 150 );
+1 -1
tools/include/nolibc/arch.h
··· 27 27 #include "arch-powerpc.h" 28 28 #elif defined(__riscv) 29 29 #include "arch-riscv.h" 30 - #elif defined(__s390x__) || defined(__s390__) 30 + #elif defined(__s390x__) 31 31 #include "arch-s390.h" 32 32 #elif defined(__loongarch__) 33 33 #include "arch-loongarch.h"
-4
tools/lib/bpf/libbpf.c
··· 11325 11325 return "ia32"; 11326 11326 #elif defined(__s390x__) 11327 11327 return "s390x"; 11328 - #elif defined(__s390__) 11329 - return "s390"; 11330 11328 #elif defined(__arm__) 11331 11329 return "arm"; 11332 11330 #elif defined(__aarch64__) ··· 12111 12113 return "/lib/i386-linux-gnu"; 12112 12114 #elif defined(__s390x__) 12113 12115 return "/lib/s390x-linux-gnu"; 12114 - #elif defined(__s390__) 12115 - return "/lib/s390-linux-gnu"; 12116 12116 #elif defined(__arm__) && defined(__SOFTFP__) 12117 12117 return "/lib/arm-linux-gnueabi"; 12118 12118 #elif defined(__arm__) && !defined(__SOFTFP__)
-2
tools/lib/bpf/usdt.c
··· 1376 1376 1377 1377 #elif defined(__s390x__) 1378 1378 1379 - /* Do not support __s390__ for now, since user_pt_regs is broken with -m31. */ 1380 - 1381 1379 static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz) 1382 1380 { 1383 1381 unsigned int reg;
-5
tools/testing/selftests/nolibc/Makefile.nolibc
··· 87 87 IMAGE_riscv32 = arch/riscv/boot/Image 88 88 IMAGE_riscv64 = arch/riscv/boot/Image 89 89 IMAGE_s390x = arch/s390/boot/bzImage 90 - IMAGE_s390 = arch/s390/boot/bzImage 91 90 IMAGE_loongarch = arch/loongarch/boot/vmlinuz.efi 92 91 IMAGE_sparc32 = arch/sparc/boot/image 93 92 IMAGE_sparc64 = arch/sparc/boot/image ··· 116 117 DEFCONFIG_riscv32 = rv32_defconfig 117 118 DEFCONFIG_riscv64 = defconfig 118 119 DEFCONFIG_s390x = defconfig 119 - DEFCONFIG_s390 = defconfig compat.config 120 120 DEFCONFIG_loongarch = defconfig 121 121 DEFCONFIG_sparc32 = sparc32_defconfig 122 122 DEFCONFIG_sparc64 = sparc64_defconfig ··· 154 156 QEMU_ARCH_riscv32 = riscv32 155 157 QEMU_ARCH_riscv64 = riscv64 156 158 QEMU_ARCH_s390x = s390x 157 - QEMU_ARCH_s390 = s390x 158 159 QEMU_ARCH_loongarch = loongarch64 159 160 QEMU_ARCH_sparc32 = sparc 160 161 QEMU_ARCH_sparc64 = sparc64 ··· 194 197 QEMU_ARGS_riscv32 = -M virt -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)" 195 198 QEMU_ARGS_riscv64 = -M virt -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)" 196 199 QEMU_ARGS_s390x = -M s390-ccw-virtio -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)" 197 - QEMU_ARGS_s390 = -M s390-ccw-virtio -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)" 198 200 QEMU_ARGS_loongarch = -M virt -append "console=ttyS0,115200 panic=-1 $(TEST:%=NOLIBC_TEST=%)" 199 201 QEMU_ARGS_sparc32 = -M SS-5 -m 256M -append "console=ttyS0,115200 panic=-1 $(TEST:%=NOLIBC_TEST=%)" 200 202 QEMU_ARGS_sparc64 = -M sun4u -append "console=ttyS0,115200 panic=-1 $(TEST:%=NOLIBC_TEST=%)" ··· 219 223 CFLAGS_ppc64 = -m64 -mbig-endian -mno-vsx $(call cc-option,-mmultiple) 220 224 CFLAGS_ppc64le = -m64 -mlittle-endian -mno-vsx $(call cc-option,-mabi=elfv2) 221 225 CFLAGS_s390x = -m64 222 - CFLAGS_s390 = -m31 223 226 CFLAGS_mips32le = -EL -mabi=32 -fPIC 224 227 CFLAGS_mips32be = -EB -mabi=32 225 228 CFLAGS_mipsn32le = -EL -mabi=n32 -fPIC -march=mips64r2
+1 -5
tools/testing/selftests/nolibc/run-tests.sh
··· 23 23 mips32le mips32be mipsn32le mipsn32be mips64le mips64be 24 24 ppc ppc64 ppc64le 25 25 riscv32 riscv64 26 - s390x s390 26 + s390x 27 27 loongarch 28 28 sparc32 sparc64 29 29 m68k ··· 185 185 exit 1 186 186 esac 187 187 printf '%-15s' "$arch:" 188 - if [ "$arch" = "s390" ] && ([ "$llvm" = "1" ] || [ "$test_mode" = "user" ]); then 189 - echo "Unsupported configuration" 190 - return 191 - fi 192 188 if [ "$arch" = "m68k" -o "$arch" = "sh4" ] && [ "$llvm" = "1" ]; then 193 189 echo "Unsupported configuration" 194 190 return
-39
tools/testing/selftests/rseq/rseq-s390.h
··· 28 28 RSEQ_WRITE_ONCE(*(p), v); \ 29 29 } while (0) 30 30 31 - #ifdef __s390x__ 32 - 33 31 #define LONG_L "lg" 34 32 #define LONG_S "stg" 35 33 #define LONG_LT_R "ltgr" ··· 60 62 ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \ 61 63 ".quad " __rseq_str(start_ip) ", " __rseq_str(exit_ip) "\n\t" \ 62 64 ".popsection\n\t" 63 - 64 - #elif __s390__ 65 - 66 - #define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \ 67 - start_ip, post_commit_offset, abort_ip) \ 68 - ".pushsection __rseq_cs, \"aw\"\n\t" \ 69 - ".balign 32\n\t" \ 70 - __rseq_str(label) ":\n\t" \ 71 - ".long " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \ 72 - ".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) "\n\t" \ 73 - ".popsection\n\t" \ 74 - ".pushsection __rseq_cs_ptr_array, \"aw\"\n\t" \ 75 - ".long 0x0, " __rseq_str(label) "b\n\t" \ 76 - ".popsection\n\t" 77 - 78 - /* 79 - * Exit points of a rseq critical section consist of all instructions outside 80 - * of the critical section where a critical section can either branch to or 81 - * reach through the normal course of its execution. The abort IP and the 82 - * post-commit IP are already part of the __rseq_cs section and should not be 83 - * explicitly defined as additional exit points. Knowing all exit points is 84 - * useful to assist debuggers stepping over the critical section. 85 - */ 86 - #define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip) \ 87 - ".pushsection __rseq_exit_point_array, \"aw\"\n\t" \ 88 - ".long 0x0, " __rseq_str(start_ip) ", 0x0, " __rseq_str(exit_ip) "\n\t" \ 89 - ".popsection\n\t" 90 - 91 - #define LONG_L "l" 92 - #define LONG_S "st" 93 - #define LONG_LT_R "ltr" 94 - #define LONG_CMP "c" 95 - #define LONG_CMP_R "cr" 96 - #define LONG_ADDI "ahi" 97 - #define LONG_ADD_R "ar" 98 - 99 - #endif 100 65 101 66 #define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \ 102 67 __RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip, \
-4
tools/testing/selftests/vDSO/vdso_config.h
··· 25 25 #define VDSO_VERSION 1 26 26 #define VDSO_NAMES 0 27 27 #define VDSO_32BIT 1 28 - #elif defined (__s390__) && !defined(__s390x__) 29 - #define VDSO_VERSION 2 30 - #define VDSO_NAMES 0 31 - #define VDSO_32BIT 1 32 28 #elif defined (__s390x__) 33 29 #define VDSO_VERSION 2 34 30 #define VDSO_NAMES 0