Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:

- Improved access control for the zcrypt driver, multiple device nodes
can now be created with different access control lists

- Extend the pkey API to provide random protected keys; this is useful
for encrypted swap devices with ephemeral protected keys

- Add support for virtually mapped kernel stacks

- Rework the early boot code; this moves the memory detection into the
boot code that runs prior to decompression.

- Add KASAN support

- Bug fixes and cleanups

* tag 's390-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (83 commits)
s390/pkey: move pckmo subfunction available checks away from module init
s390/kasan: support preemptible kernel build
s390/pkey: Load pkey kernel module automatically
s390/perf: Return error when debug_register fails
s390/sthyi: Fix machine name validity indication
s390/zcrypt: fix broken zcrypt_send_cprb in-kernel api function
s390/vmalloc: fix VMALLOC_START calculation
s390/mem_detect: add missing include
s390/dumpstack: print psw mask and address again
s390/crypto: Enhance paes cipher to accept variable length key material
s390/pkey: Introduce new API for transforming key blobs
s390/pkey: Introduce new API for random protected key verification
s390/pkey: Add sysfs attributes to emit secure key blobs
s390/pkey: Add sysfs attributes to emit protected key blobs
s390/pkey: Define protected key blob format
s390/pkey: Introduce new API for random protected key generation
s390/zcrypt: add ap_adapter_mask sysfs attribute
s390/zcrypt: provide apfs failure code on type 86 error reply
s390/zcrypt: zcrypt device driver cleanup
s390/kasan: add support for mem= kernel parameter
...

+3712 -1183
+9
arch/s390/Kconfig
··· 56 56 config ARCH_SUPPORTS_UPROBES 57 57 def_bool y 58 58 59 + config KASAN_SHADOW_OFFSET 60 + hex 61 + depends on KASAN 62 + default 0x18000000000000 if KASAN_S390_4_LEVEL_PAGING 63 + default 0x30000000000 64 + 59 65 config S390 60 66 def_bool y 61 67 select ARCH_BINFMT_ELF_STATE ··· 126 120 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 127 121 select HAVE_ARCH_AUDITSYSCALL 128 122 select HAVE_ARCH_JUMP_LABEL 123 + select HAVE_ARCH_KASAN 129 124 select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES 130 125 select HAVE_ARCH_SECCOMP_FILTER 131 126 select HAVE_ARCH_SOFT_DIRTY 132 127 select HAVE_ARCH_TRACEHOOK 133 128 select HAVE_ARCH_TRANSPARENT_HUGEPAGE 129 + select HAVE_ARCH_VMAP_STACK 134 130 select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES 135 131 select HAVE_CMPXCHG_DOUBLE 136 132 select HAVE_CMPXCHG_LOCAL ··· 657 649 658 650 config CHECK_STACK 659 651 def_bool y 652 + depends on !VMAP_STACK 660 653 prompt "Detect kernel stack overflow" 661 654 help 662 655 This option enables the compiler option -mstack-guard and
+1 -1
arch/s390/Makefile
··· 27 27 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g) 28 28 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,)) 29 29 UTS_MACHINE := s390x 30 - STACK_SIZE := 16384 30 + STACK_SIZE := $(if $(CONFIG_KASAN),32768,16384) 31 31 CHECKFLAGS += -D__s390__ -D__s390x__ 32 32 33 33 export LD_BFD
+23 -10
arch/s390/appldata/appldata_base.c
··· 137 137 mutex_unlock(&appldata_ops_mutex); 138 138 } 139 139 140 + static struct appldata_product_id appldata_id = { 141 + .prod_nr = {0xD3, 0xC9, 0xD5, 0xE4, 142 + 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */ 143 + .prod_fn = 0xD5D3, /* "NL" */ 144 + .version_nr = 0xF2F6, /* "26" */ 145 + .release_nr = 0xF0F1, /* "01" */ 146 + }; 147 + 140 148 /* 141 149 * appldata_diag() 142 150 * ··· 153 145 int appldata_diag(char record_nr, u16 function, unsigned long buffer, 154 146 u16 length, char *mod_lvl) 155 147 { 156 - struct appldata_product_id id = { 157 - .prod_nr = {0xD3, 0xC9, 0xD5, 0xE4, 158 - 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */ 159 - .prod_fn = 0xD5D3, /* "NL" */ 160 - .version_nr = 0xF2F6, /* "26" */ 161 - .release_nr = 0xF0F1, /* "01" */ 162 - }; 148 + struct appldata_parameter_list *parm_list; 149 + struct appldata_product_id *id; 150 + int rc; 163 151 164 - id.record_nr = record_nr; 165 - id.mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1]; 166 - return appldata_asm(&id, function, (void *) buffer, length); 152 + parm_list = kmalloc(sizeof(*parm_list), GFP_KERNEL); 153 + id = kmemdup(&appldata_id, sizeof(appldata_id), GFP_KERNEL); 154 + rc = -ENOMEM; 155 + if (parm_list && id) { 156 + id->record_nr = record_nr; 157 + id->mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1]; 158 + rc = appldata_asm(parm_list, id, function, 159 + (void *) buffer, length); 160 + } 161 + kfree(id); 162 + kfree(parm_list); 163 + return rc; 167 164 } 168 165 /************************ timer, work, DIAG <END> ****************************/ 169 166
+1
arch/s390/boot/.gitignore
··· 1 1 image 2 2 bzImage 3 + section_cmp.*
+21 -3
arch/s390/boot/Makefile
··· 6 6 KCOV_INSTRUMENT := n 7 7 GCOV_PROFILE := n 8 8 UBSAN_SANITIZE := n 9 + KASAN_SANITIZE := n 9 10 10 11 KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR) 11 12 KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR) ··· 28 27 29 28 CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char 30 29 31 - obj-y := head.o als.o ebcdic.o sclp_early_core.o mem.o 32 - targets := bzImage startup.a $(obj-y) 30 + obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o string.o ebcdic.o 31 + obj-y += sclp_early_core.o mem.o ipl_vmparm.o cmdline.o ctype.o 32 + targets := bzImage startup.a section_cmp.boot.data $(obj-y) 33 33 subdir- := compressed 34 34 35 35 OBJECTS := $(addprefix $(obj)/,$(obj-y)) 36 36 37 - $(obj)/bzImage: $(obj)/compressed/vmlinux FORCE 37 + quiet_cmd_section_cmp = SECTCMP $* 38 + define cmd_section_cmp 39 + s1=`$(OBJDUMP) -t -j "$*" "$<" | sort | \ 40 + sed -n "/0000000000000000/! s/.*\s$*\s\+//p" | sha256sum`; \ 41 + s2=`$(OBJDUMP) -t -j "$*" "$(word 2,$^)" | sort | \ 42 + sed -n "/0000000000000000/! s/.*\s$*\s\+//p" | sha256sum`; \ 43 + if [ "$$s1" != "$$s2" ]; then \ 44 + echo "error: section $* differs between $< and $(word 2,$^)" >&2; \ 45 + exit 1; \ 46 + fi; \ 47 + touch $@ 48 + endef 49 + 50 + $(obj)/bzImage: $(obj)/compressed/vmlinux $(obj)/section_cmp.boot.data FORCE 38 51 $(call if_changed,objcopy) 52 + 53 + $(obj)/section_cmp%: vmlinux $(obj)/compressed/vmlinux FORCE 54 + $(call if_changed,section_cmp) 39 55 40 56 $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE 41 57 $(Q)$(MAKE) $(build)=$(obj)/compressed $@
+11
arch/s390/boot/boot.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef BOOT_BOOT_H 3 + #define BOOT_BOOT_H 4 + 5 + void startup_kernel(void); 6 + void detect_memory(void); 7 + void store_ipl_parmblock(void); 8 + void setup_boot_command_line(void); 9 + void setup_memory_end(void); 10 + 11 + #endif /* BOOT_BOOT_H */
+2
arch/s390/boot/cmdline.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include "../../../lib/cmdline.c"
+16 -21
arch/s390/boot/compressed/Makefile
··· 8 8 KCOV_INSTRUMENT := n 9 9 GCOV_PROFILE := n 10 10 UBSAN_SANITIZE := n 11 + KASAN_SANITIZE := n 11 12 12 - obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,head.o misc.o) piggy.o 13 + obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) piggy.o info.o 13 14 targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 14 15 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 15 - targets += vmlinux.scr.lds $(obj-y) $(if $(CONFIG_KERNEL_UNCOMPRESSED),,sizes.h) 16 + targets += info.bin $(obj-y) 16 17 17 18 KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR) 18 19 KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR) 20 + OBJCOPYFLAGS := 19 21 20 22 OBJECTS := $(addprefix $(obj)/,$(obj-y)) 21 23 ··· 25 23 $(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) 26 24 $(call if_changed,ld) 27 25 28 - # extract required uncompressed vmlinux symbols and adjust them to reflect offsets inside vmlinux.bin 29 - sed-sizes := -e 's/^\([0-9a-fA-F]*\) . 
\(__bss_start\|_end\)$$/\#define SZ\2 (0x\1 - 0x100000)/p' 26 + OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info 27 + $(obj)/info.bin: vmlinux FORCE 28 + $(call if_changed,objcopy) 30 29 31 - quiet_cmd_sizes = GEN $@ 32 - cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@ 30 + OBJCOPYFLAGS_info.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.info 31 + $(obj)/info.o: $(obj)/info.bin FORCE 32 + $(call if_changed,objcopy) 33 33 34 - $(obj)/sizes.h: vmlinux 35 - $(call if_changed,sizes) 36 - 37 - AFLAGS_head.o += -I$(objtree)/$(obj) 38 - $(obj)/head.o: $(obj)/sizes.h 39 - 40 - CFLAGS_misc.o += -I$(objtree)/$(obj) 41 - $(obj)/misc.o: $(obj)/sizes.h 42 - 43 - OBJCOPYFLAGS_vmlinux.bin := -R .comment -S 44 - $(obj)/vmlinux.bin: vmlinux 34 + OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section=.vmlinux.info -S 35 + $(obj)/vmlinux.bin: vmlinux FORCE 45 36 $(call if_changed,objcopy) 46 37 47 38 vmlinux.bin.all-y := $(obj)/vmlinux.bin ··· 59 64 $(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) 60 65 $(call if_changed,xzkern) 61 66 62 - LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T 63 - $(obj)/piggy.o: $(obj)/vmlinux.scr.lds $(obj)/vmlinux.bin$(suffix-y) 64 - $(call if_changed,ld) 67 + OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed 68 + $(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE 69 + $(call if_changed,objcopy) 65 70 66 - chkbss := $(filter-out $(obj)/misc.o $(obj)/piggy.o,$(OBJECTS)) 71 + chkbss := $(filter-out $(obj)/piggy.o $(obj)/info.o,$(OBJECTS)) 67 72 chkbss-target := $(obj)/vmlinux.bin 68 73 include $(srctree)/arch/s390/scripts/Makefile.chkbss
+85
arch/s390/boot/compressed/decompressor.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Definitions and wrapper functions for kernel decompressor 4 + * 5 + * Copyright IBM Corp. 2010 6 + * 7 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 8 + */ 9 + 10 + #include <linux/kernel.h> 11 + #include <linux/string.h> 12 + #include <asm/page.h> 13 + #include "decompressor.h" 14 + 15 + /* 16 + * gzip declarations 17 + */ 18 + #define STATIC static 19 + #define STATIC_RW_DATA static __section(.data) 20 + 21 + #undef memset 22 + #undef memcpy 23 + #undef memmove 24 + #define memmove memmove 25 + #define memzero(s, n) memset((s), 0, (n)) 26 + 27 + /* Symbols defined by linker scripts */ 28 + extern char _end[]; 29 + extern unsigned char _compressed_start[]; 30 + extern unsigned char _compressed_end[]; 31 + 32 + #ifdef CONFIG_HAVE_KERNEL_BZIP2 33 + #define HEAP_SIZE 0x400000 34 + #else 35 + #define HEAP_SIZE 0x10000 36 + #endif 37 + 38 + static unsigned long free_mem_ptr = (unsigned long) _end; 39 + static unsigned long free_mem_end_ptr = (unsigned long) _end + HEAP_SIZE; 40 + 41 + #ifdef CONFIG_KERNEL_GZIP 42 + #include "../../../../lib/decompress_inflate.c" 43 + #endif 44 + 45 + #ifdef CONFIG_KERNEL_BZIP2 46 + #include "../../../../lib/decompress_bunzip2.c" 47 + #endif 48 + 49 + #ifdef CONFIG_KERNEL_LZ4 50 + #include "../../../../lib/decompress_unlz4.c" 51 + #endif 52 + 53 + #ifdef CONFIG_KERNEL_LZMA 54 + #include "../../../../lib/decompress_unlzma.c" 55 + #endif 56 + 57 + #ifdef CONFIG_KERNEL_LZO 58 + #include "../../../../lib/decompress_unlzo.c" 59 + #endif 60 + 61 + #ifdef CONFIG_KERNEL_XZ 62 + #include "../../../../lib/decompress_unxz.c" 63 + #endif 64 + 65 + #define decompress_offset ALIGN((unsigned long)_end + HEAP_SIZE, PAGE_SIZE) 66 + 67 + unsigned long mem_safe_offset(void) 68 + { 69 + /* 70 + * due to 4MB HEAD_SIZE for bzip2 71 + * 'decompress_offset + vmlinux.image_size' could be larger than 72 + * kernel at final position + its .bss, so take the larger of two 73 + */ 74 + return 
max(decompress_offset + vmlinux.image_size, 75 + vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size); 76 + } 77 + 78 + void *decompress_kernel(void) 79 + { 80 + void *output = (void *)decompress_offset; 81 + 82 + __decompress(_compressed_start, _compressed_end - _compressed_start, 83 + NULL, NULL, output, 0, NULL, error); 84 + return output; 85 + }
+25
arch/s390/boot/compressed/decompressor.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef BOOT_COMPRESSED_DECOMPRESSOR_H 3 + #define BOOT_COMPRESSED_DECOMPRESSOR_H 4 + 5 + #ifdef CONFIG_KERNEL_UNCOMPRESSED 6 + static inline void *decompress_kernel(void) {} 7 + #else 8 + void *decompress_kernel(void); 9 + #endif 10 + unsigned long mem_safe_offset(void); 11 + void error(char *m); 12 + 13 + struct vmlinux_info { 14 + unsigned long default_lma; 15 + void (*entry)(void); 16 + unsigned long image_size; /* does not include .bss */ 17 + unsigned long bss_size; /* uncompressed image .bss size */ 18 + unsigned long bootdata_off; 19 + unsigned long bootdata_size; 20 + }; 21 + 22 + extern char _vmlinux_info[]; 23 + #define vmlinux (*(struct vmlinux_info *)_vmlinux_info) 24 + 25 + #endif /* BOOT_COMPRESSED_DECOMPRESSOR_H */
-52
arch/s390/boot/compressed/head.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * Startup glue code to uncompress the kernel 4 - * 5 - * Copyright IBM Corp. 2010 6 - * 7 - * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 8 - */ 9 - 10 - #include <linux/init.h> 11 - #include <linux/linkage.h> 12 - #include <asm/asm-offsets.h> 13 - #include <asm/thread_info.h> 14 - #include <asm/page.h> 15 - #include "sizes.h" 16 - 17 - __HEAD 18 - ENTRY(startup_decompressor) 19 - basr %r13,0 # get base 20 - .LPG1: 21 - # setup stack 22 - lg %r15,.Lstack-.LPG1(%r13) 23 - aghi %r15,-160 24 - brasl %r14,decompress_kernel 25 - # Set up registers for memory mover. We move the decompressed image to 26 - # 0x100000, where startup_continue of the decompressed image is supposed 27 - # to be. 28 - lgr %r4,%r2 29 - lg %r2,.Loffset-.LPG1(%r13) 30 - lg %r3,.Lmvsize-.LPG1(%r13) 31 - lgr %r5,%r3 32 - # Move the memory mover someplace safe so it doesn't overwrite itself. 33 - la %r1,0x200 34 - mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13) 35 - # When the memory mover is done we pass control to 36 - # arch/s390/kernel/head64.S:startup_continue which lives at 0x100000 in 37 - # the decompressed image. 38 - lgr %r6,%r2 39 - br %r1 40 - mover: 41 - mvcle %r2,%r4,0 42 - jo mover 43 - br %r6 44 - mover_end: 45 - 46 - .align 8 47 - .Lstack: 48 - .quad 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)) 49 - .Loffset: 50 - .quad 0x100000 51 - .Lmvsize: 52 - .quad SZ__bss_start
-116
arch/s390/boot/compressed/misc.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * Definitions and wrapper functions for kernel decompressor 4 - * 5 - * Copyright IBM Corp. 2010 6 - * 7 - * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 8 - */ 9 - 10 - #include <linux/uaccess.h> 11 - #include <asm/page.h> 12 - #include <asm/sclp.h> 13 - #include <asm/ipl.h> 14 - #include "sizes.h" 15 - 16 - /* 17 - * gzip declarations 18 - */ 19 - #define STATIC static 20 - 21 - #undef memset 22 - #undef memcpy 23 - #undef memmove 24 - #define memmove memmove 25 - #define memzero(s, n) memset((s), 0, (n)) 26 - 27 - /* Symbols defined by linker scripts */ 28 - extern char input_data[]; 29 - extern int input_len; 30 - extern char _end[]; 31 - extern char _bss[], _ebss[]; 32 - 33 - static void error(char *m); 34 - 35 - static unsigned long free_mem_ptr; 36 - static unsigned long free_mem_end_ptr; 37 - 38 - #ifdef CONFIG_HAVE_KERNEL_BZIP2 39 - #define HEAP_SIZE 0x400000 40 - #else 41 - #define HEAP_SIZE 0x10000 42 - #endif 43 - 44 - #ifdef CONFIG_KERNEL_GZIP 45 - #include "../../../../lib/decompress_inflate.c" 46 - #endif 47 - 48 - #ifdef CONFIG_KERNEL_BZIP2 49 - #include "../../../../lib/decompress_bunzip2.c" 50 - #endif 51 - 52 - #ifdef CONFIG_KERNEL_LZ4 53 - #include "../../../../lib/decompress_unlz4.c" 54 - #endif 55 - 56 - #ifdef CONFIG_KERNEL_LZMA 57 - #include "../../../../lib/decompress_unlzma.c" 58 - #endif 59 - 60 - #ifdef CONFIG_KERNEL_LZO 61 - #include "../../../../lib/decompress_unlzo.c" 62 - #endif 63 - 64 - #ifdef CONFIG_KERNEL_XZ 65 - #include "../../../../lib/decompress_unxz.c" 66 - #endif 67 - 68 - static int puts(const char *s) 69 - { 70 - sclp_early_printk(s); 71 - return 0; 72 - } 73 - 74 - static void error(char *x) 75 - { 76 - unsigned long long psw = 0x000a0000deadbeefULL; 77 - 78 - puts("\n\n"); 79 - puts(x); 80 - puts("\n\n -- System halted"); 81 - 82 - asm volatile("lpsw %0" : : "Q" (psw)); 83 - } 84 - 85 - unsigned long decompress_kernel(void) 86 - { 87 - void *output, 
*kernel_end; 88 - 89 - output = (void *) ALIGN((unsigned long) _end + HEAP_SIZE, PAGE_SIZE); 90 - kernel_end = output + SZ__bss_start; 91 - 92 - #ifdef CONFIG_BLK_DEV_INITRD 93 - /* 94 - * Move the initrd right behind the end of the decompressed 95 - * kernel image. This also prevents initrd corruption caused by 96 - * bss clearing since kernel_end will always be located behind the 97 - * current bss section.. 98 - */ 99 - if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) { 100 - memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE); 101 - INITRD_START = (unsigned long) kernel_end; 102 - } 103 - #endif 104 - 105 - /* 106 - * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be 107 - * initialized afterwards since they reside in bss. 108 - */ 109 - memset(_bss, 0, _ebss - _bss); 110 - free_mem_ptr = (unsigned long) _end; 111 - free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; 112 - 113 - __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); 114 - return (unsigned long) output; 115 - } 116 -
+18 -6
arch/s390/boot/compressed/vmlinux.lds.S
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 #include <asm-generic/vmlinux.lds.h> 3 + #include <asm/vmlinux.lds.h> 3 4 4 5 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") 5 6 OUTPUT_ARCH(s390:64-bit) ··· 9 8 10 9 SECTIONS 11 10 { 12 - /* Be careful parts of head_64.S assume startup_32 is at 13 - * address 0. 14 - */ 15 11 . = 0; 16 12 .head.text : { 17 13 _head = . ; ··· 24 26 .rodata : { 25 27 _rodata = . ; 26 28 *(.rodata) /* read-only data */ 27 - *(EXCLUDE_FILE (*piggy.o) .rodata.compressed) 29 + *(.rodata.*) 28 30 _erodata = . ; 29 31 } 30 32 .data : { ··· 33 35 *(.data.*) 34 36 _edata = . ; 35 37 } 36 - startup_continue = 0x100000; 38 + BOOT_DATA 39 + 40 + /* 41 + * uncompressed image info used by the decompressor it should match 42 + * struct vmlinux_info. It comes from .vmlinux.info section of 43 + * uncompressed vmlinux in a form of info.o 44 + */ 45 + . = ALIGN(8); 46 + .vmlinux.info : { 47 + _vmlinux_info = .; 48 + *(.vmlinux.info) 49 + } 50 + 37 51 #ifdef CONFIG_KERNEL_UNCOMPRESSED 38 52 . = 0x100000; 39 53 #else 40 54 . = ALIGN(8); 41 55 #endif 42 56 .rodata.compressed : { 43 - *(.rodata.compressed) 57 + _compressed_start = .; 58 + *(.vmlinux.bin.compressed) 59 + _compressed_end = .; 44 60 } 45 61 . = ALIGN(256); 46 62 .bss : {
-15
arch/s390/boot/compressed/vmlinux.scr.lds.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - SECTIONS 3 - { 4 - .rodata.compressed : { 5 - #ifndef CONFIG_KERNEL_UNCOMPRESSED 6 - input_len = .; 7 - LONG(input_data_end - input_data) input_data = .; 8 - #endif 9 - *(.data) 10 - #ifndef CONFIG_KERNEL_UNCOMPRESSED 11 - output_len = . - 4; 12 - input_data_end = .; 13 - #endif 14 - } 15 - }
+2
arch/s390/boot/ctype.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include "../../../lib/ctype.c"
+5 -7
arch/s390/boot/head.S
··· 60 60 .long 0x02000690,0x60000050 61 61 .long 0x020006e0,0x20000050 62 62 63 + .org 0x1a0 64 + .quad 0,iplstart 65 + 63 66 .org 0x200 64 67 65 68 # ··· 311 308 spt 6f-.LPG0(%r13) 312 309 mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) 313 310 l %r15,.Lstack-.LPG0(%r13) 314 - ahi %r15,-STACK_FRAME_OVERHEAD 315 311 brasl %r14,verify_facilities 316 - #ifdef CONFIG_KERNEL_UNCOMPRESSED 317 - jg startup_continue 318 - #else 319 - jg startup_decompressor 320 - #endif 312 + brasl %r14,startup_kernel 321 313 322 314 .Lstack: 323 - .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)) 315 + .long 0x8000 + (1<<(PAGE_SHIFT+BOOT_STACK_ORDER)) - STACK_FRAME_OVERHEAD 324 316 .align 8 325 317 6: .long 0x7fffffff,0xffffffff 326 318
+182
arch/s390/boot/ipl_parm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/init.h> 3 + #include <linux/ctype.h> 4 + #include <asm/ebcdic.h> 5 + #include <asm/sclp.h> 6 + #include <asm/sections.h> 7 + #include <asm/boot_data.h> 8 + #include "boot.h" 9 + 10 + char __bootdata(early_command_line)[COMMAND_LINE_SIZE]; 11 + struct ipl_parameter_block __bootdata(early_ipl_block); 12 + int __bootdata(early_ipl_block_valid); 13 + 14 + unsigned long __bootdata(memory_end); 15 + int __bootdata(memory_end_set); 16 + int __bootdata(noexec_disabled); 17 + 18 + static inline int __diag308(unsigned long subcode, void *addr) 19 + { 20 + register unsigned long _addr asm("0") = (unsigned long)addr; 21 + register unsigned long _rc asm("1") = 0; 22 + unsigned long reg1, reg2; 23 + psw_t old = S390_lowcore.program_new_psw; 24 + 25 + asm volatile( 26 + " epsw %0,%1\n" 27 + " st %0,%[psw_pgm]\n" 28 + " st %1,%[psw_pgm]+4\n" 29 + " larl %0,1f\n" 30 + " stg %0,%[psw_pgm]+8\n" 31 + " diag %[addr],%[subcode],0x308\n" 32 + "1: nopr %%r7\n" 33 + : "=&d" (reg1), "=&a" (reg2), 34 + [psw_pgm] "=Q" (S390_lowcore.program_new_psw), 35 + [addr] "+d" (_addr), "+d" (_rc) 36 + : [subcode] "d" (subcode) 37 + : "cc", "memory"); 38 + S390_lowcore.program_new_psw = old; 39 + return _rc; 40 + } 41 + 42 + void store_ipl_parmblock(void) 43 + { 44 + int rc; 45 + 46 + rc = __diag308(DIAG308_STORE, &early_ipl_block); 47 + if (rc == DIAG308_RC_OK && 48 + early_ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION) 49 + early_ipl_block_valid = 1; 50 + } 51 + 52 + static size_t scpdata_length(const char *buf, size_t count) 53 + { 54 + while (count) { 55 + if (buf[count - 1] != '\0' && buf[count - 1] != ' ') 56 + break; 57 + count--; 58 + } 59 + return count; 60 + } 61 + 62 + static size_t ipl_block_get_ascii_scpdata(char *dest, size_t size, 63 + const struct ipl_parameter_block *ipb) 64 + { 65 + size_t count; 66 + size_t i; 67 + int has_lowercase; 68 + 69 + count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data, 70 + 
ipb->ipl_info.fcp.scp_data_len)); 71 + if (!count) 72 + goto out; 73 + 74 + has_lowercase = 0; 75 + for (i = 0; i < count; i++) { 76 + if (!isascii(ipb->ipl_info.fcp.scp_data[i])) { 77 + count = 0; 78 + goto out; 79 + } 80 + if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i])) 81 + has_lowercase = 1; 82 + } 83 + 84 + if (has_lowercase) 85 + memcpy(dest, ipb->ipl_info.fcp.scp_data, count); 86 + else 87 + for (i = 0; i < count; i++) 88 + dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]); 89 + out: 90 + dest[count] = '\0'; 91 + return count; 92 + } 93 + 94 + static void append_ipl_block_parm(void) 95 + { 96 + char *parm, *delim; 97 + size_t len, rc = 0; 98 + 99 + len = strlen(early_command_line); 100 + 101 + delim = early_command_line + len; /* '\0' character position */ 102 + parm = early_command_line + len + 1; /* append right after '\0' */ 103 + 104 + switch (early_ipl_block.hdr.pbt) { 105 + case DIAG308_IPL_TYPE_CCW: 106 + rc = ipl_block_get_ascii_vmparm( 107 + parm, COMMAND_LINE_SIZE - len - 1, &early_ipl_block); 108 + break; 109 + case DIAG308_IPL_TYPE_FCP: 110 + rc = ipl_block_get_ascii_scpdata( 111 + parm, COMMAND_LINE_SIZE - len - 1, &early_ipl_block); 112 + break; 113 + } 114 + if (rc) { 115 + if (*parm == '=') 116 + memmove(early_command_line, parm + 1, rc); 117 + else 118 + *delim = ' '; /* replace '\0' with space */ 119 + } 120 + } 121 + 122 + static inline int has_ebcdic_char(const char *str) 123 + { 124 + int i; 125 + 126 + for (i = 0; str[i]; i++) 127 + if (str[i] & 0x80) 128 + return 1; 129 + return 0; 130 + } 131 + 132 + void setup_boot_command_line(void) 133 + { 134 + COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0; 135 + /* convert arch command line to ascii if necessary */ 136 + if (has_ebcdic_char(COMMAND_LINE)) 137 + EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); 138 + /* copy arch command line */ 139 + strcpy(early_command_line, strim(COMMAND_LINE)); 140 + 141 + /* append IPL PARM data to the boot command line */ 142 + if 
(early_ipl_block_valid) 143 + append_ipl_block_parm(); 144 + } 145 + 146 + static char command_line_buf[COMMAND_LINE_SIZE] __section(.data); 147 + static void parse_mem_opt(void) 148 + { 149 + char *param, *val; 150 + bool enabled; 151 + char *args; 152 + int rc; 153 + 154 + args = strcpy(command_line_buf, early_command_line); 155 + while (*args) { 156 + args = next_arg(args, &param, &val); 157 + 158 + if (!strcmp(param, "mem")) { 159 + memory_end = memparse(val, NULL); 160 + memory_end_set = 1; 161 + } 162 + 163 + if (!strcmp(param, "noexec")) { 164 + rc = kstrtobool(val, &enabled); 165 + if (!rc && !enabled) 166 + noexec_disabled = 1; 167 + } 168 + } 169 + } 170 + 171 + void setup_memory_end(void) 172 + { 173 + parse_mem_opt(); 174 + #ifdef CONFIG_CRASH_DUMP 175 + if (!OLDMEM_BASE && early_ipl_block_valid && 176 + early_ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP && 177 + early_ipl_block.ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP) { 178 + if (!sclp_early_get_hsa_size(&memory_end) && memory_end) 179 + memory_end_set = 1; 180 + } 181 + #endif 182 + }
+2
arch/s390/boot/ipl_vmparm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include "../kernel/ipl_vmparm.c"
+182
arch/s390/boot/mem_detect.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/errno.h> 3 + #include <linux/init.h> 4 + #include <asm/sclp.h> 5 + #include <asm/sections.h> 6 + #include <asm/mem_detect.h> 7 + #include <asm/sparsemem.h> 8 + #include "compressed/decompressor.h" 9 + #include "boot.h" 10 + 11 + unsigned long __bootdata(max_physmem_end); 12 + struct mem_detect_info __bootdata(mem_detect); 13 + 14 + /* up to 256 storage elements, 1020 subincrements each */ 15 + #define ENTRIES_EXTENDED_MAX \ 16 + (256 * (1020 / 2) * sizeof(struct mem_detect_block)) 17 + 18 + /* 19 + * To avoid corrupting old kernel memory during dump, find lowest memory 20 + * chunk possible either right after the kernel end (decompressed kernel) or 21 + * after initrd (if it is present and there is no hole between the kernel end 22 + * and initrd) 23 + */ 24 + static void *mem_detect_alloc_extended(void) 25 + { 26 + unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64)); 27 + 28 + if (IS_ENABLED(BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE && 29 + INITRD_START < offset + ENTRIES_EXTENDED_MAX) 30 + offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64)); 31 + 32 + return (void *)offset; 33 + } 34 + 35 + static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n) 36 + { 37 + if (n < MEM_INLINED_ENTRIES) 38 + return &mem_detect.entries[n]; 39 + if (unlikely(!mem_detect.entries_extended)) 40 + mem_detect.entries_extended = mem_detect_alloc_extended(); 41 + return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES]; 42 + } 43 + 44 + /* 45 + * sequential calls to add_mem_detect_block with adjacent memory areas 46 + * are merged together into single memory block. 
47 + */ 48 + void add_mem_detect_block(u64 start, u64 end) 49 + { 50 + struct mem_detect_block *block; 51 + 52 + if (mem_detect.count) { 53 + block = __get_mem_detect_block_ptr(mem_detect.count - 1); 54 + if (block->end == start) { 55 + block->end = end; 56 + return; 57 + } 58 + } 59 + 60 + block = __get_mem_detect_block_ptr(mem_detect.count); 61 + block->start = start; 62 + block->end = end; 63 + mem_detect.count++; 64 + } 65 + 66 + static unsigned long get_mem_detect_end(void) 67 + { 68 + if (mem_detect.count) 69 + return __get_mem_detect_block_ptr(mem_detect.count - 1)->end; 70 + return 0; 71 + } 72 + 73 + static int __diag260(unsigned long rx1, unsigned long rx2) 74 + { 75 + register unsigned long _rx1 asm("2") = rx1; 76 + register unsigned long _rx2 asm("3") = rx2; 77 + register unsigned long _ry asm("4") = 0x10; /* storage configuration */ 78 + int rc = -1; /* fail */ 79 + unsigned long reg1, reg2; 80 + psw_t old = S390_lowcore.program_new_psw; 81 + 82 + asm volatile( 83 + " epsw %0,%1\n" 84 + " st %0,%[psw_pgm]\n" 85 + " st %1,%[psw_pgm]+4\n" 86 + " larl %0,1f\n" 87 + " stg %0,%[psw_pgm]+8\n" 88 + " diag %[rx],%[ry],0x260\n" 89 + " ipm %[rc]\n" 90 + " srl %[rc],28\n" 91 + "1:\n" 92 + : "=&d" (reg1), "=&a" (reg2), 93 + [psw_pgm] "=Q" (S390_lowcore.program_new_psw), 94 + [rc] "+&d" (rc), [ry] "+d" (_ry) 95 + : [rx] "d" (_rx1), "d" (_rx2) 96 + : "cc", "memory"); 97 + S390_lowcore.program_new_psw = old; 98 + return rc == 0 ? 
_ry : -1; 99 + } 100 + 101 + static int diag260(void) 102 + { 103 + int rc, i; 104 + 105 + struct { 106 + unsigned long start; 107 + unsigned long end; 108 + } storage_extents[8] __aligned(16); /* VM supports up to 8 extends */ 109 + 110 + memset(storage_extents, 0, sizeof(storage_extents)); 111 + rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents)); 112 + if (rc == -1) 113 + return -1; 114 + 115 + for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++) 116 + add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1); 117 + return 0; 118 + } 119 + 120 + static int tprot(unsigned long addr) 121 + { 122 + unsigned long pgm_addr; 123 + int rc = -EFAULT; 124 + psw_t old = S390_lowcore.program_new_psw; 125 + 126 + S390_lowcore.program_new_psw.mask = __extract_psw(); 127 + asm volatile( 128 + " larl %[pgm_addr],1f\n" 129 + " stg %[pgm_addr],%[psw_pgm_addr]\n" 130 + " tprot 0(%[addr]),0\n" 131 + " ipm %[rc]\n" 132 + " srl %[rc],28\n" 133 + "1:\n" 134 + : [pgm_addr] "=&d"(pgm_addr), 135 + [psw_pgm_addr] "=Q"(S390_lowcore.program_new_psw.addr), 136 + [rc] "+&d"(rc) 137 + : [addr] "a"(addr) 138 + : "cc", "memory"); 139 + S390_lowcore.program_new_psw = old; 140 + return rc; 141 + } 142 + 143 + static void search_mem_end(void) 144 + { 145 + unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */ 146 + unsigned long offset = 0; 147 + unsigned long pivot; 148 + 149 + while (range > 1) { 150 + range >>= 1; 151 + pivot = offset + range; 152 + if (!tprot(pivot << 20)) 153 + offset = pivot; 154 + } 155 + 156 + add_mem_detect_block(0, (offset + 1) << 20); 157 + } 158 + 159 + void detect_memory(void) 160 + { 161 + sclp_early_get_memsize(&max_physmem_end); 162 + 163 + if (!sclp_early_read_storage_info()) { 164 + mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO; 165 + return; 166 + } 167 + 168 + if (!diag260()) { 169 + mem_detect.info_source = MEM_DETECT_DIAG260; 170 + return; 171 + } 172 + 173 + if (max_physmem_end) { 174 + 
add_mem_detect_block(0, max_physmem_end); 175 + mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO; 176 + return; 177 + } 178 + 179 + search_mem_end(); 180 + mem_detect.info_source = MEM_DETECT_BIN_SEARCH; 181 + max_physmem_end = get_mem_detect_end(); 182 + }
+64
arch/s390/boot/startup.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/string.h> 3 + #include <asm/setup.h> 4 + #include <asm/sclp.h> 5 + #include "compressed/decompressor.h" 6 + #include "boot.h" 7 + 8 + extern char __boot_data_start[], __boot_data_end[]; 9 + 10 + void error(char *x) 11 + { 12 + sclp_early_printk("\n\n"); 13 + sclp_early_printk(x); 14 + sclp_early_printk("\n\n -- System halted"); 15 + 16 + disabled_wait(0xdeadbeef); 17 + } 18 + 19 + #ifdef CONFIG_KERNEL_UNCOMPRESSED 20 + unsigned long mem_safe_offset(void) 21 + { 22 + return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size; 23 + } 24 + #endif 25 + 26 + static void rescue_initrd(void) 27 + { 28 + unsigned long min_initrd_addr; 29 + 30 + if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD)) 31 + return; 32 + if (!INITRD_START || !INITRD_SIZE) 33 + return; 34 + min_initrd_addr = mem_safe_offset(); 35 + if (min_initrd_addr <= INITRD_START) 36 + return; 37 + memmove((void *)min_initrd_addr, (void *)INITRD_START, INITRD_SIZE); 38 + INITRD_START = min_initrd_addr; 39 + } 40 + 41 + static void copy_bootdata(void) 42 + { 43 + if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size) 44 + error(".boot.data section size mismatch"); 45 + memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size); 46 + } 47 + 48 + void startup_kernel(void) 49 + { 50 + void *img; 51 + 52 + rescue_initrd(); 53 + sclp_early_read_info(); 54 + store_ipl_parmblock(); 55 + setup_boot_command_line(); 56 + setup_memory_end(); 57 + detect_memory(); 58 + if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) { 59 + img = decompress_kernel(); 60 + memmove((void *)vmlinux.default_lma, img, vmlinux.image_size); 61 + } 62 + copy_bootdata(); 63 + vmlinux.entry(); 64 + }
+138
arch/s390/boot/string.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/ctype.h> 3 + #include <linux/kernel.h> 4 + #include <linux/errno.h> 5 + #include "../lib/string.c" 6 + 7 + int strncmp(const char *cs, const char *ct, size_t count) 8 + { 9 + unsigned char c1, c2; 10 + 11 + while (count) { 12 + c1 = *cs++; 13 + c2 = *ct++; 14 + if (c1 != c2) 15 + return c1 < c2 ? -1 : 1; 16 + if (!c1) 17 + break; 18 + count--; 19 + } 20 + return 0; 21 + } 22 + 23 + char *skip_spaces(const char *str) 24 + { 25 + while (isspace(*str)) 26 + ++str; 27 + return (char *)str; 28 + } 29 + 30 + char *strim(char *s) 31 + { 32 + size_t size; 33 + char *end; 34 + 35 + size = strlen(s); 36 + if (!size) 37 + return s; 38 + 39 + end = s + size - 1; 40 + while (end >= s && isspace(*end)) 41 + end--; 42 + *(end + 1) = '\0'; 43 + 44 + return skip_spaces(s); 45 + } 46 + 47 + /* Works only for digits and letters, but small and fast */ 48 + #define TOLOWER(x) ((x) | 0x20) 49 + 50 + static unsigned int simple_guess_base(const char *cp) 51 + { 52 + if (cp[0] == '0') { 53 + if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2])) 54 + return 16; 55 + else 56 + return 8; 57 + } else { 58 + return 10; 59 + } 60 + } 61 + 62 + /** 63 + * simple_strtoull - convert a string to an unsigned long long 64 + * @cp: The start of the string 65 + * @endp: A pointer to the end of the parsed string will be placed here 66 + * @base: The number base to use 67 + */ 68 + 69 + unsigned long long simple_strtoull(const char *cp, char **endp, 70 + unsigned int base) 71 + { 72 + unsigned long long result = 0; 73 + 74 + if (!base) 75 + base = simple_guess_base(cp); 76 + 77 + if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x') 78 + cp += 2; 79 + 80 + while (isxdigit(*cp)) { 81 + unsigned int value; 82 + 83 + value = isdigit(*cp) ? 
*cp - '0' : TOLOWER(*cp) - 'a' + 10; 84 + if (value >= base) 85 + break; 86 + result = result * base + value; 87 + cp++; 88 + } 89 + if (endp) 90 + *endp = (char *)cp; 91 + 92 + return result; 93 + } 94 + 95 + long simple_strtol(const char *cp, char **endp, unsigned int base) 96 + { 97 + if (*cp == '-') 98 + return -simple_strtoull(cp + 1, endp, base); 99 + 100 + return simple_strtoull(cp, endp, base); 101 + } 102 + 103 + int kstrtobool(const char *s, bool *res) 104 + { 105 + if (!s) 106 + return -EINVAL; 107 + 108 + switch (s[0]) { 109 + case 'y': 110 + case 'Y': 111 + case '1': 112 + *res = true; 113 + return 0; 114 + case 'n': 115 + case 'N': 116 + case '0': 117 + *res = false; 118 + return 0; 119 + case 'o': 120 + case 'O': 121 + switch (s[1]) { 122 + case 'n': 123 + case 'N': 124 + *res = true; 125 + return 0; 126 + case 'f': 127 + case 'F': 128 + *res = false; 129 + return 0; 130 + default: 131 + break; 132 + } 133 + default: 134 + break; 135 + } 136 + 137 + return -EINVAL; 138 + }
+37 -26
arch/s390/crypto/paes_s390.c
··· 30 30 31 31 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions; 32 32 33 + struct key_blob { 34 + __u8 key[MAXKEYBLOBSIZE]; 35 + unsigned int keylen; 36 + }; 37 + 33 38 struct s390_paes_ctx { 34 - struct pkey_seckey sk; 39 + struct key_blob kb; 35 40 struct pkey_protkey pk; 36 41 unsigned long fc; 37 42 }; 38 43 39 44 struct s390_pxts_ctx { 40 - struct pkey_seckey sk[2]; 45 + struct key_blob kb[2]; 41 46 struct pkey_protkey pk[2]; 42 47 unsigned long fc; 43 48 }; 44 49 45 - static inline int __paes_convert_key(struct pkey_seckey *sk, 50 + static inline int __paes_convert_key(struct key_blob *kb, 46 51 struct pkey_protkey *pk) 47 52 { 48 53 int i, ret; 49 54 50 55 /* try three times in case of failure */ 51 56 for (i = 0; i < 3; i++) { 52 - ret = pkey_skey2pkey(sk, pk); 57 + ret = pkey_keyblob2pkey(kb->key, kb->keylen, pk); 53 58 if (ret == 0) 54 59 break; 55 60 } ··· 66 61 { 67 62 unsigned long fc; 68 63 69 - if (__paes_convert_key(&ctx->sk, &ctx->pk)) 64 + if (__paes_convert_key(&ctx->kb, &ctx->pk)) 70 65 return -EINVAL; 71 66 72 67 /* Pick the correct function code based on the protected key type */ ··· 85 80 { 86 81 struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm); 87 82 88 - if (key_len != SECKEYBLOBSIZE) 89 - return -EINVAL; 90 - 91 - memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE); 83 + memcpy(ctx->kb.key, in_key, key_len); 84 + ctx->kb.keylen = key_len; 92 85 if (__paes_set_key(ctx)) { 93 86 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 94 87 return -EINVAL; ··· 150 147 .cra_list = LIST_HEAD_INIT(ecb_paes_alg.cra_list), 151 148 .cra_u = { 152 149 .blkcipher = { 153 - .min_keysize = SECKEYBLOBSIZE, 154 - .max_keysize = SECKEYBLOBSIZE, 150 + .min_keysize = MINKEYBLOBSIZE, 151 + .max_keysize = MAXKEYBLOBSIZE, 155 152 .setkey = ecb_paes_set_key, 156 153 .encrypt = ecb_paes_encrypt, 157 154 .decrypt = ecb_paes_decrypt, ··· 163 160 { 164 161 unsigned long fc; 165 162 166 - if (__paes_convert_key(&ctx->sk, &ctx->pk)) 163 + if 
(__paes_convert_key(&ctx->kb, &ctx->pk)) 167 164 return -EINVAL; 168 165 169 166 /* Pick the correct function code based on the protected key type */ ··· 182 179 { 183 180 struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm); 184 181 185 - memcpy(ctx->sk.seckey, in_key, SECKEYBLOBSIZE); 182 + memcpy(ctx->kb.key, in_key, key_len); 183 + ctx->kb.keylen = key_len; 186 184 if (__cbc_paes_set_key(ctx)) { 187 185 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 188 186 return -EINVAL; ··· 254 250 .cra_list = LIST_HEAD_INIT(cbc_paes_alg.cra_list), 255 251 .cra_u = { 256 252 .blkcipher = { 257 - .min_keysize = SECKEYBLOBSIZE, 258 - .max_keysize = SECKEYBLOBSIZE, 253 + .min_keysize = MINKEYBLOBSIZE, 254 + .max_keysize = MAXKEYBLOBSIZE, 259 255 .ivsize = AES_BLOCK_SIZE, 260 256 .setkey = cbc_paes_set_key, 261 257 .encrypt = cbc_paes_encrypt, ··· 268 264 { 269 265 unsigned long fc; 270 266 271 - if (__paes_convert_key(&ctx->sk[0], &ctx->pk[0]) || 272 - __paes_convert_key(&ctx->sk[1], &ctx->pk[1])) 267 + if (__paes_convert_key(&ctx->kb[0], &ctx->pk[0]) || 268 + __paes_convert_key(&ctx->kb[1], &ctx->pk[1])) 273 269 return -EINVAL; 274 270 275 271 if (ctx->pk[0].type != ctx->pk[1].type) ··· 291 287 { 292 288 struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm); 293 289 u8 ckey[2 * AES_MAX_KEY_SIZE]; 294 - unsigned int ckey_len; 290 + unsigned int ckey_len, keytok_len; 295 291 296 - memcpy(ctx->sk[0].seckey, in_key, SECKEYBLOBSIZE); 297 - memcpy(ctx->sk[1].seckey, in_key + SECKEYBLOBSIZE, SECKEYBLOBSIZE); 292 + if (key_len % 2) 293 + return -EINVAL; 294 + 295 + keytok_len = key_len / 2; 296 + memcpy(ctx->kb[0].key, in_key, keytok_len); 297 + ctx->kb[0].keylen = keytok_len; 298 + memcpy(ctx->kb[1].key, in_key + keytok_len, keytok_len); 299 + ctx->kb[1].keylen = keytok_len; 298 300 if (__xts_paes_set_key(ctx)) { 299 301 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 300 302 return -EINVAL; ··· 396 386 .cra_list = LIST_HEAD_INIT(xts_paes_alg.cra_list), 397 387 .cra_u = { 398 388 .blkcipher = { 
399 - .min_keysize = 2 * SECKEYBLOBSIZE, 400 - .max_keysize = 2 * SECKEYBLOBSIZE, 389 + .min_keysize = 2 * MINKEYBLOBSIZE, 390 + .max_keysize = 2 * MAXKEYBLOBSIZE, 401 391 .ivsize = AES_BLOCK_SIZE, 402 392 .setkey = xts_paes_set_key, 403 393 .encrypt = xts_paes_encrypt, ··· 410 400 { 411 401 unsigned long fc; 412 402 413 - if (__paes_convert_key(&ctx->sk, &ctx->pk)) 403 + if (__paes_convert_key(&ctx->kb, &ctx->pk)) 414 404 return -EINVAL; 415 405 416 406 /* Pick the correct function code based on the protected key type */ ··· 430 420 { 431 421 struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm); 432 422 433 - memcpy(ctx->sk.seckey, in_key, key_len); 423 + memcpy(ctx->kb.key, in_key, key_len); 424 + ctx->kb.keylen = key_len; 434 425 if (__ctr_paes_set_key(ctx)) { 435 426 tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 436 427 return -EINVAL; ··· 543 532 .cra_list = LIST_HEAD_INIT(ctr_paes_alg.cra_list), 544 533 .cra_u = { 545 534 .blkcipher = { 546 - .min_keysize = SECKEYBLOBSIZE, 547 - .max_keysize = SECKEYBLOBSIZE, 535 + .min_keysize = MINKEYBLOBSIZE, 536 + .max_keysize = MAXKEYBLOBSIZE, 548 537 .ivsize = AES_BLOCK_SIZE, 549 538 .setkey = ctr_paes_set_key, 550 539 .encrypt = ctr_paes_encrypt,
+1
arch/s390/defconfig
··· 232 232 CONFIG_CRYPTO_USER_API_SKCIPHER=m 233 233 CONFIG_CRYPTO_USER_API_RNG=m 234 234 CONFIG_ZCRYPT=m 235 + CONFIG_ZCRYPT_MULTIDEVNODES=y 235 236 CONFIG_PKEY=m 236 237 CONFIG_CRYPTO_PAES_S390=m 237 238 CONFIG_CRYPTO_SHA1_S390=m
+22 -18
arch/s390/hypfs/hypfs_sprp.c
··· 68 68 69 69 static int __hypfs_sprp_ioctl(void __user *user_area) 70 70 { 71 - struct hypfs_diag304 diag304; 71 + struct hypfs_diag304 *diag304; 72 72 unsigned long cmd; 73 73 void __user *udata; 74 74 void *data; 75 75 int rc; 76 76 77 - if (copy_from_user(&diag304, user_area, sizeof(diag304))) 78 - return -EFAULT; 79 - if ((diag304.args[0] >> 8) != 0 || diag304.args[1] > DIAG304_CMD_MAX) 80 - return -EINVAL; 81 - 77 + rc = -ENOMEM; 82 78 data = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 83 - if (!data) 84 - return -ENOMEM; 79 + diag304 = kzalloc(sizeof(*diag304), GFP_KERNEL); 80 + if (!data || !diag304) 81 + goto out; 85 82 86 - udata = (void __user *)(unsigned long) diag304.data; 87 - if (diag304.args[1] == DIAG304_SET_WEIGHTS || 88 - diag304.args[1] == DIAG304_SET_CAPPING) 89 - if (copy_from_user(data, udata, PAGE_SIZE)) { 90 - rc = -EFAULT; 83 + rc = -EFAULT; 84 + if (copy_from_user(diag304, user_area, sizeof(*diag304))) 85 + goto out; 86 + rc = -EINVAL; 87 + if ((diag304->args[0] >> 8) != 0 || diag304->args[1] > DIAG304_CMD_MAX) 88 + goto out; 89 + 90 + rc = -EFAULT; 91 + udata = (void __user *)(unsigned long) diag304->data; 92 + if (diag304->args[1] == DIAG304_SET_WEIGHTS || 93 + diag304->args[1] == DIAG304_SET_CAPPING) 94 + if (copy_from_user(data, udata, PAGE_SIZE)) 91 95 goto out; 92 - } 93 96 94 - cmd = *(unsigned long *) &diag304.args[0]; 95 - diag304.rc = hypfs_sprp_diag304(data, cmd); 97 + cmd = *(unsigned long *) &diag304->args[0]; 98 + diag304->rc = hypfs_sprp_diag304(data, cmd); 96 99 97 - if (diag304.args[1] == DIAG304_QUERY_PRP) 100 + if (diag304->args[1] == DIAG304_QUERY_PRP) 98 101 if (copy_to_user(udata, data, PAGE_SIZE)) { 99 102 rc = -EFAULT; 100 103 goto out; 101 104 } 102 105 103 - rc = copy_to_user(user_area, &diag304, sizeof(diag304)) ? -EFAULT : 0; 106 + rc = copy_to_user(user_area, diag304, sizeof(*diag304)) ? -EFAULT : 0; 104 107 out: 108 + kfree(diag304); 105 109 free_page((unsigned long) data); 106 110 return rc; 107 111 }
+10 -9
arch/s390/include/asm/appldata.h
··· 40 40 u16 mod_lvl; /* modification level */ 41 41 } __attribute__ ((packed)); 42 42 43 - static inline int appldata_asm(struct appldata_product_id *id, 43 + 44 + static inline int appldata_asm(struct appldata_parameter_list *parm_list, 45 + struct appldata_product_id *id, 44 46 unsigned short fn, void *buffer, 45 47 unsigned short length) 46 48 { 47 - struct appldata_parameter_list parm_list; 48 49 int ry; 49 50 50 51 if (!MACHINE_IS_VM) 51 52 return -EOPNOTSUPP; 52 - parm_list.diag = 0xdc; 53 - parm_list.function = fn; 54 - parm_list.parlist_length = sizeof(parm_list); 55 - parm_list.buffer_length = length; 56 - parm_list.product_id_addr = (unsigned long) id; 57 - parm_list.buffer_addr = virt_to_phys(buffer); 53 + parm_list->diag = 0xdc; 54 + parm_list->function = fn; 55 + parm_list->parlist_length = sizeof(*parm_list); 56 + parm_list->buffer_length = length; 57 + parm_list->product_id_addr = (unsigned long) id; 58 + parm_list->buffer_addr = virt_to_phys(buffer); 58 59 diag_stat_inc(DIAG_STAT_X0DC); 59 60 asm volatile( 60 61 " diag %1,%0,0xdc" 61 62 : "=d" (ry) 62 - : "d" (&parm_list), "m" (parm_list), "m" (*id) 63 + : "d" (parm_list), "m" (*parm_list), "m" (*id) 63 64 : "cc"); 64 65 return ry; 65 66 }
+11
arch/s390/include/asm/boot_data.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _ASM_S390_BOOT_DATA_H 3 + 4 + #include <asm/setup.h> 5 + #include <asm/ipl.h> 6 + 7 + extern char early_command_line[COMMAND_LINE_SIZE]; 8 + extern struct ipl_parameter_block early_ipl_block; 9 + extern int early_ipl_block_valid; 10 + 11 + #endif /* _ASM_S390_BOOT_DATA_H */
+2
arch/s390/include/asm/ccwgroup.h
··· 64 64 extern void ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver); 65 65 int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv, 66 66 int num_devices, const char *buf); 67 + struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv, 68 + char *bus_id); 67 69 68 70 extern int ccwgroup_set_online(struct ccwgroup_device *gdev); 69 71 extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
+7 -2
arch/s390/include/asm/facility.h
··· 64 64 * @stfle_fac_list: array where facility list can be stored 65 65 * @size: size of passed in array in double words 66 66 */ 67 - static inline void stfle(u64 *stfle_fac_list, int size) 67 + static inline void __stfle(u64 *stfle_fac_list, int size) 68 68 { 69 69 unsigned long nr; 70 70 71 - preempt_disable(); 72 71 asm volatile( 73 72 " stfl 0(0)\n" 74 73 : "=m" (S390_lowcore.stfl_fac_list)); ··· 84 85 nr = (reg0 + 1) * 8; /* # bytes stored by stfle */ 85 86 } 86 87 memset((char *) stfle_fac_list + nr, 0, size * 8 - nr); 88 + } 89 + 90 + static inline void stfle(u64 *stfle_fac_list, int size) 91 + { 92 + preempt_disable(); 93 + __stfle(stfle_fac_list, size); 87 94 preempt_enable(); 88 95 } 89 96
+2 -2
arch/s390/include/asm/ipl.h
··· 89 89 90 90 extern void s390_reset_system(void); 91 91 extern void ipl_store_parameters(void); 92 - extern size_t append_ipl_vmparm(char *, size_t); 93 - extern size_t append_ipl_scpdata(char *, size_t); 92 + extern size_t ipl_block_get_ascii_vmparm(char *dest, size_t size, 93 + const struct ipl_parameter_block *ipb); 94 94 95 95 enum ipl_type { 96 96 IPL_TYPE_UNKNOWN = 1,
+30
arch/s390/include/asm/kasan.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __ASM_KASAN_H 3 + #define __ASM_KASAN_H 4 + 5 + #include <asm/pgtable.h> 6 + 7 + #ifdef CONFIG_KASAN 8 + 9 + #define KASAN_SHADOW_SCALE_SHIFT 3 10 + #ifdef CONFIG_KASAN_S390_4_LEVEL_PAGING 11 + #define KASAN_SHADOW_SIZE \ 12 + (_AC(1, UL) << (_REGION1_SHIFT - KASAN_SHADOW_SCALE_SHIFT)) 13 + #else 14 + #define KASAN_SHADOW_SIZE \ 15 + (_AC(1, UL) << (_REGION2_SHIFT - KASAN_SHADOW_SCALE_SHIFT)) 16 + #endif 17 + #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) 18 + #define KASAN_SHADOW_START KASAN_SHADOW_OFFSET 19 + #define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE) 20 + 21 + extern void kasan_early_init(void); 22 + extern void kasan_copy_shadow(pgd_t *dst); 23 + extern void kasan_free_early_identity(void); 24 + #else 25 + static inline void kasan_early_init(void) { } 26 + static inline void kasan_copy_shadow(pgd_t *dst) { } 27 + static inline void kasan_free_early_identity(void) { } 28 + #endif 29 + 30 + #endif
+2 -2
arch/s390/include/asm/lowcore.h
··· 102 102 __u64 current_task; /* 0x0338 */ 103 103 __u64 kernel_stack; /* 0x0340 */ 104 104 105 - /* Interrupt, panic and restart stack. */ 105 + /* Interrupt, DAT-off and restart stack. */ 106 106 __u64 async_stack; /* 0x0348 */ 107 - __u64 panic_stack; /* 0x0350 */ 107 + __u64 nodat_stack; /* 0x0350 */ 108 108 __u64 restart_stack; /* 0x0358 */ 109 109 110 110 /* Restart function and parameter. */
+82
arch/s390/include/asm/mem_detect.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _ASM_S390_MEM_DETECT_H 3 + #define _ASM_S390_MEM_DETECT_H 4 + 5 + #include <linux/types.h> 6 + 7 + enum mem_info_source { 8 + MEM_DETECT_NONE = 0, 9 + MEM_DETECT_SCLP_STOR_INFO, 10 + MEM_DETECT_DIAG260, 11 + MEM_DETECT_SCLP_READ_INFO, 12 + MEM_DETECT_BIN_SEARCH 13 + }; 14 + 15 + struct mem_detect_block { 16 + u64 start; 17 + u64 end; 18 + }; 19 + 20 + /* 21 + * Storage element id is defined as 1 byte (up to 256 storage elements). 22 + * In practice only storage element ids 0 and 1 are used. 23 + * According to architecture one storage element could have as much as 24 + * 1020 subincrements. 255 mem_detect_blocks are embedded in mem_detect_info. 25 + * If more mem_detect_blocks are required, a block of memory from already 26 + * known mem_detect_block is taken (entries_extended points to it). 27 + */ 28 + #define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */ 29 + 30 + struct mem_detect_info { 31 + u32 count; 32 + u8 info_source; 33 + struct mem_detect_block entries[MEM_INLINED_ENTRIES]; 34 + struct mem_detect_block *entries_extended; 35 + }; 36 + extern struct mem_detect_info mem_detect; 37 + 38 + void add_mem_detect_block(u64 start, u64 end); 39 + 40 + static inline int __get_mem_detect_block(u32 n, unsigned long *start, 41 + unsigned long *end) 42 + { 43 + if (n >= mem_detect.count) { 44 + *start = 0; 45 + *end = 0; 46 + return -1; 47 + } 48 + 49 + if (n < MEM_INLINED_ENTRIES) { 50 + *start = (unsigned long)mem_detect.entries[n].start; 51 + *end = (unsigned long)mem_detect.entries[n].end; 52 + } else { 53 + *start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start; 54 + *end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end; 55 + } 56 + return 0; 57 + } 58 + 59 + /** 60 + * for_each_mem_detect_block - early online memory range iterator 61 + * @i: an integer used as loop variable 62 + * @p_start: ptr to unsigned long for start address of the range 63 + 
* @p_end: ptr to unsigned long for end address of the range 64 + * 65 + * Walks over detected online memory ranges. 66 + */ 67 + #define for_each_mem_detect_block(i, p_start, p_end) \ 68 + for (i = 0, __get_mem_detect_block(i, p_start, p_end); \ 69 + i < mem_detect.count; \ 70 + i++, __get_mem_detect_block(i, p_start, p_end)) 71 + 72 + static inline void get_mem_detect_reserved(unsigned long *start, 73 + unsigned long *size) 74 + { 75 + *start = (unsigned long)mem_detect.entries_extended; 76 + if (mem_detect.count > MEM_INLINED_ENTRIES) 77 + *size = (mem_detect.count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block); 78 + else 79 + *size = 0; 80 + } 81 + 82 + #endif
+2
arch/s390/include/asm/mmu.h
··· 32 32 unsigned int uses_cmm:1; 33 33 /* The gmaps associated with this context are allowed to use huge pages. */ 34 34 unsigned int allow_gmap_hpage_1m:1; 35 + /* The mmu context is for compat task */ 36 + unsigned int compat_mm:1; 35 37 } mm_context_t; 36 38 37 39 #define INIT_MM_CONTEXT(name) \
+1
arch/s390/include/asm/mmu_context.h
··· 25 25 atomic_set(&mm->context.flush_count, 0); 26 26 mm->context.gmap_asce = 0; 27 27 mm->context.flush_mm = 0; 28 + mm->context.compat_mm = 0; 28 29 #ifdef CONFIG_PGSTE 29 30 mm->context.alloc_pgste = page_table_allocate_pgste || 30 31 test_thread_flag(TIF_PGSTE) ||
+1
arch/s390/include/asm/page.h
··· 161 161 162 162 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) 163 163 #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) 164 + #define pfn_to_kaddr(pfn) pfn_to_virt(pfn) 164 165 165 166 #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) 166 167 #define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
+19 -1
arch/s390/include/asm/pgtable.h
··· 341 341 #define PTRS_PER_P4D _CRST_ENTRIES 342 342 #define PTRS_PER_PGD _CRST_ENTRIES 343 343 344 + #define MAX_PTRS_PER_P4D PTRS_PER_P4D 345 + 344 346 /* 345 347 * Segment table and region3 table entry encoding 346 348 * (R = read-only, I = invalid, y = young bit): ··· 468 466 _SEGMENT_ENTRY_YOUNG | \ 469 467 _SEGMENT_ENTRY_PROTECT | \ 470 468 _SEGMENT_ENTRY_NOEXEC) 469 + #define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \ 470 + _SEGMENT_ENTRY_LARGE | \ 471 + _SEGMENT_ENTRY_READ | \ 472 + _SEGMENT_ENTRY_WRITE | \ 473 + _SEGMENT_ENTRY_YOUNG | \ 474 + _SEGMENT_ENTRY_DIRTY) 471 475 472 476 /* 473 477 * Region3 entry (large page) protection definitions. ··· 605 597 ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID & 606 598 ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH; 607 599 return (pgd_val(pgd) & mask) != 0; 600 + } 601 + 602 + static inline unsigned long pgd_pfn(pgd_t pgd) 603 + { 604 + unsigned long origin_mask; 605 + 606 + origin_mask = _REGION_ENTRY_ORIGIN; 607 + return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT; 608 608 } 609 609 610 610 static inline int p4d_folded(p4d_t p4d) ··· 1187 1171 1188 1172 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) 1189 1173 #define pgd_offset_k(address) pgd_offset(&init_mm, address) 1174 + #define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr)) 1190 1175 1191 1176 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) 1192 1177 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) ··· 1227 1210 1228 1211 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) 1229 1212 #define pud_page(pud) pfn_to_page(pud_pfn(pud)) 1230 - #define p4d_page(pud) pfn_to_page(p4d_pfn(p4d)) 1213 + #define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d)) 1214 + #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd)) 1231 1215 1232 1216 /* Find an entry in the lowest level page table.. */ 1233 1217 #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
+26
arch/s390/include/asm/pkey.h
··· 109 109 u16 *pcardnr, u16 *pdomain, 110 110 u16 *pkeysize, u32 *pattributes); 111 111 112 + /* 113 + * In-kernel API: Generate (AES) random protected key. 114 + * @param keytype one of the PKEY_KEYTYPE values 115 + * @param protkey pointer to buffer receiving the protected key 116 + * @return 0 on success, negative errno value on failure 117 + */ 118 + int pkey_genprotkey(__u32 keytype, struct pkey_protkey *protkey); 119 + 120 + /* 121 + * In-kernel API: Verify an (AES) protected key. 122 + * @param protkey pointer to buffer containing the protected key to verify 123 + * @return 0 on success, negative errno value on failure. In case the protected 124 + * key is not valid -EKEYREJECTED is returned 125 + */ 126 + int pkey_verifyprotkey(const struct pkey_protkey *protkey); 127 + 128 + /* 129 + * In-kernel API: Transform a key blob (of any type) into a protected key. 130 + * @param key pointer to a buffer containing the key blob 131 + * @param keylen size of the key blob in bytes 132 + * @param protkey pointer to buffer receiving the protected key 133 + * @return 0 on success, negative errno value on failure 134 + */ 135 + int pkey_keyblob2pkey(const __u8 *key, __u32 keylen, 136 + struct pkey_protkey *protkey); 137 + 112 138 #endif /* _KAPI_PKEY_H */
+51 -2
arch/s390/include/asm/processor.h
··· 242 242 return sp; 243 243 } 244 244 245 - static inline unsigned short stap(void) 245 + static __no_sanitize_address_or_inline unsigned short stap(void) 246 246 { 247 247 unsigned short cpu_address; 248 248 249 249 asm volatile("stap %0" : "=Q" (cpu_address)); 250 250 return cpu_address; 251 251 } 252 + 253 + #define CALL_ARGS_0() \ 254 + register unsigned long r2 asm("2") 255 + #define CALL_ARGS_1(arg1) \ 256 + register unsigned long r2 asm("2") = (unsigned long)(arg1) 257 + #define CALL_ARGS_2(arg1, arg2) \ 258 + CALL_ARGS_1(arg1); \ 259 + register unsigned long r3 asm("3") = (unsigned long)(arg2) 260 + #define CALL_ARGS_3(arg1, arg2, arg3) \ 261 + CALL_ARGS_2(arg1, arg2); \ 262 + register unsigned long r4 asm("4") = (unsigned long)(arg3) 263 + #define CALL_ARGS_4(arg1, arg2, arg3, arg4) \ 264 + CALL_ARGS_3(arg1, arg2, arg3); \ 265 + register unsigned long r5 asm("5") = (unsigned long)(arg4) 266 + #define CALL_ARGS_5(arg1, arg2, arg3, arg4, arg5) \ 267 + CALL_ARGS_4(arg1, arg2, arg3, arg4); \ 268 + register unsigned long r6 asm("6") = (unsigned long)(arg5) 269 + 270 + #define CALL_FMT_0 271 + #define CALL_FMT_1 CALL_FMT_0, "0" (r2) 272 + #define CALL_FMT_2 CALL_FMT_1, "d" (r3) 273 + #define CALL_FMT_3 CALL_FMT_2, "d" (r4) 274 + #define CALL_FMT_4 CALL_FMT_3, "d" (r5) 275 + #define CALL_FMT_5 CALL_FMT_4, "d" (r6) 276 + 277 + #define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory" 278 + #define CALL_CLOBBER_4 CALL_CLOBBER_5 279 + #define CALL_CLOBBER_3 CALL_CLOBBER_4, "5" 280 + #define CALL_CLOBBER_2 CALL_CLOBBER_3, "4" 281 + #define CALL_CLOBBER_1 CALL_CLOBBER_2, "3" 282 + #define CALL_CLOBBER_0 CALL_CLOBBER_1 283 + 284 + #define CALL_ON_STACK(fn, stack, nr, args...) 
\ 285 + ({ \ 286 + CALL_ARGS_##nr(args); \ 287 + unsigned long prev; \ 288 + \ 289 + asm volatile( \ 290 + " la %[_prev],0(15)\n" \ 291 + " la 15,0(%[_stack])\n" \ 292 + " stg %[_prev],%[_bc](15)\n" \ 293 + " brasl 14,%[_fn]\n" \ 294 + " la 15,0(%[_prev])\n" \ 295 + : "+&d" (r2), [_prev] "=&a" (prev) \ 296 + : [_stack] "a" (stack), \ 297 + [_bc] "i" (offsetof(struct stack_frame, back_chain)), \ 298 + [_fn] "X" (fn) CALL_FMT_##nr : CALL_CLOBBER_##nr); \ 299 + r2; \ 300 + }) 252 301 253 302 /* 254 303 * Give up the time slice of the virtual PU. ··· 336 287 * Set PSW mask to specified value, while leaving the 337 288 * PSW addr pointing to the next instruction. 338 289 */ 339 - static inline void __load_psw_mask(unsigned long mask) 290 + static __no_sanitize_address_or_inline void __load_psw_mask(unsigned long mask) 340 291 { 341 292 unsigned long addr; 342 293 psw_t psw;
-2
arch/s390/include/asm/qdio.h
··· 252 252 * (for communication with upper layer programs) 253 253 * (only required for use with completion queues) 254 254 * @flags: flags indicating state of buffer 255 - * @aob: pointer to QAOB used for the particular SBAL 256 255 * @user: pointer to upper layer program's state information related to SBAL 257 256 * (stored in user1 data of QAOB) 258 257 */ 259 258 struct qdio_outbuf_state { 260 259 u8 flags; 261 - struct qaob *aob; 262 260 void *user; 263 261 }; 264 262
+5
arch/s390/include/asm/sclp.h
··· 95 95 struct zpci_report_error_header { 96 96 u8 version; /* Interface version byte */ 97 97 u8 action; /* Action qualifier byte 98 + * 0: Adapter Reset Request 98 99 * 1: Deconfigure and repair action requested 99 100 * (OpenCrypto Problem Call Home) 100 101 * 2: Informational Report ··· 105 104 u8 data[0]; /* Subsequent Data passed verbatim to SCLP ET 24 */ 106 105 } __packed; 107 106 107 + int sclp_early_read_info(void); 108 + int sclp_early_read_storage_info(void); 108 109 int sclp_early_get_core_info(struct sclp_core_info *info); 109 110 void sclp_early_get_ipl_info(struct sclp_ipl_info *info); 110 111 void sclp_early_detect(void); ··· 114 111 void sclp_early_printk_force(const char *s); 115 112 void __sclp_early_printk(const char *s, unsigned int len, unsigned int force); 116 113 114 + int sclp_early_get_memsize(unsigned long *mem); 115 + int sclp_early_get_hsa_size(unsigned long *hsa_size); 117 116 int _sclp_get_core_info(struct sclp_core_info *info); 118 117 int sclp_core_configure(u8 core); 119 118 int sclp_core_deconfigure(u8 core);
+12
arch/s390/include/asm/sections.h
··· 4 4 5 5 #include <asm-generic/sections.h> 6 6 7 + /* 8 + * .boot.data section contains variables "shared" between the decompressor and 9 + * the decompressed kernel. The decompressor will store values in them, and 10 + * copy over to the decompressed image before starting it. 11 + * 12 + * Each variable end up in its own intermediate section .boot.data.<var name>, 13 + * those sections are later sorted by alignment + name and merged together into 14 + * final .boot.data section, which should be identical in the decompressor and 15 + * the decompressed kernel (that is checked during the build). 16 + */ 17 + #define __bootdata(var) __section(.boot.data.var) var 18 + 7 19 #endif
+1 -2
arch/s390/include/asm/setup.h
··· 65 65 #define OLDMEM_SIZE (*(unsigned long *) (OLDMEM_SIZE_OFFSET)) 66 66 #define COMMAND_LINE ((char *) (COMMAND_LINE_OFFSET)) 67 67 68 + extern int noexec_disabled; 68 69 extern int memory_end_set; 69 70 extern unsigned long memory_end; 70 71 extern unsigned long max_physmem_end; 71 - 72 - extern void detect_memory_memblock(void); 73 72 74 73 #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) 75 74 #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
+21
arch/s390/include/asm/string.h
··· 53 53 #undef __HAVE_ARCH_STRSEP 54 54 #undef __HAVE_ARCH_STRSPN 55 55 56 + #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) 57 + 58 + extern void *__memcpy(void *dest, const void *src, size_t n); 59 + extern void *__memset(void *s, int c, size_t n); 60 + extern void *__memmove(void *dest, const void *src, size_t n); 61 + 62 + /* 63 + * For files that are not instrumented (e.g. mm/slub.c) we 64 + * should use not instrumented version of mem* functions. 65 + */ 66 + 67 + #define memcpy(dst, src, len) __memcpy(dst, src, len) 68 + #define memmove(dst, src, len) __memmove(dst, src, len) 69 + #define memset(s, c, n) __memset(s, c, n) 70 + 71 + #ifndef __NO_FORTIFY 72 + #define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */ 73 + #endif 74 + 75 + #endif /* defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) */ 76 + 56 77 void *__memset16(uint16_t *s, uint16_t v, size_t count); 57 78 void *__memset32(uint32_t *s, uint32_t v, size_t count); 58 79 void *__memset64(uint64_t *s, uint64_t v, size_t count);
+9 -4
arch/s390/include/asm/thread_info.h
··· 11 11 #include <linux/const.h> 12 12 13 13 /* 14 - * Size of kernel stack for each process 14 + * General size of kernel stacks 15 15 */ 16 + #ifdef CONFIG_KASAN 17 + #define THREAD_SIZE_ORDER 3 18 + #else 16 19 #define THREAD_SIZE_ORDER 2 17 - #define ASYNC_ORDER 2 18 - 20 + #endif 21 + #define BOOT_STACK_ORDER 2 19 22 #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 20 - #define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER) 21 23 22 24 #ifndef __ASSEMBLY__ 23 25 #include <asm/lowcore.h> 24 26 #include <asm/page.h> 25 27 #include <asm/processor.h> 28 + 29 + #define STACK_INIT_OFFSET \ 30 + (THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs)) 26 31 27 32 /* 28 33 * low level task data that entry.S needs immediate access to
+20
arch/s390/include/asm/vmlinux.lds.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #include <asm/page.h> 3 + 4 + /* 5 + * .boot.data section is shared between the decompressor code and the 6 + * decompressed kernel. The decompressor will store values in it, and copy 7 + * over to the decompressed image before starting it. 8 + * 9 + * .boot.data variables are kept in separate .boot.data.<var name> sections, 10 + * which are sorted by alignment first, then by name before being merged 11 + * into single .boot.data section. This way big holes caused by page aligned 12 + * structs are avoided and linker produces consistent result. 13 + */ 14 + #define BOOT_DATA \ 15 + . = ALIGN(PAGE_SIZE); \ 16 + .boot.data : { \ 17 + __boot_data_start = .; \ 18 + *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.boot.data*))) \ 19 + __boot_data_end = .; \ 20 + }
+34
arch/s390/include/uapi/asm/pkey.h
··· 21 21 #define PKEY_IOCTL_MAGIC 'p' 22 22 23 23 #define SECKEYBLOBSIZE 64 /* secure key blob size is always 64 bytes */ 24 + #define PROTKEYBLOBSIZE 80 /* protected key blob size is always 80 bytes */ 24 25 #define MAXPROTKEYSIZE 64 /* a protected key blob may be up to 64 bytes */ 25 26 #define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */ 27 + 28 + #define MINKEYBLOBSIZE SECKEYBLOBSIZE /* Minimum size of a key blob */ 29 + #define MAXKEYBLOBSIZE PROTKEYBLOBSIZE /* Maximum size of a key blob */ 26 30 27 31 /* defines for the type field within the pkey_protkey struct */ 28 32 #define PKEY_KEYTYPE_AES_128 1 ··· 132 128 #define PKEY_VERIFYKEY _IOWR(PKEY_IOCTL_MAGIC, 0x07, struct pkey_verifykey) 133 129 #define PKEY_VERIFY_ATTR_AES 0x00000001 /* key is an AES key */ 134 130 #define PKEY_VERIFY_ATTR_OLD_MKVP 0x00000100 /* key has old MKVP value */ 131 + 132 + /* 133 + * Generate (AES) random protected key. 134 + */ 135 + struct pkey_genprotk { 136 + __u32 keytype; /* in: key type to generate */ 137 + struct pkey_protkey protkey; /* out: the protected key */ 138 + }; 139 + 140 + #define PKEY_GENPROTK _IOWR(PKEY_IOCTL_MAGIC, 0x08, struct pkey_genprotk) 141 + 142 + /* 143 + * Verify an (AES) protected key. 144 + */ 145 + struct pkey_verifyprotk { 146 + struct pkey_protkey protkey; /* in: the protected key to verify */ 147 + }; 148 + 149 + #define PKEY_VERIFYPROTK _IOW(PKEY_IOCTL_MAGIC, 0x09, struct pkey_verifyprotk) 150 + 151 + /* 152 + * Transform a key blob (of any type) into a protected key 153 + */ 154 + struct pkey_kblob2pkey { 155 + __u8 __user *key; /* in: the key blob */ 156 + __u32 keylen; /* in: the key blob length */ 157 + struct pkey_protkey protkey; /* out: the protected key */ 158 + }; 159 + 160 + #define PKEY_KBLOB2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x0A, struct pkey_kblob2pkey) 135 161 136 162 #endif /* _UAPI_PKEY_H */
+16 -3
arch/s390/include/uapi/asm/zcrypt.h
··· 2 2 /* 3 3 * include/asm-s390/zcrypt.h 4 4 * 5 - * zcrypt 2.1.0 (user-visible header) 5 + * zcrypt 2.2.1 (user-visible header) 6 6 * 7 - * Copyright IBM Corp. 2001, 2006 7 + * Copyright IBM Corp. 2001, 2018 8 8 * Author(s): Robert Burroughs 9 9 * Eric Rossman (edrossma@us.ibm.com) 10 10 * ··· 15 15 #define __ASM_S390_ZCRYPT_H 16 16 17 17 #define ZCRYPT_VERSION 2 18 - #define ZCRYPT_RELEASE 1 18 + #define ZCRYPT_RELEASE 2 19 19 #define ZCRYPT_VARIANT 1 20 20 21 21 #include <linux/ioctl.h> 22 22 #include <linux/compiler.h> 23 + 24 + /* Name of the zcrypt device driver. */ 25 + #define ZCRYPT_NAME "zcrypt" 23 26 24 27 /** 25 28 * struct ica_rsa_modexpo ··· 311 308 #define ZCRYPT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x58, char[MAX_ZDEV_CARDIDS_EXT]) 312 309 #define ZCRYPT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x59, char[MAX_ZDEV_CARDIDS_EXT]) 313 310 #define ZCRYPT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x5a, int[MAX_ZDEV_CARDIDS_EXT]) 311 + 312 + /* 313 + * Support for multiple zcrypt device nodes. 314 + */ 315 + 316 + /* Nr of minor device node numbers to allocate. */ 317 + #define ZCRYPT_MAX_MINOR_NODES 256 318 + 319 + /* Max amount of possible ioctls */ 320 + #define MAX_ZDEV_IOCTLS (1 << _IOC_NRBITS) 314 321 315 322 /* 316 323 * Only deprecated defines, structs and ioctls below this line.
+5 -1
arch/s390/kernel/Makefile
··· 23 23 UBSAN_SANITIZE_early.o := n 24 24 UBSAN_SANITIZE_early_nobss.o := n 25 25 26 + KASAN_SANITIZE_early_nobss.o := n 27 + KASAN_SANITIZE_ipl.o := n 28 + KASAN_SANITIZE_machine_kexec.o := n 29 + 26 30 # 27 31 # Passing null pointers is ok for smp code, since we access the lowcore here. 28 32 # ··· 51 47 obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o 52 48 obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o 53 49 obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o 54 - obj-y += nospec-branch.o 50 + obj-y += nospec-branch.o ipl_vmparm.o 55 51 56 52 extra-y += head64.o vmlinux.lds 57 53
+1 -1
arch/s390/kernel/asm-offsets.c
··· 159 159 OFFSET(__LC_CURRENT, lowcore, current_task); 160 160 OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack); 161 161 OFFSET(__LC_ASYNC_STACK, lowcore, async_stack); 162 - OFFSET(__LC_PANIC_STACK, lowcore, panic_stack); 162 + OFFSET(__LC_NODAT_STACK, lowcore, nodat_stack); 163 163 OFFSET(__LC_RESTART_STACK, lowcore, restart_stack); 164 164 OFFSET(__LC_RESTART_FN, lowcore, restart_fn); 165 165 OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
+1 -1
arch/s390/kernel/base.S
··· 18 18 19 19 ENTRY(s390_base_mcck_handler) 20 20 basr %r13,0 21 - 0: lg %r15,__LC_PANIC_STACK # load panic stack 21 + 0: lg %r15,__LC_NODAT_STACK # load panic stack 22 22 aghi %r15,-STACK_FRAME_OVERHEAD 23 23 larl %r1,s390_base_mcck_handler_fn 24 24 lg %r9,0(%r1)
+5 -5
arch/s390/kernel/dumpstack.c
··· 30 30 * The stack trace can start at any of the three stacks and can potentially 31 31 * touch all of them. The order is: panic stack, async stack, sync stack. 32 32 */ 33 - static unsigned long 33 + static unsigned long __no_sanitize_address 34 34 __dump_trace(dump_trace_func_t func, void *data, unsigned long sp, 35 35 unsigned long low, unsigned long high) 36 36 { ··· 77 77 frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); 78 78 #ifdef CONFIG_CHECK_STACK 79 79 sp = __dump_trace(func, data, sp, 80 - S390_lowcore.panic_stack + frame_size - PAGE_SIZE, 81 - S390_lowcore.panic_stack + frame_size); 80 + S390_lowcore.nodat_stack + frame_size - THREAD_SIZE, 81 + S390_lowcore.nodat_stack + frame_size); 82 82 #endif 83 83 sp = __dump_trace(func, data, sp, 84 - S390_lowcore.async_stack + frame_size - ASYNC_SIZE, 84 + S390_lowcore.async_stack + frame_size - THREAD_SIZE, 85 85 S390_lowcore.async_stack + frame_size); 86 86 task = task ?: current; 87 87 __dump_trace(func, data, sp, ··· 124 124 char *mode; 125 125 126 126 mode = user_mode(regs) ? "User" : "Krnl"; 127 - printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr); 127 + printk("%s PSW : %px %px", mode, (void *)regs->psw.mask, (void *)regs->psw.addr); 128 128 if (!user_mode(regs)) 129 129 pr_cont(" (%pSR)", (void *)regs->psw.addr); 130 130 pr_cont("\n");
+3 -44
arch/s390/kernel/early.c
··· 29 29 #include <asm/cpcmd.h> 30 30 #include <asm/sclp.h> 31 31 #include <asm/facility.h> 32 + #include <asm/boot_data.h> 32 33 #include "entry.h" 33 - 34 - static void __init setup_boot_command_line(void); 35 34 36 35 /* 37 36 * Initialize storage key for kernel pages ··· 283 284 } 284 285 early_param("cad", cad_setup); 285 286 286 - /* Set up boot command line */ 287 - static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t)) 288 - { 289 - char *parm, *delim; 290 - size_t rc, len; 291 - 292 - len = strlen(boot_command_line); 293 - 294 - delim = boot_command_line + len; /* '\0' character position */ 295 - parm = boot_command_line + len + 1; /* append right after '\0' */ 296 - 297 - rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1); 298 - if (rc) { 299 - if (*parm == '=') 300 - memmove(boot_command_line, parm + 1, rc); 301 - else 302 - *delim = ' '; /* replace '\0' with space */ 303 - } 304 - } 305 - 306 - static inline int has_ebcdic_char(const char *str) 307 - { 308 - int i; 309 - 310 - for (i = 0; str[i]; i++) 311 - if (str[i] & 0x80) 312 - return 1; 313 - return 0; 314 - } 315 - 287 + char __bootdata(early_command_line)[COMMAND_LINE_SIZE]; 316 288 static void __init setup_boot_command_line(void) 317 289 { 318 - COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0; 319 - /* convert arch command line to ascii if necessary */ 320 - if (has_ebcdic_char(COMMAND_LINE)) 321 - EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); 322 290 /* copy arch command line */ 323 - strlcpy(boot_command_line, strstrip(COMMAND_LINE), 324 - ARCH_COMMAND_LINE_SIZE); 325 - 326 - /* append IPL PARM data to the boot command line */ 327 - if (MACHINE_IS_VM) 328 - append_to_cmdline(append_ipl_vmparm); 329 - 330 - append_to_cmdline(append_ipl_scpdata); 291 + strlcpy(boot_command_line, early_command_line, ARCH_COMMAND_LINE_SIZE); 331 292 } 332 293 333 294 static void __init check_image_bootable(void)
+2 -22
arch/s390/kernel/early_nobss.c
··· 13 13 #include <linux/string.h> 14 14 #include <asm/sections.h> 15 15 #include <asm/lowcore.h> 16 - #include <asm/setup.h> 17 16 #include <asm/timex.h> 17 + #include <asm/kasan.h> 18 18 #include "entry.h" 19 19 20 20 static void __init reset_tod_clock(void) ··· 32 32 S390_lowcore.last_update_clock = TOD_UNIX_EPOCH; 33 33 } 34 34 35 - static void __init rescue_initrd(void) 36 - { 37 - unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20); 38 - 39 - /* 40 - * Just like in case of IPL from VM reader we make sure there is a 41 - * gap of 4MB between end of kernel and start of initrd. 42 - * That way we can also be sure that saving an NSS will succeed, 43 - * which however only requires different segments. 44 - */ 45 - if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD)) 46 - return; 47 - if (!INITRD_START || !INITRD_SIZE) 48 - return; 49 - if (INITRD_START >= min_initrd_addr) 50 - return; 51 - memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE); 52 - INITRD_START = min_initrd_addr; 53 - } 54 - 55 35 static void __init clear_bss_section(void) 56 36 { 57 37 memset(__bss_start, 0, __bss_stop - __bss_start); ··· 40 60 void __init startup_init_nobss(void) 41 61 { 42 62 reset_tod_clock(); 43 - rescue_initrd(); 44 63 clear_bss_section(); 64 + kasan_early_init(); 45 65 }
+36 -17
arch/s390/kernel/entry.S
··· 85 85 #endif 86 86 .endm 87 87 88 - .macro CHECK_STACK stacksize,savearea 88 + .macro CHECK_STACK savearea 89 89 #ifdef CONFIG_CHECK_STACK 90 - tml %r15,\stacksize - CONFIG_STACK_GUARD 90 + tml %r15,STACK_SIZE - CONFIG_STACK_GUARD 91 91 lghi %r14,\savearea 92 92 jz stack_overflow 93 + #endif 94 + .endm 95 + 96 + .macro CHECK_VMAP_STACK savearea,oklabel 97 + #ifdef CONFIG_VMAP_STACK 98 + lgr %r14,%r15 99 + nill %r14,0x10000 - STACK_SIZE 100 + oill %r14,STACK_INIT 101 + clg %r14,__LC_KERNEL_STACK 102 + je \oklabel 103 + clg %r14,__LC_ASYNC_STACK 104 + je \oklabel 105 + clg %r14,__LC_NODAT_STACK 106 + je \oklabel 107 + clg %r14,__LC_RESTART_STACK 108 + je \oklabel 109 + lghi %r14,\savearea 110 + j stack_overflow 111 + #else 112 + j \oklabel 93 113 #endif 94 114 .endm 95 115 ··· 124 104 brasl %r14,cleanup_critical 125 105 tmhh %r8,0x0001 # retest problem state after cleanup 126 106 jnz 1f 127 - 0: lg %r14,__LC_ASYNC_STACK # are we already on the async stack? 107 + 0: lg %r14,__LC_ASYNC_STACK # are we already on the target stack? 
128 108 slgr %r14,%r15 129 109 srag %r14,%r14,STACK_SHIFT 130 110 jnz 2f 131 - CHECK_STACK 1<<STACK_SHIFT,\savearea 111 + CHECK_STACK \savearea 132 112 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 133 113 j 3f 134 114 1: UPDATE_VTIME %r14,%r15,\timer ··· 620 600 jnz 1f # -> enabled, can't be a double fault 621 601 tm __LC_PGM_ILC+3,0x80 # check for per exception 622 602 jnz .Lpgm_svcper # -> single stepped svc 623 - 1: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC 603 + 1: CHECK_STACK __LC_SAVE_AREA_SYNC 624 604 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 625 - j 4f 605 + # CHECK_VMAP_STACK branches to stack_overflow or 4f 606 + CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f 626 607 2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER 627 608 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 628 609 lg %r15,__LC_KERNEL_STACK ··· 1157 1136 jnz 4f 1158 1137 TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID 1159 1138 jno .Lmcck_panic 1160 - 4: SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER 1139 + 4: ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off 1140 + SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER 1161 1141 .Lmcck_skip: 1162 1142 lghi %r14,__LC_GPREGS_SAVE_AREA+64 1163 1143 stmg %r0,%r7,__PT_R0(%r11) ··· 1185 1163 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 1186 1164 la %r11,STACK_FRAME_OVERHEAD(%r1) 1187 1165 lgr %r15,%r1 1188 - ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off 1189 1166 TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING 1190 1167 jno .Lmcck_return 1191 1168 TRACE_IRQS_OFF ··· 1203 1182 lpswe __LC_RETURN_MCCK_PSW 1204 1183 1205 1184 .Lmcck_panic: 1206 - lg %r15,__LC_PANIC_STACK 1185 + lg %r15,__LC_NODAT_STACK 1207 1186 la %r11,STACK_FRAME_OVERHEAD(%r15) 1208 1187 j .Lmcck_skip 1209 1188 ··· 1214 1193 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 1215 1194 stg %r15,__LC_SAVE_AREA_RESTART 1216 1195 lg %r15,__LC_RESTART_STACK 1217 - aghi %r15,-__PT_SIZE # create pt_regs on stack 1218 - xc 0(__PT_SIZE,%r15),0(%r15) 1219 - stmg %r0,%r14,__PT_R0(%r15) 
1220 - mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART 1221 - mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw 1222 - aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack 1196 + xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15) 1197 + stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 1198 + mvc STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART 1199 + mvc STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW 1223 1200 xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) 1224 1201 lg %r1,__LC_RESTART_FN # load fn, parm & source cpu 1225 1202 lg %r2,__LC_RESTART_DATA ··· 1235 1216 1236 1217 .section .kprobes.text, "ax" 1237 1218 1238 - #ifdef CONFIG_CHECK_STACK 1219 + #if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK) 1239 1220 /* 1240 1221 * The synchronous or the asynchronous stack overflowed. We are dead. 1241 1222 * No need to properly save the registers, we are going to panic anyway. 1242 1223 * Setup a pt_regs so that show_trace can provide a good call trace. 1243 1224 */ 1244 1225 stack_overflow: 1245 - lg %r15,__LC_PANIC_STACK # change to panic stack 1226 + lg %r15,__LC_NODAT_STACK # change to panic stack 1246 1227 la %r11,STACK_FRAME_OVERHEAD(%r15) 1247 1228 stmg %r0,%r7,__PT_R0(%r11) 1248 1229 stmg %r8,%r9,__PT_PSW(%r11)
+3
arch/s390/kernel/entry.h
··· 86 86 void gs_load_bc_cb(struct pt_regs *regs); 87 87 void set_fs_fixup(void); 88 88 89 + unsigned long stack_alloc(void); 90 + void stack_free(unsigned long stack); 91 + 89 92 #endif /* _ENTRY_H */
+2 -4
arch/s390/kernel/head64.S
··· 14 14 #include <asm/asm-offsets.h> 15 15 #include <asm/thread_info.h> 16 16 #include <asm/page.h> 17 + #include <asm/ptrace.h> 17 18 18 19 __HEAD 19 20 ENTRY(startup_continue) ··· 36 35 # 37 36 larl %r14,init_task 38 37 stg %r14,__LC_CURRENT 39 - larl %r15,init_thread_union 40 - aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) # init_task_union + THREAD_SIZE 41 - stg %r15,__LC_KERNEL_STACK # set end of kernel stack 42 - aghi %r15,-160 38 + larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD 43 39 # 44 40 # Early setup functions that may not rely on an initialized bss section, 45 41 # like moving the initrd. Returns with an initialized bss section.
+13 -106
arch/s390/kernel/ipl.c
··· 29 29 #include <asm/checksum.h> 30 30 #include <asm/debug.h> 31 31 #include <asm/os_info.h> 32 + #include <asm/sections.h> 33 + #include <asm/boot_data.h> 32 34 #include "entry.h" 33 35 34 36 #define IPL_PARM_BLOCK_VERSION 0 ··· 119 117 } 120 118 } 121 119 120 + struct ipl_parameter_block __bootdata(early_ipl_block); 121 + int __bootdata(early_ipl_block_valid); 122 + 122 123 static int ipl_block_valid; 123 124 static struct ipl_parameter_block ipl_block; 124 125 ··· 156 151 157 152 int diag308(unsigned long subcode, void *addr) 158 153 { 154 + if (IS_ENABLED(CONFIG_KASAN)) 155 + __arch_local_irq_stosm(0x04); /* enable DAT */ 159 156 diag_stat_inc(DIAG_STAT_X308); 160 157 return __diag308(subcode, addr); 161 158 } ··· 269 262 270 263 static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); 271 264 272 - /* VM IPL PARM routines */ 273 - static size_t reipl_get_ascii_vmparm(char *dest, size_t size, 274 - const struct ipl_parameter_block *ipb) 275 - { 276 - int i; 277 - size_t len; 278 - char has_lowercase = 0; 279 - 280 - len = 0; 281 - if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) && 282 - (ipb->ipl_info.ccw.vm_parm_len > 0)) { 283 - 284 - len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len); 285 - memcpy(dest, ipb->ipl_info.ccw.vm_parm, len); 286 - /* If at least one character is lowercase, we assume mixed 287 - * case; otherwise we convert everything to lowercase. 
288 - */ 289 - for (i = 0; i < len; i++) 290 - if ((dest[i] > 0x80 && dest[i] < 0x8a) || /* a-i */ 291 - (dest[i] > 0x90 && dest[i] < 0x9a) || /* j-r */ 292 - (dest[i] > 0xa1 && dest[i] < 0xaa)) { /* s-z */ 293 - has_lowercase = 1; 294 - break; 295 - } 296 - if (!has_lowercase) 297 - EBC_TOLOWER(dest, len); 298 - EBCASC(dest, len); 299 - } 300 - dest[len] = 0; 301 - 302 - return len; 303 - } 304 - 305 - size_t append_ipl_vmparm(char *dest, size_t size) 306 - { 307 - size_t rc; 308 - 309 - rc = 0; 310 - if (ipl_block_valid && ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW) 311 - rc = reipl_get_ascii_vmparm(dest, size, &ipl_block); 312 - else 313 - dest[0] = 0; 314 - return rc; 315 - } 316 - 317 265 static ssize_t ipl_vm_parm_show(struct kobject *kobj, 318 266 struct kobj_attribute *attr, char *page) 319 267 { 320 268 char parm[DIAG308_VMPARM_SIZE + 1] = {}; 321 269 322 - append_ipl_vmparm(parm, sizeof(parm)); 270 + if (ipl_block_valid && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW)) 271 + ipl_block_get_ascii_vmparm(parm, sizeof(parm), &ipl_block); 323 272 return sprintf(page, "%s\n", parm); 324 273 } 325 - 326 - static size_t scpdata_length(const char* buf, size_t count) 327 - { 328 - while (count) { 329 - if (buf[count - 1] != '\0' && buf[count - 1] != ' ') 330 - break; 331 - count--; 332 - } 333 - return count; 334 - } 335 - 336 - static size_t reipl_append_ascii_scpdata(char *dest, size_t size, 337 - const struct ipl_parameter_block *ipb) 338 - { 339 - size_t count; 340 - size_t i; 341 - int has_lowercase; 342 - 343 - count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data, 344 - ipb->ipl_info.fcp.scp_data_len)); 345 - if (!count) 346 - goto out; 347 - 348 - has_lowercase = 0; 349 - for (i = 0; i < count; i++) { 350 - if (!isascii(ipb->ipl_info.fcp.scp_data[i])) { 351 - count = 0; 352 - goto out; 353 - } 354 - if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i])) 355 - has_lowercase = 1; 356 - } 357 - 358 - if (has_lowercase) 359 - memcpy(dest, 
ipb->ipl_info.fcp.scp_data, count); 360 - else 361 - for (i = 0; i < count; i++) 362 - dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]); 363 - out: 364 - dest[count] = '\0'; 365 - return count; 366 - } 367 - 368 - size_t append_ipl_scpdata(char *dest, size_t len) 369 - { 370 - size_t rc; 371 - 372 - rc = 0; 373 - if (ipl_block_valid && ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP) 374 - rc = reipl_append_ascii_scpdata(dest, len, &ipl_block); 375 - else 376 - dest[0] = 0; 377 - return rc; 378 - } 379 - 380 274 381 275 static struct kobj_attribute sys_ipl_vm_parm_attr = 382 276 __ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL); ··· 472 564 { 473 565 char vmparm[DIAG308_VMPARM_SIZE + 1] = {}; 474 566 475 - reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb); 567 + ipl_block_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb); 476 568 return sprintf(page, "%s\n", vmparm); 477 569 } 478 570 ··· 1677 1769 1678 1770 void __init ipl_store_parameters(void) 1679 1771 { 1680 - int rc; 1681 - 1682 - rc = diag308(DIAG308_STORE, &ipl_block); 1683 - if (rc == DIAG308_RC_OK && ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION) 1772 + if (early_ipl_block_valid) { 1773 + memcpy(&ipl_block, &early_ipl_block, sizeof(ipl_block)); 1684 1774 ipl_block_valid = 1; 1775 + } 1685 1776 } 1686 1777 1687 1778 void s390_reset_system(void)
+36
arch/s390/kernel/ipl_vmparm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <asm/ebcdic.h> 3 + #include <asm/ipl.h> 4 + 5 + /* VM IPL PARM routines */ 6 + size_t ipl_block_get_ascii_vmparm(char *dest, size_t size, 7 + const struct ipl_parameter_block *ipb) 8 + { 9 + int i; 10 + size_t len; 11 + char has_lowercase = 0; 12 + 13 + len = 0; 14 + if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) && 15 + (ipb->ipl_info.ccw.vm_parm_len > 0)) { 16 + 17 + len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len); 18 + memcpy(dest, ipb->ipl_info.ccw.vm_parm, len); 19 + /* If at least one character is lowercase, we assume mixed 20 + * case; otherwise we convert everything to lowercase. 21 + */ 22 + for (i = 0; i < len; i++) 23 + if ((dest[i] > 0x80 && dest[i] < 0x8a) || /* a-i */ 24 + (dest[i] > 0x90 && dest[i] < 0x9a) || /* j-r */ 25 + (dest[i] > 0xa1 && dest[i] < 0xaa)) { /* s-z */ 26 + has_lowercase = 1; 27 + break; 28 + } 29 + if (!has_lowercase) 30 + EBC_TOLOWER(dest, len); 31 + EBCASC(dest, len); 32 + } 33 + dest[len] = 0; 34 + 35 + return len; 36 + }
+1 -9
arch/s390/kernel/irq.c
··· 172 172 /* Check against async. stack address range. */ 173 173 new = S390_lowcore.async_stack; 174 174 if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) { 175 - /* Need to switch to the async. stack. */ 176 - new -= STACK_FRAME_OVERHEAD; 177 - ((struct stack_frame *) new)->back_chain = old; 178 - asm volatile(" la 15,0(%0)\n" 179 - " brasl 14,__do_softirq\n" 180 - " la 15,0(%1)\n" 181 - : : "a" (new), "a" (old) 182 - : "0", "1", "2", "3", "4", "5", "14", 183 - "cc", "memory" ); 175 + CALL_ON_STACK(__do_softirq, new, 0); 184 176 } else { 185 177 /* We are already on the async stack. */ 186 178 __do_softirq();
+14 -5
arch/s390/kernel/machine_kexec.c
··· 142 142 } 143 143 #endif 144 144 145 - /* 146 - * Check if kdump checksums are valid: We call purgatory with parameter "0" 147 - */ 148 - static bool kdump_csum_valid(struct kimage *image) 145 + static unsigned long do_start_kdump(unsigned long addr) 149 146 { 150 - #ifdef CONFIG_CRASH_DUMP 147 + struct kimage *image = (struct kimage *) addr; 151 148 int (*start_kdump)(int) = (void *)image->start; 152 149 int rc; 153 150 154 151 __arch_local_irq_stnsm(0xfb); /* disable DAT */ 155 152 rc = start_kdump(0); 156 153 __arch_local_irq_stosm(0x04); /* enable DAT */ 154 + return rc; 155 + } 156 + 157 + /* 158 + * Check if kdump checksums are valid: We call purgatory with parameter "0" 159 + */ 160 + static bool kdump_csum_valid(struct kimage *image) 161 + { 162 + #ifdef CONFIG_CRASH_DUMP 163 + int rc; 164 + 165 + rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image); 157 166 return rc == 0; 158 167 #else 159 168 return false;
+11 -4
arch/s390/kernel/module.c
··· 16 16 #include <linux/fs.h> 17 17 #include <linux/string.h> 18 18 #include <linux/kernel.h> 19 + #include <linux/kasan.h> 19 20 #include <linux/moduleloader.h> 20 21 #include <linux/bug.h> 21 22 #include <asm/alternative.h> ··· 33 32 34 33 void *module_alloc(unsigned long size) 35 34 { 35 + void *p; 36 + 36 37 if (PAGE_ALIGN(size) > MODULES_LEN) 37 38 return NULL; 38 - return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, 39 - GFP_KERNEL, PAGE_KERNEL_EXEC, 40 - 0, NUMA_NO_NODE, 41 - __builtin_return_address(0)); 39 + p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END, 40 + GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, 41 + __builtin_return_address(0)); 42 + if (p && (kasan_module_alloc(p, size) < 0)) { 43 + vfree(p); 44 + return NULL; 45 + } 46 + return p; 42 47 } 43 48 44 49 void module_arch_freeing_init(struct module *mod)
+5 -1
arch/s390/kernel/perf_cpum_sf.c
··· 2045 2045 } 2046 2046 2047 2047 sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); 2048 - if (!sfdbg) 2048 + if (!sfdbg) { 2049 2049 pr_err("Registering for s390dbf failed\n"); 2050 + return -ENOMEM; 2051 + } 2050 2052 debug_register_view(sfdbg, &debug_sprintf_view); 2051 2053 2052 2054 err = register_external_irq(EXT_IRQ_MEASURE_ALERT, 2053 2055 cpumf_measurement_alert); 2054 2056 if (err) { 2055 2057 pr_cpumsf_err(RS_INIT_FAILURE_ALRT); 2058 + debug_unregister(sfdbg); 2056 2059 goto out; 2057 2060 } 2058 2061 ··· 2064 2061 pr_cpumsf_err(RS_INIT_FAILURE_PERF); 2065 2062 unregister_external_irq(EXT_IRQ_MEASURE_ALERT, 2066 2063 cpumf_measurement_alert); 2064 + debug_unregister(sfdbg); 2067 2065 goto out; 2068 2066 } 2069 2067
+168 -42
arch/s390/kernel/setup.c
··· 49 49 #include <linux/crash_dump.h> 50 50 #include <linux/memory.h> 51 51 #include <linux/compat.h> 52 + #include <linux/start_kernel.h> 52 53 53 54 #include <asm/ipl.h> 54 55 #include <asm/facility.h> ··· 70 69 #include <asm/numa.h> 71 70 #include <asm/alternative.h> 72 71 #include <asm/nospec-branch.h> 72 + #include <asm/mem_detect.h> 73 73 #include "entry.h" 74 74 75 75 /* ··· 90 88 91 89 unsigned long int_hwcap = 0; 92 90 93 - int __initdata memory_end_set; 94 - unsigned long __initdata memory_end; 95 - unsigned long __initdata max_physmem_end; 91 + int __bootdata(noexec_disabled); 92 + int __bootdata(memory_end_set); 93 + unsigned long __bootdata(memory_end); 94 + unsigned long __bootdata(max_physmem_end); 95 + struct mem_detect_info __bootdata(mem_detect); 96 96 97 97 unsigned long VMALLOC_START; 98 98 EXPORT_SYMBOL(VMALLOC_START); ··· 287 283 void (*pm_power_off)(void) = machine_power_off; 288 284 EXPORT_SYMBOL_GPL(pm_power_off); 289 285 290 - static int __init early_parse_mem(char *p) 291 - { 292 - memory_end = memparse(p, &p); 293 - memory_end &= PAGE_MASK; 294 - memory_end_set = 1; 295 - return 0; 296 - } 297 - early_param("mem", early_parse_mem); 298 - 299 286 static int __init parse_vmalloc(char *arg) 300 287 { 301 288 if (!arg) ··· 297 302 early_param("vmalloc", parse_vmalloc); 298 303 299 304 void *restart_stack __section(.data); 305 + 306 + unsigned long stack_alloc(void) 307 + { 308 + #ifdef CONFIG_VMAP_STACK 309 + return (unsigned long) 310 + __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE, 311 + VMALLOC_START, VMALLOC_END, 312 + THREADINFO_GFP, 313 + PAGE_KERNEL, 0, NUMA_NO_NODE, 314 + __builtin_return_address(0)); 315 + #else 316 + return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); 317 + #endif 318 + } 319 + 320 + void stack_free(unsigned long stack) 321 + { 322 + #ifdef CONFIG_VMAP_STACK 323 + vfree((void *) stack); 324 + #else 325 + free_pages(stack, THREAD_SIZE_ORDER); 326 + #endif 327 + } 328 + 329 + int __init 
arch_early_irq_init(void) 330 + { 331 + unsigned long stack; 332 + 333 + stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); 334 + if (!stack) 335 + panic("Couldn't allocate async stack"); 336 + S390_lowcore.async_stack = stack + STACK_INIT_OFFSET; 337 + return 0; 338 + } 339 + 340 + static int __init async_stack_realloc(void) 341 + { 342 + unsigned long old, new; 343 + 344 + old = S390_lowcore.async_stack - STACK_INIT_OFFSET; 345 + new = stack_alloc(); 346 + if (!new) 347 + panic("Couldn't allocate async stack"); 348 + S390_lowcore.async_stack = new + STACK_INIT_OFFSET; 349 + free_pages(old, THREAD_SIZE_ORDER); 350 + return 0; 351 + } 352 + early_initcall(async_stack_realloc); 353 + 354 + void __init arch_call_rest_init(void) 355 + { 356 + struct stack_frame *frame; 357 + unsigned long stack; 358 + 359 + stack = stack_alloc(); 360 + if (!stack) 361 + panic("Couldn't allocate kernel stack"); 362 + current->stack = (void *) stack; 363 + #ifdef CONFIG_VMAP_STACK 364 + current->stack_vm_area = (void *) stack; 365 + #endif 366 + set_task_stack_end_magic(current); 367 + stack += STACK_INIT_OFFSET; 368 + S390_lowcore.kernel_stack = stack; 369 + frame = (struct stack_frame *) stack; 370 + memset(frame, 0, sizeof(*frame)); 371 + /* Branch to rest_init on the new stack, never returns */ 372 + asm volatile( 373 + " la 15,0(%[_frame])\n" 374 + " jg rest_init\n" 375 + : : [_frame] "a" (frame)); 376 + } 300 377 301 378 static void __init setup_lowcore(void) 302 379 { ··· 396 329 PSW_MASK_DAT | PSW_MASK_MCHECK; 397 330 lc->io_new_psw.addr = (unsigned long) io_int_handler; 398 331 lc->clock_comparator = clock_comparator_max; 399 - lc->kernel_stack = ((unsigned long) &init_thread_union) 332 + lc->nodat_stack = ((unsigned long) &init_thread_union) 400 333 + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); 401 - lc->async_stack = (unsigned long) 402 - memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE) 403 - + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); 404 
- lc->panic_stack = (unsigned long) 405 - memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE) 406 - + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); 407 334 lc->current_task = (unsigned long)&init_task; 408 335 lc->lpp = LPP_MAGIC; 409 336 lc->machine_flags = S390_lowcore.machine_flags; ··· 418 357 lc->last_update_timer = S390_lowcore.last_update_timer; 419 358 lc->last_update_clock = S390_lowcore.last_update_clock; 420 359 421 - restart_stack = memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE); 422 - restart_stack += ASYNC_SIZE; 360 + /* 361 + * Allocate the global restart stack which is the same for 362 + * all CPUs in cast *one* of them does a PSW restart. 363 + */ 364 + restart_stack = memblock_virt_alloc(THREAD_SIZE, THREAD_SIZE); 365 + restart_stack += STACK_INIT_OFFSET; 423 366 424 367 /* 425 368 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant ··· 532 467 { 533 468 unsigned long vmax, vmalloc_size, tmp; 534 469 535 - /* Choose kernel address space layout: 2, 3, or 4 levels. */ 470 + /* Choose kernel address space layout: 3 or 4 levels. */ 536 471 vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; 537 - tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; 538 - tmp = tmp * (sizeof(struct page) + PAGE_SIZE); 539 - if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE) 540 - vmax = _REGION2_SIZE; /* 3-level kernel page table */ 541 - else 542 - vmax = _REGION1_SIZE; /* 4-level kernel page table */ 472 + if (IS_ENABLED(CONFIG_KASAN)) { 473 + vmax = IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) 474 + ? _REGION1_SIZE 475 + : _REGION2_SIZE; 476 + } else { 477 + tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; 478 + tmp = tmp * (sizeof(struct page) + PAGE_SIZE); 479 + if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE) 480 + vmax = _REGION2_SIZE; /* 3-level kernel page table */ 481 + else 482 + vmax = _REGION1_SIZE; /* 4-level kernel page table */ 483 + } 484 + 543 485 /* module area is at the end of the kernel address space. 
*/ 544 486 MODULES_END = vmax; 545 487 MODULES_VADDR = MODULES_END - MODULES_LEN; 546 488 VMALLOC_END = MODULES_VADDR; 547 - VMALLOC_START = vmax - vmalloc_size; 489 + VMALLOC_START = VMALLOC_END - vmalloc_size; 548 490 549 491 /* Split remaining virtual space between 1:1 mapping & vmemmap array */ 550 492 tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); ··· 563 491 vmemmap = (struct page *) tmp; 564 492 565 493 /* Take care that memory_end is set and <= vmemmap */ 566 - memory_end = min(memory_end ?: max_physmem_end, tmp); 494 + memory_end = min(memory_end ?: max_physmem_end, (unsigned long)vmemmap); 495 + #ifdef CONFIG_KASAN 496 + /* fit in kasan shadow memory region between 1:1 and vmemmap */ 497 + memory_end = min(memory_end, KASAN_SHADOW_START); 498 + vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END); 499 + #endif 567 500 max_pfn = max_low_pfn = PFN_DOWN(memory_end); 568 501 memblock_remove(memory_end, ULONG_MAX); 569 502 ··· 609 532 */ 610 533 static void reserve_memory_end(void) 611 534 { 612 - #ifdef CONFIG_CRASH_DUMP 613 - if (ipl_info.type == IPL_TYPE_FCP_DUMP && 614 - !OLDMEM_BASE && sclp.hsa_size) { 615 - memory_end = sclp.hsa_size; 616 - memory_end &= PAGE_MASK; 617 - memory_end_set = 1; 618 - } 619 - #endif 620 - if (!memory_end_set) 621 - return; 622 - memblock_reserve(memory_end, ULONG_MAX); 535 + if (memory_end_set) 536 + memblock_reserve(memory_end, ULONG_MAX); 623 537 } 624 538 625 539 /* ··· 715 647 initrd_end = initrd_start + INITRD_SIZE; 716 648 memblock_reserve(INITRD_START, INITRD_SIZE); 717 649 #endif 650 + } 651 + 652 + static void __init reserve_mem_detect_info(void) 653 + { 654 + unsigned long start, size; 655 + 656 + get_mem_detect_reserved(&start, &size); 657 + if (size) 658 + memblock_reserve(start, size); 659 + } 660 + 661 + static void __init free_mem_detect_info(void) 662 + { 663 + unsigned long start, size; 664 + 665 + get_mem_detect_reserved(&start, &size); 666 + if (size) 667 + memblock_free(start, size); 668 + } 
669 + 670 + static void __init memblock_physmem_add(phys_addr_t start, phys_addr_t size) 671 + { 672 + memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n", 673 + start, start + size - 1); 674 + memblock_add_range(&memblock.memory, start, size, 0, 0); 675 + memblock_add_range(&memblock.physmem, start, size, 0, 0); 676 + } 677 + 678 + static const char * __init get_mem_info_source(void) 679 + { 680 + switch (mem_detect.info_source) { 681 + case MEM_DETECT_SCLP_STOR_INFO: 682 + return "sclp storage info"; 683 + case MEM_DETECT_DIAG260: 684 + return "diag260"; 685 + case MEM_DETECT_SCLP_READ_INFO: 686 + return "sclp read info"; 687 + case MEM_DETECT_BIN_SEARCH: 688 + return "binary search"; 689 + } 690 + return "none"; 691 + } 692 + 693 + static void __init memblock_add_mem_detect_info(void) 694 + { 695 + unsigned long start, end; 696 + int i; 697 + 698 + memblock_dbg("physmem info source: %s (%hhd)\n", 699 + get_mem_info_source(), mem_detect.info_source); 700 + /* keep memblock lists close to the kernel */ 701 + memblock_set_bottom_up(true); 702 + for_each_mem_detect_block(i, &start, &end) 703 + memblock_physmem_add(start, end - start); 704 + memblock_set_bottom_up(false); 705 + memblock_dump_all(); 718 706 } 719 707 720 708 /* ··· 1037 913 reserve_oldmem(); 1038 914 reserve_kernel(); 1039 915 reserve_initrd(); 916 + reserve_mem_detect_info(); 1040 917 memblock_allow_resize(); 1041 918 1042 919 /* Get information about *all* installed memory */ 1043 - detect_memory_memblock(); 920 + memblock_add_mem_detect_info(); 1044 921 922 + free_mem_detect_info(); 1045 923 remove_oldmem(); 1046 924 1047 925 /*
+51 -36
arch/s390/kernel/smp.c
··· 186 186 pcpu_sigp_retry(pcpu, order, 0); 187 187 } 188 188 189 - #define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE) 190 - #define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE) 191 - 192 189 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) 193 190 { 194 - unsigned long async_stack, panic_stack; 191 + unsigned long async_stack, nodat_stack; 195 192 struct lowcore *lc; 196 193 197 194 if (pcpu != &pcpu_devices[0]) { 198 195 pcpu->lowcore = (struct lowcore *) 199 196 __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); 200 - async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); 201 - panic_stack = __get_free_page(GFP_KERNEL); 202 - if (!pcpu->lowcore || !panic_stack || !async_stack) 197 + nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); 198 + if (!pcpu->lowcore || !nodat_stack) 203 199 goto out; 204 200 } else { 205 - async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET; 206 - panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET; 201 + nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET; 207 202 } 203 + async_stack = stack_alloc(); 204 + if (!async_stack) 205 + goto out; 208 206 lc = pcpu->lowcore; 209 207 memcpy(lc, &S390_lowcore, 512); 210 208 memset((char *) lc + 512, 0, sizeof(*lc) - 512); 211 - lc->async_stack = async_stack + ASYNC_FRAME_OFFSET; 212 - lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET; 209 + lc->async_stack = async_stack + STACK_INIT_OFFSET; 210 + lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET; 213 211 lc->cpu_nr = cpu; 214 212 lc->spinlock_lockval = arch_spin_lockval(cpu); 215 213 lc->spinlock_index = 0; 216 214 lc->br_r1_trampoline = 0x07f1; /* br %r1 */ 217 215 if (nmi_alloc_per_cpu(lc)) 218 - goto out; 216 + goto out_async; 219 217 if (vdso_alloc_per_cpu(lc)) 220 218 goto out_mcesa; 221 219 lowcore_ptr[cpu] = lc; ··· 222 224 223 225 out_mcesa: 224 226 nmi_free_per_cpu(lc); 227 + out_async: 228 + stack_free(async_stack); 225 229 out: 226 230 if 
(pcpu != &pcpu_devices[0]) { 227 - free_page(panic_stack); 228 - free_pages(async_stack, ASYNC_ORDER); 231 + free_pages(nodat_stack, THREAD_SIZE_ORDER); 229 232 free_pages((unsigned long) pcpu->lowcore, LC_ORDER); 230 233 } 231 234 return -ENOMEM; ··· 236 237 237 238 static void pcpu_free_lowcore(struct pcpu *pcpu) 238 239 { 240 + unsigned long async_stack, nodat_stack, lowcore; 241 + 242 + nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET; 243 + async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET; 244 + lowcore = (unsigned long) pcpu->lowcore; 245 + 239 246 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0); 240 247 lowcore_ptr[pcpu - pcpu_devices] = NULL; 241 248 vdso_free_per_cpu(pcpu->lowcore); 242 249 nmi_free_per_cpu(pcpu->lowcore); 250 + stack_free(async_stack); 243 251 if (pcpu == &pcpu_devices[0]) 244 252 return; 245 - free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET); 246 - free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER); 247 - free_pages((unsigned long) pcpu->lowcore, LC_ORDER); 253 + free_pages(nodat_stack, THREAD_SIZE_ORDER); 254 + free_pages(lowcore, LC_ORDER); 248 255 } 249 256 250 257 #endif /* CONFIG_HOTPLUG_CPU */ ··· 298 293 { 299 294 struct lowcore *lc = pcpu->lowcore; 300 295 301 - lc->restart_stack = lc->kernel_stack; 296 + lc->restart_stack = lc->nodat_stack; 302 297 lc->restart_fn = (unsigned long) func; 303 298 lc->restart_data = (unsigned long) data; 304 299 lc->restart_source = -1UL; ··· 308 303 /* 309 304 * Call function via PSW restart on pcpu and stop the current cpu. 
310 305 */ 311 - static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *), 312 - void *data, unsigned long stack) 306 + static void __pcpu_delegate(void (*func)(void*), void *data) 307 + { 308 + func(data); /* should not return */ 309 + } 310 + 311 + static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu, 312 + void (*func)(void *), 313 + void *data, unsigned long stack) 313 314 { 314 315 struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices]; 315 316 unsigned long source_cpu = stap(); 316 317 317 - __load_psw_mask(PSW_KERNEL_BITS); 318 + __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT); 318 319 if (pcpu->address == source_cpu) 319 - func(data); /* should not return */ 320 + CALL_ON_STACK(__pcpu_delegate, stack, 2, func, data); 320 321 /* Stop target cpu (if func returns this stops the current cpu). */ 321 322 pcpu_sigp_retry(pcpu, SIGP_STOP, 0); 322 323 /* Restart func on the target cpu and stop the current cpu. */ ··· 383 372 void smp_call_ipl_cpu(void (*func)(void *), void *data) 384 373 { 385 374 pcpu_delegate(&pcpu_devices[0], func, data, 386 - pcpu_devices->lowcore->panic_stack - 387 - PANIC_FRAME_OFFSET + PAGE_SIZE); 375 + pcpu_devices->lowcore->nodat_stack); 388 376 } 389 377 390 378 int smp_find_processor_id(u16 address) ··· 801 791 memblock_free_early((unsigned long)info, sizeof(*info)); 802 792 } 803 793 804 - /* 805 - * Activate a secondary processor. 
806 - */ 807 - static void smp_start_secondary(void *cpuvoid) 794 + static void smp_init_secondary(void) 808 795 { 809 796 int cpu = smp_processor_id(); 810 797 811 798 S390_lowcore.last_update_clock = get_tod_clock(); 812 - S390_lowcore.restart_stack = (unsigned long) restart_stack; 813 - S390_lowcore.restart_fn = (unsigned long) do_restart; 814 - S390_lowcore.restart_data = 0; 815 - S390_lowcore.restart_source = -1UL; 816 799 restore_access_regs(S390_lowcore.access_regs_save_area); 817 - __ctl_load(S390_lowcore.cregs_save_area, 0, 15); 818 - __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT); 819 800 cpu_init(); 820 801 preempt_disable(); 821 802 init_cpu_timer(); 822 803 vtime_init(); 823 804 pfault_init(); 824 - notify_cpu_starting(cpu); 805 + notify_cpu_starting(smp_processor_id()); 825 806 if (topology_cpu_dedicated(cpu)) 826 807 set_cpu_flag(CIF_DEDICATED_CPU); 827 808 else 828 809 clear_cpu_flag(CIF_DEDICATED_CPU); 829 - set_cpu_online(cpu, true); 810 + set_cpu_online(smp_processor_id(), true); 830 811 inc_irq_stat(CPU_RST); 831 812 local_irq_enable(); 832 813 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); 814 + } 815 + 816 + /* 817 + * Activate a secondary processor. 818 + */ 819 + static void __no_sanitize_address smp_start_secondary(void *cpuvoid) 820 + { 821 + S390_lowcore.restart_stack = (unsigned long) restart_stack; 822 + S390_lowcore.restart_fn = (unsigned long) do_restart; 823 + S390_lowcore.restart_data = 0; 824 + S390_lowcore.restart_source = -1UL; 825 + __ctl_load(S390_lowcore.cregs_save_area, 0, 15); 826 + __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT); 827 + CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0); 833 828 } 834 829 835 830 /* Upping and downing of CPUs */
+5 -3
arch/s390/kernel/sthyi.c
··· 183 183 static void fill_stsi_mac(struct sthyi_sctns *sctns, 184 184 struct sysinfo_1_1_1 *sysinfo) 185 185 { 186 + sclp_ocf_cpc_name_copy(sctns->mac.infmname); 187 + if (*(u64 *)sctns->mac.infmname != 0) 188 + sctns->mac.infmval1 |= MAC_NAME_VLD; 189 + 186 190 if (stsi(sysinfo, 1, 1, 1)) 187 191 return; 188 - 189 - sclp_ocf_cpc_name_copy(sctns->mac.infmname); 190 192 191 193 memcpy(sctns->mac.infmtype, sysinfo->type, sizeof(sctns->mac.infmtype)); 192 194 memcpy(sctns->mac.infmmanu, sysinfo->manufacturer, sizeof(sctns->mac.infmmanu)); 193 195 memcpy(sctns->mac.infmpman, sysinfo->plant, sizeof(sctns->mac.infmpman)); 194 196 memcpy(sctns->mac.infmseq, sysinfo->sequence, sizeof(sctns->mac.infmseq)); 195 197 196 - sctns->mac.infmval1 |= MAC_ID_VLD | MAC_NAME_VLD; 198 + sctns->mac.infmval1 |= MAC_ID_VLD; 197 199 } 198 200 199 201 static void fill_stsi_par(struct sthyi_sctns *sctns,
+5 -6
arch/s390/kernel/swsusp.S
··· 29 29 30 30 .section .text 31 31 ENTRY(swsusp_arch_suspend) 32 - stmg %r6,%r15,__SF_GPRS(%r15) 32 + lg %r1,__LC_NODAT_STACK 33 + aghi %r1,-STACK_FRAME_OVERHEAD 34 + stmg %r6,%r15,__SF_GPRS(%r1) 35 + stg %r15,__SF_BACKCHAIN(%r1) 33 36 lgr %r1,%r15 34 - aghi %r15,-STACK_FRAME_OVERHEAD 35 - stg %r1,__SF_BACKCHAIN(%r15) 36 37 37 38 /* Store FPU registers */ 38 39 brasl %r14,save_fpu_regs ··· 198 197 brc 2,3b /* busy, try again */ 199 198 200 199 /* Suspend CPU not available -> panic */ 201 - larl %r15,init_thread_union 202 - aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) 203 - aghi %r15,-STACK_FRAME_OVERHEAD 200 + larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD 204 201 larl %r2,.Lpanic_string 205 202 brasl %r14,sclp_early_printk_force 206 203 larl %r3,.Ldisabled_wait_31
+5 -3
arch/s390/kernel/vdso.c
··· 56 56 vdso_pagelist = vdso64_pagelist; 57 57 vdso_pages = vdso64_pages; 58 58 #ifdef CONFIG_COMPAT 59 - if (is_compat_task()) { 59 + if (vma->vm_mm->context.compat_mm) { 60 60 vdso_pagelist = vdso32_pagelist; 61 61 vdso_pages = vdso32_pages; 62 62 } ··· 77 77 78 78 vdso_pages = vdso64_pages; 79 79 #ifdef CONFIG_COMPAT 80 - if (is_compat_task()) 80 + if (vma->vm_mm->context.compat_mm) 81 81 vdso_pages = vdso32_pages; 82 82 #endif 83 83 ··· 224 224 225 225 vdso_pages = vdso64_pages; 226 226 #ifdef CONFIG_COMPAT 227 - if (is_compat_task()) 227 + if (is_compat_task()) { 228 228 vdso_pages = vdso32_pages; 229 + mm->context.compat_mm = 1; 230 + } 229 231 #endif 230 232 /* 231 233 * vDSO has a problem and was disabled, just don't "enable" it for
+2 -1
arch/s390/kernel/vdso32/Makefile
··· 28 28 extra-y += vdso32.lds 29 29 CPPFLAGS_vdso32.lds += -P -C -U$(ARCH) 30 30 31 - # Disable gcov profiling and ubsan for VDSO code 31 + # Disable gcov profiling, ubsan and kasan for VDSO code 32 32 GCOV_PROFILE := n 33 33 UBSAN_SANITIZE := n 34 + KASAN_SANITIZE := n 34 35 35 36 # Force dependency (incbin is bad) 36 37 $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
+10 -9
arch/s390/kernel/vdso32/clock_gettime.S
··· 10 10 #include <asm/asm-offsets.h> 11 11 #include <asm/unistd.h> 12 12 #include <asm/dwarf.h> 13 + #include <asm/ptrace.h> 13 14 14 15 .text 15 16 .align 4 ··· 19 18 __kernel_clock_gettime: 20 19 CFI_STARTPROC 21 20 ahi %r15,-16 22 - CFI_DEF_CFA_OFFSET 176 23 - CFI_VAL_OFFSET 15, -160 21 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 22 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 24 23 basr %r5,0 25 24 0: al %r5,21f-0b(%r5) /* get &_vdso_data */ 26 25 chi %r2,__CLOCK_REALTIME_COARSE ··· 73 72 st %r1,4(%r3) /* store tp->tv_nsec */ 74 73 lhi %r2,0 75 74 ahi %r15,16 76 - CFI_DEF_CFA_OFFSET 160 75 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD 77 76 CFI_RESTORE 15 78 77 br %r14 79 78 80 79 /* CLOCK_MONOTONIC_COARSE */ 81 - CFI_DEF_CFA_OFFSET 176 82 - CFI_VAL_OFFSET 15, -160 80 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 81 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 83 82 9: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 84 83 tml %r4,0x0001 /* pending update ? loop */ 85 84 jnz 9b ··· 159 158 st %r1,4(%r3) /* store tp->tv_nsec */ 160 159 lhi %r2,0 161 160 ahi %r15,16 162 - CFI_DEF_CFA_OFFSET 160 161 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD 163 162 CFI_RESTORE 15 164 163 br %r14 165 164 166 165 /* Fallback to system call */ 167 - CFI_DEF_CFA_OFFSET 176 168 - CFI_VAL_OFFSET 15, -160 166 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 167 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 169 168 19: lhi %r1,__NR_clock_gettime 170 169 svc 0 171 170 ahi %r15,16 172 - CFI_DEF_CFA_OFFSET 160 171 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD 173 172 CFI_RESTORE 15 174 173 br %r14 175 174 CFI_ENDPROC
+2 -1
arch/s390/kernel/vdso32/gettimeofday.S
··· 10 10 #include <asm/asm-offsets.h> 11 11 #include <asm/unistd.h> 12 12 #include <asm/dwarf.h> 13 + #include <asm/ptrace.h> 13 14 14 15 .text 15 16 .align 4 ··· 20 19 CFI_STARTPROC 21 20 ahi %r15,-16 22 21 CFI_ADJUST_CFA_OFFSET 16 23 - CFI_VAL_OFFSET 15, -160 22 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 24 23 basr %r5,0 25 24 0: al %r5,13f-0b(%r5) /* get &_vdso_data */ 26 25 1: ltr %r3,%r3 /* check if tz is NULL */
+2 -1
arch/s390/kernel/vdso64/Makefile
··· 28 28 extra-y += vdso64.lds 29 29 CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) 30 30 31 - # Disable gcov profiling and ubsan for VDSO code 31 + # Disable gcov profiling, ubsan and kasan for VDSO code 32 32 GCOV_PROFILE := n 33 33 UBSAN_SANITIZE := n 34 + KASAN_SANITIZE := n 34 35 35 36 # Force dependency (incbin is bad) 36 37 $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
+13 -12
arch/s390/kernel/vdso64/clock_gettime.S
··· 10 10 #include <asm/asm-offsets.h> 11 11 #include <asm/unistd.h> 12 12 #include <asm/dwarf.h> 13 + #include <asm/ptrace.h> 13 14 14 15 .text 15 16 .align 4 ··· 19 18 __kernel_clock_gettime: 20 19 CFI_STARTPROC 21 20 aghi %r15,-16 22 - CFI_DEF_CFA_OFFSET 176 23 - CFI_VAL_OFFSET 15, -160 21 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 22 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 24 23 larl %r5,_vdso_data 25 24 cghi %r2,__CLOCK_REALTIME_COARSE 26 25 je 4f ··· 57 56 stg %r1,8(%r3) /* store tp->tv_nsec */ 58 57 lghi %r2,0 59 58 aghi %r15,16 60 - CFI_DEF_CFA_OFFSET 160 59 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD 61 60 CFI_RESTORE 15 62 61 br %r14 63 62 64 63 /* CLOCK_MONOTONIC_COARSE */ 65 - CFI_DEF_CFA_OFFSET 176 66 - CFI_VAL_OFFSET 15, -160 64 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 65 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 67 66 3: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ 68 67 tmll %r4,0x0001 /* pending update ? loop */ 69 68 jnz 3b ··· 116 115 stg %r1,8(%r3) /* store tp->tv_nsec */ 117 116 lghi %r2,0 118 117 aghi %r15,16 119 - CFI_DEF_CFA_OFFSET 160 118 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD 120 119 CFI_RESTORE 15 121 120 br %r14 122 121 123 122 /* CPUCLOCK_VIRT for this thread */ 124 - CFI_DEF_CFA_OFFSET 176 125 - CFI_VAL_OFFSET 15, -160 123 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 124 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 126 125 9: lghi %r4,0 127 126 icm %r0,15,__VDSO_ECTG_OK(%r5) 128 127 jz 12f ··· 143 142 stg %r4,8(%r3) 144 143 lghi %r2,0 145 144 aghi %r15,16 146 - CFI_DEF_CFA_OFFSET 160 145 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD 147 146 CFI_RESTORE 15 148 147 br %r14 149 148 150 149 /* Fallback to system call */ 151 - CFI_DEF_CFA_OFFSET 176 152 - CFI_VAL_OFFSET 15, -160 150 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 151 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 153 152 12: lghi %r1,__NR_clock_gettime 154 153 svc 0 155 154 aghi %r15,16 156 - CFI_DEF_CFA_OFFSET 160 155 + CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD 157 
156 CFI_RESTORE 15 158 157 br %r14 159 158 CFI_ENDPROC
+2 -1
arch/s390/kernel/vdso64/gettimeofday.S
··· 10 10 #include <asm/asm-offsets.h> 11 11 #include <asm/unistd.h> 12 12 #include <asm/dwarf.h> 13 + #include <asm/ptrace.h> 13 14 14 15 .text 15 16 .align 4 ··· 20 19 CFI_STARTPROC 21 20 aghi %r15,-16 22 21 CFI_ADJUST_CFA_OFFSET 16 23 - CFI_VAL_OFFSET 15, -160 22 + CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 24 23 larl %r5,_vdso_data 25 24 0: ltgr %r3,%r3 /* check if tz is NULL */ 26 25 je 1f
+16
arch/s390/kernel/vmlinux.lds.S
··· 16 16 #define RO_AFTER_INIT_DATA 17 17 18 18 #include <asm-generic/vmlinux.lds.h> 19 + #include <asm/vmlinux.lds.h> 19 20 20 21 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") 21 22 OUTPUT_ARCH(s390:64-bit) ··· 135 134 __nospec_return_end = . ; 136 135 } 137 136 137 + BOOT_DATA 138 + 138 139 /* early.c uses stsi, which requires page aligned data. */ 139 140 . = ALIGN(PAGE_SIZE); 140 141 INIT_DATA_SECTION(0x100) ··· 148 145 BSS_SECTION(PAGE_SIZE, 4 * PAGE_SIZE, PAGE_SIZE) 149 146 150 147 _end = . ; 148 + 149 + /* 150 + * uncompressed image info used by the decompressor 151 + * it should match struct vmlinux_info 152 + */ 153 + .vmlinux.info 0 : { 154 + QUAD(_stext) /* default_lma */ 155 + QUAD(startup_continue) /* entry */ 156 + QUAD(__bss_start - _stext) /* image_size */ 157 + QUAD(__bss_stop - __bss_start) /* bss_size */ 158 + QUAD(__boot_data_start) /* bootdata_off */ 159 + QUAD(__boot_data_end - __boot_data_start) /* bootdata_size */ 160 + } 151 161 152 162 /* Debugging sections. */ 153 163 STABS_DEBUG
+4
arch/s390/lib/Makefile
··· 9 9 lib-$(CONFIG_KPROBES) += probes.o 10 10 lib-$(CONFIG_UPROBES) += probes.o 11 11 12 + # Instrumenting memory accesses to __user data (in different address space) 13 + # produce false positives 14 + KASAN_SANITIZE_uaccess.o := n 15 + 12 16 chkbss := mem.o 13 17 include $(srctree)/arch/s390/scripts/Makefile.chkbss
+9 -3
arch/s390/lib/mem.S
··· 14 14 /* 15 15 * void *memmove(void *dest, const void *src, size_t n) 16 16 */ 17 - ENTRY(memmove) 17 + WEAK(memmove) 18 + ENTRY(__memmove) 18 19 ltgr %r4,%r4 19 20 lgr %r1,%r2 20 21 jz .Lmemmove_exit ··· 48 47 BR_EX %r14 49 48 .Lmemmove_mvc: 50 49 mvc 0(1,%r1),0(%r3) 50 + ENDPROC(__memmove) 51 51 EXPORT_SYMBOL(memmove) 52 52 53 53 /* ··· 66 64 * return __builtin_memset(s, c, n); 67 65 * } 68 66 */ 69 - ENTRY(memset) 67 + WEAK(memset) 68 + ENTRY(__memset) 70 69 ltgr %r4,%r4 71 70 jz .Lmemset_exit 72 71 ltgr %r3,%r3 ··· 111 108 xc 0(1,%r1),0(%r1) 112 109 .Lmemset_mvc: 113 110 mvc 1(1,%r1),0(%r1) 111 + ENDPROC(__memset) 114 112 EXPORT_SYMBOL(memset) 115 113 116 114 /* ··· 119 115 * 120 116 * void *memcpy(void *dest, const void *src, size_t n) 121 117 */ 122 - ENTRY(memcpy) 118 + WEAK(memcpy) 119 + ENTRY(__memcpy) 123 120 ltgr %r4,%r4 124 121 jz .Lmemcpy_exit 125 122 aghi %r4,-1 ··· 141 136 j .Lmemcpy_remainder 142 137 .Lmemcpy_mvc: 143 138 mvc 0(1,%r1),0(%r3) 139 + ENDPROC(__memcpy) 144 140 EXPORT_SYMBOL(memcpy) 145 141 146 142 /*
+4 -2
arch/s390/mm/Makefile
··· 4 4 # 5 5 6 6 obj-y := init.o fault.o extmem.o mmap.o vmem.o maccess.o 7 - obj-y += page-states.o gup.o pageattr.o mem_detect.o 8 - obj-y += pgtable.o pgalloc.o 7 + obj-y += page-states.o gup.o pageattr.o pgtable.o pgalloc.o 9 8 10 9 obj-$(CONFIG_CMM) += cmm.o 11 10 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 12 11 obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o 13 12 obj-$(CONFIG_PGSTE) += gmap.o 13 + 14 + KASAN_SANITIZE_kasan_init.o := n 15 + obj-$(CONFIG_KASAN) += kasan_init.o
+50 -8
arch/s390/mm/dump_pagetables.c
··· 3 3 #include <linux/debugfs.h> 4 4 #include <linux/sched.h> 5 5 #include <linux/mm.h> 6 + #include <linux/kasan.h> 7 + #include <asm/kasan.h> 6 8 #include <asm/sections.h> 7 9 #include <asm/pgtable.h> 8 10 ··· 19 17 IDENTITY_NR = 0, 20 18 KERNEL_START_NR, 21 19 KERNEL_END_NR, 20 + #ifdef CONFIG_KASAN 21 + KASAN_SHADOW_START_NR, 22 + KASAN_SHADOW_END_NR, 23 + #endif 22 24 VMEMMAP_NR, 23 25 VMALLOC_NR, 24 26 MODULES_NR, 25 27 }; 26 28 27 29 static struct addr_marker address_markers[] = { 28 - [IDENTITY_NR] = {0, "Identity Mapping"}, 29 - [KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"}, 30 - [KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"}, 31 - [VMEMMAP_NR] = {0, "vmemmap Area"}, 32 - [VMALLOC_NR] = {0, "vmalloc Area"}, 33 - [MODULES_NR] = {0, "Modules Area"}, 30 + [IDENTITY_NR] = {0, "Identity Mapping"}, 31 + [KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"}, 32 + [KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"}, 33 + #ifdef CONFIG_KASAN 34 + [KASAN_SHADOW_START_NR] = {KASAN_SHADOW_START, "Kasan Shadow Start"}, 35 + [KASAN_SHADOW_END_NR] = {KASAN_SHADOW_END, "Kasan Shadow End"}, 36 + #endif 37 + [VMEMMAP_NR] = {0, "vmemmap Area"}, 38 + [VMALLOC_NR] = {0, "vmalloc Area"}, 39 + [MODULES_NR] = {0, "Modules Area"}, 34 40 { -1, NULL } 35 41 }; 36 42 ··· 90 80 } else if (prot != cur || level != st->level || 91 81 st->current_address >= st->marker[1].start_address) { 92 82 /* Print the actual finished series */ 93 - seq_printf(m, "0x%0*lx-0x%0*lx", 83 + seq_printf(m, "0x%0*lx-0x%0*lx ", 94 84 width, st->start_address, 95 85 width, st->current_address); 96 86 delta = (st->current_address - st->start_address) >> 10; ··· 100 90 } 101 91 seq_printf(m, "%9lu%c ", delta, *unit); 102 92 print_prot(m, st->current_prot, st->level); 103 - if (st->current_address >= st->marker[1].start_address) { 93 + while (st->current_address >= st->marker[1].start_address) { 104 94 st->marker++; 105 95 seq_printf(m, "---[ %s ]---\n", 
st->marker->name); 106 96 } ··· 109 99 st->level = level; 110 100 } 111 101 } 102 + 103 + #ifdef CONFIG_KASAN 104 + static void note_kasan_zero_page(struct seq_file *m, struct pg_state *st) 105 + { 106 + unsigned int prot; 107 + 108 + prot = pte_val(*kasan_zero_pte) & 109 + (_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC); 110 + note_page(m, st, prot, 4); 111 + } 112 + #endif 112 113 113 114 /* 114 115 * The actual page table walker functions. In order to keep the ··· 153 132 pmd_t *pmd; 154 133 int i; 155 134 135 + #ifdef CONFIG_KASAN 136 + if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_zero_pmd)) { 137 + note_kasan_zero_page(m, st); 138 + return; 139 + } 140 + #endif 141 + 156 142 for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) { 157 143 st->current_address = addr; 158 144 pmd = pmd_offset(pud, addr); ··· 184 156 pud_t *pud; 185 157 int i; 186 158 159 + #ifdef CONFIG_KASAN 160 + if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_zero_pud)) { 161 + note_kasan_zero_page(m, st); 162 + return; 163 + } 164 + #endif 165 + 187 166 for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) { 188 167 st->current_address = addr; 189 168 pud = pud_offset(p4d, addr); ··· 213 178 { 214 179 p4d_t *p4d; 215 180 int i; 181 + 182 + #ifdef CONFIG_KASAN 183 + if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_zero_p4d)) { 184 + note_kasan_zero_page(m, st); 185 + return; 186 + } 187 + #endif 216 188 217 189 for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) { 218 190 st->current_address = addr;
+21 -17
arch/s390/mm/fault.c
··· 636 636 u64 reserved; 637 637 } __attribute__ ((packed, aligned(8))); 638 638 639 + static struct pfault_refbk pfault_init_refbk = { 640 + .refdiagc = 0x258, 641 + .reffcode = 0, 642 + .refdwlen = 5, 643 + .refversn = 2, 644 + .refgaddr = __LC_LPP, 645 + .refselmk = 1ULL << 48, 646 + .refcmpmk = 1ULL << 48, 647 + .reserved = __PF_RES_FIELD 648 + }; 649 + 639 650 int pfault_init(void) 640 651 { 641 - struct pfault_refbk refbk = { 642 - .refdiagc = 0x258, 643 - .reffcode = 0, 644 - .refdwlen = 5, 645 - .refversn = 2, 646 - .refgaddr = __LC_LPP, 647 - .refselmk = 1ULL << 48, 648 - .refcmpmk = 1ULL << 48, 649 - .reserved = __PF_RES_FIELD }; 650 652 int rc; 651 653 652 654 if (pfault_disable) ··· 660 658 "1: la %0,8\n" 661 659 "2:\n" 662 660 EX_TABLE(0b,1b) 663 - : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc"); 661 + : "=d" (rc) 662 + : "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc"); 664 663 return rc; 665 664 } 666 665 666 + static struct pfault_refbk pfault_fini_refbk = { 667 + .refdiagc = 0x258, 668 + .reffcode = 1, 669 + .refdwlen = 5, 670 + .refversn = 2, 671 + }; 672 + 667 673 void pfault_fini(void) 668 674 { 669 - struct pfault_refbk refbk = { 670 - .refdiagc = 0x258, 671 - .reffcode = 1, 672 - .refdwlen = 5, 673 - .refversn = 2, 674 - }; 675 675 676 676 if (pfault_disable) 677 677 return; ··· 682 678 " diag %0,0,0x258\n" 683 679 "0: nopr %%r7\n" 684 680 EX_TABLE(0b,0b) 685 - : : "a" (&refbk), "m" (refbk) : "cc"); 681 + : : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc"); 686 682 } 687 683 688 684 static DEFINE_SPINLOCK(pfault_lock);
+4 -1
arch/s390/mm/init.c
··· 42 42 #include <asm/ctl_reg.h> 43 43 #include <asm/sclp.h> 44 44 #include <asm/set_memory.h> 45 + #include <asm/kasan.h> 45 46 46 47 pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir); 47 48 ··· 99 98 S390_lowcore.user_asce = S390_lowcore.kernel_asce; 100 99 crst_table_init((unsigned long *) init_mm.pgd, pgd_type); 101 100 vmem_map_init(); 101 + kasan_copy_shadow(init_mm.pgd); 102 102 103 - /* enable virtual mapping in kernel mode */ 103 + /* enable virtual mapping in kernel mode */ 104 104 __ctl_load(S390_lowcore.kernel_asce, 1, 1); 105 105 __ctl_load(S390_lowcore.kernel_asce, 7, 7); 106 106 __ctl_load(S390_lowcore.kernel_asce, 13, 13); ··· 109 107 psw_bits(psw).dat = 1; 110 108 psw_bits(psw).as = PSW_BITS_AS_HOME; 111 109 __load_psw_mask(psw.mask); 110 + kasan_free_early_identity(); 112 111 113 112 sparse_memory_present_with_active_regions(MAX_NUMNODES); 114 113 sparse_init();
+387
arch/s390/mm/kasan_init.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/kasan.h> 3 + #include <linux/sched/task.h> 4 + #include <linux/memblock.h> 5 + #include <asm/pgalloc.h> 6 + #include <asm/pgtable.h> 7 + #include <asm/kasan.h> 8 + #include <asm/mem_detect.h> 9 + #include <asm/processor.h> 10 + #include <asm/sclp.h> 11 + #include <asm/facility.h> 12 + #include <asm/sections.h> 13 + #include <asm/setup.h> 14 + 15 + static unsigned long segment_pos __initdata; 16 + static unsigned long segment_low __initdata; 17 + static unsigned long pgalloc_pos __initdata; 18 + static unsigned long pgalloc_low __initdata; 19 + static unsigned long pgalloc_freeable __initdata; 20 + static bool has_edat __initdata; 21 + static bool has_nx __initdata; 22 + 23 + #define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x)) 24 + 25 + static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); 26 + 27 + static void __init kasan_early_panic(const char *reason) 28 + { 29 + sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n"); 30 + sclp_early_printk(reason); 31 + disabled_wait(0); 32 + } 33 + 34 + static void * __init kasan_early_alloc_segment(void) 35 + { 36 + segment_pos -= _SEGMENT_SIZE; 37 + 38 + if (segment_pos < segment_low) 39 + kasan_early_panic("out of memory during initialisation\n"); 40 + 41 + return (void *)segment_pos; 42 + } 43 + 44 + static void * __init kasan_early_alloc_pages(unsigned int order) 45 + { 46 + pgalloc_pos -= (PAGE_SIZE << order); 47 + 48 + if (pgalloc_pos < pgalloc_low) 49 + kasan_early_panic("out of memory during initialisation\n"); 50 + 51 + return (void *)pgalloc_pos; 52 + } 53 + 54 + static void * __init kasan_early_crst_alloc(unsigned long val) 55 + { 56 + unsigned long *table; 57 + 58 + table = kasan_early_alloc_pages(CRST_ALLOC_ORDER); 59 + if (table) 60 + crst_table_init(table, val); 61 + return table; 62 + } 63 + 64 + static pte_t * __init kasan_early_pte_alloc(void) 65 + { 66 + static void *pte_leftover; 
67 + pte_t *pte; 68 + 69 + BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE); 70 + 71 + if (!pte_leftover) { 72 + pte_leftover = kasan_early_alloc_pages(0); 73 + pte = pte_leftover + _PAGE_TABLE_SIZE; 74 + } else { 75 + pte = pte_leftover; 76 + pte_leftover = NULL; 77 + } 78 + memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE); 79 + return pte; 80 + } 81 + 82 + enum populate_mode { 83 + POPULATE_ONE2ONE, 84 + POPULATE_MAP, 85 + POPULATE_ZERO_SHADOW 86 + }; 87 + static void __init kasan_early_vmemmap_populate(unsigned long address, 88 + unsigned long end, 89 + enum populate_mode mode) 90 + { 91 + unsigned long pgt_prot_zero, pgt_prot, sgt_prot; 92 + pgd_t *pg_dir; 93 + p4d_t *p4_dir; 94 + pud_t *pu_dir; 95 + pmd_t *pm_dir; 96 + pte_t *pt_dir; 97 + 98 + pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO); 99 + if (!has_nx) 100 + pgt_prot_zero &= ~_PAGE_NOEXEC; 101 + pgt_prot = pgprot_val(PAGE_KERNEL_EXEC); 102 + sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC); 103 + 104 + while (address < end) { 105 + pg_dir = pgd_offset_k(address); 106 + if (pgd_none(*pg_dir)) { 107 + if (mode == POPULATE_ZERO_SHADOW && 108 + IS_ALIGNED(address, PGDIR_SIZE) && 109 + end - address >= PGDIR_SIZE) { 110 + pgd_populate(&init_mm, pg_dir, kasan_zero_p4d); 111 + address = (address + PGDIR_SIZE) & PGDIR_MASK; 112 + continue; 113 + } 114 + p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY); 115 + pgd_populate(&init_mm, pg_dir, p4_dir); 116 + } 117 + 118 + p4_dir = p4d_offset(pg_dir, address); 119 + if (p4d_none(*p4_dir)) { 120 + if (mode == POPULATE_ZERO_SHADOW && 121 + IS_ALIGNED(address, P4D_SIZE) && 122 + end - address >= P4D_SIZE) { 123 + p4d_populate(&init_mm, p4_dir, kasan_zero_pud); 124 + address = (address + P4D_SIZE) & P4D_MASK; 125 + continue; 126 + } 127 + pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY); 128 + p4d_populate(&init_mm, p4_dir, pu_dir); 129 + } 130 + 131 + pu_dir = pud_offset(p4_dir, address); 132 + if (pud_none(*pu_dir)) { 133 + if (mode == POPULATE_ZERO_SHADOW && 134 + 
IS_ALIGNED(address, PUD_SIZE) && 135 + end - address >= PUD_SIZE) { 136 + pud_populate(&init_mm, pu_dir, kasan_zero_pmd); 137 + address = (address + PUD_SIZE) & PUD_MASK; 138 + continue; 139 + } 140 + pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY); 141 + pud_populate(&init_mm, pu_dir, pm_dir); 142 + } 143 + 144 + pm_dir = pmd_offset(pu_dir, address); 145 + if (pmd_none(*pm_dir)) { 146 + if (mode == POPULATE_ZERO_SHADOW && 147 + IS_ALIGNED(address, PMD_SIZE) && 148 + end - address >= PMD_SIZE) { 149 + pmd_populate(&init_mm, pm_dir, kasan_zero_pte); 150 + address = (address + PMD_SIZE) & PMD_MASK; 151 + continue; 152 + } 153 + /* the first megabyte of 1:1 is mapped with 4k pages */ 154 + if (has_edat && address && end - address >= PMD_SIZE && 155 + mode != POPULATE_ZERO_SHADOW) { 156 + void *page; 157 + 158 + if (mode == POPULATE_ONE2ONE) { 159 + page = (void *)address; 160 + } else { 161 + page = kasan_early_alloc_segment(); 162 + memset(page, 0, _SEGMENT_SIZE); 163 + } 164 + pmd_val(*pm_dir) = __pa(page) | sgt_prot; 165 + address = (address + PMD_SIZE) & PMD_MASK; 166 + continue; 167 + } 168 + 169 + pt_dir = kasan_early_pte_alloc(); 170 + pmd_populate(&init_mm, pm_dir, pt_dir); 171 + } else if (pmd_large(*pm_dir)) { 172 + address = (address + PMD_SIZE) & PMD_MASK; 173 + continue; 174 + } 175 + 176 + pt_dir = pte_offset_kernel(pm_dir, address); 177 + if (pte_none(*pt_dir)) { 178 + void *page; 179 + 180 + switch (mode) { 181 + case POPULATE_ONE2ONE: 182 + page = (void *)address; 183 + pte_val(*pt_dir) = __pa(page) | pgt_prot; 184 + break; 185 + case POPULATE_MAP: 186 + page = kasan_early_alloc_pages(0); 187 + memset(page, 0, PAGE_SIZE); 188 + pte_val(*pt_dir) = __pa(page) | pgt_prot; 189 + break; 190 + case POPULATE_ZERO_SHADOW: 191 + page = kasan_zero_page; 192 + pte_val(*pt_dir) = __pa(page) | pgt_prot_zero; 193 + break; 194 + } 195 + } 196 + address += PAGE_SIZE; 197 + } 198 + } 199 + 200 + static void __init kasan_set_pgd(pgd_t *pgd, unsigned long 
asce_type) 201 + { 202 + unsigned long asce_bits; 203 + 204 + asce_bits = asce_type | _ASCE_TABLE_LENGTH; 205 + S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits; 206 + S390_lowcore.user_asce = S390_lowcore.kernel_asce; 207 + 208 + __ctl_load(S390_lowcore.kernel_asce, 1, 1); 209 + __ctl_load(S390_lowcore.kernel_asce, 7, 7); 210 + __ctl_load(S390_lowcore.kernel_asce, 13, 13); 211 + } 212 + 213 + static void __init kasan_enable_dat(void) 214 + { 215 + psw_t psw; 216 + 217 + psw.mask = __extract_psw(); 218 + psw_bits(psw).dat = 1; 219 + psw_bits(psw).as = PSW_BITS_AS_HOME; 220 + __load_psw_mask(psw.mask); 221 + } 222 + 223 + static void __init kasan_early_detect_facilities(void) 224 + { 225 + __stfle(S390_lowcore.stfle_fac_list, 226 + ARRAY_SIZE(S390_lowcore.stfle_fac_list)); 227 + if (test_facility(8)) { 228 + has_edat = true; 229 + __ctl_set_bit(0, 23); 230 + } 231 + if (!noexec_disabled && test_facility(130)) { 232 + has_nx = true; 233 + __ctl_set_bit(0, 20); 234 + } 235 + } 236 + 237 + static unsigned long __init get_mem_detect_end(void) 238 + { 239 + unsigned long start; 240 + unsigned long end; 241 + 242 + if (mem_detect.count) { 243 + __get_mem_detect_block(mem_detect.count - 1, &start, &end); 244 + return end; 245 + } 246 + return 0; 247 + } 248 + 249 + void __init kasan_early_init(void) 250 + { 251 + unsigned long untracked_mem_end; 252 + unsigned long shadow_alloc_size; 253 + unsigned long initrd_end; 254 + unsigned long asce_type; 255 + unsigned long memsize; 256 + unsigned long vmax; 257 + unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO); 258 + pte_t pte_z; 259 + pmd_t pmd_z = __pmd(__pa(kasan_zero_pte) | _SEGMENT_ENTRY); 260 + pud_t pud_z = __pud(__pa(kasan_zero_pmd) | _REGION3_ENTRY); 261 + p4d_t p4d_z = __p4d(__pa(kasan_zero_pud) | _REGION2_ENTRY); 262 + 263 + kasan_early_detect_facilities(); 264 + if (!has_nx) 265 + pgt_prot &= ~_PAGE_NOEXEC; 266 + pte_z = __pte(__pa(kasan_zero_page) | pgt_prot); 267 + 268 + memsize = 
get_mem_detect_end(); 269 + if (!memsize) 270 + kasan_early_panic("cannot detect physical memory size\n"); 271 + /* respect mem= cmdline parameter */ 272 + if (memory_end_set && memsize > memory_end) 273 + memsize = memory_end; 274 + memsize = min(memsize, KASAN_SHADOW_START); 275 + 276 + if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) { 277 + /* 4 level paging */ 278 + BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE)); 279 + BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE)); 280 + crst_table_init((unsigned long *)early_pg_dir, 281 + _REGION2_ENTRY_EMPTY); 282 + untracked_mem_end = vmax = _REGION1_SIZE; 283 + asce_type = _ASCE_TYPE_REGION2; 284 + } else { 285 + /* 3 level paging */ 286 + BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE)); 287 + BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE)); 288 + crst_table_init((unsigned long *)early_pg_dir, 289 + _REGION3_ENTRY_EMPTY); 290 + untracked_mem_end = vmax = _REGION2_SIZE; 291 + asce_type = _ASCE_TYPE_REGION3; 292 + } 293 + 294 + /* init kasan zero shadow */ 295 + crst_table_init((unsigned long *)kasan_zero_p4d, p4d_val(p4d_z)); 296 + crst_table_init((unsigned long *)kasan_zero_pud, pud_val(pud_z)); 297 + crst_table_init((unsigned long *)kasan_zero_pmd, pmd_val(pmd_z)); 298 + memset64((u64 *)kasan_zero_pte, pte_val(pte_z), PTRS_PER_PTE); 299 + 300 + shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT; 301 + pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE); 302 + if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) { 303 + initrd_end = 304 + round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE); 305 + pgalloc_low = max(pgalloc_low, initrd_end); 306 + } 307 + 308 + if (pgalloc_low + shadow_alloc_size > memsize) 309 + kasan_early_panic("out of memory during initialisation\n"); 310 + 311 + if (has_edat) { 312 + segment_pos = round_down(memsize, _SEGMENT_SIZE); 313 + segment_low = segment_pos - shadow_alloc_size; 314 + pgalloc_pos = segment_low; 315 + } else { 316 + pgalloc_pos = memsize; 317 + } 318 + 
init_mm.pgd = early_pg_dir; 319 + /* 320 + * Current memory layout: 321 + * +- 0 -------------+ +- shadow start -+ 322 + * | 1:1 ram mapping | /| 1/8 ram | 323 + * +- end of ram ----+ / +----------------+ 324 + * | ... gap ... |/ | kasan | 325 + * +- shadow start --+ | zero | 326 + * | 1/8 addr space | | page | 327 + * +- shadow end -+ | mapping | 328 + * | ... gap ... |\ | (untracked) | 329 + * +- modules vaddr -+ \ +----------------+ 330 + * | 2Gb | \| unmapped | allocated per module 331 + * +-----------------+ +- shadow end ---+ 332 + */ 333 + /* populate kasan shadow (for identity mapping and zero page mapping) */ 334 + kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP); 335 + if (IS_ENABLED(CONFIG_MODULES)) 336 + untracked_mem_end = vmax - MODULES_LEN; 337 + kasan_early_vmemmap_populate(__sha(max_physmem_end), 338 + __sha(untracked_mem_end), 339 + POPULATE_ZERO_SHADOW); 340 + /* memory allocated for identity mapping structs will be freed later */ 341 + pgalloc_freeable = pgalloc_pos; 342 + /* populate identity mapping */ 343 + kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE); 344 + kasan_set_pgd(early_pg_dir, asce_type); 345 + kasan_enable_dat(); 346 + /* enable kasan */ 347 + init_task.kasan_depth = 0; 348 + memblock_reserve(pgalloc_pos, memsize - pgalloc_pos); 349 + sclp_early_printk("KernelAddressSanitizer initialized\n"); 350 + } 351 + 352 + void __init kasan_copy_shadow(pgd_t *pg_dir) 353 + { 354 + /* 355 + * At this point we are still running on early pages setup early_pg_dir, 356 + * while swapper_pg_dir has just been initialized with identity mapping. 357 + * Carry over shadow memory region from early_pg_dir to swapper_pg_dir. 
358 + */ 359 + 360 + pgd_t *pg_dir_src; 361 + pgd_t *pg_dir_dst; 362 + p4d_t *p4_dir_src; 363 + p4d_t *p4_dir_dst; 364 + pud_t *pu_dir_src; 365 + pud_t *pu_dir_dst; 366 + 367 + pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START); 368 + pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START); 369 + p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START); 370 + p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START); 371 + if (!p4d_folded(*p4_dir_src)) { 372 + /* 4 level paging */ 373 + memcpy(p4_dir_dst, p4_dir_src, 374 + (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t)); 375 + return; 376 + } 377 + /* 3 level paging */ 378 + pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START); 379 + pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START); 380 + memcpy(pu_dir_dst, pu_dir_src, 381 + (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t)); 382 + } 383 + 384 + void __init kasan_free_early_identity(void) 385 + { 386 + memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos); 387 + }
+20 -5
arch/s390/mm/maccess.c
··· 89 89 return rc; 90 90 } 91 91 92 - /* 93 - * Copy memory in real mode (kernel to kernel) 94 - */ 95 - int memcpy_real(void *dest, void *src, size_t count) 92 + static unsigned long _memcpy_real(unsigned long dest, unsigned long src, 93 + unsigned long count) 96 94 { 97 95 int irqs_disabled, rc; 98 96 unsigned long flags; ··· 101 103 irqs_disabled = arch_irqs_disabled_flags(flags); 102 104 if (!irqs_disabled) 103 105 trace_hardirqs_off(); 104 - rc = __memcpy_real(dest, src, count); 106 + rc = __memcpy_real((void *) dest, (void *) src, (size_t) count); 105 107 if (!irqs_disabled) 106 108 trace_hardirqs_on(); 107 109 __arch_local_irq_ssm(flags); 108 110 return rc; 111 + } 112 + 113 + /* 114 + * Copy memory in real mode (kernel to kernel) 115 + */ 116 + int memcpy_real(void *dest, void *src, size_t count) 117 + { 118 + if (S390_lowcore.nodat_stack != 0) 119 + return CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 120 + 3, dest, src, count); 121 + /* 122 + * This is a really early memcpy_real call, the stacks are 123 + * not set up yet. Just call _memcpy_real on the early boot 124 + * stack 125 + */ 126 + return _memcpy_real((unsigned long) dest,(unsigned long) src, 127 + (unsigned long) count); 109 128 } 110 129 111 130 /*
-62
arch/s390/mm/mem_detect.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * Copyright IBM Corp. 2008, 2009 4 - * 5 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> 6 - */ 7 - 8 - #include <linux/kernel.h> 9 - #include <linux/memblock.h> 10 - #include <linux/init.h> 11 - #include <linux/debugfs.h> 12 - #include <linux/seq_file.h> 13 - #include <asm/ipl.h> 14 - #include <asm/sclp.h> 15 - #include <asm/setup.h> 16 - 17 - #define CHUNK_READ_WRITE 0 18 - #define CHUNK_READ_ONLY 1 19 - 20 - static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size) 21 - { 22 - memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n", 23 - start, start + size - 1); 24 - memblock_add_range(&memblock.memory, start, size, 0, 0); 25 - memblock_add_range(&memblock.physmem, start, size, 0, 0); 26 - } 27 - 28 - void __init detect_memory_memblock(void) 29 - { 30 - unsigned long memsize, rnmax, rzm, addr, size; 31 - int type; 32 - 33 - rzm = sclp.rzm; 34 - rnmax = sclp.rnmax; 35 - memsize = rzm * rnmax; 36 - if (!rzm) 37 - rzm = 1UL << 17; 38 - max_physmem_end = memsize; 39 - addr = 0; 40 - /* keep memblock lists close to the kernel */ 41 - memblock_set_bottom_up(true); 42 - do { 43 - size = 0; 44 - /* assume lowcore is writable */ 45 - type = addr ? tprot(addr) : CHUNK_READ_WRITE; 46 - do { 47 - size += rzm; 48 - if (max_physmem_end && addr + size >= max_physmem_end) 49 - break; 50 - } while (type == tprot(addr + size)); 51 - if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) { 52 - if (max_physmem_end && (addr + size > max_physmem_end)) 53 - size = max_physmem_end - addr; 54 - memblock_physmem_add(addr, size); 55 - } 56 - addr += size; 57 - } while (addr < max_physmem_end); 58 - memblock_set_bottom_up(false); 59 - if (!max_physmem_end) 60 - max_physmem_end = memblock_end_of_DRAM(); 61 - memblock_dump_all(); 62 - }
+2 -2
arch/s390/purgatory/head.S
··· 11 11 #include <asm/asm-offsets.h> 12 12 #include <asm/page.h> 13 13 #include <asm/sigp.h> 14 + #include <asm/ptrace.h> 14 15 15 16 /* The purgatory is the code running between two kernels. It's main purpose 16 17 * is to verify that the next kernel was not corrupted after load and to ··· 89 88 .base_crash: 90 89 91 90 /* Setup stack */ 92 - larl %r15,purgatory_end 93 - aghi %r15,-160 91 + larl %r15,purgatory_end-STACK_FRAME_OVERHEAD 94 92 95 93 /* If the next kernel is KEXEC_TYPE_CRASH the purgatory is called 96 94 * directly with a flag passed in %r2 whether the purgatory shall do
+11
drivers/crypto/Kconfig
··· 73 73 + Crypto Express 2,3,4 or 5 Accelerator (CEXxA) 74 74 + Crypto Express 4 or 5 EP11 Coprocessor (CEXxP) 75 75 76 + config ZCRYPT_MULTIDEVNODES 77 + bool "Support for multiple zcrypt device nodes" 78 + default y 79 + depends on S390 80 + depends on ZCRYPT 81 + help 82 + With this option enabled the zcrypt device driver can 83 + provide multiple devices nodes in /dev. Each device 84 + node can get customized to limit access and narrow 85 + down the use of the available crypto hardware. 86 + 76 87 config PKEY 77 88 tristate "Kernel API for protected key handling" 78 89 depends on S390
+2 -4
drivers/s390/block/dasd.c
··· 3309 3309 dasd_proc_exit(); 3310 3310 #endif 3311 3311 dasd_eer_exit(); 3312 - if (dasd_page_cache != NULL) { 3313 - kmem_cache_destroy(dasd_page_cache); 3314 - dasd_page_cache = NULL; 3315 - } 3312 + kmem_cache_destroy(dasd_page_cache); 3313 + dasd_page_cache = NULL; 3316 3314 dasd_gendisk_exit(); 3317 3315 dasd_devmap_exit(); 3318 3316 if (dasd_debug_area != NULL) {
+1
drivers/s390/char/Makefile
··· 11 11 GCOV_PROFILE_sclp_early_core.o := n 12 12 KCOV_INSTRUMENT_sclp_early_core.o := n 13 13 UBSAN_SANITIZE_sclp_early_core.o := n 14 + KASAN_SANITIZE_sclp_early_core.o := n 14 15 15 16 CFLAGS_sclp_early_core.o += -D__NO_FORTIFY 16 17
+21 -12
drivers/s390/char/monwriter.c
··· 58 58 59 59 static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn) 60 60 { 61 - struct appldata_product_id id; 61 + struct appldata_parameter_list *parm_list; 62 + struct appldata_product_id *id; 62 63 int rc; 63 64 64 - memcpy(id.prod_nr, "LNXAPPL", 7); 65 - id.prod_fn = myhdr->applid; 66 - id.record_nr = myhdr->record_num; 67 - id.version_nr = myhdr->version; 68 - id.release_nr = myhdr->release; 69 - id.mod_lvl = myhdr->mod_level; 70 - rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen); 65 + id = kmalloc(sizeof(*id), GFP_KERNEL); 66 + parm_list = kmalloc(sizeof(*parm_list), GFP_KERNEL); 67 + rc = -ENOMEM; 68 + if (!id || !parm_list) 69 + goto out; 70 + memcpy(id->prod_nr, "LNXAPPL", 7); 71 + id->prod_fn = myhdr->applid; 72 + id->record_nr = myhdr->record_num; 73 + id->version_nr = myhdr->version; 74 + id->release_nr = myhdr->release; 75 + id->mod_lvl = myhdr->mod_level; 76 + rc = appldata_asm(parm_list, id, fcn, 77 + (void *) buffer, myhdr->datalen); 71 78 if (rc <= 0) 72 - return rc; 79 + goto out; 73 80 pr_err("Writing monitor data failed with rc=%i\n", rc); 74 - if (rc == 5) 75 - return -EPERM; 76 - return -EINVAL; 81 + rc = (rc == 5) ? -EPERM : -EINVAL; 82 + out: 83 + kfree(id); 84 + kfree(parm_list); 85 + return rc; 77 86 } 78 87 79 88 static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
+52
drivers/s390/char/sclp.h
··· 63 63 typedef unsigned int sclp_cmdw_t; 64 64 65 65 #define SCLP_CMDW_READ_CPU_INFO 0x00010001 66 + #define SCLP_CMDW_READ_SCP_INFO 0x00020001 67 + #define SCLP_CMDW_READ_STORAGE_INFO 0x00040001 68 + #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 66 69 #define SCLP_CMDW_READ_EVENT_DATA 0x00770005 67 70 #define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005 68 71 #define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005 ··· 158 155 u16 offset_standby; 159 156 u8 reserved[4096 - 16]; 160 157 } __attribute__((packed, aligned(PAGE_SIZE))); 158 + 159 + struct read_info_sccb { 160 + struct sccb_header header; /* 0-7 */ 161 + u16 rnmax; /* 8-9 */ 162 + u8 rnsize; /* 10 */ 163 + u8 _pad_11[16 - 11]; /* 11-15 */ 164 + u16 ncpurl; /* 16-17 */ 165 + u16 cpuoff; /* 18-19 */ 166 + u8 _pad_20[24 - 20]; /* 20-23 */ 167 + u8 loadparm[8]; /* 24-31 */ 168 + u8 _pad_32[42 - 32]; /* 32-41 */ 169 + u8 fac42; /* 42 */ 170 + u8 fac43; /* 43 */ 171 + u8 _pad_44[48 - 44]; /* 44-47 */ 172 + u64 facilities; /* 48-55 */ 173 + u8 _pad_56[66 - 56]; /* 56-65 */ 174 + u8 fac66; /* 66 */ 175 + u8 _pad_67[76 - 67]; /* 67-83 */ 176 + u32 ibc; /* 76-79 */ 177 + u8 _pad80[84 - 80]; /* 80-83 */ 178 + u8 fac84; /* 84 */ 179 + u8 fac85; /* 85 */ 180 + u8 _pad_86[91 - 86]; /* 86-90 */ 181 + u8 fac91; /* 91 */ 182 + u8 _pad_92[98 - 92]; /* 92-97 */ 183 + u8 fac98; /* 98 */ 184 + u8 hamaxpow; /* 99 */ 185 + u32 rnsize2; /* 100-103 */ 186 + u64 rnmax2; /* 104-111 */ 187 + u32 hsa_size; /* 112-115 */ 188 + u8 fac116; /* 116 */ 189 + u8 fac117; /* 117 */ 190 + u8 fac118; /* 118 */ 191 + u8 fac119; /* 119 */ 192 + u16 hcpua; /* 120-121 */ 193 + u8 _pad_122[124 - 122]; /* 122-123 */ 194 + u32 hmfai; /* 124-127 */ 195 + u8 _pad_128[4096 - 128]; /* 128-4095 */ 196 + } __packed __aligned(PAGE_SIZE); 197 + 198 + struct read_storage_sccb { 199 + struct sccb_header header; 200 + u16 max_id; 201 + u16 assigned; 202 + u16 standby; 203 + u16 :16; 204 + u32 entries[0]; 205 + } __packed; 161 206 162 207 static inline void 
sclp_fill_core_info(struct sclp_core_info *info, 163 208 struct read_cpu_info_sccb *sccb) ··· 326 275 int sclp_early_set_event_mask(struct init_sccb *sccb, 327 276 sccb_mask_t receive_mask, 328 277 sccb_mask_t send_mask); 278 + int sclp_early_get_info(struct read_info_sccb *info); 329 279 330 280 /* useful inlines */ 331 281
+1 -10
drivers/s390/char/sclp_cmd.c
··· 460 460 return -EPERM; 461 461 } 462 462 463 - struct read_storage_sccb { 464 - struct sccb_header header; 465 - u16 max_id; 466 - u16 assigned; 467 - u16 standby; 468 - u16 :16; 469 - u32 entries[0]; 470 - } __packed; 471 - 472 463 static const struct dev_pm_ops sclp_mem_pm_ops = { 473 464 .freeze = sclp_mem_freeze, 474 465 }; ··· 489 498 for (id = 0; id <= sclp_max_storage_id; id++) { 490 499 memset(sccb, 0, PAGE_SIZE); 491 500 sccb->header.length = PAGE_SIZE; 492 - rc = sclp_sync_request(0x00040001 | id << 8, sccb); 501 + rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb); 493 502 if (rc) 494 503 goto out; 495 504 switch (sccb->header.response_code) {
+3 -120
drivers/s390/char/sclp_early.c
··· 15 15 #include "sclp_sdias.h" 16 16 #include "sclp.h" 17 17 18 - #define SCLP_CMDW_READ_SCP_INFO 0x00020001 19 - #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 20 - 21 - struct read_info_sccb { 22 - struct sccb_header header; /* 0-7 */ 23 - u16 rnmax; /* 8-9 */ 24 - u8 rnsize; /* 10 */ 25 - u8 _pad_11[16 - 11]; /* 11-15 */ 26 - u16 ncpurl; /* 16-17 */ 27 - u16 cpuoff; /* 18-19 */ 28 - u8 _pad_20[24 - 20]; /* 20-23 */ 29 - u8 loadparm[8]; /* 24-31 */ 30 - u8 _pad_32[42 - 32]; /* 32-41 */ 31 - u8 fac42; /* 42 */ 32 - u8 fac43; /* 43 */ 33 - u8 _pad_44[48 - 44]; /* 44-47 */ 34 - u64 facilities; /* 48-55 */ 35 - u8 _pad_56[66 - 56]; /* 56-65 */ 36 - u8 fac66; /* 66 */ 37 - u8 _pad_67[76 - 67]; /* 67-83 */ 38 - u32 ibc; /* 76-79 */ 39 - u8 _pad80[84 - 80]; /* 80-83 */ 40 - u8 fac84; /* 84 */ 41 - u8 fac85; /* 85 */ 42 - u8 _pad_86[91 - 86]; /* 86-90 */ 43 - u8 fac91; /* 91 */ 44 - u8 _pad_92[98 - 92]; /* 92-97 */ 45 - u8 fac98; /* 98 */ 46 - u8 hamaxpow; /* 99 */ 47 - u32 rnsize2; /* 100-103 */ 48 - u64 rnmax2; /* 104-111 */ 49 - u8 _pad_112[116 - 112]; /* 112-115 */ 50 - u8 fac116; /* 116 */ 51 - u8 fac117; /* 117 */ 52 - u8 fac118; /* 118 */ 53 - u8 fac119; /* 119 */ 54 - u16 hcpua; /* 120-121 */ 55 - u8 _pad_122[124 - 122]; /* 122-123 */ 56 - u32 hmfai; /* 124-127 */ 57 - u8 _pad_128[4096 - 128]; /* 128-4095 */ 58 - } __packed __aligned(PAGE_SIZE); 59 - 60 18 static struct sclp_ipl_info sclp_ipl_info; 61 19 62 20 struct sclp_info sclp; 63 21 EXPORT_SYMBOL(sclp); 64 - 65 - static int __init sclp_early_read_info(struct read_info_sccb *sccb) 66 - { 67 - int i; 68 - sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, 69 - SCLP_CMDW_READ_SCP_INFO}; 70 - 71 - for (i = 0; i < ARRAY_SIZE(commands); i++) { 72 - memset(sccb, 0, sizeof(*sccb)); 73 - sccb->header.length = sizeof(*sccb); 74 - sccb->header.function_code = 0x80; 75 - sccb->header.control_mask[2] = 0x80; 76 - if (sclp_early_cmd(commands[i], sccb)) 77 - break; 78 - if (sccb->header.response_code == 0x10) 
79 - return 0; 80 - if (sccb->header.response_code != 0x1f0) 81 - break; 82 - } 83 - return -EIO; 84 - } 85 22 86 23 static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb) 87 24 { 88 25 struct sclp_core_entry *cpue; 89 26 u16 boot_cpu_address, cpu; 90 27 91 - if (sclp_early_read_info(sccb)) 28 + if (sclp_early_get_info(sccb)) 92 29 return; 93 30 94 31 sclp.facilities = sccb->facilities; ··· 84 147 sclp_ipl_info.has_dump = 1; 85 148 memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN); 86 149 150 + if (sccb->hsa_size) 151 + sclp.hsa_size = (sccb->hsa_size - 1) * PAGE_SIZE; 87 152 sclp.mtid = (sccb->fac42 & 0x80) ? (sccb->fac42 & 31) : 0; 88 153 sclp.mtid_cp = (sccb->fac42 & 0x80) ? (sccb->fac43 & 31) : 0; 89 154 sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0; ··· 128 189 return 0; 129 190 } 130 191 131 - static long __init sclp_early_hsa_size_init(struct sdias_sccb *sccb) 132 - { 133 - memset(sccb, 0, sizeof(*sccb)); 134 - sccb->hdr.length = sizeof(*sccb); 135 - sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf); 136 - sccb->evbuf.hdr.type = EVTYP_SDIAS; 137 - sccb->evbuf.event_qual = SDIAS_EQ_SIZE; 138 - sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP; 139 - sccb->evbuf.event_id = 4712; 140 - sccb->evbuf.dbs = 1; 141 - if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb)) 142 - return -EIO; 143 - if (sccb->hdr.response_code != 0x20) 144 - return -EIO; 145 - if (sccb->evbuf.blk_cnt == 0) 146 - return 0; 147 - return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE; 148 - } 149 - 150 - static long __init sclp_early_hsa_copy_wait(struct sdias_sccb *sccb) 151 - { 152 - memset(sccb, 0, PAGE_SIZE); 153 - sccb->hdr.length = PAGE_SIZE; 154 - if (sclp_early_cmd(SCLP_CMDW_READ_EVENT_DATA, sccb)) 155 - return -EIO; 156 - if ((sccb->hdr.response_code != 0x20) && (sccb->hdr.response_code != 0x220)) 157 - return -EIO; 158 - if (sccb->evbuf.blk_cnt == 0) 159 - return 0; 160 - return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE; 161 - } 162 - 163 - static 
void __init sclp_early_hsa_size_detect(void *sccb) 164 - { 165 - unsigned long flags; 166 - long size = -EIO; 167 - 168 - raw_local_irq_save(flags); 169 - if (sclp_early_set_event_mask(sccb, EVTYP_SDIAS_MASK, EVTYP_SDIAS_MASK)) 170 - goto out; 171 - size = sclp_early_hsa_size_init(sccb); 172 - /* First check for synchronous response (LPAR) */ 173 - if (size) 174 - goto out_mask; 175 - if (!(S390_lowcore.ext_params & 1)) 176 - sclp_early_wait_irq(); 177 - size = sclp_early_hsa_copy_wait(sccb); 178 - out_mask: 179 - sclp_early_set_event_mask(sccb, 0, 0); 180 - out: 181 - raw_local_irq_restore(flags); 182 - if (size > 0) 183 - sclp.hsa_size = size; 184 - } 185 - 186 192 static void __init sclp_early_console_detect(struct init_sccb *sccb) 187 193 { 188 194 if (sccb->header.response_code != 0x20) ··· 146 262 147 263 sclp_early_facilities_detect(sccb); 148 264 sclp_early_init_core_info(sccb); 149 - sclp_early_hsa_size_detect(sccb); 150 265 151 266 /* 152 267 * Turn off SCLP event notifications. Also save remote masks in the
+116
drivers/s390/char/sclp_early_core.c
··· 9 9 #include <asm/lowcore.h> 10 10 #include <asm/ebcdic.h> 11 11 #include <asm/irq.h> 12 + #include <asm/sections.h> 13 + #include <asm/mem_detect.h> 12 14 #include "sclp.h" 13 15 #include "sclp_rw.h" 14 16 17 + static struct read_info_sccb __bootdata(sclp_info_sccb); 18 + static int __bootdata(sclp_info_sccb_valid); 15 19 char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data); 16 20 int sclp_init_state __section(.data) = sclp_init_state_uninitialized; 17 21 /* ··· 237 233 void sclp_early_printk_force(const char *str) 238 234 { 239 235 __sclp_early_printk(str, strlen(str), 1); 236 + } 237 + 238 + int __init sclp_early_read_info(void) 239 + { 240 + int i; 241 + struct read_info_sccb *sccb = &sclp_info_sccb; 242 + sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, 243 + SCLP_CMDW_READ_SCP_INFO}; 244 + 245 + for (i = 0; i < ARRAY_SIZE(commands); i++) { 246 + memset(sccb, 0, sizeof(*sccb)); 247 + sccb->header.length = sizeof(*sccb); 248 + sccb->header.function_code = 0x80; 249 + sccb->header.control_mask[2] = 0x80; 250 + if (sclp_early_cmd(commands[i], sccb)) 251 + break; 252 + if (sccb->header.response_code == 0x10) { 253 + sclp_info_sccb_valid = 1; 254 + return 0; 255 + } 256 + if (sccb->header.response_code != 0x1f0) 257 + break; 258 + } 259 + return -EIO; 260 + } 261 + 262 + int __init sclp_early_get_info(struct read_info_sccb *info) 263 + { 264 + if (!sclp_info_sccb_valid) 265 + return -EIO; 266 + 267 + *info = sclp_info_sccb; 268 + return 0; 269 + } 270 + 271 + int __init sclp_early_get_memsize(unsigned long *mem) 272 + { 273 + unsigned long rnmax; 274 + unsigned long rnsize; 275 + struct read_info_sccb *sccb = &sclp_info_sccb; 276 + 277 + if (!sclp_info_sccb_valid) 278 + return -EIO; 279 + 280 + rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; 281 + rnsize = sccb->rnsize ? 
sccb->rnsize : sccb->rnsize2; 282 + rnsize <<= 20; 283 + *mem = rnsize * rnmax; 284 + return 0; 285 + } 286 + 287 + int __init sclp_early_get_hsa_size(unsigned long *hsa_size) 288 + { 289 + if (!sclp_info_sccb_valid) 290 + return -EIO; 291 + 292 + *hsa_size = 0; 293 + if (sclp_info_sccb.hsa_size) 294 + *hsa_size = (sclp_info_sccb.hsa_size - 1) * PAGE_SIZE; 295 + return 0; 296 + } 297 + 298 + #define SCLP_STORAGE_INFO_FACILITY 0x0000400000000000UL 299 + 300 + void __weak __init add_mem_detect_block(u64 start, u64 end) {} 301 + int __init sclp_early_read_storage_info(void) 302 + { 303 + struct read_storage_sccb *sccb = (struct read_storage_sccb *)&sclp_early_sccb; 304 + int rc, id, max_id = 0; 305 + unsigned long rn, rzm; 306 + sclp_cmdw_t command; 307 + u16 sn; 308 + 309 + if (!sclp_info_sccb_valid) 310 + return -EIO; 311 + 312 + if (!(sclp_info_sccb.facilities & SCLP_STORAGE_INFO_FACILITY)) 313 + return -EOPNOTSUPP; 314 + 315 + rzm = sclp_info_sccb.rnsize ?: sclp_info_sccb.rnsize2; 316 + rzm <<= 20; 317 + 318 + for (id = 0; id <= max_id; id++) { 319 + memset(sclp_early_sccb, 0, sizeof(sclp_early_sccb)); 320 + sccb->header.length = sizeof(sclp_early_sccb); 321 + command = SCLP_CMDW_READ_STORAGE_INFO | (id << 8); 322 + rc = sclp_early_cmd(command, sccb); 323 + if (rc) 324 + goto fail; 325 + 326 + max_id = sccb->max_id; 327 + switch (sccb->header.response_code) { 328 + case 0x0010: 329 + for (sn = 0; sn < sccb->assigned; sn++) { 330 + if (!sccb->entries[sn]) 331 + continue; 332 + rn = sccb->entries[sn] >> 16; 333 + add_mem_detect_block((rn - 1) * rzm, rn * rzm); 334 + } 335 + break; 336 + case 0x0310: 337 + case 0x0410: 338 + break; 339 + default: 340 + goto fail; 341 + } 342 + } 343 + 344 + return 0; 345 + fail: 346 + mem_detect.count = 0; 347 + return -EIO; 240 348 }
+8 -2
drivers/s390/char/sclp_pci.c
··· 24 24 25 25 #define SCLP_ATYPE_PCI 2 26 26 27 + #define SCLP_ERRNOTIFY_AQ_RESET 0 27 28 #define SCLP_ERRNOTIFY_AQ_REPAIR 1 28 29 #define SCLP_ERRNOTIFY_AQ_INFO_LOG 2 29 30 ··· 112 111 if (report->version != 1) 113 112 return -EINVAL; 114 113 115 - if (report->action != SCLP_ERRNOTIFY_AQ_REPAIR && 116 - report->action != SCLP_ERRNOTIFY_AQ_INFO_LOG) 114 + switch (report->action) { 115 + case SCLP_ERRNOTIFY_AQ_RESET: 116 + case SCLP_ERRNOTIFY_AQ_REPAIR: 117 + case SCLP_ERRNOTIFY_AQ_INFO_LOG: 118 + break; 119 + default: 117 120 return -EINVAL; 121 + } 118 122 119 123 if (report->length > (PAGE_SIZE - sizeof(struct err_notify_sccb))) 120 124 return -EINVAL;
+1 -1
drivers/s390/char/tape_3590.c
··· 971 971 snprintf(exception, BUFSIZE, "Data degraded"); 972 972 break; 973 973 case 0x03: 974 - snprintf(exception, BUFSIZE, "Data degraded in partion %i", 974 + snprintf(exception, BUFSIZE, "Data degraded in partition %i", 975 975 sense->fmt.f70.mp); 976 976 break; 977 977 case 0x04:
+1 -1
drivers/s390/char/vmlogrdr.c
··· 153 153 } 154 154 }; 155 155 156 - #define MAXMINOR (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t)) 156 + #define MAXMINOR ARRAY_SIZE(sys_ser) 157 157 158 158 static char FENCE[] = {"EOR"}; 159 159 static int vmlogrdr_major = 0;
+30
drivers/s390/cio/ccwgroup.c
··· 608 608 } 609 609 EXPORT_SYMBOL(ccwgroup_driver_unregister); 610 610 611 + static int __ccwgroupdev_check_busid(struct device *dev, void *id) 612 + { 613 + char *bus_id = id; 614 + 615 + return (strcmp(bus_id, dev_name(dev)) == 0); 616 + } 617 + 618 + /** 619 + * get_ccwgroupdev_by_busid() - obtain device from a bus id 620 + * @gdrv: driver the device is owned by 621 + * @bus_id: bus id of the device to be searched 622 + * 623 + * This function searches all devices owned by @gdrv for a device with a bus 624 + * id matching @bus_id. 625 + * Returns: 626 + * If a match is found, its reference count of the found device is increased 627 + * and it is returned; else %NULL is returned. 628 + */ 629 + struct ccwgroup_device *get_ccwgroupdev_by_busid(struct ccwgroup_driver *gdrv, 630 + char *bus_id) 631 + { 632 + struct device *dev; 633 + 634 + dev = driver_find_device(&gdrv->driver, NULL, bus_id, 635 + __ccwgroupdev_check_busid); 636 + 637 + return dev ? to_ccwgroupdev(dev) : NULL; 638 + } 639 + EXPORT_SYMBOL_GPL(get_ccwgroupdev_by_busid); 640 + 611 641 /** 612 642 * ccwgroup_probe_ccwdev() - probe function for slave devices 613 643 * @cdev: ccw device to be probed
+2 -13
drivers/s390/cio/qdio_main.c
··· 595 595 return 0; 596 596 } 597 597 598 - static inline int contains_aobs(struct qdio_q *q) 599 - { 600 - return !q->is_input_q && q->u.out.use_cq; 601 - } 602 - 603 598 static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count) 604 599 { 605 600 unsigned char state = 0; 606 601 int j, b = start; 607 - 608 - if (!contains_aobs(q)) 609 - return; 610 602 611 603 for (j = 0; j < count; ++j) { 612 604 get_buf_state(q, b, &state, 0); ··· 610 618 q->u.out.sbal_state[b].flags |= 611 619 QDIO_OUTBUF_STATE_FLAG_PENDING; 612 620 q->u.out.aobs[b] = NULL; 613 - } else if (state == SLSB_P_OUTPUT_EMPTY) { 614 - q->u.out.sbal_state[b].aob = NULL; 615 621 } 616 622 b = next_buf(b); 617 623 } ··· 628 638 q->aobs[bufnr] = aob; 629 639 } 630 640 if (q->aobs[bufnr]) { 631 - q->sbal_state[bufnr].aob = q->aobs[bufnr]; 632 641 q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user; 633 642 phys_aob = virt_to_phys(q->aobs[bufnr]); 634 643 WARN_ON_ONCE(phys_aob & 0xFF); ··· 655 666 qperf_inc(q, outbound_handler); 656 667 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", 657 668 start, count); 669 + if (q->u.out.use_cq) 670 + qdio_handle_aobs(q, start, count); 658 671 } 659 - 660 - qdio_handle_aobs(q, start, count); 661 672 662 673 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, 663 674 q->irq_ptr->int_parm);
-1
drivers/s390/cio/qdio_setup.c
··· 27 27 { 28 28 return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC); 29 29 } 30 - EXPORT_SYMBOL_GPL(qdio_allocate_aob); 31 30 32 31 void qdio_release_aob(struct qaob *aob) 33 32 {
+1 -1
drivers/s390/crypto/Makefile
··· 10 10 zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o 11 11 obj-$(CONFIG_ZCRYPT) += zcrypt.o 12 12 # adapter drivers depend on ap.o and zcrypt.o 13 - obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o 13 + obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o 14 14 15 15 # pkey kernel module 16 16 pkey-objs := pkey_api.o
+39 -40
drivers/s390/crypto/ap_bus.c
··· 65 65 DEFINE_SPINLOCK(ap_list_lock); 66 66 LIST_HEAD(ap_card_list); 67 67 68 - /* Default permissions (card and domain masking) */ 69 - static struct ap_perms { 70 - DECLARE_BITMAP(apm, AP_DEVICES); 71 - DECLARE_BITMAP(aqm, AP_DOMAINS); 72 - } ap_perms; 73 - static DEFINE_MUTEX(ap_perms_mutex); 68 + /* Default permissions (ioctl, card and domain masking) */ 69 + struct ap_perms ap_perms; 70 + EXPORT_SYMBOL(ap_perms); 71 + DEFINE_MUTEX(ap_perms_mutex); 72 + EXPORT_SYMBOL(ap_perms_mutex); 74 73 75 74 static struct ap_config_info *ap_configuration; 76 75 static bool initialised; ··· 943 944 return 0; 944 945 } 945 946 946 - /* 947 - * process_mask_arg() - parse a bitmap string and clear/set the 948 - * bits in the bitmap accordingly. The string may be given as 949 - * absolute value, a hex string like 0x1F2E3D4C5B6A" simple over- 950 - * writing the current content of the bitmap. Or as relative string 951 - * like "+1-16,-32,-0x40,+128" where only single bits or ranges of 952 - * bits are cleared or set. Distinction is done based on the very 953 - * first character which may be '+' or '-' for the relative string 954 - * and othewise assume to be an absolute value string. If parsing fails 955 - * a negative errno value is returned. All arguments and bitmaps are 956 - * big endian order. 957 - */ 958 - static int process_mask_arg(const char *str, 959 - unsigned long *bitmap, int bits, 960 - struct mutex *lock) 947 + int ap_parse_mask_str(const char *str, 948 + unsigned long *bitmap, int bits, 949 + struct mutex *lock) 961 950 { 962 951 unsigned long *newmap, size; 963 952 int rc; ··· 976 989 kfree(newmap); 977 990 return rc; 978 991 } 992 + EXPORT_SYMBOL(ap_parse_mask_str); 979 993 980 994 /* 981 995 * AP bus attributes. 
··· 1036 1048 } 1037 1049 1038 1050 static BUS_ATTR_RO(ap_usage_domain_mask); 1051 + 1052 + static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf) 1053 + { 1054 + if (!ap_configuration) /* QCI not supported */ 1055 + return snprintf(buf, PAGE_SIZE, "not supported\n"); 1056 + 1057 + return snprintf(buf, PAGE_SIZE, 1058 + "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 1059 + ap_configuration->apm[0], ap_configuration->apm[1], 1060 + ap_configuration->apm[2], ap_configuration->apm[3], 1061 + ap_configuration->apm[4], ap_configuration->apm[5], 1062 + ap_configuration->apm[6], ap_configuration->apm[7]); 1063 + } 1064 + 1065 + static BUS_ATTR_RO(ap_adapter_mask); 1039 1066 1040 1067 static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf) 1041 1068 { ··· 1164 1161 { 1165 1162 int rc; 1166 1163 1167 - rc = process_mask_arg(buf, ap_perms.apm, AP_DEVICES, &ap_perms_mutex); 1164 + rc = ap_parse_mask_str(buf, ap_perms.apm, AP_DEVICES, &ap_perms_mutex); 1168 1165 if (rc) 1169 1166 return rc; 1170 1167 ··· 1195 1192 { 1196 1193 int rc; 1197 1194 1198 - rc = process_mask_arg(buf, ap_perms.aqm, AP_DOMAINS, &ap_perms_mutex); 1195 + rc = ap_parse_mask_str(buf, ap_perms.aqm, AP_DOMAINS, &ap_perms_mutex); 1199 1196 if (rc) 1200 1197 return rc; 1201 1198 ··· 1210 1207 &bus_attr_ap_domain, 1211 1208 &bus_attr_ap_control_domain_mask, 1212 1209 &bus_attr_ap_usage_domain_mask, 1210 + &bus_attr_ap_adapter_mask, 1213 1211 &bus_attr_config_time, 1214 1212 &bus_attr_poll_thread, 1215 1213 &bus_attr_ap_interrupts, ··· 1222 1218 }; 1223 1219 1224 1220 /** 1225 - * ap_select_domain(): Select an AP domain. 1226 - * 1227 - * Pick one of the 16 AP domains. 1221 + * ap_select_domain(): Select an AP domain if possible and we haven't 1222 + * already done so before. 
1228 1223 */ 1229 - static int ap_select_domain(void) 1224 + static void ap_select_domain(void) 1230 1225 { 1231 1226 int count, max_count, best_domain; 1232 1227 struct ap_queue_status status; ··· 1240 1237 if (ap_domain_index >= 0) { 1241 1238 /* Domain has already been selected. */ 1242 1239 spin_unlock_bh(&ap_domain_lock); 1243 - return 0; 1240 + return; 1244 1241 } 1245 1242 best_domain = -1; 1246 1243 max_count = 0; ··· 1267 1264 if (best_domain >= 0) { 1268 1265 ap_domain_index = best_domain; 1269 1266 AP_DBF(DBF_DEBUG, "new ap_domain_index=%d\n", ap_domain_index); 1270 - spin_unlock_bh(&ap_domain_lock); 1271 - return 0; 1272 1267 } 1273 1268 spin_unlock_bh(&ap_domain_lock); 1274 - return -ENODEV; 1275 1269 } 1276 1270 1277 1271 /* ··· 1346 1346 AP_DBF(DBF_DEBUG, "%s running\n", __func__); 1347 1347 1348 1348 ap_query_configuration(ap_configuration); 1349 - if (ap_select_domain() != 0) 1350 - goto out; 1349 + ap_select_domain(); 1351 1350 1352 1351 for (id = 0; id < AP_DEVICES; id++) { 1353 1352 /* check if device is registered */ ··· 1466 1467 } 1467 1468 } /* end device loop */ 1468 1469 1469 - if (defdomdevs < 1) 1470 + if (ap_domain_index >= 0 && defdomdevs < 1) 1470 1471 AP_DBF(DBF_INFO, 1471 1472 "no queue device with default domain %d available\n", 1472 1473 ap_domain_index); 1473 1474 1474 - out: 1475 1475 mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); 1476 1476 } 1477 1477 ··· 1494 1496 static void __init ap_perms_init(void) 1495 1497 { 1496 1498 /* all resources useable if no kernel parameter string given */ 1499 + memset(&ap_perms.ioctlm, 0xFF, sizeof(ap_perms.ioctlm)); 1497 1500 memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm)); 1498 1501 memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm)); 1499 1502 1500 1503 /* apm kernel parameter string */ 1501 1504 if (apm_str) { 1502 1505 memset(&ap_perms.apm, 0, sizeof(ap_perms.apm)); 1503 - process_mask_arg(apm_str, ap_perms.apm, AP_DEVICES, 1504 - &ap_perms_mutex); 1506 + 
ap_parse_mask_str(apm_str, ap_perms.apm, AP_DEVICES, 1507 + &ap_perms_mutex); 1505 1508 } 1506 1509 1507 1510 /* aqm kernel parameter string */ 1508 1511 if (aqm_str) { 1509 1512 memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm)); 1510 - process_mask_arg(aqm_str, ap_perms.aqm, AP_DOMAINS, 1511 - &ap_perms_mutex); 1513 + ap_parse_mask_str(aqm_str, ap_perms.aqm, AP_DOMAINS, 1514 + &ap_perms_mutex); 1512 1515 } 1513 1516 } 1514 1517 ··· 1532 1533 return -ENODEV; 1533 1534 } 1534 1535 1535 - /* set up the AP permissions (ap and aq masks) */ 1536 + /* set up the AP permissions (ioctls, ap and aq masks) */ 1536 1537 ap_perms_init(); 1537 1538 1538 1539 /* Get AP configuration data if available */
+25
drivers/s390/crypto/ap_bus.h
··· 20 20 21 21 #define AP_DEVICES 256 /* Number of AP devices. */ 22 22 #define AP_DOMAINS 256 /* Number of AP domains. */ 23 + #define AP_IOCTLS 256 /* Number of ioctls. */ 23 24 #define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */ 24 25 #define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */ 25 26 #define AP_POLL_TIME 1 /* Time in ticks between receive polls. */ ··· 258 257 struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type, 259 258 int comp_device_type, unsigned int functions); 260 259 260 + struct ap_perms { 261 + unsigned long ioctlm[BITS_TO_LONGS(AP_IOCTLS)]; 262 + unsigned long apm[BITS_TO_LONGS(AP_DEVICES)]; 263 + unsigned long aqm[BITS_TO_LONGS(AP_DOMAINS)]; 264 + }; 265 + extern struct ap_perms ap_perms; 266 + extern struct mutex ap_perms_mutex; 267 + 261 268 /* 262 269 * check APQN for owned/reserved by ap bus and default driver(s). 263 270 * Checks if this APQN is or will be in use by the ap bus ··· 288 279 */ 289 280 int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm, 290 281 unsigned long *aqm); 282 + 283 + /* 284 + * ap_parse_mask_str() - helper function to parse a bitmap string 285 + * and clear/set the bits in the bitmap accordingly. The string may be 286 + * given as absolute value, a hex string like 0x1F2E3D4C5B6A" simple 287 + * overwriting the current content of the bitmap. Or as relative string 288 + * like "+1-16,-32,-0x40,+128" where only single bits or ranges of 289 + * bits are cleared or set. Distinction is done based on the very 290 + * first character which may be '+' or '-' for the relative string 291 + * and othewise assume to be an absolute value string. If parsing fails 292 + * a negative errno value is returned. All arguments and bitmaps are 293 + * big endian order. 294 + */ 295 + int ap_parse_mask_str(const char *str, 296 + unsigned long *bitmap, int bits, 297 + struct mutex *lock); 291 298 292 299 #endif /* _AP_BUS_H_ */
+509 -12
drivers/s390/crypto/pkey_api.c
··· 16 16 #include <linux/slab.h> 17 17 #include <linux/kallsyms.h> 18 18 #include <linux/debugfs.h> 19 + #include <linux/random.h> 20 + #include <linux/cpufeature.h> 19 21 #include <asm/zcrypt.h> 20 22 #include <asm/cpacf.h> 21 23 #include <asm/pkey.h> 24 + #include <crypto/aes.h> 22 25 23 26 #include "zcrypt_api.h" 24 27 ··· 34 31 35 32 /* Size of vardata block used for some of the cca requests/replies */ 36 33 #define VARDATASIZE 4096 34 + 35 + /* mask of available pckmo subfunctions, fetched once at module init */ 36 + static cpacf_mask_t pckmo_functions; 37 37 38 38 /* 39 39 * debug feature data and functions ··· 61 55 debug_unregister(debug_info); 62 56 } 63 57 58 + /* Key token types */ 59 + #define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */ 60 + #define TOKTYPE_CCA_INTERNAL 0x01 /* CCA internal key token */ 61 + 62 + /* For TOKTYPE_NON_CCA: */ 63 + #define TOKVER_PROTECTED_KEY 0x01 /* Protected key token */ 64 + 65 + /* For TOKTYPE_CCA_INTERNAL: */ 66 + #define TOKVER_CCA_AES 0x04 /* CCA AES key token */ 67 + 68 + /* header part of a key token */ 69 + struct keytoken_header { 70 + u8 type; /* one of the TOKTYPE values */ 71 + u8 res0[3]; 72 + u8 version; /* one of the TOKVER values */ 73 + u8 res1[3]; 74 + } __packed; 75 + 64 76 /* inside view of a secure key token (only type 0x01 version 0x04) */ 65 77 struct secaeskeytoken { 66 78 u8 type; /* 0x01 for internal key token */ ··· 95 71 u8 tvv[4]; /* token validation value */ 96 72 } __packed; 97 73 74 + /* inside view of a protected key token (only type 0x00 version 0x01) */ 75 + struct protaeskeytoken { 76 + u8 type; /* 0x00 for PAES specific key tokens */ 77 + u8 res0[3]; 78 + u8 version; /* should be 0x01 for protected AES key token */ 79 + u8 res1[3]; 80 + u32 keytype; /* key type, one of the PKEY_KEYTYPE values */ 81 + u32 len; /* bytes actually stored in protkey[] */ 82 + u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */ 83 + } __packed; 84 + 98 85 /* 99 86 * Simple check if the token is a 
valid CCA secure AES key 100 87 * token. If keybitsize is given, the bitsize of the key is ··· 115 80 { 116 81 struct secaeskeytoken *t = (struct secaeskeytoken *) token; 117 82 118 - if (t->type != 0x01) { 83 + if (t->type != TOKTYPE_CCA_INTERNAL) { 119 84 DEBUG_ERR( 120 - "%s secure token check failed, type mismatch 0x%02x != 0x01\n", 121 - __func__, (int) t->type); 85 + "%s secure token check failed, type mismatch 0x%02x != 0x%02x\n", 86 + __func__, (int) t->type, TOKTYPE_CCA_INTERNAL); 122 87 return -EINVAL; 123 88 } 124 - if (t->version != 0x04) { 89 + if (t->version != TOKVER_CCA_AES) { 125 90 DEBUG_ERR( 126 - "%s secure token check failed, version mismatch 0x%02x != 0x04\n", 127 - __func__, (int) t->version); 91 + "%s secure token check failed, version mismatch 0x%02x != 0x%02x\n", 92 + __func__, (int) t->version, TOKVER_CCA_AES); 128 93 return -EINVAL; 129 94 } 130 95 if (keybitsize > 0 && t->bitsize != keybitsize) { ··· 682 647 return -EINVAL; 683 648 } 684 649 650 + /* 651 + * Check if the needed pckmo subfunction is available. 652 + * These subfunctions can be enabled/disabled by customers 653 + * in the LPAR profile or may even change on the fly. 
654 + */ 655 + if (!cpacf_test_func(&pckmo_functions, fc)) { 656 + DEBUG_ERR("%s pckmo functions not available\n", __func__); 657 + return -EOPNOTSUPP; 658 + } 659 + 685 660 /* prepare param block */ 686 661 memset(paramblock, 0, sizeof(paramblock)); 687 662 memcpy(paramblock, clrkey->clrkey, keysize); ··· 1097 1052 EXPORT_SYMBOL(pkey_verifykey); 1098 1053 1099 1054 /* 1055 + * Generate a random protected key 1056 + */ 1057 + int pkey_genprotkey(__u32 keytype, struct pkey_protkey *protkey) 1058 + { 1059 + struct pkey_clrkey clrkey; 1060 + int keysize; 1061 + int rc; 1062 + 1063 + switch (keytype) { 1064 + case PKEY_KEYTYPE_AES_128: 1065 + keysize = 16; 1066 + break; 1067 + case PKEY_KEYTYPE_AES_192: 1068 + keysize = 24; 1069 + break; 1070 + case PKEY_KEYTYPE_AES_256: 1071 + keysize = 32; 1072 + break; 1073 + default: 1074 + DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__, 1075 + keytype); 1076 + return -EINVAL; 1077 + } 1078 + 1079 + /* generate a dummy random clear key */ 1080 + get_random_bytes(clrkey.clrkey, keysize); 1081 + 1082 + /* convert it to a dummy protected key */ 1083 + rc = pkey_clr2protkey(keytype, &clrkey, protkey); 1084 + if (rc) 1085 + return rc; 1086 + 1087 + /* replace the key part of the protected key with random bytes */ 1088 + get_random_bytes(protkey->protkey, keysize); 1089 + 1090 + return 0; 1091 + } 1092 + EXPORT_SYMBOL(pkey_genprotkey); 1093 + 1094 + /* 1095 + * Verify if a protected key is still valid 1096 + */ 1097 + int pkey_verifyprotkey(const struct pkey_protkey *protkey) 1098 + { 1099 + unsigned long fc; 1100 + struct { 1101 + u8 iv[AES_BLOCK_SIZE]; 1102 + u8 key[MAXPROTKEYSIZE]; 1103 + } param; 1104 + u8 null_msg[AES_BLOCK_SIZE]; 1105 + u8 dest_buf[AES_BLOCK_SIZE]; 1106 + unsigned int k; 1107 + 1108 + switch (protkey->type) { 1109 + case PKEY_KEYTYPE_AES_128: 1110 + fc = CPACF_KMC_PAES_128; 1111 + break; 1112 + case PKEY_KEYTYPE_AES_192: 1113 + fc = CPACF_KMC_PAES_192; 1114 + break; 1115 + case PKEY_KEYTYPE_AES_256: 1116 
+ fc = CPACF_KMC_PAES_256; 1117 + break; 1118 + default: 1119 + DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__, 1120 + protkey->type); 1121 + return -EINVAL; 1122 + } 1123 + 1124 + memset(null_msg, 0, sizeof(null_msg)); 1125 + 1126 + memset(param.iv, 0, sizeof(param.iv)); 1127 + memcpy(param.key, protkey->protkey, sizeof(param.key)); 1128 + 1129 + k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf, 1130 + sizeof(null_msg)); 1131 + if (k != sizeof(null_msg)) { 1132 + DEBUG_ERR("%s protected key is not valid\n", __func__); 1133 + return -EKEYREJECTED; 1134 + } 1135 + 1136 + return 0; 1137 + } 1138 + EXPORT_SYMBOL(pkey_verifyprotkey); 1139 + 1140 + /* 1141 + * Transform a non-CCA key token into a protected key 1142 + */ 1143 + static int pkey_nonccatok2pkey(const __u8 *key, __u32 keylen, 1144 + struct pkey_protkey *protkey) 1145 + { 1146 + struct keytoken_header *hdr = (struct keytoken_header *)key; 1147 + struct protaeskeytoken *t; 1148 + 1149 + switch (hdr->version) { 1150 + case TOKVER_PROTECTED_KEY: 1151 + if (keylen != sizeof(struct protaeskeytoken)) 1152 + return -EINVAL; 1153 + 1154 + t = (struct protaeskeytoken *)key; 1155 + protkey->len = t->len; 1156 + protkey->type = t->keytype; 1157 + memcpy(protkey->protkey, t->protkey, 1158 + sizeof(protkey->protkey)); 1159 + 1160 + return pkey_verifyprotkey(protkey); 1161 + default: 1162 + DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n", 1163 + __func__, hdr->version); 1164 + return -EINVAL; 1165 + } 1166 + } 1167 + 1168 + /* 1169 + * Transform a CCA internal key token into a protected key 1170 + */ 1171 + static int pkey_ccainttok2pkey(const __u8 *key, __u32 keylen, 1172 + struct pkey_protkey *protkey) 1173 + { 1174 + struct keytoken_header *hdr = (struct keytoken_header *)key; 1175 + 1176 + switch (hdr->version) { 1177 + case TOKVER_CCA_AES: 1178 + if (keylen != sizeof(struct secaeskeytoken)) 1179 + return -EINVAL; 1180 + 1181 + return pkey_skey2pkey((struct pkey_seckey *)key, 1182 + 
protkey); 1183 + default: 1184 + DEBUG_ERR("%s unknown/unsupported CCA internal token version %d\n", 1185 + __func__, hdr->version); 1186 + return -EINVAL; 1187 + } 1188 + } 1189 + 1190 + /* 1191 + * Transform a key blob (of any type) into a protected key 1192 + */ 1193 + int pkey_keyblob2pkey(const __u8 *key, __u32 keylen, 1194 + struct pkey_protkey *protkey) 1195 + { 1196 + struct keytoken_header *hdr = (struct keytoken_header *)key; 1197 + 1198 + if (keylen < sizeof(struct keytoken_header)) 1199 + return -EINVAL; 1200 + 1201 + switch (hdr->type) { 1202 + case TOKTYPE_NON_CCA: 1203 + return pkey_nonccatok2pkey(key, keylen, protkey); 1204 + case TOKTYPE_CCA_INTERNAL: 1205 + return pkey_ccainttok2pkey(key, keylen, protkey); 1206 + default: 1207 + DEBUG_ERR("%s unknown/unsupported blob type %d\n", __func__, 1208 + hdr->type); 1209 + return -EINVAL; 1210 + } 1211 + } 1212 + EXPORT_SYMBOL(pkey_keyblob2pkey); 1213 + 1214 + /* 1100 1215 * File io functions 1101 1216 */ 1102 1217 ··· 1372 1167 return -EFAULT; 1373 1168 break; 1374 1169 } 1170 + case PKEY_GENPROTK: { 1171 + struct pkey_genprotk __user *ugp = (void __user *) arg; 1172 + struct pkey_genprotk kgp; 1173 + 1174 + if (copy_from_user(&kgp, ugp, sizeof(kgp))) 1175 + return -EFAULT; 1176 + rc = pkey_genprotkey(kgp.keytype, &kgp.protkey); 1177 + DEBUG_DBG("%s pkey_genprotkey()=%d\n", __func__, rc); 1178 + if (rc) 1179 + break; 1180 + if (copy_to_user(ugp, &kgp, sizeof(kgp))) 1181 + return -EFAULT; 1182 + break; 1183 + } 1184 + case PKEY_VERIFYPROTK: { 1185 + struct pkey_verifyprotk __user *uvp = (void __user *) arg; 1186 + struct pkey_verifyprotk kvp; 1187 + 1188 + if (copy_from_user(&kvp, uvp, sizeof(kvp))) 1189 + return -EFAULT; 1190 + rc = pkey_verifyprotkey(&kvp.protkey); 1191 + DEBUG_DBG("%s pkey_verifyprotkey()=%d\n", __func__, rc); 1192 + break; 1193 + } 1194 + case PKEY_KBLOB2PROTK: { 1195 + struct pkey_kblob2pkey __user *utp = (void __user *) arg; 1196 + struct pkey_kblob2pkey ktp; 1197 + __u8 __user 
*ukey; 1198 + __u8 *kkey; 1199 + 1200 + if (copy_from_user(&ktp, utp, sizeof(ktp))) 1201 + return -EFAULT; 1202 + if (ktp.keylen < MINKEYBLOBSIZE || 1203 + ktp.keylen > MAXKEYBLOBSIZE) 1204 + return -EINVAL; 1205 + ukey = ktp.key; 1206 + kkey = kmalloc(ktp.keylen, GFP_KERNEL); 1207 + if (kkey == NULL) 1208 + return -ENOMEM; 1209 + if (copy_from_user(kkey, ukey, ktp.keylen)) { 1210 + kfree(kkey); 1211 + return -EFAULT; 1212 + } 1213 + rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey); 1214 + DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc); 1215 + kfree(kkey); 1216 + if (rc) 1217 + break; 1218 + if (copy_to_user(utp, &ktp, sizeof(ktp))) 1219 + return -EFAULT; 1220 + break; 1221 + } 1375 1222 default: 1376 1223 /* unknown/unsupported ioctl cmd */ 1377 1224 return -ENOTTY; ··· 1435 1178 /* 1436 1179 * Sysfs and file io operations 1437 1180 */ 1181 + 1182 + /* 1183 + * Sysfs attribute read function for all protected key binary attributes. 1184 + * The implementation can not deal with partial reads, because a new random 1185 + * protected key blob is generated with each read. In case of partial reads 1186 + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. 
1187 + */ 1188 + static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf, 1189 + loff_t off, size_t count) 1190 + { 1191 + struct protaeskeytoken protkeytoken; 1192 + struct pkey_protkey protkey; 1193 + int rc; 1194 + 1195 + if (off != 0 || count < sizeof(protkeytoken)) 1196 + return -EINVAL; 1197 + if (is_xts) 1198 + if (count < 2 * sizeof(protkeytoken)) 1199 + return -EINVAL; 1200 + 1201 + memset(&protkeytoken, 0, sizeof(protkeytoken)); 1202 + protkeytoken.type = TOKTYPE_NON_CCA; 1203 + protkeytoken.version = TOKVER_PROTECTED_KEY; 1204 + protkeytoken.keytype = keytype; 1205 + 1206 + rc = pkey_genprotkey(protkeytoken.keytype, &protkey); 1207 + if (rc) 1208 + return rc; 1209 + 1210 + protkeytoken.len = protkey.len; 1211 + memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len); 1212 + 1213 + memcpy(buf, &protkeytoken, sizeof(protkeytoken)); 1214 + 1215 + if (is_xts) { 1216 + rc = pkey_genprotkey(protkeytoken.keytype, &protkey); 1217 + if (rc) 1218 + return rc; 1219 + 1220 + protkeytoken.len = protkey.len; 1221 + memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len); 1222 + 1223 + memcpy(buf + sizeof(protkeytoken), &protkeytoken, 1224 + sizeof(protkeytoken)); 1225 + 1226 + return 2 * sizeof(protkeytoken); 1227 + } 1228 + 1229 + return sizeof(protkeytoken); 1230 + } 1231 + 1232 + static ssize_t protkey_aes_128_read(struct file *filp, 1233 + struct kobject *kobj, 1234 + struct bin_attribute *attr, 1235 + char *buf, loff_t off, 1236 + size_t count) 1237 + { 1238 + return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf, 1239 + off, count); 1240 + } 1241 + 1242 + static ssize_t protkey_aes_192_read(struct file *filp, 1243 + struct kobject *kobj, 1244 + struct bin_attribute *attr, 1245 + char *buf, loff_t off, 1246 + size_t count) 1247 + { 1248 + return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf, 1249 + off, count); 1250 + } 1251 + 1252 + static ssize_t protkey_aes_256_read(struct file *filp, 1253 + struct 
kobject *kobj, 1254 + struct bin_attribute *attr, 1255 + char *buf, loff_t off, 1256 + size_t count) 1257 + { 1258 + return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf, 1259 + off, count); 1260 + } 1261 + 1262 + static ssize_t protkey_aes_128_xts_read(struct file *filp, 1263 + struct kobject *kobj, 1264 + struct bin_attribute *attr, 1265 + char *buf, loff_t off, 1266 + size_t count) 1267 + { 1268 + return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf, 1269 + off, count); 1270 + } 1271 + 1272 + static ssize_t protkey_aes_256_xts_read(struct file *filp, 1273 + struct kobject *kobj, 1274 + struct bin_attribute *attr, 1275 + char *buf, loff_t off, 1276 + size_t count) 1277 + { 1278 + return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf, 1279 + off, count); 1280 + } 1281 + 1282 + static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken)); 1283 + static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken)); 1284 + static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken)); 1285 + static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken)); 1286 + static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken)); 1287 + 1288 + static struct bin_attribute *protkey_attrs[] = { 1289 + &bin_attr_protkey_aes_128, 1290 + &bin_attr_protkey_aes_192, 1291 + &bin_attr_protkey_aes_256, 1292 + &bin_attr_protkey_aes_128_xts, 1293 + &bin_attr_protkey_aes_256_xts, 1294 + NULL 1295 + }; 1296 + 1297 + static struct attribute_group protkey_attr_group = { 1298 + .name = "protkey", 1299 + .bin_attrs = protkey_attrs, 1300 + }; 1301 + 1302 + /* 1303 + * Sysfs attribute read function for all secure key ccadata binary attributes. 1304 + * The implementation can not deal with partial reads, because a new random 1305 + * protected key blob is generated with each read. In case of partial reads 1306 + * (i.e. off != 0 or count < key blob size) -EINVAL is returned. 
1307 + */ 1308 + static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf, 1309 + loff_t off, size_t count) 1310 + { 1311 + int rc; 1312 + 1313 + if (off != 0 || count < sizeof(struct secaeskeytoken)) 1314 + return -EINVAL; 1315 + if (is_xts) 1316 + if (count < 2 * sizeof(struct secaeskeytoken)) 1317 + return -EINVAL; 1318 + 1319 + rc = pkey_genseckey(-1, -1, keytype, (struct pkey_seckey *)buf); 1320 + if (rc) 1321 + return rc; 1322 + 1323 + if (is_xts) { 1324 + buf += sizeof(struct pkey_seckey); 1325 + rc = pkey_genseckey(-1, -1, keytype, (struct pkey_seckey *)buf); 1326 + if (rc) 1327 + return rc; 1328 + 1329 + return 2 * sizeof(struct secaeskeytoken); 1330 + } 1331 + 1332 + return sizeof(struct secaeskeytoken); 1333 + } 1334 + 1335 + static ssize_t ccadata_aes_128_read(struct file *filp, 1336 + struct kobject *kobj, 1337 + struct bin_attribute *attr, 1338 + char *buf, loff_t off, 1339 + size_t count) 1340 + { 1341 + return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf, 1342 + off, count); 1343 + } 1344 + 1345 + static ssize_t ccadata_aes_192_read(struct file *filp, 1346 + struct kobject *kobj, 1347 + struct bin_attribute *attr, 1348 + char *buf, loff_t off, 1349 + size_t count) 1350 + { 1351 + return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf, 1352 + off, count); 1353 + } 1354 + 1355 + static ssize_t ccadata_aes_256_read(struct file *filp, 1356 + struct kobject *kobj, 1357 + struct bin_attribute *attr, 1358 + char *buf, loff_t off, 1359 + size_t count) 1360 + { 1361 + return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf, 1362 + off, count); 1363 + } 1364 + 1365 + static ssize_t ccadata_aes_128_xts_read(struct file *filp, 1366 + struct kobject *kobj, 1367 + struct bin_attribute *attr, 1368 + char *buf, loff_t off, 1369 + size_t count) 1370 + { 1371 + return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf, 1372 + off, count); 1373 + } 1374 + 1375 + static ssize_t 
ccadata_aes_256_xts_read(struct file *filp, 1376 + struct kobject *kobj, 1377 + struct bin_attribute *attr, 1378 + char *buf, loff_t off, 1379 + size_t count) 1380 + { 1381 + return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf, 1382 + off, count); 1383 + } 1384 + 1385 + static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken)); 1386 + static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken)); 1387 + static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken)); 1388 + static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken)); 1389 + static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken)); 1390 + 1391 + static struct bin_attribute *ccadata_attrs[] = { 1392 + &bin_attr_ccadata_aes_128, 1393 + &bin_attr_ccadata_aes_192, 1394 + &bin_attr_ccadata_aes_256, 1395 + &bin_attr_ccadata_aes_128_xts, 1396 + &bin_attr_ccadata_aes_256_xts, 1397 + NULL 1398 + }; 1399 + 1400 + static struct attribute_group ccadata_attr_group = { 1401 + .name = "ccadata", 1402 + .bin_attrs = ccadata_attrs, 1403 + }; 1404 + 1405 + static const struct attribute_group *pkey_attr_groups[] = { 1406 + &protkey_attr_group, 1407 + &ccadata_attr_group, 1408 + NULL, 1409 + }; 1410 + 1438 1411 static const struct file_operations pkey_fops = { 1439 1412 .owner = THIS_MODULE, 1440 1413 .open = nonseekable_open, ··· 1677 1190 .minor = MISC_DYNAMIC_MINOR, 1678 1191 .mode = 0666, 1679 1192 .fops = &pkey_fops, 1193 + .groups = pkey_attr_groups, 1680 1194 }; 1681 1195 1682 1196 /* ··· 1685 1197 */ 1686 1198 static int __init pkey_init(void) 1687 1199 { 1688 - cpacf_mask_t pckmo_functions; 1200 + cpacf_mask_t kmc_functions; 1689 1201 1690 - /* check for pckmo instructions available */ 1202 + /* 1203 + * The pckmo instruction should be available - even if we don't 1204 + * actually invoke it. This instruction comes with MSA 3 which 1205 + * is also the minimum level for the kmc instructions which 1206 + * are able to work with protected keys. 
1207 + */ 1691 1208 if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) 1692 1209 return -EOPNOTSUPP; 1693 - if (!cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_128_KEY) || 1694 - !cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_192_KEY) || 1695 - !cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_256_KEY)) 1210 + 1211 + /* check for kmc instructions available */ 1212 + if (!cpacf_query(CPACF_KMC, &kmc_functions)) 1213 + return -EOPNOTSUPP; 1214 + if (!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) || 1215 + !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) || 1216 + !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) 1696 1217 return -EOPNOTSUPP; 1697 1218 1698 1219 pkey_debug_init(); ··· 1719 1222 pkey_debug_exit(); 1720 1223 } 1721 1224 1722 - module_init(pkey_init); 1225 + module_cpu_feature_match(MSA, pkey_init); 1723 1226 module_exit(pkey_exit);
+594 -33
drivers/s390/crypto/zcrypt_api.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0+ 2 2 /* 3 - * zcrypt 2.1.0 4 - * 5 - * Copyright IBM Corp. 2001, 2012 3 + * Copyright IBM Corp. 2001, 2018 6 4 * Author(s): Robert Burroughs 7 5 * Eric Rossman (edrossma@us.ibm.com) 8 6 * Cornelia Huck <cornelia.huck@de.ibm.com> ··· 9 11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 10 12 * Ralph Wuerthner <rwuerthn@de.ibm.com> 11 13 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 14 + * Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com> 12 15 */ 13 16 14 17 #include <linux/module.h> ··· 23 24 #include <linux/uaccess.h> 24 25 #include <linux/hw_random.h> 25 26 #include <linux/debugfs.h> 27 + #include <linux/cdev.h> 28 + #include <linux/ctype.h> 26 29 #include <asm/debug.h> 27 30 28 31 #define CREATE_TRACE_POINTS ··· 109 108 } 110 109 EXPORT_SYMBOL(zcrypt_msgtype); 111 110 111 + /* 112 + * Multi device nodes extension functions. 113 + */ 114 + 115 + #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 116 + 117 + struct zcdn_device; 118 + 119 + static struct class *zcrypt_class; 120 + static dev_t zcrypt_devt; 121 + static struct cdev zcrypt_cdev; 122 + 123 + struct zcdn_device { 124 + struct device device; 125 + struct ap_perms perms; 126 + }; 127 + 128 + #define to_zcdn_dev(x) container_of((x), struct zcdn_device, device) 129 + 130 + #define ZCDN_MAX_NAME 32 131 + 132 + static int zcdn_create(const char *name); 133 + static int zcdn_destroy(const char *name); 134 + 135 + /* helper function, matches the name for find_zcdndev_by_name() */ 136 + static int __match_zcdn_name(struct device *dev, const void *data) 137 + { 138 + return strcmp(dev_name(dev), (const char *)data) == 0; 139 + } 140 + 141 + /* helper function, matches the devt value for find_zcdndev_by_devt() */ 142 + static int __match_zcdn_devt(struct device *dev, const void *data) 143 + { 144 + return dev->devt == *((dev_t *) data); 145 + } 146 + 147 + /* 148 + * Find zcdn device by name. 
149 + * Returns reference to the zcdn device which needs to be released 150 + * with put_device() after use. 151 + */ 152 + static inline struct zcdn_device *find_zcdndev_by_name(const char *name) 153 + { 154 + struct device *dev = 155 + class_find_device(zcrypt_class, NULL, 156 + (void *) name, 157 + __match_zcdn_name); 158 + 159 + return dev ? to_zcdn_dev(dev) : NULL; 160 + } 161 + 162 + /* 163 + * Find zcdn device by devt value. 164 + * Returns reference to the zcdn device which needs to be released 165 + * with put_device() after use. 166 + */ 167 + static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt) 168 + { 169 + struct device *dev = 170 + class_find_device(zcrypt_class, NULL, 171 + (void *) &devt, 172 + __match_zcdn_devt); 173 + 174 + return dev ? to_zcdn_dev(dev) : NULL; 175 + } 176 + 177 + static ssize_t ioctlmask_show(struct device *dev, 178 + struct device_attribute *attr, 179 + char *buf) 180 + { 181 + int i, rc; 182 + struct zcdn_device *zcdndev = to_zcdn_dev(dev); 183 + 184 + if (mutex_lock_interruptible(&ap_perms_mutex)) 185 + return -ERESTARTSYS; 186 + 187 + buf[0] = '0'; 188 + buf[1] = 'x'; 189 + for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++) 190 + snprintf(buf + 2 + 2 * i * sizeof(long), 191 + PAGE_SIZE - 2 - 2 * i * sizeof(long), 192 + "%016lx", zcdndev->perms.ioctlm[i]); 193 + buf[2 + 2 * i * sizeof(long)] = '\n'; 194 + buf[2 + 2 * i * sizeof(long) + 1] = '\0'; 195 + rc = 2 + 2 * i * sizeof(long) + 1; 196 + 197 + mutex_unlock(&ap_perms_mutex); 198 + 199 + return rc; 200 + } 201 + 202 + static ssize_t ioctlmask_store(struct device *dev, 203 + struct device_attribute *attr, 204 + const char *buf, size_t count) 205 + { 206 + int rc; 207 + struct zcdn_device *zcdndev = to_zcdn_dev(dev); 208 + 209 + rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm, 210 + AP_IOCTLS, &ap_perms_mutex); 211 + if (rc) 212 + return rc; 213 + 214 + return count; 215 + } 216 + 217 + static DEVICE_ATTR_RW(ioctlmask); 218 + 219 + static 
ssize_t apmask_show(struct device *dev, 220 + struct device_attribute *attr, 221 + char *buf) 222 + { 223 + int i, rc; 224 + struct zcdn_device *zcdndev = to_zcdn_dev(dev); 225 + 226 + if (mutex_lock_interruptible(&ap_perms_mutex)) 227 + return -ERESTARTSYS; 228 + 229 + buf[0] = '0'; 230 + buf[1] = 'x'; 231 + for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++) 232 + snprintf(buf + 2 + 2 * i * sizeof(long), 233 + PAGE_SIZE - 2 - 2 * i * sizeof(long), 234 + "%016lx", zcdndev->perms.apm[i]); 235 + buf[2 + 2 * i * sizeof(long)] = '\n'; 236 + buf[2 + 2 * i * sizeof(long) + 1] = '\0'; 237 + rc = 2 + 2 * i * sizeof(long) + 1; 238 + 239 + mutex_unlock(&ap_perms_mutex); 240 + 241 + return rc; 242 + } 243 + 244 + static ssize_t apmask_store(struct device *dev, 245 + struct device_attribute *attr, 246 + const char *buf, size_t count) 247 + { 248 + int rc; 249 + struct zcdn_device *zcdndev = to_zcdn_dev(dev); 250 + 251 + rc = ap_parse_mask_str(buf, zcdndev->perms.apm, 252 + AP_DEVICES, &ap_perms_mutex); 253 + if (rc) 254 + return rc; 255 + 256 + return count; 257 + } 258 + 259 + static DEVICE_ATTR_RW(apmask); 260 + 261 + static ssize_t aqmask_show(struct device *dev, 262 + struct device_attribute *attr, 263 + char *buf) 264 + { 265 + int i, rc; 266 + struct zcdn_device *zcdndev = to_zcdn_dev(dev); 267 + 268 + if (mutex_lock_interruptible(&ap_perms_mutex)) 269 + return -ERESTARTSYS; 270 + 271 + buf[0] = '0'; 272 + buf[1] = 'x'; 273 + for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++) 274 + snprintf(buf + 2 + 2 * i * sizeof(long), 275 + PAGE_SIZE - 2 - 2 * i * sizeof(long), 276 + "%016lx", zcdndev->perms.aqm[i]); 277 + buf[2 + 2 * i * sizeof(long)] = '\n'; 278 + buf[2 + 2 * i * sizeof(long) + 1] = '\0'; 279 + rc = 2 + 2 * i * sizeof(long) + 1; 280 + 281 + mutex_unlock(&ap_perms_mutex); 282 + 283 + return rc; 284 + } 285 + 286 + static ssize_t aqmask_store(struct device *dev, 287 + struct device_attribute *attr, 288 + const char *buf, size_t count) 289 + 
{ 290 + int rc; 291 + struct zcdn_device *zcdndev = to_zcdn_dev(dev); 292 + 293 + rc = ap_parse_mask_str(buf, zcdndev->perms.aqm, 294 + AP_DOMAINS, &ap_perms_mutex); 295 + if (rc) 296 + return rc; 297 + 298 + return count; 299 + } 300 + 301 + static DEVICE_ATTR_RW(aqmask); 302 + 303 + static struct attribute *zcdn_dev_attrs[] = { 304 + &dev_attr_ioctlmask.attr, 305 + &dev_attr_apmask.attr, 306 + &dev_attr_aqmask.attr, 307 + NULL 308 + }; 309 + 310 + static struct attribute_group zcdn_dev_attr_group = { 311 + .attrs = zcdn_dev_attrs 312 + }; 313 + 314 + static const struct attribute_group *zcdn_dev_attr_groups[] = { 315 + &zcdn_dev_attr_group, 316 + NULL 317 + }; 318 + 319 + static ssize_t zcdn_create_store(struct class *class, 320 + struct class_attribute *attr, 321 + const char *buf, size_t count) 322 + { 323 + int rc; 324 + char name[ZCDN_MAX_NAME]; 325 + 326 + strncpy(name, skip_spaces(buf), sizeof(name)); 327 + name[sizeof(name) - 1] = '\0'; 328 + 329 + rc = zcdn_create(strim(name)); 330 + 331 + return rc ? rc : count; 332 + } 333 + 334 + static const struct class_attribute class_attr_zcdn_create = 335 + __ATTR(create, 0600, NULL, zcdn_create_store); 336 + 337 + static ssize_t zcdn_destroy_store(struct class *class, 338 + struct class_attribute *attr, 339 + const char *buf, size_t count) 340 + { 341 + int rc; 342 + char name[ZCDN_MAX_NAME]; 343 + 344 + strncpy(name, skip_spaces(buf), sizeof(name)); 345 + name[sizeof(name) - 1] = '\0'; 346 + 347 + rc = zcdn_destroy(strim(name)); 348 + 349 + return rc ? 
rc : count; 350 + } 351 + 352 + static const struct class_attribute class_attr_zcdn_destroy = 353 + __ATTR(destroy, 0600, NULL, zcdn_destroy_store); 354 + 355 + static void zcdn_device_release(struct device *dev) 356 + { 357 + struct zcdn_device *zcdndev = to_zcdn_dev(dev); 358 + 359 + ZCRYPT_DBF(DBF_INFO, "releasing zcdn device %d:%d\n", 360 + MAJOR(dev->devt), MINOR(dev->devt)); 361 + 362 + kfree(zcdndev); 363 + } 364 + 365 + static int zcdn_create(const char *name) 366 + { 367 + dev_t devt; 368 + int i, rc = 0; 369 + char nodename[ZCDN_MAX_NAME]; 370 + struct zcdn_device *zcdndev; 371 + 372 + if (mutex_lock_interruptible(&ap_perms_mutex)) 373 + return -ERESTARTSYS; 374 + 375 + /* check if device node with this name already exists */ 376 + if (name[0]) { 377 + zcdndev = find_zcdndev_by_name(name); 378 + if (zcdndev) { 379 + put_device(&zcdndev->device); 380 + rc = -EEXIST; 381 + goto unlockout; 382 + } 383 + } 384 + 385 + /* find an unused minor number */ 386 + for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) { 387 + devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i); 388 + zcdndev = find_zcdndev_by_devt(devt); 389 + if (zcdndev) 390 + put_device(&zcdndev->device); 391 + else 392 + break; 393 + } 394 + if (i == ZCRYPT_MAX_MINOR_NODES) { 395 + rc = -ENOSPC; 396 + goto unlockout; 397 + } 398 + 399 + /* alloc and prepare a new zcdn device */ 400 + zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL); 401 + if (!zcdndev) { 402 + rc = -ENOMEM; 403 + goto unlockout; 404 + } 405 + zcdndev->device.release = zcdn_device_release; 406 + zcdndev->device.class = zcrypt_class; 407 + zcdndev->device.devt = devt; 408 + zcdndev->device.groups = zcdn_dev_attr_groups; 409 + if (name[0]) 410 + strncpy(nodename, name, sizeof(nodename)); 411 + else 412 + snprintf(nodename, sizeof(nodename), 413 + ZCRYPT_NAME "_%d", (int) MINOR(devt)); 414 + nodename[sizeof(nodename)-1] = '\0'; 415 + if (dev_set_name(&zcdndev->device, nodename)) { 416 + rc = -EINVAL; 417 + goto unlockout; 418 + } 419 + rc 
= device_register(&zcdndev->device); 420 + if (rc) { 421 + put_device(&zcdndev->device); 422 + goto unlockout; 423 + } 424 + 425 + ZCRYPT_DBF(DBF_INFO, "created zcdn device %d:%d\n", 426 + MAJOR(devt), MINOR(devt)); 427 + 428 + unlockout: 429 + mutex_unlock(&ap_perms_mutex); 430 + return rc; 431 + } 432 + 433 + static int zcdn_destroy(const char *name) 434 + { 435 + int rc = 0; 436 + struct zcdn_device *zcdndev; 437 + 438 + if (mutex_lock_interruptible(&ap_perms_mutex)) 439 + return -ERESTARTSYS; 440 + 441 + /* try to find this zcdn device */ 442 + zcdndev = find_zcdndev_by_name(name); 443 + if (!zcdndev) { 444 + rc = -ENOENT; 445 + goto unlockout; 446 + } 447 + 448 + /* 449 + * The zcdn device is not hard destroyed. It is subject to 450 + * reference counting and thus just needs to be unregistered. 451 + */ 452 + put_device(&zcdndev->device); 453 + device_unregister(&zcdndev->device); 454 + 455 + unlockout: 456 + mutex_unlock(&ap_perms_mutex); 457 + return rc; 458 + } 459 + 460 + static void zcdn_destroy_all(void) 461 + { 462 + int i; 463 + dev_t devt; 464 + struct zcdn_device *zcdndev; 465 + 466 + mutex_lock(&ap_perms_mutex); 467 + for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) { 468 + devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i); 469 + zcdndev = find_zcdndev_by_devt(devt); 470 + if (zcdndev) { 471 + put_device(&zcdndev->device); 472 + device_unregister(&zcdndev->device); 473 + } 474 + } 475 + mutex_unlock(&ap_perms_mutex); 476 + } 477 + 478 + #endif 479 + 112 480 /** 113 481 * zcrypt_read (): Not supported beyond zcrypt 1.3.1. 
114 482 * ··· 507 137 */ 508 138 static int zcrypt_open(struct inode *inode, struct file *filp) 509 139 { 140 + struct ap_perms *perms = &ap_perms; 141 + 142 + #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 143 + if (filp->f_inode->i_cdev == &zcrypt_cdev) { 144 + struct zcdn_device *zcdndev; 145 + 146 + if (mutex_lock_interruptible(&ap_perms_mutex)) 147 + return -ERESTARTSYS; 148 + zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev); 149 + /* find returns a reference, no get_device() needed */ 150 + mutex_unlock(&ap_perms_mutex); 151 + if (zcdndev) 152 + perms = &zcdndev->perms; 153 + } 154 + #endif 155 + filp->private_data = (void *) perms; 156 + 510 157 atomic_inc(&zcrypt_open_count); 511 158 return nonseekable_open(inode, filp); 512 159 } ··· 535 148 */ 536 149 static int zcrypt_release(struct inode *inode, struct file *filp) 537 150 { 151 + #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 152 + if (filp->f_inode->i_cdev == &zcrypt_cdev) { 153 + struct zcdn_device *zcdndev; 154 + 155 + if (mutex_lock_interruptible(&ap_perms_mutex)) 156 + return -ERESTARTSYS; 157 + zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev); 158 + mutex_unlock(&ap_perms_mutex); 159 + if (zcdndev) { 160 + /* 2 puts here: one for find, one for open */ 161 + put_device(&zcdndev->device); 162 + put_device(&zcdndev->device); 163 + } 164 + } 165 + #endif 166 + 538 167 atomic_dec(&zcrypt_open_count); 539 168 return 0; 169 + } 170 + 171 + static inline int zcrypt_check_ioctl(struct ap_perms *perms, 172 + unsigned int cmd) 173 + { 174 + int rc = -EPERM; 175 + int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT; 176 + 177 + if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) { 178 + if (test_bit_inv(ioctlnr, perms->ioctlm)) 179 + rc = 0; 180 + } 181 + 182 + if (rc) 183 + ZCRYPT_DBF(DBF_WARN, 184 + "ioctl check failed: ioctlnr=0x%04x rc=%d\n", 185 + ioctlnr, rc); 186 + 187 + return rc; 188 + } 189 + 190 + static inline bool zcrypt_check_card(struct ap_perms *perms, int card) 191 + { 192 + return test_bit_inv(card, perms->apm) ? 
true : false; 193 + } 194 + 195 + static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue) 196 + { 197 + return test_bit_inv(queue, perms->aqm) ? true : false; 540 198 } 541 199 542 200 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, ··· 645 213 /* 646 214 * zcrypt ioctls. 647 215 */ 648 - static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) 216 + static long zcrypt_rsa_modexpo(struct ap_perms *perms, 217 + struct ica_rsa_modexpo *mex) 649 218 { 650 219 struct zcrypt_card *zc, *pref_zc; 651 220 struct zcrypt_queue *zq, *pref_zq; ··· 683 250 if (zc->min_mod_size > mex->inputdatalength || 684 251 zc->max_mod_size < mex->inputdatalength) 685 252 continue; 253 + /* check if device node has admission for this card */ 254 + if (!zcrypt_check_card(perms, zc->card->id)) 255 + continue; 686 256 /* get weight index of the card device */ 687 257 weight = zc->speed_rating[func_code]; 688 258 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) ··· 693 257 for_each_zcrypt_queue(zq, zc) { 694 258 /* check if device is online and eligible */ 695 259 if (!zq->online || !zq->ops->rsa_modexpo) 260 + continue; 261 + /* check if device node has admission for this queue */ 262 + if (!zcrypt_check_queue(perms, 263 + AP_QID_QUEUE(zq->queue->qid))) 696 264 continue; 697 265 if (zcrypt_queue_compare(zq, pref_zq, 698 266 weight, pref_weight)) ··· 727 287 return rc; 728 288 } 729 289 730 - static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) 290 + static long zcrypt_rsa_crt(struct ap_perms *perms, 291 + struct ica_rsa_modexpo_crt *crt) 731 292 { 732 293 struct zcrypt_card *zc, *pref_zc; 733 294 struct zcrypt_queue *zq, *pref_zq; ··· 765 324 if (zc->min_mod_size > crt->inputdatalength || 766 325 zc->max_mod_size < crt->inputdatalength) 767 326 continue; 327 + /* check if device node has admission for this card */ 328 + if (!zcrypt_check_card(perms, zc->card->id)) 329 + continue; 768 330 /* get weight index of the card device */ 
769 331 weight = zc->speed_rating[func_code]; 770 332 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) ··· 775 331 for_each_zcrypt_queue(zq, zc) { 776 332 /* check if device is online and eligible */ 777 333 if (!zq->online || !zq->ops->rsa_modexpo_crt) 334 + continue; 335 + /* check if device node has admission for this queue */ 336 + if (!zcrypt_check_queue(perms, 337 + AP_QID_QUEUE(zq->queue->qid))) 778 338 continue; 779 339 if (zcrypt_queue_compare(zq, pref_zq, 780 340 weight, pref_weight)) ··· 809 361 return rc; 810 362 } 811 363 812 - long zcrypt_send_cprb(struct ica_xcRB *xcRB) 364 + static long _zcrypt_send_cprb(struct ap_perms *perms, 365 + struct ica_xcRB *xcRB) 813 366 { 814 367 struct zcrypt_card *zc, *pref_zc; 815 368 struct zcrypt_queue *zq, *pref_zq; ··· 822 373 823 374 trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); 824 375 376 + xcRB->status = 0; 825 377 ap_init_message(&ap_msg); 826 378 rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain); 827 379 if (rc) ··· 839 389 if (xcRB->user_defined != AUTOSELECT && 840 390 xcRB->user_defined != zc->card->id) 841 391 continue; 392 + /* check if device node has admission for this card */ 393 + if (!zcrypt_check_card(perms, zc->card->id)) 394 + continue; 842 395 /* get weight index of the card device */ 843 396 weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY]; 844 397 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) ··· 852 399 !zq->ops->send_cprb || 853 400 ((*domain != (unsigned short) AUTOSELECT) && 854 401 (*domain != AP_QID_QUEUE(zq->queue->qid)))) 402 + continue; 403 + /* check if device node has admission for this queue */ 404 + if (!zcrypt_check_queue(perms, 405 + AP_QID_QUEUE(zq->queue->qid))) 855 406 continue; 856 407 if (zcrypt_queue_compare(zq, pref_zq, 857 408 weight, pref_weight)) ··· 890 433 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 891 434 return rc; 892 435 } 436 + 437 + long zcrypt_send_cprb(struct ica_xcRB *xcRB) 438 + { 439 + return _zcrypt_send_cprb(&ap_perms, 
xcRB); 440 + } 893 441 EXPORT_SYMBOL(zcrypt_send_cprb); 894 442 895 443 static bool is_desired_ep11_card(unsigned int dev_id, ··· 921 459 return false; 922 460 } 923 461 924 - static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) 462 + static long zcrypt_send_ep11_cprb(struct ap_perms *perms, 463 + struct ep11_urb *xcrb) 925 464 { 926 465 struct zcrypt_card *zc, *pref_zc; 927 466 struct zcrypt_queue *zq, *pref_zq; ··· 973 510 if (targets && 974 511 !is_desired_ep11_card(zc->card->id, target_num, targets)) 975 512 continue; 513 + /* check if device node has admission for this card */ 514 + if (!zcrypt_check_card(perms, zc->card->id)) 515 + continue; 976 516 /* get weight index of the card device */ 977 517 weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY]; 978 518 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) ··· 987 521 (targets && 988 522 !is_desired_ep11_queue(zq->queue->qid, 989 523 target_num, targets))) 524 + continue; 525 + /* check if device node has admission for this queue */ 526 + if (!zcrypt_check_queue(perms, 527 + AP_QID_QUEUE(zq->queue->qid))) 990 528 continue; 991 529 if (zcrypt_queue_compare(zq, pref_zq, 992 530 weight, pref_weight)) ··· 1258 788 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, 1259 789 unsigned long arg) 1260 790 { 1261 - int rc = 0; 791 + int rc; 792 + struct ap_perms *perms = 793 + (struct ap_perms *) filp->private_data; 794 + 795 + rc = zcrypt_check_ioctl(perms, cmd); 796 + if (rc) 797 + return rc; 1262 798 1263 799 switch (cmd) { 1264 800 case ICARSAMODEXPO: { ··· 1274 798 if (copy_from_user(&mex, umex, sizeof(mex))) 1275 799 return -EFAULT; 1276 800 do { 1277 - rc = zcrypt_rsa_modexpo(&mex); 801 + rc = zcrypt_rsa_modexpo(perms, &mex); 1278 802 } while (rc == -EAGAIN); 1279 803 /* on failure: retry once again after a requested rescan */ 1280 804 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1281 805 do { 1282 - rc = zcrypt_rsa_modexpo(&mex); 806 + rc = 
zcrypt_rsa_modexpo(perms, &mex); 1283 807 } while (rc == -EAGAIN); 1284 808 if (rc) { 1285 809 ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc); ··· 1294 818 if (copy_from_user(&crt, ucrt, sizeof(crt))) 1295 819 return -EFAULT; 1296 820 do { 1297 - rc = zcrypt_rsa_crt(&crt); 821 + rc = zcrypt_rsa_crt(perms, &crt); 1298 822 } while (rc == -EAGAIN); 1299 823 /* on failure: retry once again after a requested rescan */ 1300 824 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1301 825 do { 1302 - rc = zcrypt_rsa_crt(&crt); 826 + rc = zcrypt_rsa_crt(perms, &crt); 1303 827 } while (rc == -EAGAIN); 1304 828 if (rc) { 1305 829 ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc); ··· 1314 838 if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB))) 1315 839 return -EFAULT; 1316 840 do { 1317 - rc = zcrypt_send_cprb(&xcRB); 841 + rc = _zcrypt_send_cprb(perms, &xcRB); 1318 842 } while (rc == -EAGAIN); 1319 843 /* on failure: retry once again after a requested rescan */ 1320 844 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1321 845 do { 1322 - rc = zcrypt_send_cprb(&xcRB); 846 + rc = _zcrypt_send_cprb(perms, &xcRB); 1323 847 } while (rc == -EAGAIN); 1324 848 if (rc) 1325 - ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d\n", rc); 849 + ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n", 850 + rc, xcRB.status); 1326 851 if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) 1327 852 return -EFAULT; 1328 853 return rc; ··· 1335 858 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) 1336 859 return -EFAULT; 1337 860 do { 1338 - rc = zcrypt_send_ep11_cprb(&xcrb); 861 + rc = zcrypt_send_ep11_cprb(perms, &xcrb); 1339 862 } while (rc == -EAGAIN); 1340 863 /* on failure: retry once again after a requested rescan */ 1341 864 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1342 865 do { 1343 - rc = zcrypt_send_ep11_cprb(&xcrb); 866 + rc = zcrypt_send_ep11_cprb(perms, &xcrb); 1344 867 } while (rc == -EAGAIN); 1345 868 if (rc) 1346 869 ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", 
rc); ··· 1466 989 compat_uptr_t n_modulus; 1467 990 }; 1468 991 1469 - static long trans_modexpo32(struct file *filp, unsigned int cmd, 1470 - unsigned long arg) 992 + static long trans_modexpo32(struct ap_perms *perms, struct file *filp, 993 + unsigned int cmd, unsigned long arg) 1471 994 { 1472 995 struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg); 1473 996 struct compat_ica_rsa_modexpo mex32; ··· 1483 1006 mex64.b_key = compat_ptr(mex32.b_key); 1484 1007 mex64.n_modulus = compat_ptr(mex32.n_modulus); 1485 1008 do { 1486 - rc = zcrypt_rsa_modexpo(&mex64); 1009 + rc = zcrypt_rsa_modexpo(perms, &mex64); 1487 1010 } while (rc == -EAGAIN); 1488 1011 /* on failure: retry once again after a requested rescan */ 1489 1012 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1490 1013 do { 1491 - rc = zcrypt_rsa_modexpo(&mex64); 1014 + rc = zcrypt_rsa_modexpo(perms, &mex64); 1492 1015 } while (rc == -EAGAIN); 1493 1016 if (rc) 1494 1017 return rc; ··· 1508 1031 compat_uptr_t u_mult_inv; 1509 1032 }; 1510 1033 1511 - static long trans_modexpo_crt32(struct file *filp, unsigned int cmd, 1512 - unsigned long arg) 1034 + static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp, 1035 + unsigned int cmd, unsigned long arg) 1513 1036 { 1514 1037 struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg); 1515 1038 struct compat_ica_rsa_modexpo_crt crt32; ··· 1528 1051 crt64.nq_prime = compat_ptr(crt32.nq_prime); 1529 1052 crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); 1530 1053 do { 1531 - rc = zcrypt_rsa_crt(&crt64); 1054 + rc = zcrypt_rsa_crt(perms, &crt64); 1532 1055 } while (rc == -EAGAIN); 1533 1056 /* on failure: retry once again after a requested rescan */ 1534 1057 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1535 1058 do { 1536 - rc = zcrypt_rsa_crt(&crt64); 1059 + rc = zcrypt_rsa_crt(perms, &crt64); 1537 1060 } while (rc == -EAGAIN); 1538 1061 if (rc) 1539 1062 return rc; ··· 1561 1084 unsigned int status; 1562 1085 } __packed; 
1563 1086 1564 - static long trans_xcRB32(struct file *filp, unsigned int cmd, 1565 - unsigned long arg) 1087 + static long trans_xcRB32(struct ap_perms *perms, struct file *filp, 1088 + unsigned int cmd, unsigned long arg) 1566 1089 { 1567 1090 struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg); 1568 1091 struct compat_ica_xcRB xcRB32; ··· 1592 1115 xcRB64.priority_window = xcRB32.priority_window; 1593 1116 xcRB64.status = xcRB32.status; 1594 1117 do { 1595 - rc = zcrypt_send_cprb(&xcRB64); 1118 + rc = _zcrypt_send_cprb(perms, &xcRB64); 1596 1119 } while (rc == -EAGAIN); 1597 1120 /* on failure: retry once again after a requested rescan */ 1598 1121 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1599 1122 do { 1600 - rc = zcrypt_send_cprb(&xcRB64); 1123 + rc = _zcrypt_send_cprb(perms, &xcRB64); 1601 1124 } while (rc == -EAGAIN); 1602 1125 xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; 1603 1126 xcRB32.reply_data_length = xcRB64.reply_data_length; ··· 1610 1133 static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, 1611 1134 unsigned long arg) 1612 1135 { 1136 + int rc; 1137 + struct ap_perms *perms = 1138 + (struct ap_perms *) filp->private_data; 1139 + 1140 + rc = zcrypt_check_ioctl(perms, cmd); 1141 + if (rc) 1142 + return rc; 1143 + 1613 1144 if (cmd == ICARSAMODEXPO) 1614 - return trans_modexpo32(filp, cmd, arg); 1145 + return trans_modexpo32(perms, filp, cmd, arg); 1615 1146 if (cmd == ICARSACRT) 1616 - return trans_modexpo_crt32(filp, cmd, arg); 1147 + return trans_modexpo_crt32(perms, filp, cmd, arg); 1617 1148 if (cmd == ZSECSENDCPRB) 1618 - return trans_xcRB32(filp, cmd, arg); 1149 + return trans_xcRB32(perms, filp, cmd, arg); 1619 1150 return zcrypt_unlocked_ioctl(filp, cmd, arg); 1620 1151 } 1621 1152 #endif ··· 1741 1256 debug_unregister(zcrypt_dbf_info); 1742 1257 } 1743 1258 1259 + #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 1260 + 1261 + static int __init zcdn_init(void) 1262 + { 1263 + int rc; 1264 + 1265 + /* 
create a new class 'zcrypt' */ 1266 + zcrypt_class = class_create(THIS_MODULE, ZCRYPT_NAME); 1267 + if (IS_ERR(zcrypt_class)) { 1268 + rc = PTR_ERR(zcrypt_class); 1269 + goto out_class_create_failed; 1270 + } 1271 + zcrypt_class->dev_release = zcdn_device_release; 1272 + 1273 + /* alloc device minor range */ 1274 + rc = alloc_chrdev_region(&zcrypt_devt, 1275 + 0, ZCRYPT_MAX_MINOR_NODES, 1276 + ZCRYPT_NAME); 1277 + if (rc) 1278 + goto out_alloc_chrdev_failed; 1279 + 1280 + cdev_init(&zcrypt_cdev, &zcrypt_fops); 1281 + zcrypt_cdev.owner = THIS_MODULE; 1282 + rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 1283 + if (rc) 1284 + goto out_cdev_add_failed; 1285 + 1286 + /* need some class specific sysfs attributes */ 1287 + rc = class_create_file(zcrypt_class, &class_attr_zcdn_create); 1288 + if (rc) 1289 + goto out_class_create_file_1_failed; 1290 + rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy); 1291 + if (rc) 1292 + goto out_class_create_file_2_failed; 1293 + 1294 + return 0; 1295 + 1296 + out_class_create_file_2_failed: 1297 + class_remove_file(zcrypt_class, &class_attr_zcdn_create); 1298 + out_class_create_file_1_failed: 1299 + cdev_del(&zcrypt_cdev); 1300 + out_cdev_add_failed: 1301 + unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 1302 + out_alloc_chrdev_failed: 1303 + class_destroy(zcrypt_class); 1304 + out_class_create_failed: 1305 + return rc; 1306 + } 1307 + 1308 + static void zcdn_exit(void) 1309 + { 1310 + class_remove_file(zcrypt_class, &class_attr_zcdn_create); 1311 + class_remove_file(zcrypt_class, &class_attr_zcdn_destroy); 1312 + zcdn_destroy_all(); 1313 + cdev_del(&zcrypt_cdev); 1314 + unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 1315 + class_destroy(zcrypt_class); 1316 + } 1317 + 1318 + #endif 1319 + 1744 1320 /** 1745 1321 * zcrypt_api_init(): Module initialization. 
1746 1322 * ··· 1815 1269 if (rc) 1816 1270 goto out; 1817 1271 1272 + #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 1273 + rc = zcdn_init(); 1274 + if (rc) 1275 + goto out; 1276 + #endif 1277 + 1818 1278 /* Register the request sprayer. */ 1819 1279 rc = misc_register(&zcrypt_misc_device); 1820 1280 if (rc < 0) 1821 - goto out; 1281 + goto out_misc_register_failed; 1822 1282 1823 1283 zcrypt_msgtype6_init(); 1824 1284 zcrypt_msgtype50_init(); 1285 + 1825 1286 return 0; 1826 1287 1288 + out_misc_register_failed: 1289 + #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 1290 + zcdn_exit(); 1291 + #endif 1292 + zcrypt_debug_exit(); 1827 1293 out: 1828 1294 return rc; 1829 1295 } ··· 1847 1289 */ 1848 1290 void __exit zcrypt_api_exit(void) 1849 1291 { 1292 + #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 1293 + zcdn_exit(); 1294 + #endif 1850 1295 misc_deregister(&zcrypt_misc_device); 1851 1296 zcrypt_msgtype6_exit(); 1852 1297 zcrypt_msgtype50_exit();
+2 -13
drivers/s390/crypto/zcrypt_api.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 2 /* 3 - * zcrypt 2.1.0 4 - * 5 - * Copyright IBM Corp. 2001, 2012 3 + * Copyright IBM Corp. 2001, 2018 6 4 * Author(s): Robert Burroughs 7 5 * Eric Rossman (edrossma@us.ibm.com) 8 6 * Cornelia Huck <cornelia.huck@de.ibm.com> ··· 20 22 #include "ap_bus.h" 21 23 22 24 /** 23 - * device type for an actual device is either PCICA, PCICC, PCIXCC_MCL2, 24 - * PCIXCC_MCL3, CEX2C, or CEX2A 25 - * 26 - * NOTE: PCIXCC_MCL3 refers to a PCIXCC with May 2004 version of Licensed 27 - * Internal Code (LIC) (EC J12220 level 29). 28 - * PCIXCC_MCL2 refers to any LIC before this level. 25 + * Supported device types 29 26 */ 30 - #define ZCRYPT_PCICA 1 31 - #define ZCRYPT_PCICC 2 32 - #define ZCRYPT_PCIXCC_MCL2 3 33 - #define ZCRYPT_PCIXCC_MCL3 4 34 27 #define ZCRYPT_CEX2C 5 35 28 #define ZCRYPT_CEX2A 6 36 29 #define ZCRYPT_CEX3C 7
-2
drivers/s390/crypto/zcrypt_card.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0+ 2 2 /* 3 - * zcrypt 2.1.0 4 - * 5 3 * Copyright IBM Corp. 2001, 2012 6 4 * Author(s): Robert Burroughs 7 5 * Eric Rossman (edrossma@us.ibm.com)
-2
drivers/s390/crypto/zcrypt_cca_key.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 2 /* 3 - * zcrypt 2.1.0 4 - * 5 3 * Copyright IBM Corp. 2001, 2006 6 4 * Author(s): Robert Burroughs 7 5 * Eric Rossman (edrossma@us.ibm.com)
+2 -4
drivers/s390/crypto/zcrypt_cex2a.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0+ 2 2 /* 3 - * zcrypt 2.1.0 4 - * 5 3 * Copyright IBM Corp. 2001, 2012 6 4 * Author(s): Robert Burroughs 7 5 * Eric Rossman (edrossma@us.ibm.com) ··· 41 43 #define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME 42 44 43 45 MODULE_AUTHOR("IBM Corporation"); 44 - MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \ 45 - "Copyright IBM Corp. 2001, 2012"); 46 + MODULE_DESCRIPTION("CEX2A/CEX3A Cryptographic Coprocessor device driver, " \ 47 + "Copyright IBM Corp. 2001, 2018"); 46 48 MODULE_LICENSE("GPL"); 47 49 48 50 static struct ap_device_id zcrypt_cex2a_card_ids[] = {
+2 -4
drivers/s390/crypto/zcrypt_cex2a.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 2 /* 3 - * zcrypt 2.1.0 4 - * 5 3 * Copyright IBM Corp. 2001, 2006 6 4 * Author(s): Robert Burroughs 7 5 * Eric Rossman (edrossma@us.ibm.com) ··· 12 14 #define _ZCRYPT_CEX2A_H_ 13 15 14 16 /** 15 - * The type 50 message family is associated with a CEX2A card. 17 + * The type 50 message family is associated with CEXxA cards. 16 18 * 17 19 * The four members of the family are described below. 18 20 * ··· 109 111 } __packed; 110 112 111 113 /** 112 - * The type 80 response family is associated with a CEX2A card. 114 + * The type 80 response family is associated with a CEXxA cards. 113 115 * 114 116 * Note that all unsigned char arrays are right-justified and left-padded 115 117 * with zeroes.
+11 -9
drivers/s390/crypto/zcrypt_cex4.c
··· 37 37 #define CEX4_CLEANUP_TIME (900*HZ) 38 38 39 39 MODULE_AUTHOR("IBM Corporation"); 40 - MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \ 41 - "Copyright IBM Corp. 2012"); 40 + MODULE_DESCRIPTION("CEX4/CEX5/CEX6 Cryptographic Card device driver, " \ 41 + "Copyright IBM Corp. 2018"); 42 42 MODULE_LICENSE("GPL"); 43 43 44 44 static struct ap_device_id zcrypt_cex4_card_ids[] = { ··· 66 66 MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids); 67 67 68 68 /** 69 - * Probe function for CEX4 card device. It always accepts the AP device 70 - * since the bus_match already checked the hardware type. 69 + * Probe function for CEX4/CEX5/CEX6 card device. It always 70 + * accepts the AP device since the bus_match already checked 71 + * the hardware type. 71 72 * @ap_dev: pointer to the AP device. 72 73 */ 73 74 static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) ··· 200 199 } 201 200 202 201 /** 203 - * This is called to remove the CEX4 card driver information 202 + * This is called to remove the CEX4/CEX5/CEX6 card driver information 204 203 * if an AP card device is removed. 205 204 */ 206 205 static void zcrypt_cex4_card_remove(struct ap_device *ap_dev) ··· 219 218 }; 220 219 221 220 /** 222 - * Probe function for CEX4 queue device. It always accepts the AP device 223 - * since the bus_match already checked the hardware type. 221 + * Probe function for CEX4/CEX5/CEX6 queue device. It always 222 + * accepts the AP device since the bus_match already checked 223 + * the hardware type. 224 224 * @ap_dev: pointer to the AP device. 225 225 */ 226 226 static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev) ··· 267 265 } 268 266 269 267 /** 270 - * This is called to remove the CEX4 queue driver information 271 - * if an AP queue device is removed. 268 + * This is called to remove the CEX4/CEX5/CEX6 queue driver 269 + * information if an AP queue device is removed. 272 270 */ 273 271 static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev) 274 272 {
+22 -2
drivers/s390/crypto/zcrypt_error.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 2 /* 3 - * zcrypt 2.1.0 4 - * 5 3 * Copyright IBM Corp. 2001, 2006 6 4 * Author(s): Robert Burroughs 7 5 * Eric Rossman (edrossma@us.ibm.com) ··· 14 16 #include <linux/atomic.h> 15 17 #include "zcrypt_debug.h" 16 18 #include "zcrypt_api.h" 19 + #include "zcrypt_msgtype6.h" 17 20 18 21 /** 19 22 * Reply Messages ··· 113 114 card, queue, ehdr->reply_code); 114 115 return -EAGAIN; 115 116 case REP82_ERROR_TRANSPORT_FAIL: 117 + /* Card or infrastructure failure, disable card */ 118 + atomic_set(&zcrypt_rescan_req, 1); 119 + zq->online = 0; 120 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 121 + card, queue); 122 + /* For type 86 response show the apfs value (failure reason) */ 123 + if (ehdr->type == TYPE86_RSP_CODE) { 124 + struct { 125 + struct type86_hdr hdr; 126 + struct type86_fmt2_ext fmt2; 127 + } __packed * head = reply->message; 128 + unsigned int apfs = *((u32 *)head->fmt2.apfs); 129 + 130 + ZCRYPT_DBF(DBF_ERR, 131 + "device=%02x.%04x reply=0x%02x apfs=0x%x => online=0 rc=EAGAIN\n", 132 + card, queue, apfs, ehdr->reply_code); 133 + } else 134 + ZCRYPT_DBF(DBF_ERR, 135 + "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n", 136 + card, queue, ehdr->reply_code); 137 + return -EAGAIN; 116 138 case REP82_ERROR_MACHINE_FAILURE: 117 139 // REP88_ERROR_MODULE_FAILURE // '10' CEX2A 118 140 /* If a card fails disable it and repeat the request. */
+11 -13
drivers/s390/crypto/zcrypt_msgtype50.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0+ 2 2 /* 3 - * zcrypt 2.1.0 4 - * 5 3 * Copyright IBM Corp. 2001, 2012 6 4 * Author(s): Robert Burroughs 7 5 * Eric Rossman (edrossma@us.ibm.com) ··· 25 27 #include "zcrypt_error.h" 26 28 #include "zcrypt_msgtype50.h" 27 29 28 - /* 4096 bits */ 30 + /* >= CEX3A: 4096 bits */ 29 31 #define CEX3A_MAX_MOD_SIZE 512 30 32 31 - /* max outputdatalength + type80_hdr */ 33 + /* CEX2A: max outputdatalength + type80_hdr */ 32 34 #define CEX2A_MAX_RESPONSE_SIZE 0x110 33 35 34 - /* 512 bit modulus, (max outputdatalength) + type80_hdr */ 36 + /* >= CEX3A: 512 bit modulus, (max outputdatalength) + type80_hdr */ 35 37 #define CEX3A_MAX_RESPONSE_SIZE 0x210 36 38 37 39 MODULE_AUTHOR("IBM Corporation"); ··· 40 42 MODULE_LICENSE("GPL"); 41 43 42 44 /** 43 - * The type 50 message family is associated with a CEX2A card. 45 + * The type 50 message family is associated with a CEXxA cards. 44 46 * 45 47 * The four members of the family are described below. 46 48 * ··· 137 139 } __packed; 138 140 139 141 /** 140 - * The type 80 response family is associated with a CEX2A card. 142 + * The type 80 response family is associated with a CEXxA cards. 141 143 * 142 144 * Note that all unsigned char arrays are right-justified and left-padded 143 145 * with zeroes. ··· 271 273 /* 272 274 * CEX2A and CEX3A w/o FW update can handle requests up to 273 275 * 256 byte modulus (2k keys). 274 - * CEX3A with FW update and CEX4A cards are able to handle 276 + * CEX3A with FW update and newer CEXxA cards are able to handle 275 277 * 512 byte modulus (4k keys). 276 278 */ 277 279 if (mod_len <= 128) { /* up to 1024 bit key size */ ··· 354 356 unsigned char *data; 355 357 356 358 if (t80h->len < sizeof(*t80h) + outputdatalength) { 357 - /* The result is too short, the CEX2A card may not do that.. */ 359 + /* The result is too short, the CEXxA card may not do that.. 
*/ 358 360 zq->online = 0; 359 361 pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 360 362 AP_QID_CARD(zq->queue->qid), ··· 445 447 static atomic_t zcrypt_step = ATOMIC_INIT(0); 446 448 447 449 /** 448 - * The request distributor calls this function if it picked the CEX2A 450 + * The request distributor calls this function if it picked the CEXxA 449 451 * device to handle a modexpo request. 450 452 * @zq: pointer to zcrypt_queue structure that identifies the 451 - * CEX2A device to the request distributor 453 + * CEXxA device to the request distributor 452 454 * @mex: pointer to the modexpo request buffer 453 455 */ 454 456 static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq, ··· 491 493 } 492 494 493 495 /** 494 - * The request distributor calls this function if it picked the CEX2A 496 + * The request distributor calls this function if it picked the CEXxA 495 497 * device to handle a modexpo_crt request. 496 498 * @zq: pointer to zcrypt_queue structure that identifies the 497 - * CEX2A device to the request distributor 499 + * CEXxA device to the request distributor 498 500 * @crt: pointer to the modexpoc_crt request buffer 499 501 */ 500 502 static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
-2
drivers/s390/crypto/zcrypt_msgtype50.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 2 /* 3 - * zcrypt 2.1.0 4 - * 5 3 * Copyright IBM Corp. 2001, 2012 6 4 * Author(s): Robert Burroughs 7 5 * Eric Rossman (edrossma@us.ibm.com)
+29 -45
drivers/s390/crypto/zcrypt_msgtype6.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0+ 2 2 /* 3 - * zcrypt 2.1.0 4 - * 5 3 * Copyright IBM Corp. 2001, 2012 6 4 * Author(s): Robert Burroughs 7 5 * Eric Rossman (edrossma@us.ibm.com) ··· 27 29 #include "zcrypt_msgtype6.h" 28 30 #include "zcrypt_cca_key.h" 29 31 30 - #define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */ 31 - #define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ 32 + #define CEXXC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ 32 33 33 34 #define CEIL4(x) ((((x)+3)/4)*4) 34 35 ··· 35 38 struct completion work; 36 39 int type; 37 40 }; 38 - #define PCIXCC_RESPONSE_TYPE_ICA 0 39 - #define PCIXCC_RESPONSE_TYPE_XCRB 1 40 - #define PCIXCC_RESPONSE_TYPE_EP11 2 41 + #define CEXXC_RESPONSE_TYPE_ICA 0 42 + #define CEXXC_RESPONSE_TYPE_XCRB 1 43 + #define CEXXC_RESPONSE_TYPE_EP11 2 41 44 42 45 MODULE_AUTHOR("IBM Corporation"); 43 46 MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \ ··· 108 111 } __packed; 109 112 110 113 /** 111 - * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C 114 + * The following is used to initialize the CPRBX passed to the CEXxC/CEXxP 112 115 * card in a type6 message. The 3 fields that must be filled in at execution 113 116 * time are req_parml, rpl_parml and usage_domain. 
114 117 * Everything about this interface is ascii/big-endian, since the ··· 291 294 /* message header, cprbx and f&r */ 292 295 msg->hdr = static_type6_hdrX; 293 296 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); 294 - msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 297 + msg->hdr.FromCardLen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 295 298 296 299 msg->cprbx = static_cprbx; 297 300 msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); ··· 361 364 /* message header, cprbx and f&r */ 362 365 msg->hdr = static_type6_hdrX; 363 366 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); 364 - msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 367 + msg->hdr.FromCardLen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 365 368 366 369 msg->cprbx = static_cprbx; 367 370 msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); ··· 655 658 (int) service_rc, (int) service_rs); 656 659 return -EINVAL; 657 660 } 658 - if (service_rc == 8 && service_rs == 783) { 659 - zq->zcard->min_mod_size = 660 - PCIXCC_MIN_MOD_SIZE_OLD; 661 - ZCRYPT_DBF(DBF_DEBUG, 662 - "device=%02x.%04x rc/rs=%d/%d => rc=EAGAIN\n", 663 - AP_QID_CARD(zq->queue->qid), 664 - AP_QID_QUEUE(zq->queue->qid), 665 - (int) service_rc, (int) service_rs); 666 - return -EAGAIN; 667 - } 668 661 zq->online = 0; 669 662 pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 670 663 AP_QID_CARD(zq->queue->qid), ··· 684 697 if (pad_len > 0) { 685 698 if (pad_len < 10) 686 699 return -EINVAL; 687 - /* 'restore' padding left in the PCICC/PCIXCC card. */ 700 + /* 'restore' padding left in the CEXXC card. 
*/ 688 701 if (copy_to_user(outputdata, static_pad, pad_len - 1)) 689 702 return -EFAULT; 690 703 if (put_user(0, outputdata + pad_len - 1)) ··· 942 955 if (t86r->hdr.type == TYPE86_RSP_CODE && 943 956 t86r->cprbx.cprb_ver_id == 0x02) { 944 957 switch (resp_type->type) { 945 - case PCIXCC_RESPONSE_TYPE_ICA: 958 + case CEXXC_RESPONSE_TYPE_ICA: 946 959 length = sizeof(struct type86x_reply) 947 960 + t86r->length - 2; 948 - length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length); 961 + length = min(CEXXC_MAX_ICA_RESPONSE_SIZE, length); 949 962 memcpy(msg->message, reply->message, length); 950 963 break; 951 - case PCIXCC_RESPONSE_TYPE_XCRB: 964 + case CEXXC_RESPONSE_TYPE_XCRB: 952 965 length = t86r->fmt2.offset2 + t86r->fmt2.count2; 953 966 length = min(MSGTYPE06_MAX_MSG_SIZE, length); 954 967 memcpy(msg->message, reply->message, length); ··· 991 1004 if (t86r->hdr.type == TYPE86_RSP_CODE && 992 1005 t86r->cprbx.cprb_ver_id == 0x04) { 993 1006 switch (resp_type->type) { 994 - case PCIXCC_RESPONSE_TYPE_EP11: 1007 + case CEXXC_RESPONSE_TYPE_EP11: 995 1008 length = t86r->fmt2.offset1 + t86r->fmt2.count1; 996 1009 length = min(MSGTYPE06_MAX_MSG_SIZE, length); 997 1010 memcpy(msg->message, reply->message, length); ··· 1009 1022 static atomic_t zcrypt_step = ATOMIC_INIT(0); 1010 1023 1011 1024 /** 1012 - * The request distributor calls this function if it picked the PCIXCC/CEX2C 1025 + * The request distributor calls this function if it picked the CEXxC 1013 1026 * device to handle a modexpo request. 
1014 1027 * @zq: pointer to zcrypt_queue structure that identifies the 1015 - * PCIXCC/CEX2C device to the request distributor 1028 + * CEXxC device to the request distributor 1016 1029 * @mex: pointer to the modexpo request buffer 1017 1030 */ 1018 1031 static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, ··· 1020 1033 { 1021 1034 struct ap_message ap_msg; 1022 1035 struct response_type resp_type = { 1023 - .type = PCIXCC_RESPONSE_TYPE_ICA, 1036 + .type = CEXXC_RESPONSE_TYPE_ICA, 1024 1037 }; 1025 1038 int rc; 1026 1039 ··· 1053 1066 } 1054 1067 1055 1068 /** 1056 - * The request distributor calls this function if it picked the PCIXCC/CEX2C 1069 + * The request distributor calls this function if it picked the CEXxC 1057 1070 * device to handle a modexpo_crt request. 1058 1071 * @zq: pointer to zcrypt_queue structure that identifies the 1059 - * PCIXCC/CEX2C device to the request distributor 1072 + * CEXxC device to the request distributor 1060 1073 * @crt: pointer to the modexpoc_crt request buffer 1061 1074 */ 1062 1075 static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, ··· 1064 1077 { 1065 1078 struct ap_message ap_msg; 1066 1079 struct response_type resp_type = { 1067 - .type = PCIXCC_RESPONSE_TYPE_ICA, 1080 + .type = CEXXC_RESPONSE_TYPE_ICA, 1068 1081 }; 1069 1082 int rc; 1070 1083 ··· 1109 1122 unsigned int *func_code, unsigned short **dom) 1110 1123 { 1111 1124 struct response_type resp_type = { 1112 - .type = PCIXCC_RESPONSE_TYPE_XCRB, 1125 + .type = CEXXC_RESPONSE_TYPE_XCRB, 1113 1126 }; 1114 1127 1115 1128 ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); ··· 1118 1131 ap_msg->receive = zcrypt_msgtype6_receive; 1119 1132 ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 1120 1133 atomic_inc_return(&zcrypt_step); 1121 - ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL); 1134 + ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); 1122 1135 if (!ap_msg->private) 1123 1136 return -ENOMEM; 
1124 - memcpy(ap_msg->private, &resp_type, sizeof(resp_type)); 1125 1137 return XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom); 1126 1138 } 1127 1139 1128 1140 /** 1129 - * The request distributor calls this function if it picked the PCIXCC/CEX2C 1141 + * The request distributor calls this function if it picked the CEXxC 1130 1142 * device to handle a send_cprb request. 1131 1143 * @zq: pointer to zcrypt_queue structure that identifies the 1132 - * PCIXCC/CEX2C device to the request distributor 1144 + * CEXxC device to the request distributor 1133 1145 * @xcRB: pointer to the send_cprb request buffer 1134 1146 */ 1135 1147 static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq, ··· 1164 1178 unsigned int *func_code) 1165 1179 { 1166 1180 struct response_type resp_type = { 1167 - .type = PCIXCC_RESPONSE_TYPE_EP11, 1181 + .type = CEXXC_RESPONSE_TYPE_EP11, 1168 1182 }; 1169 1183 1170 1184 ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); ··· 1173 1187 ap_msg->receive = zcrypt_msgtype6_receive_ep11; 1174 1188 ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 1175 1189 atomic_inc_return(&zcrypt_step); 1176 - ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL); 1190 + ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); 1177 1191 if (!ap_msg->private) 1178 1192 return -ENOMEM; 1179 - memcpy(ap_msg->private, &resp_type, sizeof(resp_type)); 1180 1193 return xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code); 1181 1194 } 1182 1195 ··· 1258 1273 unsigned int *domain) 1259 1274 { 1260 1275 struct response_type resp_type = { 1261 - .type = PCIXCC_RESPONSE_TYPE_XCRB, 1276 + .type = CEXXC_RESPONSE_TYPE_XCRB, 1262 1277 }; 1263 1278 1264 1279 ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); ··· 1267 1282 ap_msg->receive = zcrypt_msgtype6_receive; 1268 1283 ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 1269 1284 atomic_inc_return(&zcrypt_step); 1270 - ap_msg->private = 
kmalloc(sizeof(resp_type), GFP_KERNEL); 1285 + ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); 1271 1286 if (!ap_msg->private) 1272 1287 return -ENOMEM; 1273 - memcpy(ap_msg->private, &resp_type, sizeof(resp_type)); 1274 1288 1275 1289 rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain); 1276 1290 ··· 1278 1294 } 1279 1295 1280 1296 /** 1281 - * The request distributor calls this function if it picked the PCIXCC/CEX2C 1297 + * The request distributor calls this function if it picked the CEXxC 1282 1298 * device to generate random data. 1283 1299 * @zq: pointer to zcrypt_queue structure that identifies the 1284 - * PCIXCC/CEX2C device to the request distributor 1300 + * CEXxC device to the request distributor 1285 1301 * @buffer: pointer to a memory page to return random data 1286 1302 */ 1287 1303 static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq, ··· 1316 1332 } 1317 1333 1318 1334 /** 1319 - * The crypto operations for a PCIXCC/CEX2C card. 1335 + * The crypto operations for a CEXxC card. 1320 1336 */ 1321 1337 static struct zcrypt_ops zcrypt_msgtype6_norng_ops = { 1322 1338 .owner = THIS_MODULE,
+4 -11
drivers/s390/crypto/zcrypt_msgtype6.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 2 /* 3 - * zcrypt 2.1.0 4 - * 5 3 * Copyright IBM Corp. 2001, 2012 6 4 * Author(s): Robert Burroughs 7 5 * Eric Rossman (edrossma@us.ibm.com) ··· 22 24 #define MSGTYPE06_MAX_MSG_SIZE (12*1024) 23 25 24 26 /** 25 - * The type 6 message family is associated with PCICC or PCIXCC cards. 27 + * The type 6 message family is associated with CEXxC/CEXxP cards. 26 28 * 27 29 * It contains a message header followed by a CPRB, both of which 28 30 * are described below. ··· 41 43 unsigned int offset2; /* 0x00000000 */ 42 44 unsigned int offset3; /* 0x00000000 */ 43 45 unsigned int offset4; /* 0x00000000 */ 44 - unsigned char agent_id[16]; /* PCICC: */ 45 - /* 0x0100 */ 46 - /* 0x4343412d4150504c202020 */ 47 - /* 0x010101 */ 48 - /* PCIXCC: */ 49 - /* 0x4341000000000000 */ 50 - /* 0x0000000000000000 */ 46 + unsigned char agent_id[16]; /* 0x4341000000000000 */ 47 + /* 0x0000000000000000 */ 51 48 unsigned char rqid[2]; /* rqid. internal to 603 */ 52 49 unsigned char reserved5[2]; /* 0x0000 */ 53 50 unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */ ··· 58 65 } __packed; 59 66 60 67 /** 61 - * The type 86 message family is associated with PCICC and PCIXCC cards. 68 + * The type 86 message family is associated with CEXxC/CEXxP cards. 62 69 * 63 70 * It contains a message header followed by a CPRB. The CPRB is 64 71 * the same as the request CPRB, which is described above.
+50 -75
drivers/s390/crypto/zcrypt_pcixcc.c drivers/s390/crypto/zcrypt_cex2c.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0+ 2 2 /* 3 - * zcrypt 2.1.0 4 - * 5 - * Copyright IBM Corp. 2001, 2012 3 + * Copyright IBM Corp. 2001, 2018 6 4 * Author(s): Robert Burroughs 7 5 * Eric Rossman (edrossma@us.ibm.com) 8 6 * ··· 23 25 #include "zcrypt_api.h" 24 26 #include "zcrypt_error.h" 25 27 #include "zcrypt_msgtype6.h" 26 - #include "zcrypt_pcixcc.h" 28 + #include "zcrypt_cex2c.h" 27 29 #include "zcrypt_cca_key.h" 28 30 29 - #define PCIXCC_MIN_MOD_SIZE 16 /* 128 bits */ 30 - #define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */ 31 - #define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */ 32 - #define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE 31 + #define CEX2C_MIN_MOD_SIZE 16 /* 128 bits */ 32 + #define CEX2C_MAX_MOD_SIZE 256 /* 2048 bits */ 33 + #define CEX3C_MIN_MOD_SIZE 16 /* 128 bits */ 33 34 #define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */ 34 - 35 - #define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */ 36 - #define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ 37 - 38 - #define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024) 39 - 40 - #define PCIXCC_CLEANUP_TIME (15*HZ) 41 - 42 - #define CEIL4(x) ((((x)+3)/4)*4) 43 - 44 - struct response_type { 45 - struct completion work; 46 - int type; 47 - }; 48 - #define PCIXCC_RESPONSE_TYPE_ICA 0 49 - #define PCIXCC_RESPONSE_TYPE_XCRB 1 35 + #define CEX2C_MAX_XCRB_MESSAGE_SIZE (12*1024) 36 + #define CEX2C_CLEANUP_TIME (15*HZ) 50 37 51 38 MODULE_AUTHOR("IBM Corporation"); 52 - MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \ 53 - "Copyright IBM Corp. 2001, 2012"); 39 + MODULE_DESCRIPTION("CEX2C/CEX3C Cryptographic Coprocessor device driver, " \ 40 + "Copyright IBM Corp. 
2001, 2018"); 54 41 MODULE_LICENSE("GPL"); 55 42 56 - static struct ap_device_id zcrypt_pcixcc_card_ids[] = { 57 - { .dev_type = AP_DEVICE_TYPE_PCIXCC, 58 - .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 43 + static struct ap_device_id zcrypt_cex2c_card_ids[] = { 59 44 { .dev_type = AP_DEVICE_TYPE_CEX2C, 60 45 .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 61 46 { .dev_type = AP_DEVICE_TYPE_CEX3C, ··· 46 65 { /* end of list */ }, 47 66 }; 48 67 49 - MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_card_ids); 68 + MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_card_ids); 50 69 51 - static struct ap_device_id zcrypt_pcixcc_queue_ids[] = { 52 - { .dev_type = AP_DEVICE_TYPE_PCIXCC, 53 - .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 70 + static struct ap_device_id zcrypt_cex2c_queue_ids[] = { 54 71 { .dev_type = AP_DEVICE_TYPE_CEX2C, 55 72 .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 56 73 { .dev_type = AP_DEVICE_TYPE_CEX3C, ··· 56 77 { /* end of list */ }, 57 78 }; 58 79 59 - MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_queue_ids); 80 + MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_queue_ids); 60 81 61 82 /** 62 - * Large random number detection function. Its sends a message to a pcixcc 83 + * Large random number detection function. Its sends a message to a CEX2C/CEX3C 63 84 * card to find out if large random numbers are supported. 64 85 * @ap_dev: pointer to the AP device. 65 86 * 66 87 * Returns 1 if large random numbers are supported, 0 if not and < 0 on error. 67 88 */ 68 - static int zcrypt_pcixcc_rng_supported(struct ap_queue *aq) 89 + static int zcrypt_cex2c_rng_supported(struct ap_queue *aq) 69 90 { 70 91 struct ap_message ap_msg; 71 92 unsigned long long psmid; ··· 126 147 } 127 148 128 149 /** 129 - * Probe function for PCIXCC/CEX2C card devices. It always accepts the 130 - * AP device since the bus_match already checked the hardware type. The 131 - * PCIXCC cards come in two flavours: micro code level 2 and micro code 132 - * level 3. 
This is checked by sending a test message to the device. 150 + * Probe function for CEX2C/CEX3C card devices. It always accepts the 151 + * AP device since the bus_match already checked the hardware type. 133 152 * @ap_dev: pointer to the AP card device. 134 153 */ 135 - static int zcrypt_pcixcc_card_probe(struct ap_device *ap_dev) 154 + static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev) 136 155 { 137 156 /* 138 157 * Normalized speed ratings per crypto adapter ··· 156 179 zc->type_string = "CEX2C"; 157 180 memcpy(zc->speed_rating, CEX2C_SPEED_IDX, 158 181 sizeof(CEX2C_SPEED_IDX)); 159 - zc->min_mod_size = PCIXCC_MIN_MOD_SIZE; 160 - zc->max_mod_size = PCIXCC_MAX_MOD_SIZE; 161 - zc->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE; 182 + zc->min_mod_size = CEX2C_MIN_MOD_SIZE; 183 + zc->max_mod_size = CEX2C_MAX_MOD_SIZE; 184 + zc->max_exp_bit_length = CEX2C_MAX_MOD_SIZE; 162 185 break; 163 186 case AP_DEVICE_TYPE_CEX3C: 164 187 zc->user_space_type = ZCRYPT_CEX3C; ··· 185 208 } 186 209 187 210 /** 188 - * This is called to remove the PCIXCC/CEX2C card driver information 211 + * This is called to remove the CEX2C/CEX3C card driver information 189 212 * if an AP card device is removed. 190 213 */ 191 - static void zcrypt_pcixcc_card_remove(struct ap_device *ap_dev) 214 + static void zcrypt_cex2c_card_remove(struct ap_device *ap_dev) 192 215 { 193 216 struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private; 194 217 ··· 196 219 zcrypt_card_unregister(zc); 197 220 } 198 221 199 - static struct ap_driver zcrypt_pcixcc_card_driver = { 200 - .probe = zcrypt_pcixcc_card_probe, 201 - .remove = zcrypt_pcixcc_card_remove, 202 - .ids = zcrypt_pcixcc_card_ids, 222 + static struct ap_driver zcrypt_cex2c_card_driver = { 223 + .probe = zcrypt_cex2c_card_probe, 224 + .remove = zcrypt_cex2c_card_remove, 225 + .ids = zcrypt_cex2c_card_ids, 203 226 .flags = AP_DRIVER_FLAG_DEFAULT, 204 227 }; 205 228 206 229 /** 207 - * Probe function for PCIXCC/CEX2C queue devices. 
It always accepts the 208 - * AP device since the bus_match already checked the hardware type. The 209 - * PCIXCC cards come in two flavours: micro code level 2 and micro code 210 - * level 3. This is checked by sending a test message to the device. 230 + * Probe function for CEX2C/CEX3C queue devices. It always accepts the 231 + * AP device since the bus_match already checked the hardware type. 211 232 * @ap_dev: pointer to the AP card device. 212 233 */ 213 - static int zcrypt_pcixcc_queue_probe(struct ap_device *ap_dev) 234 + static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev) 214 235 { 215 236 struct ap_queue *aq = to_ap_queue(&ap_dev->device); 216 237 struct zcrypt_queue *zq; 217 238 int rc; 218 239 219 - zq = zcrypt_queue_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE); 240 + zq = zcrypt_queue_alloc(CEX2C_MAX_XCRB_MESSAGE_SIZE); 220 241 if (!zq) 221 242 return -ENOMEM; 222 243 zq->queue = aq; 223 244 zq->online = 1; 224 245 atomic_set(&zq->load, 0); 225 - rc = zcrypt_pcixcc_rng_supported(aq); 246 + rc = zcrypt_cex2c_rng_supported(aq); 226 247 if (rc < 0) { 227 248 zcrypt_queue_free(zq); 228 249 return rc; ··· 232 257 zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, 233 258 MSGTYPE06_VARIANT_NORNG); 234 259 ap_queue_init_reply(aq, &zq->reply); 235 - aq->request_timeout = PCIXCC_CLEANUP_TIME, 260 + aq->request_timeout = CEX2C_CLEANUP_TIME; 236 261 aq->private = zq; 237 262 rc = zcrypt_queue_register(zq); 238 263 if (rc) { ··· 243 268 } 244 269 245 270 /** 246 - * This is called to remove the PCIXCC/CEX2C queue driver information 271 + * This is called to remove the CEX2C/CEX3C queue driver information 247 272 * if an AP queue device is removed. 
248 273 */ 249 - static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev) 274 + static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev) 250 275 { 251 276 struct ap_queue *aq = to_ap_queue(&ap_dev->device); 252 277 struct zcrypt_queue *zq = aq->private; ··· 256 281 zcrypt_queue_unregister(zq); 257 282 } 258 283 259 - static struct ap_driver zcrypt_pcixcc_queue_driver = { 260 - .probe = zcrypt_pcixcc_queue_probe, 261 - .remove = zcrypt_pcixcc_queue_remove, 284 + static struct ap_driver zcrypt_cex2c_queue_driver = { 285 + .probe = zcrypt_cex2c_queue_probe, 286 + .remove = zcrypt_cex2c_queue_remove, 262 287 .suspend = ap_queue_suspend, 263 288 .resume = ap_queue_resume, 264 - .ids = zcrypt_pcixcc_queue_ids, 289 + .ids = zcrypt_cex2c_queue_ids, 265 290 .flags = AP_DRIVER_FLAG_DEFAULT, 266 291 }; 267 292 268 - int __init zcrypt_pcixcc_init(void) 293 + int __init zcrypt_cex2c_init(void) 269 294 { 270 295 int rc; 271 296 272 - rc = ap_driver_register(&zcrypt_pcixcc_card_driver, 273 - THIS_MODULE, "pcixcccard"); 297 + rc = ap_driver_register(&zcrypt_cex2c_card_driver, 298 + THIS_MODULE, "cex2card"); 274 299 if (rc) 275 300 return rc; 276 301 277 - rc = ap_driver_register(&zcrypt_pcixcc_queue_driver, 278 - THIS_MODULE, "pcixccqueue"); 302 + rc = ap_driver_register(&zcrypt_cex2c_queue_driver, 303 + THIS_MODULE, "cex2cqueue"); 279 304 if (rc) 280 - ap_driver_unregister(&zcrypt_pcixcc_card_driver); 305 + ap_driver_unregister(&zcrypt_cex2c_card_driver); 281 306 282 307 return rc; 283 308 } 284 309 285 - void zcrypt_pcixcc_exit(void) 310 + void zcrypt_cex2c_exit(void) 286 311 { 287 - ap_driver_unregister(&zcrypt_pcixcc_queue_driver); 288 - ap_driver_unregister(&zcrypt_pcixcc_card_driver); 312 + ap_driver_unregister(&zcrypt_cex2c_queue_driver); 313 + ap_driver_unregister(&zcrypt_cex2c_card_driver); 289 314 } 290 315 291 - module_init(zcrypt_pcixcc_init); 292 - module_exit(zcrypt_pcixcc_exit); 316 + module_init(zcrypt_cex2c_init); 317 + 
module_exit(zcrypt_cex2c_exit);
+6 -8
drivers/s390/crypto/zcrypt_pcixcc.h drivers/s390/crypto/zcrypt_cex2c.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0+ */ 2 2 /* 3 - * zcrypt 2.1.0 4 - * 5 - * Copyright IBM Corp. 2001, 2012 3 + * Copyright IBM Corp. 2001, 2018 6 4 * Author(s): Robert Burroughs 7 5 * Eric Rossman (edrossma@us.ibm.com) 8 6 * ··· 9 11 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 10 12 */ 11 13 12 - #ifndef _ZCRYPT_PCIXCC_H_ 13 - #define _ZCRYPT_PCIXCC_H_ 14 + #ifndef _ZCRYPT_CEX2C_H_ 15 + #define _ZCRYPT_CEX2C_H_ 14 16 15 - int zcrypt_pcixcc_init(void); 16 - void zcrypt_pcixcc_exit(void); 17 + int zcrypt_cex2c_init(void); 18 + void zcrypt_cex2c_exit(void); 17 19 18 - #endif /* _ZCRYPT_PCIXCC_H_ */ 20 + #endif /* _ZCRYPT_CEX2C_H_ */
-2
drivers/s390/crypto/zcrypt_queue.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0+ 2 2 /* 3 - * zcrypt 2.1.0 4 - * 5 3 * Copyright IBM Corp. 2001, 2012 6 4 * Author(s): Robert Burroughs 7 5 * Eric Rossman (edrossma@us.ibm.com)
+7
include/linux/compiler-gcc.h
··· 194 194 * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 195 195 */ 196 196 #define __no_sanitize_address __attribute__((no_sanitize_address)) 197 + #ifdef CONFIG_KASAN 198 + #define __no_sanitize_address_or_inline \ 199 + __no_sanitize_address __maybe_unused notrace 200 + #else 201 + #define __no_sanitize_address_or_inline inline 202 + #endif 197 203 #endif 198 204 199 205 #if GCC_VERSION >= 50100 ··· 217 211 218 212 #if !defined(__no_sanitize_address) 219 213 #define __no_sanitize_address 214 + #define __no_sanitize_address_or_inline inline 220 215 #endif 221 216 222 217 /*
+2
include/linux/start_kernel.h
··· 9 9 up something else. */ 10 10 11 11 extern asmlinkage void __init start_kernel(void); 12 + extern void __init arch_call_rest_init(void); 13 + extern void __ref rest_init(void); 12 14 13 15 #endif /* _LINUX_START_KERNEL_H */
+7 -2
init/main.c
··· 394 394 395 395 static __initdata DECLARE_COMPLETION(kthreadd_done); 396 396 397 - static noinline void __ref rest_init(void) 397 + noinline void __ref rest_init(void) 398 398 { 399 399 struct task_struct *tsk; 400 400 int pid; ··· 526 526 init_espfix_bsp(); 527 527 /* Should be run after espfix64 is set up. */ 528 528 pti_init(); 529 + } 530 + 531 + void __init __weak arch_call_rest_init(void) 532 + { 533 + rest_init(); 529 534 } 530 535 531 536 asmlinkage __visible void __init start_kernel(void) ··· 741 736 } 742 737 743 738 /* Do the rest non-__init'ed, we're now alive */ 744 - rest_init(); 739 + arch_call_rest_init(); 745 740 } 746 741 747 742 /* Call all constructor functions linked into the kernel. */
+9
lib/Kconfig.kasan
··· 57 57 58 58 endchoice 59 59 60 + config KASAN_S390_4_LEVEL_PAGING 61 + bool "KASan: use 4-level paging" 62 + depends on KASAN && S390 63 + help 64 + Compiling the kernel with KASan disables automatic 3-level vs 65 + 4-level paging selection. 3-level paging is used by default (up 66 + to 3TB of RAM with KASan enabled). This option allows forcing 67 + 4-level paging instead. 68 + 60 69 config TEST_KASAN 61 70 tristate "Module for testing kasan for bug detection" 62 71 depends on m && KASAN