Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-5.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Heiko Carstens:

- Make use of the IBM z16 processor activity instrumentation facility
to count cryptography operations: add a new PMU device driver so that
perf can make use of this.

- Add new IBM z16 extended counter set to cpumf support.

- Add vdso randomization support.

- Add missing KCSAN instrumentation to barriers and spinlocks, which
should make s390's KCSAN support complete.

- Add support for IPL-complete-control facility: notify the hypervisor
that kexec has finished its work and the kernel is starting.

- Improve error logging for PCI.

- Various small changes to work around llvm's integrated assembler
limitations, and one bug, to make it finally possible to compile the
kernel with llvm's integrated assembler. This also requires raising
the minimum clang version to 14.0.0.

- Various other small enhancements, bug fixes, and cleanups all over
the place.

* tag 's390-5.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (48 commits)
s390/head: get rid of 31 bit leftovers
scripts/min-tool-version.sh: raise minimum clang version to 14.0.0 for s390
s390/boot: do not emit debug info for assembly with llvm's IAS
s390/boot: workaround llvm IAS bug
s390/purgatory: workaround llvm's IAS limitations
s390/entry: workaround llvm's IAS limitations
s390/alternatives: remove padding generation code
s390/alternatives: provide identical sized orginal/alternative sequences
s390/cpumf: add new extended counter set for IBM z16
s390/preempt: disable __preempt_count_add() optimization for PROFILE_ALL_BRANCHES
s390/stp: clock_delta should be signed
s390/stp: fix todoff size
s390/pai: add support for cryptography counters
entry: Rename arch_check_user_regs() to arch_enter_from_user_mode()
s390/compat: cleanup compat_linux.h header file
s390/entry: remove broken and not needed code
s390/boot: convert parmarea to C
s390/boot: convert initial lowcore to C
s390/ptrace: move short psw definitions to ptrace header file
s390/head: initialize all new psws
...

+2553 -1534
+2
arch/s390/Makefile
··· 20 20 endif 21 21 aflags_dwarf := -Wa,-gdwarf-2 22 22 KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__ 23 + ifndef CONFIG_AS_IS_LLVM 23 24 KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf)) 25 + endif 24 26 KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack 25 27 KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY 26 28 KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
+3
arch/s390/boot/.gitignore
··· 2 2 image 3 3 bzImage 4 4 section_cmp.* 5 + vmlinux 6 + vmlinux.lds 7 + vmlinux.syms
+69 -9
arch/s390/boot/Makefile
··· 37 37 38 38 obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o 39 39 obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o 40 - obj-y += version.o pgm_check_info.o ctype.o 40 + obj-y += version.o pgm_check_info.o ctype.o ipl_data.o 41 41 obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o 42 42 obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o 43 43 obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o 44 - targets := bzImage startup.a section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y) 45 - subdir- := compressed 44 + obj-y += $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o 45 + obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o 46 + obj-all := $(obj-y) piggy.o syms.o 47 + 48 + targets := bzImage section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y) 49 + targets += vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 50 + targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 51 + targets += vmlinux.bin.zst info.bin syms.bin vmlinux.syms $(obj-all) 46 52 47 53 OBJECTS := $(addprefix $(obj)/,$(obj-y)) 54 + OBJECTS_ALL := $(addprefix $(obj)/,$(obj-all)) 48 55 49 56 quiet_cmd_section_cmp = SECTCMP $* 50 57 define cmd_section_cmp ··· 66 59 touch $@ 67 60 endef 68 61 69 - $(obj)/bzImage: $(obj)/compressed/vmlinux $(obj)/section_cmp.boot.data $(obj)/section_cmp.boot.preserved.data FORCE 62 + $(obj)/bzImage: $(obj)/vmlinux $(obj)/section_cmp.boot.data $(obj)/section_cmp.boot.preserved.data FORCE 70 63 $(call if_changed,objcopy) 71 64 72 - $(obj)/section_cmp%: vmlinux $(obj)/compressed/vmlinux FORCE 65 + $(obj)/section_cmp%: vmlinux $(obj)/vmlinux FORCE 73 66 $(call if_changed,section_cmp) 74 67 75 - $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE 76 - $(Q)$(MAKE) $(build)=$(obj)/compressed $@ 68 + LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup --build-id=sha1 -T 69 + $(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS_ALL) FORCE 70 + $(call if_changed,ld) 77 71 78 
- $(obj)/startup.a: $(OBJECTS) FORCE 79 - $(call if_changed,ar) 72 + LDFLAGS_vmlinux.syms := --oformat $(LD_BFD) -e startup -T 73 + $(obj)/vmlinux.syms: $(obj)/vmlinux.lds $(OBJECTS) FORCE 74 + $(call if_changed,ld) 75 + 76 + quiet_cmd_dumpsyms = DUMPSYMS $< 77 + define cmd_dumpsyms 78 + $(NM) -n -S --format=bsd "$<" | sed -nE 's/^0*([0-9a-fA-F]+) 0*([0-9a-fA-F]+) [tT] ([^ ]*)$$/\1 \2 \3/p' | tr '\n' '\0' > "$@" 79 + endef 80 + 81 + $(obj)/syms.bin: $(obj)/vmlinux.syms FORCE 82 + $(call if_changed,dumpsyms) 83 + 84 + OBJCOPYFLAGS_syms.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.decompressor.syms 85 + $(obj)/syms.o: $(obj)/syms.bin FORCE 86 + $(call if_changed,objcopy) 87 + 88 + OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=load 89 + $(obj)/info.bin: vmlinux FORCE 90 + $(call if_changed,objcopy) 91 + 92 + OBJCOPYFLAGS_info.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.info 93 + $(obj)/info.o: $(obj)/info.bin FORCE 94 + $(call if_changed,objcopy) 95 + 96 + OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section=.vmlinux.info -S 97 + $(obj)/vmlinux.bin: vmlinux FORCE 98 + $(call if_changed,objcopy) 99 + 100 + suffix-$(CONFIG_KERNEL_GZIP) := .gz 101 + suffix-$(CONFIG_KERNEL_BZIP2) := .bz2 102 + suffix-$(CONFIG_KERNEL_LZ4) := .lz4 103 + suffix-$(CONFIG_KERNEL_LZMA) := .lzma 104 + suffix-$(CONFIG_KERNEL_LZO) := .lzo 105 + suffix-$(CONFIG_KERNEL_XZ) := .xz 106 + suffix-$(CONFIG_KERNEL_ZSTD) := .zst 107 + 108 + $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE 109 + $(call if_changed,gzip) 110 + $(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE 111 + $(call if_changed,bzip2_with_size) 112 + $(obj)/vmlinux.bin.lz4: $(obj)/vmlinux.bin FORCE 113 + $(call if_changed,lz4_with_size) 114 + $(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE 115 + $(call if_changed,lzma_with_size) 116 + $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE 117 + $(call 
if_changed,lzo_with_size) 118 + $(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE 119 + $(call if_changed,xzkern_with_size) 120 + $(obj)/vmlinux.bin.zst: $(obj)/vmlinux.bin FORCE 121 + $(call if_changed,zstd22_with_size) 122 + 123 + OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed 124 + $(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE 125 + $(call if_changed,objcopy)
+5 -1
arch/s390/boot/boot.h
··· 2 2 #ifndef BOOT_BOOT_H 3 3 #define BOOT_BOOT_H 4 4 5 - #include <asm/extable.h> 6 5 #include <linux/types.h> 6 + 7 + #define IPL_START 0x200 8 + 9 + #ifndef __ASSEMBLY__ 7 10 8 11 void startup_kernel(void); 9 12 unsigned long detect_memory(void); ··· 34 31 35 32 unsigned long read_ipl_report(unsigned long safe_offset); 36 33 34 + #endif /* __ASSEMBLY__ */ 37 35 #endif /* BOOT_BOOT_H */
-4
arch/s390/boot/compressed/.gitignore
··· 1 - # SPDX-License-Identifier: GPL-2.0-only 2 - vmlinux 3 - vmlinux.lds 4 - vmlinux.syms
-86
arch/s390/boot/compressed/Makefile
··· 1 - # SPDX-License-Identifier: GPL-2.0 2 - # 3 - # linux/arch/s390/boot/compressed/Makefile 4 - # 5 - # create a compressed vmlinux image from the original vmlinux 6 - # 7 - 8 - KCOV_INSTRUMENT := n 9 - GCOV_PROFILE := n 10 - UBSAN_SANITIZE := n 11 - KASAN_SANITIZE := n 12 - KCSAN_SANITIZE := n 13 - 14 - obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o 15 - obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o 16 - obj-all := $(obj-y) piggy.o syms.o 17 - targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 18 - targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 19 - targets += vmlinux.bin.zst 20 - targets += info.bin syms.bin vmlinux.syms $(obj-all) 21 - 22 - KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR) 23 - KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR) 24 - OBJCOPYFLAGS := 25 - 26 - OBJECTS := $(addprefix $(obj)/,$(obj-y)) 27 - OBJECTS_ALL := $(addprefix $(obj)/,$(obj-all)) 28 - 29 - LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup --build-id=sha1 -T 30 - $(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS_ALL) FORCE 31 - $(call if_changed,ld) 32 - 33 - LDFLAGS_vmlinux.syms := --oformat $(LD_BFD) -e startup -T 34 - $(obj)/vmlinux.syms: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) FORCE 35 - $(call if_changed,ld) 36 - 37 - quiet_cmd_dumpsyms = DUMPSYMS $< 38 - define cmd_dumpsyms 39 - $(NM) -n -S --format=bsd "$<" | sed -nE 's/^0*([0-9a-fA-F]+) 0*([0-9a-fA-F]+) [tT] ([^ ]*)$$/\1 \2 \3/p' | tr '\n' '\0' > "$@" 40 - endef 41 - 42 - $(obj)/syms.bin: $(obj)/vmlinux.syms FORCE 43 - $(call if_changed,dumpsyms) 44 - 45 - OBJCOPYFLAGS_syms.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.decompressor.syms 46 - $(obj)/syms.o: $(obj)/syms.bin FORCE 47 - $(call if_changed,objcopy) 48 - 49 - OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=load 50 - $(obj)/info.bin: vmlinux FORCE 51 - $(call 
if_changed,objcopy) 52 - 53 - OBJCOPYFLAGS_info.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.info 54 - $(obj)/info.o: $(obj)/info.bin FORCE 55 - $(call if_changed,objcopy) 56 - 57 - OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section=.vmlinux.info -S 58 - $(obj)/vmlinux.bin: vmlinux FORCE 59 - $(call if_changed,objcopy) 60 - 61 - suffix-$(CONFIG_KERNEL_GZIP) := .gz 62 - suffix-$(CONFIG_KERNEL_BZIP2) := .bz2 63 - suffix-$(CONFIG_KERNEL_LZ4) := .lz4 64 - suffix-$(CONFIG_KERNEL_LZMA) := .lzma 65 - suffix-$(CONFIG_KERNEL_LZO) := .lzo 66 - suffix-$(CONFIG_KERNEL_XZ) := .xz 67 - suffix-$(CONFIG_KERNEL_ZSTD) := .zst 68 - 69 - $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE 70 - $(call if_changed,gzip) 71 - $(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE 72 - $(call if_changed,bzip2_with_size) 73 - $(obj)/vmlinux.bin.lz4: $(obj)/vmlinux.bin FORCE 74 - $(call if_changed,lz4_with_size) 75 - $(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE 76 - $(call if_changed,lzma_with_size) 77 - $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE 78 - $(call if_changed,lzo_with_size) 79 - $(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE 80 - $(call if_changed,xzkern_with_size) 81 - $(obj)/vmlinux.bin.zst: $(obj)/vmlinux.bin FORCE 82 - $(call if_changed,zstd22_with_size) 83 - 84 - OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed 85 - $(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE 86 - $(call if_changed,objcopy)
arch/s390/boot/compressed/clz_ctz.c arch/s390/boot/clz_ctz.c
arch/s390/boot/compressed/decompressor.c arch/s390/boot/decompressor.c
arch/s390/boot/compressed/decompressor.h arch/s390/boot/decompressor.h
+9
arch/s390/boot/compressed/vmlinux.lds.S arch/s390/boot/vmlinux.lds.S
··· 4 4 #include <asm/thread_info.h> 5 5 #include <asm/page.h> 6 6 #include <asm/sclp.h> 7 + #include "boot.h" 7 8 8 9 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") 9 10 OUTPUT_ARCH(s390:64-bit) ··· 14 13 SECTIONS 15 14 { 16 15 . = 0; 16 + .ipldata : { 17 + *(.ipldata) 18 + } 19 + . = IPL_START; 17 20 .head.text : { 18 21 _head = . ; 19 22 HEAD_TEXT 20 23 _ehead = . ; 24 + } 25 + . = PARMAREA; 26 + .parmarea : { 27 + *(.parmarea) 21 28 } 22 29 .text : { 23 30 _text = .; /* Text */
+148 -218
arch/s390/boot/head.S
··· 27 27 #include <asm/page.h> 28 28 #include <asm/ptrace.h> 29 29 #include <asm/sclp.h> 30 - 31 - #define ARCH_OFFSET 4 30 + #include "boot.h" 32 31 33 32 #define EP_OFFSET 0x10008 34 33 #define EP_STRING "S390EP" 34 + #define IPL_BS 0x730 35 35 36 36 __HEAD 37 - 38 - #define IPL_BS 0x730 39 - .org 0 40 - .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded 41 - .long 0x02000018,0x60000050 # by ipl to addresses 0-23. 42 - .long 0x02000068,0x60000050 # (a PSW and two CCWs). 43 - .fill 80-24,1,0x40 # bytes 24-79 are discarded !! 44 - .long 0x020000f0,0x60000050 # The next 160 byte are loaded 45 - .long 0x02000140,0x60000050 # to addresses 0x18-0xb7 46 - .long 0x02000190,0x60000050 # They form the continuation 47 - .long 0x020001e0,0x60000050 # of the CCW program started 48 - .long 0x02000230,0x60000050 # by ipl and load the range 49 - .long 0x02000280,0x60000050 # 0x0f0-0x730 from the image 50 - .long 0x020002d0,0x60000050 # to the range 0x0f0-0x730 51 - .long 0x02000320,0x60000050 # in memory. At the end of 52 - .long 0x02000370,0x60000050 # the channel program the PSW 53 - .long 0x020003c0,0x60000050 # at location 0 is loaded. 54 - .long 0x02000410,0x60000050 # Initial processing starts 55 - .long 0x02000460,0x60000050 # at 0x200 = iplstart. 
56 - .long 0x020004b0,0x60000050 57 - .long 0x02000500,0x60000050 58 - .long 0x02000550,0x60000050 59 - .long 0x020005a0,0x60000050 60 - .long 0x020005f0,0x60000050 61 - .long 0x02000640,0x60000050 62 - .long 0x02000690,0x60000050 63 - .long 0x020006e0,0x20000050 64 - 65 - .org __LC_RST_NEW_PSW # 0x1a0 66 - .quad 0,iplstart 67 - .org __LC_EXT_NEW_PSW # 0x1b0 68 - .quad 0x0002000180000000,0x1b0 # disabled wait 69 - .org __LC_PGM_NEW_PSW # 0x1d0 70 - .quad 0x0000000180000000,startup_pgm_check_handler 71 - .org __LC_IO_NEW_PSW # 0x1f0 72 - .quad 0x0002000180000000,0x1f0 # disabled wait 73 - 74 - .org 0x200 75 - 37 + ipl_start: 38 + mvi __LC_AR_MODE_ID,1 # set esame flag 39 + slr %r0,%r0 # set cpuid to zero 40 + lhi %r1,2 # mode 2 = esame (dump) 41 + sigp %r1,%r0,0x12 # switch to esame mode 42 + sam64 # switch to 64 bit addressing mode 43 + lgh %r1,__LC_SUBCHANNEL_ID # test if subchannel number 44 + brctg %r1,.Lnoload # is valid 45 + llgf %r1,__LC_SUBCHANNEL_ID # load ipl subchannel number 46 + lghi %r2,IPL_BS # load start address 47 + bras %r14,.Lloader # load rest of ipl image 48 + larl %r12,parmarea # pointer to parameter area 49 + stg %r1,IPL_DEVICE-PARMAREA(%r12) # save ipl device number 50 + # 51 + # load parameter file from ipl device 52 + # 53 + .Lagain1: 54 + larl %r2,_end # ramdisk loc. is temp 55 + bras %r14,.Lloader # load parameter file 56 + ltgr %r2,%r2 # got anything ? 
57 + jz .Lnopf 58 + lg %r3,MAX_COMMAND_LINE_SIZE-PARMAREA(%r12) 59 + aghi %r3,-1 60 + clgr %r2,%r3 61 + jl .Lnotrunc 62 + lgr %r2,%r3 63 + .Lnotrunc: 64 + larl %r4,_end 65 + larl %r13,.L_hdr 66 + clc 0(3,%r4),0(%r13) # if it is HDRx 67 + jz .Lagain1 # skip dataset header 68 + larl %r13,.L_eof 69 + clc 0(3,%r4),0(%r13) # if it is EOFx 70 + jz .Lagain1 # skip dateset trailer 71 + lgr %r5,%r2 72 + la %r6,COMMAND_LINE-PARMAREA(%r12) 73 + lgr %r7,%r2 74 + aghi %r7,1 75 + mvcl %r6,%r4 76 + .Lnopf: 77 + # 78 + # load ramdisk from ipl device 79 + # 80 + .Lagain2: 81 + larl %r2,_end # addr of ramdisk 82 + stg %r2,INITRD_START-PARMAREA(%r12) 83 + bras %r14,.Lloader # load ramdisk 84 + stg %r2,INITRD_SIZE-PARMAREA(%r12) # store size of rd 85 + ltgr %r2,%r2 86 + jnz .Lrdcont 87 + stg %r2,INITRD_START-PARMAREA(%r12) # no ramdisk found 88 + .Lrdcont: 89 + larl %r2,_end 90 + larl %r13,.L_hdr # skip HDRx and EOFx 91 + clc 0(3,%r2),0(%r13) 92 + jz .Lagain2 93 + larl %r13,.L_eof 94 + clc 0(3,%r2),0(%r13) 95 + jz .Lagain2 96 + # 97 + # reset files in VM reader 98 + # 99 + larl %r13,.Lcpuid 100 + stidp 0(%r13) # store cpuid 101 + tm 0(%r13),0xff # running VM ? 
102 + jno .Lnoreset 103 + larl %r2,.Lreset 104 + lghi %r3,26 105 + diag %r2,%r3,8 106 + larl %r5,.Lirb 107 + stsch 0(%r5) # check if irq is pending 108 + tm 30(%r5),0x0f # by verifying if any of the 109 + jnz .Lwaitforirq # activity or status control 110 + tm 31(%r5),0xff # bits is set in the schib 111 + jz .Lnoreset 112 + .Lwaitforirq: 113 + bras %r14,.Lirqwait # wait for IO interrupt 114 + c %r1,__LC_SUBCHANNEL_ID # compare subchannel number 115 + jne .Lwaitforirq 116 + larl %r5,.Lirb 117 + tsch 0(%r5) 118 + .Lnoreset: 119 + j .Lnoload 120 + # 121 + # everything loaded, go for it 122 + # 123 + .Lnoload: 124 + jg startup 76 125 # 77 126 # subroutine to wait for end I/O 78 127 # 79 128 .Lirqwait: 80 - mvc __LC_IO_NEW_PSW(16),.Lnewpsw # set up IO interrupt psw 81 - lpsw .Lwaitpsw 129 + larl %r13,.Lnewpswmask # set up IO interrupt psw 130 + mvc __LC_IO_NEW_PSW(8),0(%r13) 131 + stg %r14,__LC_IO_NEW_PSW+8 132 + larl %r13,.Lwaitpsw 133 + lpswe 0(%r13) 82 134 .Lioint: 83 - br %r14 84 - .align 8 85 - .Lnewpsw: 86 - .quad 0x0000000080000000,.Lioint 87 - .Lwaitpsw: 88 - .long 0x020a0000,0x80000000+.Lioint 89 - 90 135 # 91 136 # subroutine for loading cards from the reader 92 137 # 93 138 .Lloader: 94 - la %r4,0(%r14) 95 - la %r3,.Lorb # r2 = address of orb into r2 96 - la %r5,.Lirb # r4 = address of irb 97 - la %r6,.Lccws 98 - la %r7,20 139 + lgr %r4,%r14 140 + larl %r3,.Lorb # r2 = address of orb into r2 141 + larl %r5,.Lirb # r4 = address of irb 142 + larl %r6,.Lccws 143 + lghi %r7,20 99 144 .Linit: 100 145 st %r2,4(%r6) # initialize CCW data addresses 101 146 la %r2,0x50(%r2) 102 147 la %r6,8(%r6) 103 - bct 7,.Linit 104 - 105 - lctl %c6,%c6,.Lcr6 # set IO subclass mask 106 - slr %r2,%r2 148 + brctg %r7,.Linit 149 + larl %r13,.Lcr6 150 + lctlg %c6,%c6,0(%r13) 151 + xgr %r2,%r2 107 152 .Lldlp: 108 153 ssch 0(%r3) # load chunk of 1600 bytes 109 - bnz .Llderr 154 + jnz .Llderr 110 155 .Lwait4irq: 111 - bas %r14,.Lirqwait 156 + bras %r14,.Lirqwait 112 157 c 
%r1,__LC_SUBCHANNEL_ID # compare subchannel number 113 - bne .Lwait4irq 158 + jne .Lwait4irq 114 159 tsch 0(%r5) 115 - 116 - slr %r0,%r0 160 + xgr %r0,%r0 117 161 ic %r0,8(%r5) # get device status 118 - chi %r0,8 # channel end ? 119 - be .Lcont 120 - chi %r0,12 # channel end + device end ? 121 - be .Lcont 122 - 123 - l %r0,4(%r5) 124 - s %r0,8(%r3) # r0/8 = number of ccws executed 125 - mhi %r0,10 # *10 = number of bytes in ccws 126 - lh %r3,10(%r5) # get residual count 127 - sr %r0,%r3 # #ccws*80-residual=#bytes read 128 - ar %r2,%r0 129 - 162 + cghi %r0,8 # channel end ? 163 + je .Lcont 164 + cghi %r0,12 # channel end + device end ? 165 + je .Lcont 166 + llgf %r0,4(%r5) 167 + sgf %r0,8(%r3) # r0/8 = number of ccws executed 168 + mghi %r0,10 # *10 = number of bytes in ccws 169 + llgh %r3,10(%r5) # get residual count 170 + sgr %r0,%r3 # #ccws*80-residual=#bytes read 171 + agr %r2,%r0 130 172 br %r4 # r2 contains the total size 131 - 132 173 .Lcont: 133 - ahi %r2,0x640 # add 0x640 to total size 134 - la %r6,.Lccws 135 - la %r7,20 174 + aghi %r2,0x640 # add 0x640 to total size 175 + larl %r6,.Lccws 176 + lghi %r7,20 136 177 .Lincr: 137 178 l %r0,4(%r6) # update CCW data addresses 138 - ahi %r0,0x640 179 + aghi %r0,0x640 139 180 st %r0,4(%r6) 140 - ahi %r6,8 141 - bct 7,.Lincr 142 - 143 - b .Lldlp 181 + aghi %r6,8 182 + brctg %r7,.Lincr 183 + j .Lldlp 144 184 .Llderr: 145 - lpsw .Lcrash 185 + larl %r13,.Lcrash 186 + lpsw 0(%r13) 146 187 188 + .align 8 189 + .Lwaitpsw: 190 + .quad 0x0202000180000000,.Lioint 191 + .Lnewpswmask: 192 + .quad 0x0000000180000000 147 193 .align 8 148 194 .Lorb: .long 0x00000000,0x0080ff00,.Lccws 149 195 .Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 150 - .Lcr6: .long 0xff000000 151 - .Lloadp:.long 0,0 196 + .align 8 197 + .Lcr6: .quad 0x00000000ff000000 152 198 .align 8 153 199 .Lcrash:.long 0x000a0000,0x00000000 154 - 155 200 .align 8 156 201 .Lccws: .rept 19 157 202 .long 0x02600050,0x00000000 158 203 .endr 159 204 .long 
0x02200050,0x00000000 160 - 161 - iplstart: 162 - mvi __LC_AR_MODE_ID,1 # set esame flag 163 - slr %r0,%r0 # set cpuid to zero 164 - lhi %r1,2 # mode 2 = esame (dump) 165 - sigp %r1,%r0,0x12 # switch to esame mode 166 - bras %r13,0f 167 - .fill 16,4,0x0 168 - 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs 169 - sam31 # switch to 31 bit addressing mode 170 - lh %r1,__LC_SUBCHANNEL_ID # test if subchannel number 171 - bct %r1,.Lnoload # is valid 172 - l %r1,__LC_SUBCHANNEL_ID # load ipl subchannel number 173 - la %r2,IPL_BS # load start address 174 - bas %r14,.Lloader # load rest of ipl image 175 - l %r12,.Lparm # pointer to parameter area 176 - st %r1,IPL_DEVICE+ARCH_OFFSET-PARMAREA(%r12) # save ipl device number 177 - 178 - # 179 - # load parameter file from ipl device 180 - # 181 - .Lagain1: 182 - l %r2,.Linitrd # ramdisk loc. is temp 183 - bas %r14,.Lloader # load parameter file 184 - ltr %r2,%r2 # got anything ? 185 - bz .Lnopf 186 - l %r3,MAX_COMMAND_LINE_SIZE+ARCH_OFFSET-PARMAREA(%r12) 187 - ahi %r3,-1 188 - clr %r2,%r3 189 - bl .Lnotrunc 190 - lr %r2,%r3 191 - .Lnotrunc: 192 - l %r4,.Linitrd 193 - clc 0(3,%r4),.L_hdr # if it is HDRx 194 - bz .Lagain1 # skip dataset header 195 - clc 0(3,%r4),.L_eof # if it is EOFx 196 - bz .Lagain1 # skip dateset trailer 197 - 198 - lr %r5,%r2 199 - la %r6,COMMAND_LINE-PARMAREA(%r12) 200 - lr %r7,%r2 201 - ahi %r7,1 202 - mvcl %r6,%r4 203 - .Lnopf: 204 - 205 - # 206 - # load ramdisk from ipl device 207 - # 208 - .Lagain2: 209 - l %r2,.Linitrd # addr of ramdisk 210 - st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) 211 - bas %r14,.Lloader # load ramdisk 212 - st %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of rd 213 - ltr %r2,%r2 214 - bnz .Lrdcont 215 - st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found 216 - .Lrdcont: 217 - l %r2,.Linitrd 218 - 219 - clc 0(3,%r2),.L_hdr # skip HDRx and EOFx 220 - bz .Lagain2 221 - clc 0(3,%r2),.L_eof 222 - bz .Lagain2 223 - 224 - # 225 - # reset files in VM 
reader 226 - # 227 - stidp .Lcpuid # store cpuid 228 - tm .Lcpuid,0xff # running VM ? 229 - bno .Lnoreset 230 - la %r2,.Lreset 231 - lhi %r3,26 232 - diag %r2,%r3,8 233 - la %r5,.Lirb 234 - stsch 0(%r5) # check if irq is pending 235 - tm 30(%r5),0x0f # by verifying if any of the 236 - bnz .Lwaitforirq # activity or status control 237 - tm 31(%r5),0xff # bits is set in the schib 238 - bz .Lnoreset 239 - .Lwaitforirq: 240 - bas %r14,.Lirqwait # wait for IO interrupt 241 - c %r1,__LC_SUBCHANNEL_ID # compare subchannel number 242 - bne .Lwaitforirq 243 - la %r5,.Lirb 244 - tsch 0(%r5) 245 - .Lnoreset: 246 - b .Lnoload 247 - 248 - # 249 - # everything loaded, go for it 250 - # 251 - .Lnoload: 252 - l %r1,.Lstartup 253 - br %r1 254 - 255 - .Linitrd:.long _end # default address of initrd 256 - .Lparm: .long PARMAREA 257 - .Lstartup: .long startup 258 205 .Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40 259 206 .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6 260 207 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" ··· 215 268 # this is called either by the ipl loader or directly by PSW restart 216 269 # or linload or SALIPL 217 270 # 218 - .org STARTUP_NORMAL_OFFSET 271 + .org STARTUP_NORMAL_OFFSET - IPL_START 219 272 SYM_CODE_START(startup) 220 273 j startup_normal 221 - .org EP_OFFSET 274 + .org EP_OFFSET - IPL_START 222 275 # 223 276 # This is a list of s390 kernel entry points. At address 0x1000f the number of 224 277 # valid entry points is stored. 
··· 230 283 # 231 284 # kdump startup-code, running in 64 bit absolute addressing mode 232 285 # 233 - .org STARTUP_KDUMP_OFFSET 286 + .org STARTUP_KDUMP_OFFSET - IPL_START 234 287 j startup_kdump 235 288 SYM_CODE_END(startup) 236 289 SYM_CODE_START_LOCAL(startup_normal) ··· 242 295 .fill 16,4,0x0 243 296 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs 244 297 sam64 # switch to 64 bit addressing mode 245 - basr %r13,0 # get base 246 - .LPG0: 247 - mvc __LC_EXT_NEW_PSW(16),.Lext_new_psw-.LPG0(%r13) 248 - mvc __LC_PGM_NEW_PSW(16),.Lpgm_new_psw-.LPG0(%r13) 249 - mvc __LC_IO_NEW_PSW(16),.Lio_new_psw-.LPG0(%r13) 298 + larl %r13,.Lext_new_psw 299 + mvc __LC_EXT_NEW_PSW(16),0(%r13) 300 + larl %r13,.Lpgm_new_psw 301 + mvc __LC_PGM_NEW_PSW(16),0(%r13) 302 + larl %r13,.Lio_new_psw 303 + mvc __LC_IO_NEW_PSW(16),0(%r13) 250 304 xc 0x200(256),0x200 # partially clear lowcore 251 305 xc 0x300(256),0x300 252 306 xc 0xe00(256),0xe00 253 307 xc 0xf00(256),0xf00 254 - lctlg %c0,%c15,.Lctl-.LPG0(%r13) # load control registers 308 + larl %r13,.Lctl 309 + lctlg %c0,%c15,0(%r13) # load control registers 255 310 stcke __LC_BOOT_CLOCK 256 311 mvc __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1 257 - spt 6f-.LPG0(%r13) 258 - mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) 312 + larl %r13,6f 313 + spt 0(%r13) 314 + mvc __LC_LAST_UPDATE_TIMER(8),0(%r13) 259 315 larl %r15,_stack_end-STACK_FRAME_OVERHEAD 260 316 brasl %r14,sclp_early_setup_buffer 261 317 brasl %r14,verify_facilities ··· 318 368 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8) 319 369 lpswe __LC_RETURN_PSW # disabled wait 320 370 SYM_CODE_END(startup_pgm_check_handler) 321 - 322 - # 323 - # params at 10400 (setup.h) 324 - # Must be keept in sync with struct parmarea in setup.h 325 - # 326 - .org PARMAREA 327 - SYM_DATA_START(parmarea) 328 - .quad 0 # IPL_DEVICE 329 - .quad 0 # INITRD_START 330 - .quad 0 # INITRD_SIZE 331 - .quad 0 # OLDMEM_BASE 332 - .quad 0 # OLDMEM_SIZE 333 - .quad kernel_version # points to kernel version string 
334 - .quad COMMAND_LINE_SIZE 335 - 336 - .org COMMAND_LINE 337 - .byte "root=/dev/ram0 ro" 338 - .byte 0 339 - .org PARMAREA+__PARMAREA_SIZE 340 - SYM_DATA_END(parmarea)
+84
arch/s390/boot/ipl_data.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/compat.h> 4 + #include <linux/ptrace.h> 5 + #include <asm/cio.h> 6 + #include <asm/asm-offsets.h> 7 + #include "boot.h" 8 + 9 + #define CCW0(cmd, addr, cnt, flg) \ 10 + { .cmd_code = cmd, .cda = addr, .count = cnt, .flags = flg, } 11 + 12 + #define PSW_MASK_DISABLED (PSW_MASK_WAIT | PSW_MASK_EA | PSW_MASK_BA) 13 + 14 + struct ipl_lowcore { 15 + psw_t32 ipl_psw; /* 0x0000 */ 16 + struct ccw0 ccwpgm[2]; /* 0x0008 */ 17 + u8 fill[56]; /* 0x0018 */ 18 + struct ccw0 ccwpgmcc[20]; /* 0x0050 */ 19 + u8 pad_0xf0[0x01a0-0x00f0]; /* 0x00f0 */ 20 + psw_t restart_psw; /* 0x01a0 */ 21 + psw_t external_new_psw; /* 0x01b0 */ 22 + psw_t svc_new_psw; /* 0x01c0 */ 23 + psw_t program_new_psw; /* 0x01d0 */ 24 + psw_t mcck_new_psw; /* 0x01e0 */ 25 + psw_t io_new_psw; /* 0x01f0 */ 26 + }; 27 + 28 + /* 29 + * Initial lowcore for IPL: the first 24 bytes are loaded by IPL to 30 + * addresses 0-23 (a PSW and two CCWs). Bytes 24-79 are discarded. 31 + * The next 160 bytes are loaded to addresses 0x18-0xb7. They form 32 + * the continuation of the CCW program started by IPL and load the 33 + * range 0x0f0-0x730 from the image to the range 0x0f0-0x730 in 34 + * memory. At the end of the channel program the PSW at location 0 is 35 + * loaded. 36 + * Initial processing starts at 0x200 = iplstart. 37 + * 38 + * The restart psw points to iplstart which allows to load a kernel 39 + * image into memory and starting it by a psw restart on any cpu. All 40 + * other default psw new locations contain a disabled wait psw where 41 + * the address indicates which psw was loaded. 42 + * 43 + * Note that the 'file' utility can detect s390 kernel images. For 44 + * that to succeed the two initial CCWs, and the 0x40 fill bytes must 45 + * be present. 
46 + */ 47 + static struct ipl_lowcore ipl_lowcore __used __section(".ipldata") = { 48 + .ipl_psw = { .mask = PSW32_MASK_BASE, .addr = PSW32_ADDR_AMODE | IPL_START }, 49 + .ccwpgm = { 50 + [ 0] = CCW0(CCW_CMD_READ_IPL, 0x018, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 51 + [ 1] = CCW0(CCW_CMD_READ_IPL, 0x068, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 52 + }, 53 + .fill = { 54 + [ 0 ... 55] = 0x40, 55 + }, 56 + .ccwpgmcc = { 57 + [ 0] = CCW0(CCW_CMD_READ_IPL, 0x0f0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 58 + [ 1] = CCW0(CCW_CMD_READ_IPL, 0x140, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 59 + [ 2] = CCW0(CCW_CMD_READ_IPL, 0x190, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 60 + [ 3] = CCW0(CCW_CMD_READ_IPL, 0x1e0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 61 + [ 4] = CCW0(CCW_CMD_READ_IPL, 0x230, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 62 + [ 5] = CCW0(CCW_CMD_READ_IPL, 0x280, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 63 + [ 6] = CCW0(CCW_CMD_READ_IPL, 0x2d0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 64 + [ 7] = CCW0(CCW_CMD_READ_IPL, 0x320, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 65 + [ 8] = CCW0(CCW_CMD_READ_IPL, 0x370, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 66 + [ 9] = CCW0(CCW_CMD_READ_IPL, 0x3c0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 67 + [10] = CCW0(CCW_CMD_READ_IPL, 0x410, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 68 + [11] = CCW0(CCW_CMD_READ_IPL, 0x460, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 69 + [12] = CCW0(CCW_CMD_READ_IPL, 0x4b0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 70 + [13] = CCW0(CCW_CMD_READ_IPL, 0x500, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 71 + [14] = CCW0(CCW_CMD_READ_IPL, 0x550, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 72 + [15] = CCW0(CCW_CMD_READ_IPL, 0x5a0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 73 + [16] = CCW0(CCW_CMD_READ_IPL, 0x5f0, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 74 + [17] = CCW0(CCW_CMD_READ_IPL, 0x640, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 75 + [18] = CCW0(CCW_CMD_READ_IPL, 0x690, 0x50, CCW_FLAG_SLI | CCW_FLAG_CC), 76 + [19] = CCW0(CCW_CMD_READ_IPL, 0x6e0, 0x50, CCW_FLAG_SLI), 77 + }, 78 + .restart_psw = { .mask = 0, .addr = IPL_START, 
}, 79 + .external_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_EXT_NEW_PSW, }, 80 + .svc_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_SVC_NEW_PSW, }, 81 + .program_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_PGM_NEW_PSW, }, 82 + .mcck_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_MCK_NEW_PSW, }, 83 + .io_new_psw = { .mask = PSW_MASK_DISABLED, .addr = __LC_IO_NEW_PSW, }, 84 + };
+7
arch/s390/boot/ipl_parm.c
··· 8 8 #include <asm/sections.h> 9 9 #include <asm/boot_data.h> 10 10 #include <asm/facility.h> 11 + #include <asm/setup.h> 11 12 #include <asm/uv.h> 12 13 #include "boot.h" 14 + 15 + struct parmarea parmarea __section(".parmarea") = { 16 + .kernel_version = (unsigned long)kernel_version, 17 + .max_command_line_size = COMMAND_LINE_SIZE, 18 + .command_line = "root=/dev/ram0 ro", 19 + }; 13 20 14 21 char __bootdata(early_command_line)[COMMAND_LINE_SIZE]; 15 22 int __bootdata(noexec_disabled);
+1 -1
arch/s390/boot/kaslr.c
··· 8 8 #include <asm/timex.h> 9 9 #include <asm/sclp.h> 10 10 #include <asm/kasan.h> 11 - #include "compressed/decompressor.h" 11 + #include "decompressor.h" 12 12 #include "boot.h" 13 13 14 14 #define PRNG_MODE_TDES 1
+1 -1
arch/s390/boot/mem_detect.c
··· 7 7 #include <asm/sections.h> 8 8 #include <asm/mem_detect.h> 9 9 #include <asm/sparsemem.h> 10 - #include "compressed/decompressor.h" 10 + #include "decompressor.h" 11 11 #include "boot.h" 12 12 13 13 struct mem_detect_info __bootdata(mem_detect);
+1 -1
arch/s390/boot/startup.c
··· 10 10 #include <asm/sclp.h> 11 11 #include <asm/diag.h> 12 12 #include <asm/uv.h> 13 - #include "compressed/decompressor.h" 13 + #include "decompressor.h" 14 14 #include "boot.h" 15 15 #include "uv.h" 16 16
+1 -1
arch/s390/crypto/des_s390.c
··· 194 194 * same as DES. Implementers MUST reject keys that exhibit this 195 195 * property. 196 196 * 197 - * In fips mode additinally check for all 3 keys are unique. 197 + * In fips mode additionally check for all 3 keys are unique. 198 198 * 199 199 */ 200 200 static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
+1 -1
arch/s390/crypto/prng.c
··· 528 528 /* give mutex free before calling schedule() */ 529 529 mutex_unlock(&prng_data->mutex); 530 530 schedule(); 531 - /* occopy mutex again */ 531 + /* occupy mutex again */ 532 532 if (mutex_lock_interruptible(&prng_data->mutex)) { 533 533 if (ret == 0) 534 534 ret = -ERESTARTSYS;
+1 -1
arch/s390/hypfs/hypfs_vm.c
··· 190 190 if (IS_ERR(data)) 191 191 return PTR_ERR(data); 192 192 193 - /* Hpervisor Info */ 193 + /* Hypervisor Info */ 194 194 dir = hypfs_mkdir(root, "hyp"); 195 195 if (IS_ERR(dir)) { 196 196 rc = PTR_ERR(dir);
+9 -67
arch/s390/include/asm/alternative-asm.h
··· 5 5 #ifdef __ASSEMBLY__ 6 6 7 7 /* 8 - * Check the length of an instruction sequence. The length may not be larger 9 - * than 254 bytes and it has to be divisible by 2. 10 - */ 11 - .macro alt_len_check start,end 12 - .if ( \end - \start ) > 254 13 - .error "cpu alternatives does not support instructions blocks > 254 bytes\n" 14 - .endif 15 - .if ( \end - \start ) % 2 16 - .error "cpu alternatives instructions length is odd\n" 17 - .endif 18 - .endm 19 - 20 - /* 21 8 * Issue one struct alt_instr descriptor entry (need to put it into 22 9 * the section .altinstructions, see below). This entry contains 23 10 * enough information for the alternatives patching code to patch an ··· 15 28 .long \alt_start - . 16 29 .word \feature 17 30 .byte \orig_end - \orig_start 18 - .byte \alt_end - \alt_start 19 - .endm 20 - 21 - /* 22 - * Fill up @bytes with nops. The macro emits 6-byte nop instructions 23 - * for the bulk of the area, possibly followed by a 4-byte and/or 24 - * a 2-byte nop if the size of the area is not divisible by 6. 25 - */ 26 - .macro alt_pad_fill bytes 27 - .rept ( \bytes ) / 6 28 - brcl 0,0 29 - .endr 30 - .rept ( \bytes ) % 6 / 4 31 - nop 32 - .endr 33 - .rept ( \bytes ) % 6 % 4 / 2 34 - nopr 35 - .endr 36 - .endm 37 - 38 - /* 39 - * Fill up @bytes with nops. If the number of bytes is larger 40 - * than 6, emit a jg instruction to branch over all nops, then 41 - * fill an area of size (@bytes - 6) with nop instructions. 42 - */ 43 - .macro alt_pad bytes 44 - .if ( \bytes > 0 ) 45 - .if ( \bytes > 6 ) 46 - jg . + \bytes 47 - alt_pad_fill \bytes - 6 48 - .else 49 - alt_pad_fill \bytes 50 - .endif 51 - .endif 31 + .org . - ( \orig_end - \orig_start ) + ( \alt_end - \alt_start ) 32 + .org . - ( \alt_end - \alt_start ) + ( \orig_end - \orig_start ) 52 33 .endm 53 34 54 35 /* 55 36 * Define an alternative between two instructions. If @feature is 56 37 * present, early code in apply_alternatives() replaces @oldinstr with 57 - * @newinstr. 
".skip" directive takes care of proper instruction padding 58 - * in case @newinstr is longer than @oldinstr. 38 + * @newinstr. 59 39 */ 60 40 .macro ALTERNATIVE oldinstr, newinstr, feature 61 41 .pushsection .altinstr_replacement,"ax" 62 42 770: \newinstr 63 43 771: .popsection 64 44 772: \oldinstr 65 - 773: alt_len_check 770b, 771b 66 - alt_len_check 772b, 773b 67 - alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) ) 68 - 774: .pushsection .altinstructions,"a" 69 - alt_entry 772b, 774b, 770b, 771b, \feature 45 + 773: .pushsection .altinstructions,"a" 46 + alt_entry 772b, 773b, 770b, 771b, \feature 70 47 .popsection 71 48 .endm 72 49 73 50 /* 74 51 * Define an alternative between two instructions. If @feature is 75 52 * present, early code in apply_alternatives() replaces @oldinstr with 76 - * @newinstr. ".skip" directive takes care of proper instruction padding 77 - * in case @newinstr is longer than @oldinstr. 53 + * @newinstr. 78 54 */ 79 55 .macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2 80 56 .pushsection .altinstr_replacement,"ax" ··· 45 95 771: \newinstr2 46 96 772: .popsection 47 97 773: \oldinstr 48 - 774: alt_len_check 770b, 771b 49 - alt_len_check 771b, 772b 50 - alt_len_check 773b, 774b 51 - .if ( 771b - 770b > 772b - 771b ) 52 - alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) ) 53 - .else 54 - alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) ) 55 - .endif 56 - 775: .pushsection .altinstructions,"a" 57 - alt_entry 773b, 775b, 770b, 771b,\feature1 58 - alt_entry 773b, 775b, 771b, 772b,\feature2 98 + 774: .pushsection .altinstructions,"a" 99 + alt_entry 773b, 774b, 770b, 771b,\feature1 100 + alt_entry 773b, 774b, 771b, 772b,\feature2 59 101 .popsection 60 102 .endm 61 103
+19 -72
arch/s390/include/asm/alternative.h
··· 13 13 s32 repl_offset; /* offset to replacement instruction */ 14 14 u16 facility; /* facility bit set for replacement */ 15 15 u8 instrlen; /* length of original instruction */ 16 - u8 replacementlen; /* length of new instruction */ 17 16 } __packed; 18 17 19 18 void apply_alternative_instructions(void); 20 19 void apply_alternatives(struct alt_instr *start, struct alt_instr *end); 21 20 22 21 /* 23 - * |661: |662: |6620 |663: 24 - * +-----------+---------------------+ 25 - * | oldinstr | oldinstr_padding | 26 - * | +----------+----------+ 27 - * | | | | 28 - * | | >6 bytes |6/4/2 nops| 29 - * | |6 bytes jg-----------> 30 - * +-----------+---------------------+ 31 - * ^^ static padding ^^ 22 + * +---------------------------------+ 23 + * |661: |662: 24 + * | oldinstr | 25 + * +---------------------------------+ 32 26 * 33 27 * .altinstr_replacement section 34 - * +---------------------+-----------+ 28 + * +---------------------------------+ 35 29 * |6641: |6651: 36 30 * | alternative instr 1 | 37 - * +-----------+---------+- - - - - -+ 38 - * |6642: |6652: | 39 - * | alternative instr 2 | padding 40 - * +---------------------+- - - - - -+ 41 - * ^ runtime ^ 31 + * +---------------------------------+ 32 + * |6642: |6652: 33 + * | alternative instr 2 | 34 + * +---------------------------------+ 42 35 * 43 36 * .altinstructions section 44 37 * +---------------------------------+ ··· 40 47 * +---------------------------------+ 41 48 */ 42 49 43 - #define b_altinstr(num) "664"#num 44 - #define e_altinstr(num) "665"#num 45 - 46 - #define e_oldinstr_pad_end "663" 50 + #define b_altinstr(num) "664"#num 51 + #define e_altinstr(num) "665"#num 47 52 #define oldinstr_len "662b-661b" 48 - #define oldinstr_total_len e_oldinstr_pad_end"b-661b" 49 53 #define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b" 50 - #define oldinstr_pad_len(num) \ 51 - "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \ 52 - "((" altinstr_len(num) ")-(" oldinstr_len "))" 53 54 54 - 
#define INSTR_LEN_SANITY_CHECK(len) \ 55 - ".if " len " > 254\n" \ 56 - "\t.error \"cpu alternatives does not support instructions " \ 57 - "blocks > 254 bytes\"\n" \ 58 - ".endif\n" \ 59 - ".if (" len ") %% 2\n" \ 60 - "\t.error \"cpu alternatives instructions length is odd\"\n" \ 61 - ".endif\n" 62 - 63 - #define OLDINSTR_PADDING(oldinstr, num) \ 64 - ".if " oldinstr_pad_len(num) " > 6\n" \ 65 - "\tjg " e_oldinstr_pad_end "f\n" \ 66 - "6620:\n" \ 67 - "\t.rept (" oldinstr_pad_len(num) " - (6620b-662b)) / 2\n" \ 68 - "\tnopr\n" \ 69 - ".else\n" \ 70 - "\t.rept " oldinstr_pad_len(num) " / 6\n" \ 71 - "\t.brcl 0,0\n" \ 72 - "\t.endr\n" \ 73 - "\t.rept " oldinstr_pad_len(num) " %% 6 / 4\n" \ 74 - "\tnop\n" \ 75 - "\t.endr\n" \ 76 - "\t.rept " oldinstr_pad_len(num) " %% 6 %% 4 / 2\n" \ 77 - "\tnopr\n" \ 78 - ".endr\n" \ 79 - ".endif\n" 80 - 81 - #define OLDINSTR(oldinstr, num) \ 82 - "661:\n\t" oldinstr "\n662:\n" \ 83 - OLDINSTR_PADDING(oldinstr, num) \ 84 - e_oldinstr_pad_end ":\n" \ 85 - INSTR_LEN_SANITY_CHECK(oldinstr_len) 86 - 87 - #define OLDINSTR_2(oldinstr, num1, num2) \ 88 - "661:\n\t" oldinstr "\n662:\n" \ 89 - ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \ 90 - OLDINSTR_PADDING(oldinstr, num2) \ 91 - ".else\n" \ 92 - OLDINSTR_PADDING(oldinstr, num1) \ 93 - ".endif\n" \ 94 - e_oldinstr_pad_end ":\n" \ 95 - INSTR_LEN_SANITY_CHECK(oldinstr_len) 55 + #define OLDINSTR(oldinstr) \ 56 + "661:\n\t" oldinstr "\n662:\n" 96 57 97 58 #define ALTINSTR_ENTRY(facility, num) \ 98 59 "\t.long 661b - .\n" /* old instruction */ \ 99 60 "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \ 100 61 "\t.word " __stringify(facility) "\n" /* facility bit */ \ 101 - "\t.byte " oldinstr_total_len "\n" /* source len */ \ 102 - "\t.byte " altinstr_len(num) "\n" /* alt instruction len */ 62 + "\t.byte " oldinstr_len "\n" /* instruction len */ \ 63 + "\t.org . - (" oldinstr_len ") + (" altinstr_len(num) ")\n" \ 64 + "\t.org . 
- (" altinstr_len(num) ") + (" oldinstr_len ")\n" 103 65 104 66 #define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \ 105 - b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \ 106 - INSTR_LEN_SANITY_CHECK(altinstr_len(num)) 67 + b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" 107 68 108 69 /* alternative assembly primitive: */ 109 70 #define ALTERNATIVE(oldinstr, altinstr, facility) \ 110 71 ".pushsection .altinstr_replacement, \"ax\"\n" \ 111 72 ALTINSTR_REPLACEMENT(altinstr, 1) \ 112 73 ".popsection\n" \ 113 - OLDINSTR(oldinstr, 1) \ 74 + OLDINSTR(oldinstr) \ 114 75 ".pushsection .altinstructions,\"a\"\n" \ 115 76 ALTINSTR_ENTRY(facility, 1) \ 116 77 ".popsection\n" ··· 74 127 ALTINSTR_REPLACEMENT(altinstr1, 1) \ 75 128 ALTINSTR_REPLACEMENT(altinstr2, 2) \ 76 129 ".popsection\n" \ 77 - OLDINSTR_2(oldinstr, 1, 2) \ 130 + OLDINSTR(oldinstr) \ 78 131 ".pushsection .altinstructions,\"a\"\n" \ 79 132 ALTINSTR_ENTRY(facility1, 1) \ 80 133 ALTINSTR_ENTRY(facility2, 2) \
+6 -6
arch/s390/include/asm/asm-extable.h
··· 26 26 stringify_in_c(.long (_target) - .;) \ 27 27 stringify_in_c(.short (_type);) \ 28 28 stringify_in_c(.macro extable_reg reg;) \ 29 - stringify_in_c(.set found, 0;) \ 30 - stringify_in_c(.set regnr, 0;) \ 29 + stringify_in_c(.set .Lfound, 0;) \ 30 + stringify_in_c(.set .Lregnr, 0;) \ 31 31 stringify_in_c(.irp rs,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15;) \ 32 32 stringify_in_c(.ifc "\reg", "%%\rs";) \ 33 - stringify_in_c(.set found, 1;) \ 34 - stringify_in_c(.short regnr;) \ 33 + stringify_in_c(.set .Lfound, 1;) \ 34 + stringify_in_c(.short .Lregnr;) \ 35 35 stringify_in_c(.endif;) \ 36 - stringify_in_c(.set regnr, regnr+1;) \ 36 + stringify_in_c(.set .Lregnr, .Lregnr+1;) \ 37 37 stringify_in_c(.endr;) \ 38 - stringify_in_c(.ifne (found != 1);) \ 38 + stringify_in_c(.ifne (.Lfound != 1);) \ 39 39 stringify_in_c(.error "extable_reg: bad register argument";) \ 40 40 stringify_in_c(.endif;) \ 41 41 stringify_in_c(.endm;) \
+8 -8
arch/s390/include/asm/barrier.h
··· 26 26 asm volatile(__ASM_BCR_SERIALIZE : : : "memory"); 27 27 } 28 28 29 - #define mb() bcr_serialize() 30 - #define rmb() barrier() 31 - #define wmb() barrier() 32 - #define dma_rmb() mb() 33 - #define dma_wmb() mb() 34 - #define __smp_mb() mb() 35 - #define __smp_rmb() rmb() 36 - #define __smp_wmb() wmb() 29 + #define __mb() bcr_serialize() 30 + #define __rmb() barrier() 31 + #define __wmb() barrier() 32 + #define __dma_rmb() __mb() 33 + #define __dma_wmb() __mb() 34 + #define __smp_mb() __mb() 35 + #define __smp_rmb() __rmb() 36 + #define __smp_wmb() __wmb() 37 37 38 38 #define __smp_store_release(p, v) \ 39 39 do { \
+1 -1
arch/s390/include/asm/cio.h
··· 369 369 struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages); 370 370 371 371 /* Function from drivers/s390/cio/chsc.c */ 372 - int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta); 372 + int chsc_sstpc(void *page, unsigned int op, u16 ctrl, long *clock_delta); 373 373 int chsc_sstpi(void *page, void *result, size_t size); 374 374 int chsc_stzi(void *page, void *result, size_t size); 375 375 int chsc_sgib(u32 origin);
+1 -24
arch/s390/include/asm/compat.h
··· 8 8 #include <linux/sched.h> 9 9 #include <linux/sched/task_stack.h> 10 10 #include <linux/thread_info.h> 11 + #include <asm/ptrace.h> 11 12 12 13 #define compat_mode_t compat_mode_t 13 14 typedef u16 compat_mode_t; ··· 23 22 (__force t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v)); \ 24 23 }) 25 24 26 - #define PSW32_MASK_PER 0x40000000UL 27 - #define PSW32_MASK_DAT 0x04000000UL 28 - #define PSW32_MASK_IO 0x02000000UL 29 - #define PSW32_MASK_EXT 0x01000000UL 30 - #define PSW32_MASK_KEY 0x00F00000UL 31 - #define PSW32_MASK_BASE 0x00080000UL /* Always one */ 32 - #define PSW32_MASK_MCHECK 0x00040000UL 33 - #define PSW32_MASK_WAIT 0x00020000UL 34 - #define PSW32_MASK_PSTATE 0x00010000UL 35 - #define PSW32_MASK_ASC 0x0000C000UL 36 - #define PSW32_MASK_CC 0x00003000UL 37 - #define PSW32_MASK_PM 0x00000f00UL 38 - #define PSW32_MASK_RI 0x00000080UL 39 - 40 25 #define PSW32_MASK_USER 0x0000FF00UL 41 - 42 - #define PSW32_ADDR_AMODE 0x80000000UL 43 - #define PSW32_ADDR_INSN 0x7FFFFFFFUL 44 - 45 - #define PSW32_DEFAULT_KEY (((u32) PAGE_DEFAULT_ACC) << 20) 46 - 47 - #define PSW32_ASC_PRIMARY 0x00000000UL 48 - #define PSW32_ASC_ACCREG 0x00004000UL 49 - #define PSW32_ASC_SECONDARY 0x00008000UL 50 - #define PSW32_ASC_HOME 0x0000C000UL 51 26 52 27 #define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \ 53 28 PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \
+3 -1
arch/s390/include/asm/ctl_reg.h
··· 93 93 unsigned long tcx : 1; /* Transactional-Execution control */ 94 94 unsigned long pifo : 1; /* Transactional-Execution Program- 95 95 Interruption-Filtering Override */ 96 - unsigned long : 22; 96 + unsigned long : 3; 97 + unsigned long ccc : 1; /* Cryptography counter control */ 98 + unsigned long : 18; 97 99 unsigned long : 3; 98 100 unsigned long lap : 1; /* Low-address-protection control */ 99 101 unsigned long : 4;
+9 -5
arch/s390/include/asm/entry-common.h
··· 9 9 #include <linux/uaccess.h> 10 10 #include <asm/timex.h> 11 11 #include <asm/fpu/api.h> 12 + #include <asm/pai.h> 12 13 13 14 #define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP) 14 15 15 16 void do_per_trap(struct pt_regs *regs); 16 17 17 - #ifdef CONFIG_DEBUG_ENTRY 18 - static __always_inline void arch_check_user_regs(struct pt_regs *regs) 18 + static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) 19 19 { 20 - debug_user_asce(0); 20 + if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) 21 + debug_user_asce(0); 22 + 23 + pai_kernel_enter(regs); 21 24 } 22 25 23 - #define arch_check_user_regs arch_check_user_regs 24 - #endif /* CONFIG_DEBUG_ENTRY */ 26 + #define arch_enter_from_user_mode arch_enter_from_user_mode 25 27 26 28 static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs, 27 29 unsigned long ti_work) ··· 46 44 47 45 if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) 48 46 debug_user_asce(1); 47 + 48 + pai_kernel_exit(current_pt_regs()); 49 49 } 50 50 51 51 #define arch_exit_to_user_mode arch_exit_to_user_mode
+6
arch/s390/include/asm/ipl.h
··· 133 133 * DIAG 308 support 134 134 */ 135 135 enum diag308_subcode { 136 + DIAG308_CLEAR_RESET = 0, 137 + DIAG308_LOAD_NORMAL_RESET = 1, 136 138 DIAG308_REL_HSA = 2, 137 139 DIAG308_LOAD_CLEAR = 3, 138 140 DIAG308_LOAD_NORMAL_DUMP = 4, 139 141 DIAG308_SET = 5, 140 142 DIAG308_STORE = 6, 141 143 DIAG308_LOAD_NORMAL = 7, 144 + }; 145 + 146 + enum diag308_subcode_flags { 147 + DIAG308_FLAG_EI = 1UL << 16, 142 148 }; 143 149 144 150 enum diag308_rc {
+4 -1
arch/s390/include/asm/lowcore.h
··· 200 200 __u64 last_break_save_area; /* 0x1338 */ 201 201 __u32 access_regs_save_area[16]; /* 0x1340 */ 202 202 __u64 cregs_save_area[16]; /* 0x1380 */ 203 - __u8 pad_0x1400[0x1800-0x1400]; /* 0x1400 */ 203 + __u8 pad_0x1400[0x1500-0x1400]; /* 0x1400 */ 204 + /* Cryptography-counter designation */ 205 + __u64 ccd; /* 0x1500 */ 206 + __u8 pad_0x1508[0x1800-0x1508]; /* 0x1508 */ 204 207 205 208 /* Transaction abort diagnostic block */ 206 209 struct pgm_tdb pgm_tdb; /* 0x1800 */
+1 -1
arch/s390/include/asm/nmi.h
··· 101 101 int nmi_alloc_mcesa(u64 *mcesad); 102 102 void nmi_free_mcesa(u64 *mcesad); 103 103 104 - void s390_handle_mcck(void); 104 + void s390_handle_mcck(struct pt_regs *regs); 105 105 void __s390_handle_mcck(void); 106 106 int s390_do_machine_check(struct pt_regs *regs); 107 107
+6 -6
arch/s390/include/asm/nospec-insn.h
··· 54 54 .endm 55 55 56 56 .macro __DECODE_R expand,reg 57 - .set __decode_fail,1 57 + .set .L__decode_fail,1 58 58 .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 59 59 .ifc \reg,%r\r1 60 60 \expand \r1 61 - .set __decode_fail,0 61 + .set .L__decode_fail,0 62 62 .endif 63 63 .endr 64 - .if __decode_fail == 1 64 + .if .L__decode_fail == 1 65 65 .error "__DECODE_R failed" 66 66 .endif 67 67 .endm 68 68 69 69 .macro __DECODE_RR expand,rsave,rtarget 70 - .set __decode_fail,1 70 + .set .L__decode_fail,1 71 71 .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 72 72 .ifc \rsave,%r\r1 73 73 .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 74 74 .ifc \rtarget,%r\r2 75 75 \expand \r1,\r2 76 - .set __decode_fail,0 76 + .set .L__decode_fail,0 77 77 .endif 78 78 .endr 79 79 .endif 80 80 .endr 81 - .if __decode_fail == 1 81 + .if .L__decode_fail == 1 82 82 .error "__DECODE_RR failed" 83 83 .endif 84 84 .endm
+74
arch/s390/include/asm/pai.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Processor Activity Instrumentation support for cryptography counters 4 + * 5 + * Copyright IBM Corp. 2022 6 + * Author(s): Thomas Richter <tmricht@linux.ibm.com> 7 + */ 8 + #ifndef _ASM_S390_PAI_H 9 + #define _ASM_S390_PAI_H 10 + 11 + #include <linux/jump_label.h> 12 + #include <asm/lowcore.h> 13 + #include <asm/ptrace.h> 14 + 15 + struct qpaci_info_block { 16 + u64 header; 17 + struct { 18 + u64 : 8; 19 + u64 num_cc : 8; /* # of supported crypto counters */ 20 + u64 : 48; 21 + }; 22 + }; 23 + 24 + static inline int qpaci(struct qpaci_info_block *info) 25 + { 26 + /* Size of info (in double words minus one) */ 27 + size_t size = sizeof(*info) / sizeof(u64) - 1; 28 + int cc; 29 + 30 + asm volatile( 31 + " lgr 0,%[size]\n" 32 + " .insn s,0xb28f0000,%[info]\n" 33 + " lgr %[size],0\n" 34 + " ipm %[cc]\n" 35 + " srl %[cc],28\n" 36 + : [cc] "=d" (cc), [info] "=Q" (*info), [size] "+&d" (size) 37 + : 38 + : "0", "cc", "memory"); 39 + return cc ? 
(size + 1) * sizeof(u64) : 0; 40 + } 41 + 42 + #define PAI_CRYPTO_BASE 0x1000 /* First event number */ 43 + #define PAI_CRYPTO_MAXCTR 256 /* Max # of event counters */ 44 + #define PAI_CRYPTO_KERNEL_OFFSET 2048 45 + 46 + DECLARE_STATIC_KEY_FALSE(pai_key); 47 + 48 + static __always_inline void pai_kernel_enter(struct pt_regs *regs) 49 + { 50 + if (!IS_ENABLED(CONFIG_PERF_EVENTS)) 51 + return; 52 + if (!static_branch_unlikely(&pai_key)) 53 + return; 54 + if (!S390_lowcore.ccd) 55 + return; 56 + if (!user_mode(regs)) 57 + return; 58 + WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd | PAI_CRYPTO_KERNEL_OFFSET); 59 + } 60 + 61 + static __always_inline void pai_kernel_exit(struct pt_regs *regs) 62 + { 63 + if (!IS_ENABLED(CONFIG_PERF_EVENTS)) 64 + return; 65 + if (!static_branch_unlikely(&pai_key)) 66 + return; 67 + if (!S390_lowcore.ccd) 68 + return; 69 + if (!user_mode(regs)) 70 + return; 71 + WRITE_ONCE(S390_lowcore.ccd, S390_lowcore.ccd & ~PAI_CRYPTO_KERNEL_OFFSET); 72 + } 73 + 74 + #endif
+6 -1
arch/s390/include/asm/pci_debug.h
··· 17 17 debug_text_event(pci_debug_err_id, 0, debug_buffer); \ 18 18 } while (0) 19 19 20 + static inline void zpci_err_hex_level(int level, void *addr, int len) 21 + { 22 + debug_event(pci_debug_err_id, level, addr, len); 23 + } 24 + 20 25 static inline void zpci_err_hex(void *addr, int len) 21 26 { 22 - debug_event(pci_debug_err_id, 0, addr, len); 27 + zpci_err_hex_level(0, addr, len); 23 28 } 24 29 25 30 #endif
+11 -4
arch/s390/include/asm/preempt.h
··· 46 46 47 47 static inline void __preempt_count_add(int val) 48 48 { 49 - if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) 50 - __atomic_add_const(val, &S390_lowcore.preempt_count); 51 - else 52 - __atomic_add(val, &S390_lowcore.preempt_count); 49 + /* 50 + * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES 51 + * enabled, gcc 12 fails to handle __builtin_constant_p(). 52 + */ 53 + if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) { 54 + if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) { 55 + __atomic_add_const(val, &S390_lowcore.preempt_count); 56 + return; 57 + } 58 + } 59 + __atomic_add(val, &S390_lowcore.preempt_count); 53 60 } 54 61 55 62 static inline void __preempt_count_sub(int val)
+5 -3
arch/s390/include/asm/processor.h
··· 83 83 extern const struct seq_operations cpuinfo_op; 84 84 extern void execve_tail(void); 85 85 extern void __bpon(void); 86 + unsigned long vdso_size(void); 86 87 87 88 /* 88 89 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. ··· 95 94 (_REGION3_SIZE >> 1) : (_REGION2_SIZE >> 1)) 96 95 #define TASK_SIZE_MAX (-PAGE_SIZE) 97 96 98 - #define STACK_TOP (test_thread_flag(TIF_31BIT) ? \ 99 - _REGION3_SIZE : _REGION2_SIZE) 100 - #define STACK_TOP_MAX _REGION2_SIZE 97 + #define VDSO_BASE (STACK_TOP + PAGE_SIZE) 98 + #define VDSO_LIMIT (test_thread_flag(TIF_31BIT) ? _REGION3_SIZE : _REGION2_SIZE) 99 + #define STACK_TOP (VDSO_LIMIT - vdso_size() - PAGE_SIZE) 100 + #define STACK_TOP_MAX (_REGION2_SIZE - vdso_size() - PAGE_SIZE) 101 101 102 102 #define HAVE_ARCH_PICK_MMAP_LAYOUT 103 103
+29
arch/s390/include/asm/ptrace.h
··· 71 71 &(*(struct psw_bits *)(&(__psw))); \ 72 72 })) 73 73 74 + #define PSW32_MASK_PER 0x40000000UL 75 + #define PSW32_MASK_DAT 0x04000000UL 76 + #define PSW32_MASK_IO 0x02000000UL 77 + #define PSW32_MASK_EXT 0x01000000UL 78 + #define PSW32_MASK_KEY 0x00F00000UL 79 + #define PSW32_MASK_BASE 0x00080000UL /* Always one */ 80 + #define PSW32_MASK_MCHECK 0x00040000UL 81 + #define PSW32_MASK_WAIT 0x00020000UL 82 + #define PSW32_MASK_PSTATE 0x00010000UL 83 + #define PSW32_MASK_ASC 0x0000C000UL 84 + #define PSW32_MASK_CC 0x00003000UL 85 + #define PSW32_MASK_PM 0x00000f00UL 86 + #define PSW32_MASK_RI 0x00000080UL 87 + 88 + #define PSW32_ADDR_AMODE 0x80000000UL 89 + #define PSW32_ADDR_INSN 0x7FFFFFFFUL 90 + 91 + #define PSW32_DEFAULT_KEY (((u32)PAGE_DEFAULT_ACC) << 20) 92 + 93 + #define PSW32_ASC_PRIMARY 0x00000000UL 94 + #define PSW32_ASC_ACCREG 0x00004000UL 95 + #define PSW32_ASC_SECONDARY 0x00008000UL 96 + #define PSW32_ASC_HOME 0x0000C000UL 97 + 98 + typedef struct { 99 + unsigned int mask; 100 + unsigned int addr; 101 + } psw_t32 __aligned(8); 102 + 74 103 #define PGM_INT_CODE_MASK 0x7f 75 104 #define PGM_INT_CODE_PER 0x80 76 105
+1
arch/s390/include/asm/sclp.h
··· 87 87 unsigned char has_diag318 : 1; 88 88 unsigned char has_sipl : 1; 89 89 unsigned char has_dirq : 1; 90 + unsigned char has_iplcc : 1; 90 91 unsigned int ibc; 91 92 unsigned int mtid; 92 93 unsigned int mtid_cp;
+68 -15
arch/s390/include/asm/scsw.h
··· 508 508 */ 509 509 static inline int scsw_cmd_is_valid_ectl(union scsw *scsw) 510 510 { 511 - return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && 512 - !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && 513 - (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS); 511 + /* Must be status pending. */ 512 + if (!(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND)) 513 + return 0; 514 + 515 + /* Must have alert status. */ 516 + if (!(scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS)) 517 + return 0; 518 + 519 + /* Must be alone or together with primary, secondary or both, 520 + * => no intermediate status. 521 + */ 522 + if (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) 523 + return 0; 524 + 525 + return 1; 514 526 } 515 527 516 528 /** ··· 534 522 */ 535 523 static inline int scsw_cmd_is_valid_pno(union scsw *scsw) 536 524 { 537 - return (scsw->cmd.fctl != 0) && 538 - (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && 539 - (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) || 540 - (scsw->cmd.actl & SCSW_ACTL_SUSPENDED)); 525 + /* Must indicate at least one I/O function. */ 526 + if (!scsw->cmd.fctl) 527 + return 0; 528 + 529 + /* Must be status pending. */ 530 + if (!(scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND)) 531 + return 0; 532 + 533 + /* Can be status pending alone, or with any combination of primary, 534 + * secondary and alert => no intermediate status. 535 + */ 536 + if (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS)) 537 + return 1; 538 + 539 + /* If intermediate, must be suspended. */ 540 + if (scsw->cmd.actl & SCSW_ACTL_SUSPENDED) 541 + return 1; 542 + 543 + return 0; 541 544 } 542 545 543 546 /** ··· 702 675 */ 703 676 static inline int scsw_tm_is_valid_ectl(union scsw *scsw) 704 677 { 705 - return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && 706 - !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && 707 - (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS); 678 + /* Must be status pending. */ 679 + if (!(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND)) 680 + return 0; 681 + 682 + /* Must have alert status. 
*/ 683 + if (!(scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS)) 684 + return 0; 685 + 686 + /* Must be alone or together with primary, secondary or both, 687 + * => no intermediate status. 688 + */ 689 + if (scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) 690 + return 0; 691 + 692 + return 1; 708 693 } 709 694 710 695 /** ··· 728 689 */ 729 690 static inline int scsw_tm_is_valid_pno(union scsw *scsw) 730 691 { 731 - return (scsw->tm.fctl != 0) && 732 - (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && 733 - (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) || 734 - ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && 735 - (scsw->tm.actl & SCSW_ACTL_SUSPENDED))); 692 + /* Must indicate at least one I/O function. */ 693 + if (!scsw->tm.fctl) 694 + return 0; 695 + 696 + /* Must be status pending. */ 697 + if (!(scsw->tm.stctl & SCSW_STCTL_STATUS_PEND)) 698 + return 0; 699 + 700 + /* Can be status pending alone, or with any combination of primary, 701 + * secondary and alert => no intermediate status. 702 + */ 703 + if (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS)) 704 + return 1; 705 + 706 + /* If intermediate, must be suspended. */ 707 + if (scsw->tm.actl & SCSW_ACTL_SUSPENDED) 708 + return 1; 709 + 710 + return 0; 736 711 } 737 712 738 713 /**
+2 -1
arch/s390/include/asm/spinlock.h
··· 77 77 static inline void arch_spin_unlock(arch_spinlock_t *lp) 78 78 { 79 79 typecheck(int, lp->lock); 80 + kcsan_release(); 80 81 asm_inline volatile( 81 - ALTERNATIVE("", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */ 82 + ALTERNATIVE("nop", ".insn rre,0xb2fa0000,7,0", 49) /* NIAI 7 */ 82 83 " sth %1,%0\n" 83 84 : "=R" (((unsigned short *) &lp->lock)[1]) 84 85 : "d" (0) : "cc", "memory");
+2 -2
arch/s390/include/asm/stp.h
··· 44 44 u32 : 32; 45 45 u32 ctnid[3]; 46 46 u32 : 32; 47 - u32 todoff[4]; 48 - u32 rsvd[48]; 47 + u64 todoff; 48 + u32 rsvd[50]; 49 49 } __packed; 50 50 51 51 struct stp_tzib {
+3 -3
arch/s390/include/asm/vx-insn.h
··· 366 366 .macro VLM vfrom, vto, disp, base, hint=3 367 367 VX_NUM v1, \vfrom 368 368 VX_NUM v3, \vto 369 - GR_NUM b2, \base /* Base register */ 369 + GR_NUM b2, \base 370 370 .word 0xE700 | ((v1&15) << 4) | (v3&15) 371 371 .word (b2 << 12) | (\disp) 372 372 MRXBOPC \hint, 0x36, v1, v3 ··· 376 376 .macro VST vr1, disp, index="%r0", base 377 377 VX_NUM v1, \vr1 378 378 GR_NUM x2, \index 379 - GR_NUM b2, \base /* Base register */ 379 + GR_NUM b2, \base 380 380 .word 0xE700 | ((v1&15) << 4) | (x2&15) 381 381 .word (b2 << 12) | (\disp) 382 382 MRXBOPC 0, 0x0E, v1 ··· 386 386 .macro VSTM vfrom, vto, disp, base, hint=3 387 387 VX_NUM v1, \vfrom 388 388 VX_NUM v3, \vto 389 - GR_NUM b2, \base /* Base register */ 389 + GR_NUM b2, \base 390 390 .word 0xE700 | ((v1&15) << 4) | (v3&15) 391 391 .word (b2 << 12) | (\disp) 392 392 MRXBOPC \hint, 0x3E, v1, v3
+1 -1
arch/s390/include/uapi/asm/pkey.h
··· 171 171 #define PKEY_SKEY2PKEY _IOWR(PKEY_IOCTL_MAGIC, 0x06, struct pkey_skey2pkey) 172 172 173 173 /* 174 - * Verify the given CCA AES secure key for being able to be useable with 174 + * Verify the given CCA AES secure key for being able to be usable with 175 175 * the pkey module. Check for correct key type and check for having at 176 176 * least one crypto card being able to handle this key (master key 177 177 * or old master key verification pattern matches).
+20 -22
arch/s390/include/uapi/asm/zcrypt.h
··· 4 4 * 5 5 * zcrypt 2.2.1 (user-visible header) 6 6 * 7 - * Copyright IBM Corp. 2001, 2019 7 + * Copyright IBM Corp. 2001, 2022 8 8 * Author(s): Robert Burroughs 9 9 * Eric Rossman (edrossma@us.ibm.com) 10 10 * ··· 85 85 struct CPRBX { 86 86 __u16 cprb_len; /* CPRB length 220 */ 87 87 __u8 cprb_ver_id; /* CPRB version id. 0x02 */ 88 - __u8 pad_000[3]; /* Alignment pad bytes */ 88 + __u8 _pad_000[3]; /* Alignment pad bytes */ 89 89 __u8 func_id[2]; /* function id 0x5432 */ 90 90 __u8 cprb_flags[4]; /* Flags */ 91 91 __u32 req_parml; /* request parameter buffer len */ ··· 95 95 __u32 rpl_datal; /* reply data block len */ 96 96 __u32 rpld_datal; /* replied data block len */ 97 97 __u32 req_extbl; /* request extension block len */ 98 - __u8 pad_001[4]; /* reserved */ 98 + __u8 _pad_001[4]; /* reserved */ 99 99 __u32 rpld_extbl; /* replied extension block len */ 100 - __u8 padx000[16 - sizeof(__u8 *)]; 100 + __u8 _pad_002[16 - sizeof(__u8 *)]; 101 101 __u8 __user *req_parmb; /* request parm block 'address' */ 102 - __u8 padx001[16 - sizeof(__u8 *)]; 102 + __u8 _pad_003[16 - sizeof(__u8 *)]; 103 103 __u8 __user *req_datab; /* request data block 'address' */ 104 - __u8 padx002[16 - sizeof(__u8 *)]; 104 + __u8 _pad_004[16 - sizeof(__u8 *)]; 105 105 __u8 __user *rpl_parmb; /* reply parm block 'address' */ 106 - __u8 padx003[16 - sizeof(__u8 *)]; 106 + __u8 _pad_005[16 - sizeof(__u8 *)]; 107 107 __u8 __user *rpl_datab; /* reply data block 'address' */ 108 - __u8 padx004[16 - sizeof(__u8 *)]; 108 + __u8 _pad_006[16 - sizeof(__u8 *)]; 109 109 __u8 __user *req_extb; /* request extension block 'addr'*/ 110 - __u8 padx005[16 - sizeof(__u8 *)]; 110 + __u8 _pad_007[16 - sizeof(__u8 *)]; 111 111 __u8 __user *rpl_extb; /* reply extension block 'address'*/ 112 112 __u16 ccp_rtcode; /* server return code */ 113 113 __u16 ccp_rscode; /* server reason code */ ··· 115 115 __u8 logon_id[8]; /* Logon Identifier */ 116 116 __u8 mac_value[8]; /* Mac Value */ 117 117 __u8 mac_content_flgs; 
/* Mac content flag byte */ 118 - __u8 pad_002; /* Alignment */ 118 + __u8 _pad_008; /* Alignment */ 119 119 __u16 domain; /* Domain */ 120 - __u8 usage_domain[4]; /* Usage domain */ 121 - __u8 cntrl_domain[4]; /* Control domain */ 122 - __u8 S390enf_mask[4]; /* S/390 enforcement mask */ 123 - __u8 pad_004[36]; /* reserved */ 120 + __u8 _pad_009[12]; /* reserved, checked for zeros */ 121 + __u8 _pad_010[36]; /* reserved */ 124 122 } __attribute__((packed)); 125 123 126 124 /** ··· 236 238 }; 237 239 238 240 #define AUTOSELECT 0xFFFFFFFF 239 - #define AUTOSEL_AP ((__u16) 0xFFFF) 240 - #define AUTOSEL_DOM ((__u16) 0xFFFF) 241 + #define AUTOSEL_AP ((__u16)0xFFFF) 242 + #define AUTOSEL_DOM ((__u16)0xFFFF) 241 243 242 244 #define ZCRYPT_IOCTL_MAGIC 'z' 243 245 ··· 303 305 /** 304 306 * Supported ioctl calls 305 307 */ 306 - #define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0) 307 - #define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) 308 - #define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) 309 - #define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0) 308 + #define ICARSAMODEXPO _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0) 309 + #define ICARSACRT _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) 310 + #define ZSECSENDCPRB _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) 311 + #define ZSENDEP11CPRB _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0) 310 312 311 - #define ZCRYPT_DEVICE_STATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x5f, 0) 313 + #define ZCRYPT_DEVICE_STATUS _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x5f, 0) 312 314 #define ZCRYPT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x58, char[MAX_ZDEV_CARDIDS_EXT]) 313 315 #define ZCRYPT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x59, char[MAX_ZDEV_CARDIDS_EXT]) 314 316 #define ZCRYPT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x5a, int[MAX_ZDEV_CARDIDS_EXT]) ··· 350 352 }; 351 353 352 
354 /* Deprecated: use ZCRYPT_DEVICE_STATUS */ 353 - #define ZDEVICESTATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0) 355 + #define ZDEVICESTATUS _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0) 354 356 /* Deprecated: use ZCRYPT_STATUS_MASK */ 355 357 #define Z90STAT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x48, char[64]) 356 358 /* Deprecated: use ZCRYPT_QDEPTH_MASK */
+1
arch/s390/kernel/Makefile
··· 72 72 obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf_common.o 73 73 obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf.o perf_cpum_sf.o 74 74 obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o 75 + obj-$(CONFIG_PERF_EVENTS) += perf_pai_crypto.o 75 76 76 77 obj-$(CONFIG_TRACEPOINTS) += trace.o 77 78 obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
+2 -59
arch/s390/kernel/alternative.c
··· 7 7 #include <asm/facility.h> 8 8 #include <asm/nospec-branch.h> 9 9 10 - #define MAX_PATCH_LEN (255 - 1) 11 - 12 10 static int __initdata_or_module alt_instr_disabled; 13 11 14 12 static int __init disable_alternative_instructions(char *str) ··· 17 19 18 20 early_param("noaltinstr", disable_alternative_instructions); 19 21 20 - struct brcl_insn { 21 - u16 opc; 22 - s32 disp; 23 - } __packed; 24 - 25 - static u16 __initdata_or_module nop16 = 0x0700; 26 - static u32 __initdata_or_module nop32 = 0x47000000; 27 - static struct brcl_insn __initdata_or_module nop48 = { 28 - 0xc004, 0 29 - }; 30 - 31 - static const void *nops[] __initdata_or_module = { 32 - &nop16, 33 - &nop32, 34 - &nop48 35 - }; 36 - 37 - static void __init_or_module add_jump_padding(void *insns, unsigned int len) 38 - { 39 - struct brcl_insn brcl = { 40 - 0xc0f4, 41 - len / 2 42 - }; 43 - 44 - memcpy(insns, &brcl, sizeof(brcl)); 45 - insns += sizeof(brcl); 46 - len -= sizeof(brcl); 47 - 48 - while (len > 0) { 49 - memcpy(insns, &nop16, 2); 50 - insns += 2; 51 - len -= 2; 52 - } 53 - } 54 - 55 - static void __init_or_module add_padding(void *insns, unsigned int len) 56 - { 57 - if (len > 6) 58 - add_jump_padding(insns, len); 59 - else if (len >= 2) 60 - memcpy(insns, nops[len / 2 - 1], len); 61 - } 62 - 63 22 static void __init_or_module __apply_alternatives(struct alt_instr *start, 64 23 struct alt_instr *end) 65 24 { 66 25 struct alt_instr *a; 67 26 u8 *instr, *replacement; 68 - u8 insnbuf[MAX_PATCH_LEN]; 69 27 70 28 /* 71 29 * The scan order should be from start to end. A later scanned 72 30 * alternative code can overwrite previously scanned alternative code. 
73 31 */ 74 32 for (a = start; a < end; a++) { 75 - int insnbuf_sz = 0; 76 - 77 33 instr = (u8 *)&a->instr_offset + a->instr_offset; 78 34 replacement = (u8 *)&a->repl_offset + a->repl_offset; 79 35 80 36 if (!__test_facility(a->facility, alt_stfle_fac_list)) 81 37 continue; 82 38 83 - if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) { 39 + if (unlikely(a->instrlen % 2)) { 84 40 WARN_ONCE(1, "cpu alternatives instructions length is " 85 41 "odd, skipping patching\n"); 86 42 continue; 87 43 } 88 44 89 - memcpy(insnbuf, replacement, a->replacementlen); 90 - insnbuf_sz = a->replacementlen; 91 - 92 - if (a->instrlen > a->replacementlen) { 93 - add_padding(insnbuf + a->replacementlen, 94 - a->instrlen - a->replacementlen); 95 - insnbuf_sz += a->instrlen - a->replacementlen; 96 - } 97 - 98 - s390_kernel_write(instr, insnbuf, insnbuf_sz); 45 + s390_kernel_write(instr, replacement, a->instrlen); 99 46 } 100 47 } 101 48
+30 -59
arch/s390/kernel/compat_linux.h
··· 5 5 #include <linux/compat.h> 6 6 #include <linux/socket.h> 7 7 #include <linux/syscalls.h> 8 + #include <asm/ptrace.h> 8 9 9 - /* Macro that masks the high order bit of an 32 bit pointer and converts it*/ 10 - /* to a 64 bit pointer */ 11 - #define A(__x) ((unsigned long)((__x) & 0x7FFFFFFFUL)) 12 - #define AA(__x) \ 13 - ((unsigned long)(__x)) 10 + /* 11 + * Macro that masks the high order bit of a 32 bit pointer and 12 + * converts it to a 64 bit pointer. 13 + */ 14 + #define A(__x) ((unsigned long)((__x) & 0x7FFFFFFFUL)) 15 + #define AA(__x) ((unsigned long)(__x)) 14 16 15 17 /* Now 32bit compatibility types */ 16 18 struct ipc_kludge_32 { 17 - __u32 msgp; /* pointer */ 18 - __s32 msgtyp; 19 + __u32 msgp; /* pointer */ 20 + __s32 msgtyp; 19 21 }; 20 22 21 23 /* asm/sigcontext.h */ 22 - typedef union 23 - { 24 - __u64 d; 25 - __u32 f; 24 + typedef union { 25 + __u64 d; 26 + __u32 f; 26 27 } freg_t32; 27 28 28 - typedef struct 29 - { 29 + typedef struct { 30 30 unsigned int fpc; 31 31 unsigned int pad; 32 - freg_t32 fprs[__NUM_FPRS]; 32 + freg_t32 fprs[__NUM_FPRS]; 33 33 } _s390_fp_regs32; 34 34 35 - typedef struct 36 - { 37 - __u32 mask; 38 - __u32 addr; 39 - } _psw_t32 __attribute__ ((aligned(8))); 40 - 41 - typedef struct 42 - { 43 - _psw_t32 psw; 35 + typedef struct { 36 + psw_t32 psw; 44 37 __u32 gprs[__NUM_GPRS]; 45 38 __u32 acrs[__NUM_ACRS]; 46 39 } _s390_regs_common32; 47 40 48 - typedef struct 49 - { 41 + typedef struct { 50 42 _s390_regs_common32 regs; 51 - _s390_fp_regs32 fpregs; 43 + _s390_fp_regs32 fpregs; 52 44 } _sigregs32; 53 45 54 - typedef struct 55 - { 56 - __u32 gprs_high[__NUM_GPRS]; 57 - __u64 vxrs_low[__NUM_VXRS_LOW]; 58 - __vector128 vxrs_high[__NUM_VXRS_HIGH]; 59 - __u8 __reserved[128]; 46 + typedef struct { 47 + __u32 gprs_high[__NUM_GPRS]; 48 + __u64 vxrs_low[__NUM_VXRS_LOW]; 49 + __vector128 vxrs_high[__NUM_VXRS_HIGH]; 50 + __u8 __reserved[128]; 60 51 } _sigregs_ext32; 61 52 62 53 #define _SIGCONTEXT_NSIG32 64 63 54 #define 
_SIGCONTEXT_NSIG_BPW32 32 64 55 #define __SIGNAL_FRAMESIZE32 96 65 - #define _SIGMASK_COPY_SIZE32 (sizeof(u32)*2) 56 + #define _SIGMASK_COPY_SIZE32 (sizeof(u32) * 2) 66 57 67 - struct sigcontext32 68 - { 58 + struct sigcontext32 { 69 59 __u32 oldmask[_COMPAT_NSIG_WORDS]; 70 - __u32 sregs; /* pointer */ 60 + __u32 sregs; /* pointer */ 71 61 }; 72 62 73 63 /* asm/signal.h */ ··· 65 75 /* asm/ucontext.h */ 66 76 struct ucontext32 { 67 77 __u32 uc_flags; 68 - __u32 uc_link; /* pointer */ 78 + __u32 uc_link; /* pointer */ 69 79 compat_stack_t uc_stack; 70 80 _sigregs32 uc_mcontext; 71 81 compat_sigset_t uc_sigmask; 72 - /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */ 82 + /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */ 73 83 unsigned char __unused[128 - sizeof(compat_sigset_t)]; 74 84 _sigregs_ext32 uc_mcontext_ext; 75 85 }; ··· 78 88 struct mmap_arg_struct_emu31; 79 89 struct fadvise64_64_args; 80 90 81 - long compat_sys_s390_chown16(const char __user *filename, u16 user, u16 group); 82 - long compat_sys_s390_lchown16(const char __user *filename, u16 user, u16 group); 83 - long compat_sys_s390_fchown16(unsigned int fd, u16 user, u16 group); 84 - long compat_sys_s390_setregid16(u16 rgid, u16 egid); 85 - long compat_sys_s390_setgid16(u16 gid); 86 - long compat_sys_s390_setreuid16(u16 ruid, u16 euid); 87 - long compat_sys_s390_setuid16(u16 uid); 88 - long compat_sys_s390_setresuid16(u16 ruid, u16 euid, u16 suid); 89 - long compat_sys_s390_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid); 90 - long compat_sys_s390_setresgid16(u16 rgid, u16 egid, u16 sgid); 91 - long compat_sys_s390_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid); 92 - long compat_sys_s390_setfsuid16(u16 uid); 93 - long compat_sys_s390_setfsgid16(u16 gid); 94 - long compat_sys_s390_getgroups16(int gidsetsize, u16 __user *grouplist); 95 - long compat_sys_s390_setgroups16(int gidsetsize, u16 __user *grouplist); 96 - long 
compat_sys_s390_getuid16(void); 97 - long compat_sys_s390_geteuid16(void); 98 - long compat_sys_s390_getgid16(void); 99 - long compat_sys_s390_getegid16(void); 100 91 long compat_sys_s390_truncate64(const char __user *path, u32 high, u32 low); 101 92 long compat_sys_s390_ftruncate64(unsigned int fd, u32 high, u32 low); 102 93 long compat_sys_s390_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, u32 high, u32 low); ··· 89 118 long compat_sys_s390_fstatat64(unsigned int dfd, const char __user *filename, struct stat64_emu31 __user *statbuf, int flag); 90 119 long compat_sys_s390_old_mmap(struct mmap_arg_struct_emu31 __user *arg); 91 120 long compat_sys_s390_mmap2(struct mmap_arg_struct_emu31 __user *arg); 92 - long compat_sys_s390_read(unsigned int fd, char __user * buf, compat_size_t count); 93 - long compat_sys_s390_write(unsigned int fd, const char __user * buf, compat_size_t count); 121 + long compat_sys_s390_read(unsigned int fd, char __user *buf, compat_size_t count); 122 + long compat_sys_s390_write(unsigned int fd, const char __user *buf, compat_size_t count); 94 123 long compat_sys_s390_fadvise64(int fd, u32 high, u32 low, compat_size_t len, int advise); 95 124 long compat_sys_s390_fadvise64_64(struct fadvise64_64_args __user *args); 96 125 long compat_sys_s390_sync_file_range(int fd, u32 offhigh, u32 offlow, u32 nhigh, u32 nlow, unsigned int flags);
+24 -16
arch/s390/kernel/entry.S
··· 53 53 _LPP_OFFSET = __LC_LPP 54 54 55 55 .macro STBEAR address 56 - ALTERNATIVE "", ".insn s,0xb2010000,\address", 193 56 + ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193 57 57 .endm 58 58 59 59 .macro LBEAR address 60 - ALTERNATIVE "", ".insn s,0xb2000000,\address", 193 60 + ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193 61 61 .endm 62 62 63 63 .macro LPSWEY address,lpswe 64 - ALTERNATIVE "b \lpswe", ".insn siy,0xeb0000000071,\address,0", 193 64 + ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193 65 65 .endm 66 66 67 67 .macro MBEAR reg 68 - ALTERNATIVE "", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193 68 + ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193 69 69 .endm 70 70 71 71 .macro CHECK_STACK savearea ··· 121 121 .endm 122 122 123 123 .macro BPOFF 124 - ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,12,0", 82 124 + ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82 125 125 .endm 126 126 127 127 .macro BPON 128 - ALTERNATIVE "", ".insn rrf,0xb2e80000,0,0,13,0", 82 128 + ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82 129 129 .endm 130 130 131 131 .macro BPENTER tif_ptr,tif_mask 132 132 ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \ 133 - "", 82 133 + "j .+12; nop; nop", 82 134 134 .endm 135 135 136 136 .macro BPEXIT tif_ptr,tif_mask ··· 172 172 lgr %r14,\reg 173 173 larl %r13,\start 174 174 slgr %r14,%r13 175 - lghi %r13,\end - \start 176 - clgr %r14,%r13 175 + #ifdef CONFIG_AS_IS_LLVM 176 + clgfrl %r14,.Lrange_size\@ 177 + #else 178 + clgfi %r14,\end - \start 179 + #endif 177 180 jhe \outside_label 181 + #ifdef CONFIG_AS_IS_LLVM 182 + .section .rodata, "a" 183 + .align 4 184 + .Lrange_size\@: 185 + .long \end - \start 186 + .previous 187 + #endif 178 188 .endm 179 189 180 190 .macro SIEEXIT ··· 236 226 aghi %r3,__TASK_pid 237 227 mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next 238 228 lmg %r6,%r15,__SF_GPRS(%r15) # load 
gprs of next task 239 - ALTERNATIVE "", "lpp _LPP_OFFSET", 40 229 + ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40 240 230 BR_EX %r14 241 231 ENDPROC(__switch_to) 242 232 ··· 483 473 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC 484 474 MBEAR %r11 485 475 stmg %r8,%r9,__PT_PSW(%r11) 486 - tm %r8,0x0001 # coming from user space? 487 - jno 1f 488 - lctlg %c1,%c1,__LC_KERNEL_ASCE 489 - 1: lgr %r2,%r11 # pass pointer to pt_regs 476 + lgr %r2,%r11 # pass pointer to pt_regs 490 477 brasl %r14,\handler 491 478 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 492 479 tmhh %r8,0x0001 # returning to user ? ··· 609 602 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 610 603 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 611 604 la %r11,STACK_FRAME_OVERHEAD(%r1) 605 + lgr %r2,%r11 612 606 lgr %r15,%r1 613 607 brasl %r14,s390_handle_mcck 614 608 .Lmcck_return: ··· 620 612 jno 0f 621 613 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 622 614 stpt __LC_EXIT_TIMER 623 - 0: ALTERNATIVE "", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193 615 + 0: ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193 624 616 LBEAR 0(%r12) 625 617 lmg %r11,%r15,__PT_R11(%r11) 626 618 LPSWEY __LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE ··· 656 648 ENDPROC(mcck_int_handler) 657 649 658 650 ENTRY(restart_int_handler) 659 - ALTERNATIVE "", "lpp _LPP_OFFSET", 40 651 + ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40 660 652 stg %r15,__LC_SAVE_AREA_RESTART 661 653 TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4 662 654 jz 0f
+2 -2
arch/s390/kernel/irq.c
··· 205 205 unsigned long flags; 206 206 int cpu; 207 207 208 - irq_lock_sparse(); 208 + rcu_read_lock(); 209 209 desc = irq_to_desc(irq); 210 210 if (!desc) 211 211 goto out; ··· 224 224 seq_putc(p, '\n'); 225 225 raw_spin_unlock_irqrestore(&desc->lock, flags); 226 226 out: 227 - irq_unlock_sparse(); 227 + rcu_read_unlock(); 228 228 } 229 229 230 230 /*
+8 -2
arch/s390/kernel/machine_kexec.c
··· 26 26 #include <asm/stacktrace.h> 27 27 #include <asm/switch_to.h> 28 28 #include <asm/nmi.h> 29 + #include <asm/sclp.h> 29 30 30 - typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); 31 + typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long, 32 + unsigned long); 31 33 32 34 extern const unsigned char relocate_kernel[]; 33 35 extern const unsigned long long relocate_kernel_len; ··· 245 243 */ 246 244 static void __do_machine_kexec(void *data) 247 245 { 246 + unsigned long diag308_subcode; 248 247 relocate_kernel_t data_mover; 249 248 struct kimage *image = data; 250 249 ··· 254 251 255 252 __arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */ 256 253 /* Call the moving routine */ 257 - (*data_mover)(&image->head, image->start); 254 + diag308_subcode = DIAG308_CLEAR_RESET; 255 + if (sclp.has_iplcc) 256 + diag308_subcode |= DIAG308_FLAG_EI; 257 + (*data_mover)(&image->head, image->start, diag308_subcode); 258 258 259 259 /* Die if kexec returns */ 260 260 disabled_wait();
+5 -1
arch/s390/kernel/nmi.c
··· 29 29 #include <asm/switch_to.h> 30 30 #include <asm/ctl_reg.h> 31 31 #include <asm/asm-offsets.h> 32 + #include <asm/pai.h> 33 + 32 34 #include <linux/kvm_host.h> 33 35 34 36 struct mcck_struct { ··· 171 169 } 172 170 } 173 171 174 - void noinstr s390_handle_mcck(void) 172 + void noinstr s390_handle_mcck(struct pt_regs *regs) 175 173 { 176 174 trace_hardirqs_off(); 175 + pai_kernel_enter(regs); 177 176 __s390_handle_mcck(); 177 + pai_kernel_exit(regs); 178 178 trace_hardirqs_on(); 179 179 } 180 180 /*
+148
arch/s390/kernel/perf_cpum_cf_events.c
··· 295 295 CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109); 296 296 CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); 297 297 CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); 298 + CPUMF_EVENT_ATTR(cf_z16, L1D_RO_EXCL_WRITES, 0x0080); 299 + CPUMF_EVENT_ATTR(cf_z16, DTLB2_WRITES, 0x0081); 300 + CPUMF_EVENT_ATTR(cf_z16, DTLB2_MISSES, 0x0082); 301 + CPUMF_EVENT_ATTR(cf_z16, CRSTE_1MB_WRITES, 0x0083); 302 + CPUMF_EVENT_ATTR(cf_z16, DTLB2_GPAGE_WRITES, 0x0084); 303 + CPUMF_EVENT_ATTR(cf_z16, ITLB2_WRITES, 0x0086); 304 + CPUMF_EVENT_ATTR(cf_z16, ITLB2_MISSES, 0x0087); 305 + CPUMF_EVENT_ATTR(cf_z16, TLB2_PTE_WRITES, 0x0089); 306 + CPUMF_EVENT_ATTR(cf_z16, TLB2_CRSTE_WRITES, 0x008a); 307 + CPUMF_EVENT_ATTR(cf_z16, TLB2_ENGINES_BUSY, 0x008b); 308 + CPUMF_EVENT_ATTR(cf_z16, TX_C_TEND, 0x008c); 309 + CPUMF_EVENT_ATTR(cf_z16, TX_NC_TEND, 0x008d); 310 + CPUMF_EVENT_ATTR(cf_z16, L1C_TLB2_MISSES, 0x008f); 311 + CPUMF_EVENT_ATTR(cf_z16, DCW_REQ, 0x0091); 312 + CPUMF_EVENT_ATTR(cf_z16, DCW_REQ_IV, 0x0092); 313 + CPUMF_EVENT_ATTR(cf_z16, DCW_REQ_CHIP_HIT, 0x0093); 314 + CPUMF_EVENT_ATTR(cf_z16, DCW_REQ_DRAWER_HIT, 0x0094); 315 + CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP, 0x0095); 316 + CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP_IV, 0x0096); 317 + CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP_CHIP_HIT, 0x0097); 318 + CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP_DRAWER_HIT, 0x0098); 319 + CPUMF_EVENT_ATTR(cf_z16, DCW_ON_MODULE, 0x0099); 320 + CPUMF_EVENT_ATTR(cf_z16, DCW_ON_DRAWER, 0x009a); 321 + CPUMF_EVENT_ATTR(cf_z16, DCW_OFF_DRAWER, 0x009b); 322 + CPUMF_EVENT_ATTR(cf_z16, DCW_ON_CHIP_MEMORY, 0x009c); 323 + CPUMF_EVENT_ATTR(cf_z16, DCW_ON_MODULE_MEMORY, 0x009d); 324 + CPUMF_EVENT_ATTR(cf_z16, DCW_ON_DRAWER_MEMORY, 0x009e); 325 + CPUMF_EVENT_ATTR(cf_z16, DCW_OFF_DRAWER_MEMORY, 0x009f); 326 + CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_MODULE_IV, 0x00a0); 327 + CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_MODULE_CHIP_HIT, 0x00a1); 328 + CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_MODULE_DRAWER_HIT, 0x00a2); 329 
+ CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_DRAWER_IV, 0x00a3); 330 + CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_DRAWER_CHIP_HIT, 0x00a4); 331 + CPUMF_EVENT_ATTR(cf_z16, IDCW_ON_DRAWER_DRAWER_HIT, 0x00a5); 332 + CPUMF_EVENT_ATTR(cf_z16, IDCW_OFF_DRAWER_IV, 0x00a6); 333 + CPUMF_EVENT_ATTR(cf_z16, IDCW_OFF_DRAWER_CHIP_HIT, 0x00a7); 334 + CPUMF_EVENT_ATTR(cf_z16, IDCW_OFF_DRAWER_DRAWER_HIT, 0x00a8); 335 + CPUMF_EVENT_ATTR(cf_z16, ICW_REQ, 0x00a9); 336 + CPUMF_EVENT_ATTR(cf_z16, ICW_REQ_IV, 0x00aa); 337 + CPUMF_EVENT_ATTR(cf_z16, ICW_REQ_CHIP_HIT, 0x00ab); 338 + CPUMF_EVENT_ATTR(cf_z16, ICW_REQ_DRAWER_HIT, 0x00ac); 339 + CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP, 0x00ad); 340 + CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP_IV, 0x00ae); 341 + CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP_CHIP_HIT, 0x00af); 342 + CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP_DRAWER_HIT, 0x00b0); 343 + CPUMF_EVENT_ATTR(cf_z16, ICW_ON_MODULE, 0x00b1); 344 + CPUMF_EVENT_ATTR(cf_z16, ICW_ON_DRAWER, 0x00b2); 345 + CPUMF_EVENT_ATTR(cf_z16, ICW_OFF_DRAWER, 0x00b3); 346 + CPUMF_EVENT_ATTR(cf_z16, ICW_ON_CHIP_MEMORY, 0x00b4); 347 + CPUMF_EVENT_ATTR(cf_z16, ICW_ON_MODULE_MEMORY, 0x00b5); 348 + CPUMF_EVENT_ATTR(cf_z16, ICW_ON_DRAWER_MEMORY, 0x00b6); 349 + CPUMF_EVENT_ATTR(cf_z16, ICW_OFF_DRAWER_MEMORY, 0x00b7); 350 + CPUMF_EVENT_ATTR(cf_z16, BCD_DFP_EXECUTION_SLOTS, 0x00e0); 351 + CPUMF_EVENT_ATTR(cf_z16, VX_BCD_EXECUTION_SLOTS, 0x00e1); 352 + CPUMF_EVENT_ATTR(cf_z16, DECIMAL_INSTRUCTIONS, 0x00e2); 353 + CPUMF_EVENT_ATTR(cf_z16, LAST_HOST_TRANSLATIONS, 0x00e8); 354 + CPUMF_EVENT_ATTR(cf_z16, TX_NC_TABORT, 0x00f4); 355 + CPUMF_EVENT_ATTR(cf_z16, TX_C_TABORT_NO_SPECIAL, 0x00f5); 356 + CPUMF_EVENT_ATTR(cf_z16, TX_C_TABORT_SPECIAL, 0x00f6); 357 + CPUMF_EVENT_ATTR(cf_z16, DFLT_ACCESS, 0x00f8); 358 + CPUMF_EVENT_ATTR(cf_z16, DFLT_CYCLES, 0x00fd); 359 + CPUMF_EVENT_ATTR(cf_z16, SORTL, 0x0100); 360 + CPUMF_EVENT_ATTR(cf_z16, DFLT_CC, 0x0109); 361 + CPUMF_EVENT_ATTR(cf_z16, DFLT_CCFINISH, 0x010a); 362 + CPUMF_EVENT_ATTR(cf_z16, NNPA_INVOCATIONS, 0x010b); 
363 + CPUMF_EVENT_ATTR(cf_z16, NNPA_COMPLETIONS, 0x010c); 364 + CPUMF_EVENT_ATTR(cf_z16, NNPA_WAIT_LOCK, 0x010d); 365 + CPUMF_EVENT_ATTR(cf_z16, NNPA_HOLD_LOCK, 0x010e); 366 + CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); 367 + CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); 298 368 299 369 static struct attribute *cpumcf_fvn1_pmu_event_attr[] __initdata = { 300 370 CPUMF_EVENT_PTR(cf_fvn1, CPU_CYCLES), ··· 705 635 NULL, 706 636 }; 707 637 638 + static struct attribute *cpumcf_z16_pmu_event_attr[] __initdata = { 639 + CPUMF_EVENT_PTR(cf_z16, L1D_RO_EXCL_WRITES), 640 + CPUMF_EVENT_PTR(cf_z16, DTLB2_WRITES), 641 + CPUMF_EVENT_PTR(cf_z16, DTLB2_MISSES), 642 + CPUMF_EVENT_PTR(cf_z16, CRSTE_1MB_WRITES), 643 + CPUMF_EVENT_PTR(cf_z16, DTLB2_GPAGE_WRITES), 644 + CPUMF_EVENT_PTR(cf_z16, ITLB2_WRITES), 645 + CPUMF_EVENT_PTR(cf_z16, ITLB2_MISSES), 646 + CPUMF_EVENT_PTR(cf_z16, TLB2_PTE_WRITES), 647 + CPUMF_EVENT_PTR(cf_z16, TLB2_CRSTE_WRITES), 648 + CPUMF_EVENT_PTR(cf_z16, TLB2_ENGINES_BUSY), 649 + CPUMF_EVENT_PTR(cf_z16, TX_C_TEND), 650 + CPUMF_EVENT_PTR(cf_z16, TX_NC_TEND), 651 + CPUMF_EVENT_PTR(cf_z16, L1C_TLB2_MISSES), 652 + CPUMF_EVENT_PTR(cf_z16, DCW_REQ), 653 + CPUMF_EVENT_PTR(cf_z16, DCW_REQ_IV), 654 + CPUMF_EVENT_PTR(cf_z16, DCW_REQ_CHIP_HIT), 655 + CPUMF_EVENT_PTR(cf_z16, DCW_REQ_DRAWER_HIT), 656 + CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP), 657 + CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP_IV), 658 + CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP_CHIP_HIT), 659 + CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP_DRAWER_HIT), 660 + CPUMF_EVENT_PTR(cf_z16, DCW_ON_MODULE), 661 + CPUMF_EVENT_PTR(cf_z16, DCW_ON_DRAWER), 662 + CPUMF_EVENT_PTR(cf_z16, DCW_OFF_DRAWER), 663 + CPUMF_EVENT_PTR(cf_z16, DCW_ON_CHIP_MEMORY), 664 + CPUMF_EVENT_PTR(cf_z16, DCW_ON_MODULE_MEMORY), 665 + CPUMF_EVENT_PTR(cf_z16, DCW_ON_DRAWER_MEMORY), 666 + CPUMF_EVENT_PTR(cf_z16, DCW_OFF_DRAWER_MEMORY), 667 + CPUMF_EVENT_PTR(cf_z16, IDCW_ON_MODULE_IV), 668 + CPUMF_EVENT_PTR(cf_z16, 
IDCW_ON_MODULE_CHIP_HIT), 669 + CPUMF_EVENT_PTR(cf_z16, IDCW_ON_MODULE_DRAWER_HIT), 670 + CPUMF_EVENT_PTR(cf_z16, IDCW_ON_DRAWER_IV), 671 + CPUMF_EVENT_PTR(cf_z16, IDCW_ON_DRAWER_CHIP_HIT), 672 + CPUMF_EVENT_PTR(cf_z16, IDCW_ON_DRAWER_DRAWER_HIT), 673 + CPUMF_EVENT_PTR(cf_z16, IDCW_OFF_DRAWER_IV), 674 + CPUMF_EVENT_PTR(cf_z16, IDCW_OFF_DRAWER_CHIP_HIT), 675 + CPUMF_EVENT_PTR(cf_z16, IDCW_OFF_DRAWER_DRAWER_HIT), 676 + CPUMF_EVENT_PTR(cf_z16, ICW_REQ), 677 + CPUMF_EVENT_PTR(cf_z16, ICW_REQ_IV), 678 + CPUMF_EVENT_PTR(cf_z16, ICW_REQ_CHIP_HIT), 679 + CPUMF_EVENT_PTR(cf_z16, ICW_REQ_DRAWER_HIT), 680 + CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP), 681 + CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP_IV), 682 + CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP_CHIP_HIT), 683 + CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP_DRAWER_HIT), 684 + CPUMF_EVENT_PTR(cf_z16, ICW_ON_MODULE), 685 + CPUMF_EVENT_PTR(cf_z16, ICW_ON_DRAWER), 686 + CPUMF_EVENT_PTR(cf_z16, ICW_OFF_DRAWER), 687 + CPUMF_EVENT_PTR(cf_z16, ICW_ON_CHIP_MEMORY), 688 + CPUMF_EVENT_PTR(cf_z16, ICW_ON_MODULE_MEMORY), 689 + CPUMF_EVENT_PTR(cf_z16, ICW_ON_DRAWER_MEMORY), 690 + CPUMF_EVENT_PTR(cf_z16, ICW_OFF_DRAWER_MEMORY), 691 + CPUMF_EVENT_PTR(cf_z16, BCD_DFP_EXECUTION_SLOTS), 692 + CPUMF_EVENT_PTR(cf_z16, VX_BCD_EXECUTION_SLOTS), 693 + CPUMF_EVENT_PTR(cf_z16, DECIMAL_INSTRUCTIONS), 694 + CPUMF_EVENT_PTR(cf_z16, LAST_HOST_TRANSLATIONS), 695 + CPUMF_EVENT_PTR(cf_z16, TX_NC_TABORT), 696 + CPUMF_EVENT_PTR(cf_z16, TX_C_TABORT_NO_SPECIAL), 697 + CPUMF_EVENT_PTR(cf_z16, TX_C_TABORT_SPECIAL), 698 + CPUMF_EVENT_PTR(cf_z16, DFLT_ACCESS), 699 + CPUMF_EVENT_PTR(cf_z16, DFLT_CYCLES), 700 + CPUMF_EVENT_PTR(cf_z16, SORTL), 701 + CPUMF_EVENT_PTR(cf_z16, DFLT_CC), 702 + CPUMF_EVENT_PTR(cf_z16, DFLT_CCFINISH), 703 + CPUMF_EVENT_PTR(cf_z16, NNPA_INVOCATIONS), 704 + CPUMF_EVENT_PTR(cf_z16, NNPA_COMPLETIONS), 705 + CPUMF_EVENT_PTR(cf_z16, NNPA_WAIT_LOCK), 706 + CPUMF_EVENT_PTR(cf_z16, NNPA_HOLD_LOCK), 707 + CPUMF_EVENT_PTR(cf_z16, MT_DIAG_CYCLES_ONE_THR_ACTIVE), 708 + 
CPUMF_EVENT_PTR(cf_z16, MT_DIAG_CYCLES_TWO_THR_ACTIVE), 709 + NULL, 710 + }; 711 + 708 712 /* END: CPUM_CF COUNTER DEFINITIONS ===================================== */ 709 713 710 714 static struct attribute_group cpumcf_pmu_events_group = { ··· 892 748 case 0x8561: 893 749 case 0x8562: 894 750 model = cpumcf_z15_pmu_event_attr; 751 + break; 752 + case 0x3931: 753 + case 0x3932: 754 + model = cpumcf_z16_pmu_event_attr; 895 755 break; 896 756 default: 897 757 model = none;
+688
arch/s390/kernel/perf_pai_crypto.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Performance event support - Processor Activity Instrumentation Facility 4 + * 5 + * Copyright IBM Corp. 2022 6 + * Author(s): Thomas Richter <tmricht@linux.ibm.com> 7 + */ 8 + #define KMSG_COMPONENT "pai_crypto" 9 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + 11 + #include <linux/kernel.h> 12 + #include <linux/kernel_stat.h> 13 + #include <linux/percpu.h> 14 + #include <linux/notifier.h> 15 + #include <linux/init.h> 16 + #include <linux/export.h> 17 + #include <linux/io.h> 18 + #include <linux/perf_event.h> 19 + 20 + #include <asm/ctl_reg.h> 21 + #include <asm/pai.h> 22 + #include <asm/debug.h> 23 + 24 + static debug_info_t *cfm_dbg; 25 + static unsigned int paicrypt_cnt; /* Size of the mapped counter sets */ 26 + /* extracted with QPACI instruction */ 27 + 28 + DEFINE_STATIC_KEY_FALSE(pai_key); 29 + 30 + struct pai_userdata { 31 + u16 num; 32 + u64 value; 33 + } __packed; 34 + 35 + struct paicrypt_map { 36 + unsigned long *page; /* Page for CPU to store counters */ 37 + struct pai_userdata *save; /* Page to store no-zero counters */ 38 + unsigned int users; /* # of PAI crypto users */ 39 + unsigned int sampler; /* # of PAI crypto samplers */ 40 + unsigned int counter; /* # of PAI crypto counters */ 41 + struct perf_event *event; /* Perf event for sampling */ 42 + }; 43 + 44 + static DEFINE_PER_CPU(struct paicrypt_map, paicrypt_map); 45 + 46 + /* Release the PMU if event is the last perf event */ 47 + static DEFINE_MUTEX(pai_reserve_mutex); 48 + 49 + /* Adjust usage counters and remove allocated memory when all users are 50 + * gone. 
51 + */ 52 + static void paicrypt_event_destroy(struct perf_event *event) 53 + { 54 + struct paicrypt_map *cpump = per_cpu_ptr(&paicrypt_map, event->cpu); 55 + 56 + cpump->event = NULL; 57 + static_branch_dec(&pai_key); 58 + mutex_lock(&pai_reserve_mutex); 59 + if (event->attr.sample_period) 60 + cpump->sampler -= 1; 61 + else 62 + cpump->counter -= 1; 63 + debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d" 64 + " sampler %d counter %d\n", __func__, 65 + event->attr.config, event->cpu, cpump->sampler, 66 + cpump->counter); 67 + if (!cpump->counter && !cpump->sampler) { 68 + debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n", 69 + __func__, (unsigned long)cpump->page, 70 + cpump->save); 71 + free_page((unsigned long)cpump->page); 72 + cpump->page = NULL; 73 + kvfree(cpump->save); 74 + cpump->save = NULL; 75 + } 76 + mutex_unlock(&pai_reserve_mutex); 77 + } 78 + 79 + static u64 paicrypt_getctr(struct paicrypt_map *cpump, int nr, bool kernel) 80 + { 81 + if (kernel) 82 + nr += PAI_CRYPTO_MAXCTR; 83 + return cpump->page[nr]; 84 + } 85 + 86 + /* Read the counter values. Return value from location in CMP. For event 87 + * CRYPTO_ALL sum up all events. 
88 + */ 89 + static u64 paicrypt_getdata(struct perf_event *event, bool kernel) 90 + { 91 + struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map); 92 + u64 sum = 0; 93 + int i; 94 + 95 + if (event->attr.config != PAI_CRYPTO_BASE) { 96 + return paicrypt_getctr(cpump, 97 + event->attr.config - PAI_CRYPTO_BASE, 98 + kernel); 99 + } 100 + 101 + for (i = 1; i <= paicrypt_cnt; i++) { 102 + u64 val = paicrypt_getctr(cpump, i, kernel); 103 + 104 + if (!val) 105 + continue; 106 + sum += val; 107 + } 108 + return sum; 109 + } 110 + 111 + static u64 paicrypt_getall(struct perf_event *event) 112 + { 113 + u64 sum = 0; 114 + 115 + if (!event->attr.exclude_kernel) 116 + sum += paicrypt_getdata(event, true); 117 + if (!event->attr.exclude_user) 118 + sum += paicrypt_getdata(event, false); 119 + 120 + return sum; 121 + } 122 + 123 + /* Used to avoid races in checking concurrent access of counting and 124 + * sampling for crypto events 125 + * 126 + * Only one instance of event pai_crypto/CRYPTO_ALL/ for sampling is 127 + * allowed and when this event is running, no counting event is allowed. 128 + * Several counting events are allowed in parallel, but no sampling event 129 + * is allowed while one (or more) counting events are running. 130 + * 131 + * This function is called in process context and it is save to block. 132 + * When the event initialization functions fails, no other call back will 133 + * be invoked. 134 + * 135 + * Allocate the memory for the event. 136 + */ 137 + static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump) 138 + { 139 + unsigned int *use_ptr; 140 + int rc = 0; 141 + 142 + mutex_lock(&pai_reserve_mutex); 143 + if (a->sample_period) { /* Sampling requested */ 144 + use_ptr = &cpump->sampler; 145 + if (cpump->counter || cpump->sampler) 146 + rc = -EBUSY; /* ... sampling/counting active */ 147 + } else { /* Counting requested */ 148 + use_ptr = &cpump->counter; 149 + if (cpump->sampler) 150 + rc = -EBUSY; /* ... 
and sampling active */ 151 + } 152 + if (rc) 153 + goto unlock; 154 + 155 + /* Allocate memory for counter page and counter extraction. 156 + * Only the first counting event has to allocate a page. 157 + */ 158 + if (cpump->page) 159 + goto unlock; 160 + 161 + rc = -ENOMEM; 162 + cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL); 163 + if (!cpump->page) 164 + goto unlock; 165 + cpump->save = kvmalloc_array(paicrypt_cnt + 1, 166 + sizeof(struct pai_userdata), GFP_KERNEL); 167 + if (!cpump->save) { 168 + free_page((unsigned long)cpump->page); 169 + cpump->page = NULL; 170 + goto unlock; 171 + } 172 + rc = 0; 173 + 174 + unlock: 175 + /* If rc is non-zero, do not increment counter/sampler. */ 176 + if (!rc) 177 + *use_ptr += 1; 178 + debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx sampler %d" 179 + " counter %d page %#lx save %p rc %d\n", __func__, 180 + a->sample_period, cpump->sampler, cpump->counter, 181 + (unsigned long)cpump->page, cpump->save, rc); 182 + mutex_unlock(&pai_reserve_mutex); 183 + return rc; 184 + } 185 + 186 + /* Might be called on different CPU than the one the event is intended for. */ 187 + static int paicrypt_event_init(struct perf_event *event) 188 + { 189 + struct perf_event_attr *a = &event->attr; 190 + struct paicrypt_map *cpump; 191 + int rc; 192 + 193 + /* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */ 194 + if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type) 195 + return -ENOENT; 196 + /* PAI crypto event must be valid */ 197 + if (a->config > PAI_CRYPTO_BASE + paicrypt_cnt) 198 + return -EINVAL; 199 + /* Allow only CPU wide operation, no process context for now. */ 200 + if (event->hw.target || event->cpu == -1) 201 + return -ENOENT; 202 + /* Allow only CRYPTO_ALL for sampling. 
*/ 203 + if (a->sample_period && a->config != PAI_CRYPTO_BASE) 204 + return -EINVAL; 205 + 206 + cpump = per_cpu_ptr(&paicrypt_map, event->cpu); 207 + rc = paicrypt_busy(a, cpump); 208 + if (rc) 209 + return rc; 210 + 211 + cpump->event = event; 212 + event->destroy = paicrypt_event_destroy; 213 + 214 + if (a->sample_period) { 215 + a->sample_period = 1; 216 + a->freq = 0; 217 + /* Register for paicrypt_sched_task() to be called */ 218 + event->attach_state |= PERF_ATTACH_SCHED_CB; 219 + /* Add raw data which contain the memory mapped counters */ 220 + a->sample_type |= PERF_SAMPLE_RAW; 221 + /* Turn off inheritance */ 222 + a->inherit = 0; 223 + } 224 + 225 + static_branch_inc(&pai_key); 226 + return 0; 227 + } 228 + 229 + static void paicrypt_read(struct perf_event *event) 230 + { 231 + u64 prev, new, delta; 232 + 233 + prev = local64_read(&event->hw.prev_count); 234 + new = paicrypt_getall(event); 235 + local64_set(&event->hw.prev_count, new); 236 + delta = (prev <= new) ? new - prev 237 + : (-1ULL - prev) + new + 1; /* overflow */ 238 + local64_add(delta, &event->count); 239 + } 240 + 241 + static void paicrypt_start(struct perf_event *event, int flags) 242 + { 243 + u64 sum; 244 + 245 + sum = paicrypt_getall(event); /* Get current value */ 246 + local64_set(&event->hw.prev_count, sum); 247 + local64_set(&event->count, 0); 248 + } 249 + 250 + static int paicrypt_add(struct perf_event *event, int flags) 251 + { 252 + struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map); 253 + unsigned long ccd; 254 + 255 + if (cpump->users++ == 0) { 256 + ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET; 257 + WRITE_ONCE(S390_lowcore.ccd, ccd); 258 + __ctl_set_bit(0, 50); 259 + } 260 + cpump->event = event; 261 + if (flags & PERF_EF_START && !event->attr.sample_period) { 262 + /* Only counting needs initial counter value */ 263 + paicrypt_start(event, PERF_EF_RELOAD); 264 + } 265 + event->hw.state = 0; 266 + if (event->attr.sample_period) 267 + 
perf_sched_cb_inc(event->pmu); 268 + return 0; 269 + } 270 + 271 + static void paicrypt_stop(struct perf_event *event, int flags) 272 + { 273 + paicrypt_read(event); 274 + event->hw.state = PERF_HES_STOPPED; 275 + } 276 + 277 + static void paicrypt_del(struct perf_event *event, int flags) 278 + { 279 + struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map); 280 + 281 + if (event->attr.sample_period) 282 + perf_sched_cb_dec(event->pmu); 283 + if (!event->attr.sample_period) 284 + /* Only counting needs to read counter */ 285 + paicrypt_stop(event, PERF_EF_UPDATE); 286 + if (cpump->users-- == 1) { 287 + __ctl_clear_bit(0, 50); 288 + WRITE_ONCE(S390_lowcore.ccd, 0); 289 + } 290 + } 291 + 292 + /* Create raw data and save it in buffer. Returns number of bytes copied. 293 + * Saves only positive counter entries of the form 294 + * 2 bytes: Number of counter 295 + * 8 bytes: Value of counter 296 + */ 297 + static size_t paicrypt_copy(struct pai_userdata *userdata, 298 + struct paicrypt_map *cpump, 299 + bool exclude_user, bool exclude_kernel) 300 + { 301 + int i, outidx = 0; 302 + 303 + for (i = 1; i <= paicrypt_cnt; i++) { 304 + u64 val = 0; 305 + 306 + if (!exclude_kernel) 307 + val += paicrypt_getctr(cpump, i, true); 308 + if (!exclude_user) 309 + val += paicrypt_getctr(cpump, i, false); 310 + if (val) { 311 + userdata[outidx].num = i; 312 + userdata[outidx].value = val; 313 + outidx++; 314 + } 315 + } 316 + return outidx * sizeof(struct pai_userdata); 317 + } 318 + 319 + static int paicrypt_push_sample(void) 320 + { 321 + struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map); 322 + struct perf_event *event = cpump->event; 323 + struct perf_sample_data data; 324 + struct perf_raw_record raw; 325 + struct pt_regs regs; 326 + size_t rawsize; 327 + int overflow; 328 + 329 + if (!cpump->event) /* No event active */ 330 + return 0; 331 + rawsize = paicrypt_copy(cpump->save, cpump, 332 + cpump->event->attr.exclude_user, 333 + cpump->event->attr.exclude_kernel); 334 + 
if (!rawsize) /* No incremented counters */ 335 + return 0; 336 + 337 + /* Setup perf sample */ 338 + memset(&regs, 0, sizeof(regs)); 339 + memset(&raw, 0, sizeof(raw)); 340 + memset(&data, 0, sizeof(data)); 341 + perf_sample_data_init(&data, 0, event->hw.last_period); 342 + if (event->attr.sample_type & PERF_SAMPLE_TID) { 343 + data.tid_entry.pid = task_tgid_nr(current); 344 + data.tid_entry.tid = task_pid_nr(current); 345 + } 346 + if (event->attr.sample_type & PERF_SAMPLE_TIME) 347 + data.time = event->clock(); 348 + if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) 349 + data.id = event->id; 350 + if (event->attr.sample_type & PERF_SAMPLE_CPU) { 351 + data.cpu_entry.cpu = smp_processor_id(); 352 + data.cpu_entry.reserved = 0; 353 + } 354 + if (event->attr.sample_type & PERF_SAMPLE_RAW) { 355 + raw.frag.size = rawsize; 356 + raw.frag.data = cpump->save; 357 + raw.size = raw.frag.size; 358 + data.raw = &raw; 359 + } 360 + 361 + overflow = perf_event_overflow(event, &data, &regs); 362 + perf_event_update_userpage(event); 363 + /* Clear lowcore page after read */ 364 + memset(cpump->page, 0, PAGE_SIZE); 365 + return overflow; 366 + } 367 + 368 + /* Called on schedule-in and schedule-out. No access to event structure, 369 + * but for sampling only event CRYPTO_ALL is allowed. 370 + */ 371 + static void paicrypt_sched_task(struct perf_event_context *ctx, bool sched_in) 372 + { 373 + /* We started with a clean page on event installation. So read out 374 + * results on schedule_out and if page was dirty, clear values. 375 + */ 376 + if (!sched_in) 377 + paicrypt_push_sample(); 378 + } 379 + 380 + /* Attribute definitions for paicrypt interface. As with other CPU 381 + * Measurement Facilities, there is one attribute per mapped counter. 382 + * The number of mapped counters may vary per machine generation. Use 383 + * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction 384 + * to determine the number of mapped counters. 
The instructions returns 385 + * a positive number, which is the highest number of supported counters. 386 + * All counters less than this number are also supported, there are no 387 + * holes. A returned number of zero means no support for mapped counters. 388 + * 389 + * The identification of the counter is a unique number. The chosen range 390 + * is 0x1000 + offset in mapped kernel page. 391 + * All CPU Measurement Facility counters identifiers must be unique and 392 + * the numbers from 0 to 496 are already used for the CPU Measurement 393 + * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already 394 + * used for the CPU Measurement Sampling facility. 395 + */ 396 + PMU_FORMAT_ATTR(event, "config:0-63"); 397 + 398 + static struct attribute *paicrypt_format_attr[] = { 399 + &format_attr_event.attr, 400 + NULL, 401 + }; 402 + 403 + static struct attribute_group paicrypt_events_group = { 404 + .name = "events", 405 + .attrs = NULL /* Filled in attr_event_init() */ 406 + }; 407 + 408 + static struct attribute_group paicrypt_format_group = { 409 + .name = "format", 410 + .attrs = paicrypt_format_attr, 411 + }; 412 + 413 + static const struct attribute_group *paicrypt_attr_groups[] = { 414 + &paicrypt_events_group, 415 + &paicrypt_format_group, 416 + NULL, 417 + }; 418 + 419 + /* Performance monitoring unit for mapped counters */ 420 + static struct pmu paicrypt = { 421 + .task_ctx_nr = perf_invalid_context, 422 + .event_init = paicrypt_event_init, 423 + .add = paicrypt_add, 424 + .del = paicrypt_del, 425 + .start = paicrypt_start, 426 + .stop = paicrypt_stop, 427 + .read = paicrypt_read, 428 + .sched_task = paicrypt_sched_task, 429 + .attr_groups = paicrypt_attr_groups 430 + }; 431 + 432 + /* List of symbolic PAI counter names. 
*/ 433 + static const char * const paicrypt_ctrnames[] = { 434 + [0] = "CRYPTO_ALL", 435 + [1] = "KM_DEA", 436 + [2] = "KM_TDEA_128", 437 + [3] = "KM_TDEA_192", 438 + [4] = "KM_ENCRYPTED_DEA", 439 + [5] = "KM_ENCRYPTED_TDEA_128", 440 + [6] = "KM_ENCRYPTED_TDEA_192", 441 + [7] = "KM_AES_128", 442 + [8] = "KM_AES_192", 443 + [9] = "KM_AES_256", 444 + [10] = "KM_ENCRYPTED_AES_128", 445 + [11] = "KM_ENCRYPTED_AES_192", 446 + [12] = "KM_ENCRYPTED_AES_256", 447 + [13] = "KM_XTS_AES_128", 448 + [14] = "KM_XTS_AES_256", 449 + [15] = "KM_XTS_ENCRYPTED_AES_128", 450 + [16] = "KM_XTS_ENCRYPTED_AES_256", 451 + [17] = "KMC_DEA", 452 + [18] = "KMC_TDEA_128", 453 + [19] = "KMC_TDEA_192", 454 + [20] = "KMC_ENCRYPTED_DEA", 455 + [21] = "KMC_ENCRYPTED_TDEA_128", 456 + [22] = "KMC_ENCRYPTED_TDEA_192", 457 + [23] = "KMC_AES_128", 458 + [24] = "KMC_AES_192", 459 + [25] = "KMC_AES_256", 460 + [26] = "KMC_ENCRYPTED_AES_128", 461 + [27] = "KMC_ENCRYPTED_AES_192", 462 + [28] = "KMC_ENCRYPTED_AES_256", 463 + [29] = "KMC_PRNG", 464 + [30] = "KMA_GCM_AES_128", 465 + [31] = "KMA_GCM_AES_192", 466 + [32] = "KMA_GCM_AES_256", 467 + [33] = "KMA_GCM_ENCRYPTED_AES_128", 468 + [34] = "KMA_GCM_ENCRYPTED_AES_192", 469 + [35] = "KMA_GCM_ENCRYPTED_AES_256", 470 + [36] = "KMF_DEA", 471 + [37] = "KMF_TDEA_128", 472 + [38] = "KMF_TDEA_192", 473 + [39] = "KMF_ENCRYPTED_DEA", 474 + [40] = "KMF_ENCRYPTED_TDEA_128", 475 + [41] = "KMF_ENCRYPTED_TDEA_192", 476 + [42] = "KMF_AES_128", 477 + [43] = "KMF_AES_192", 478 + [44] = "KMF_AES_256", 479 + [45] = "KMF_ENCRYPTED_AES_128", 480 + [46] = "KMF_ENCRYPTED_AES_192", 481 + [47] = "KMF_ENCRYPTED_AES_256", 482 + [48] = "KMCTR_DEA", 483 + [49] = "KMCTR_TDEA_128", 484 + [50] = "KMCTR_TDEA_192", 485 + [51] = "KMCTR_ENCRYPTED_DEA", 486 + [52] = "KMCTR_ENCRYPTED_TDEA_128", 487 + [53] = "KMCTR_ENCRYPTED_TDEA_192", 488 + [54] = "KMCTR_AES_128", 489 + [55] = "KMCTR_AES_192", 490 + [56] = "KMCTR_AES_256", 491 + [57] = "KMCTR_ENCRYPTED_AES_128", 492 + [58] = 
"KMCTR_ENCRYPTED_AES_192", 493 + [59] = "KMCTR_ENCRYPTED_AES_256", 494 + [60] = "KMO_DEA", 495 + [61] = "KMO_TDEA_128", 496 + [62] = "KMO_TDEA_192", 497 + [63] = "KMO_ENCRYPTED_DEA", 498 + [64] = "KMO_ENCRYPTED_TDEA_128", 499 + [65] = "KMO_ENCRYPTED_TDEA_192", 500 + [66] = "KMO_AES_128", 501 + [67] = "KMO_AES_192", 502 + [68] = "KMO_AES_256", 503 + [69] = "KMO_ENCRYPTED_AES_128", 504 + [70] = "KMO_ENCRYPTED_AES_192", 505 + [71] = "KMO_ENCRYPTED_AES_256", 506 + [72] = "KIMD_SHA_1", 507 + [73] = "KIMD_SHA_256", 508 + [74] = "KIMD_SHA_512", 509 + [75] = "KIMD_SHA3_224", 510 + [76] = "KIMD_SHA3_256", 511 + [77] = "KIMD_SHA3_384", 512 + [78] = "KIMD_SHA3_512", 513 + [79] = "KIMD_SHAKE_128", 514 + [80] = "KIMD_SHAKE_256", 515 + [81] = "KIMD_GHASH", 516 + [82] = "KLMD_SHA_1", 517 + [83] = "KLMD_SHA_256", 518 + [84] = "KLMD_SHA_512", 519 + [85] = "KLMD_SHA3_224", 520 + [86] = "KLMD_SHA3_256", 521 + [87] = "KLMD_SHA3_384", 522 + [88] = "KLMD_SHA3_512", 523 + [89] = "KLMD_SHAKE_128", 524 + [90] = "KLMD_SHAKE_256", 525 + [91] = "KMAC_DEA", 526 + [92] = "KMAC_TDEA_128", 527 + [93] = "KMAC_TDEA_192", 528 + [94] = "KMAC_ENCRYPTED_DEA", 529 + [95] = "KMAC_ENCRYPTED_TDEA_128", 530 + [96] = "KMAC_ENCRYPTED_TDEA_192", 531 + [97] = "KMAC_AES_128", 532 + [98] = "KMAC_AES_192", 533 + [99] = "KMAC_AES_256", 534 + [100] = "KMAC_ENCRYPTED_AES_128", 535 + [101] = "KMAC_ENCRYPTED_AES_192", 536 + [102] = "KMAC_ENCRYPTED_AES_256", 537 + [103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA", 538 + [104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128", 539 + [105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192", 540 + [106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA", 541 + [107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128", 542 + [108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192", 543 + [109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128", 544 + [110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192", 545 + [111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256", 546 + [112] = 
"PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128", 547 + [113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192", 548 + [114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A", 549 + [115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128", 550 + [116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256", 551 + [117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128", 552 + [118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256", 553 + [119] = "PCC_SCALAR_MULTIPLY_P256", 554 + [120] = "PCC_SCALAR_MULTIPLY_P384", 555 + [121] = "PCC_SCALAR_MULTIPLY_P521", 556 + [122] = "PCC_SCALAR_MULTIPLY_ED25519", 557 + [123] = "PCC_SCALAR_MULTIPLY_ED448", 558 + [124] = "PCC_SCALAR_MULTIPLY_X25519", 559 + [125] = "PCC_SCALAR_MULTIPLY_X448", 560 + [126] = "PRNO_SHA_512_DRNG", 561 + [127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO", 562 + [128] = "PRNO_TRNG", 563 + [129] = "KDSA_ECDSA_VERIFY_P256", 564 + [130] = "KDSA_ECDSA_VERIFY_P384", 565 + [131] = "KDSA_ECDSA_VERIFY_P521", 566 + [132] = "KDSA_ECDSA_SIGN_P256", 567 + [133] = "KDSA_ECDSA_SIGN_P384", 568 + [134] = "KDSA_ECDSA_SIGN_P521", 569 + [135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256", 570 + [136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384", 571 + [137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521", 572 + [138] = "KDSA_EDDSA_VERIFY_ED25519", 573 + [139] = "KDSA_EDDSA_VERIFY_ED448", 574 + [140] = "KDSA_EDDSA_SIGN_ED25519", 575 + [141] = "KDSA_EDDSA_SIGN_ED448", 576 + [142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519", 577 + [143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448", 578 + [144] = "PCKMO_ENCRYPT_DEA_KEY", 579 + [145] = "PCKMO_ENCRYPT_TDEA_128_KEY", 580 + [146] = "PCKMO_ENCRYPT_TDEA_192_KEY", 581 + [147] = "PCKMO_ENCRYPT_AES_128_KEY", 582 + [148] = "PCKMO_ENCRYPT_AES_192_KEY", 583 + [149] = "PCKMO_ENCRYPT_AES_256_KEY", 584 + [150] = "PCKMO_ENCRYPT_ECC_P256_KEY", 585 + [151] = "PCKMO_ENCRYPT_ECC_P384_KEY", 586 + [152] = "PCKMO_ENCRYPT_ECC_P521_KEY", 587 + [153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY", 588 + [154] = "PCKMO_ENCRYPT_ECC_ED448_KEY", 589 + [155] = 
"IBM_RESERVED_155", 590 + [156] = "IBM_RESERVED_156", 591 + }; 592 + 593 + static void __init attr_event_free(struct attribute **attrs, int num) 594 + { 595 + struct perf_pmu_events_attr *pa; 596 + int i; 597 + 598 + for (i = 0; i < num; i++) { 599 + struct device_attribute *dap; 600 + 601 + dap = container_of(attrs[i], struct device_attribute, attr); 602 + pa = container_of(dap, struct perf_pmu_events_attr, attr); 603 + kfree(pa); 604 + } 605 + kfree(attrs); 606 + } 607 + 608 + static int __init attr_event_init_one(struct attribute **attrs, int num) 609 + { 610 + struct perf_pmu_events_attr *pa; 611 + 612 + pa = kzalloc(sizeof(*pa), GFP_KERNEL); 613 + if (!pa) 614 + return -ENOMEM; 615 + 616 + sysfs_attr_init(&pa->attr.attr); 617 + pa->id = PAI_CRYPTO_BASE + num; 618 + pa->attr.attr.name = paicrypt_ctrnames[num]; 619 + pa->attr.attr.mode = 0444; 620 + pa->attr.show = cpumf_events_sysfs_show; 621 + pa->attr.store = NULL; 622 + attrs[num] = &pa->attr.attr; 623 + return 0; 624 + } 625 + 626 + /* Create PMU sysfs event attributes on the fly. 
*/ 627 + static int __init attr_event_init(void) 628 + { 629 + struct attribute **attrs; 630 + int ret, i; 631 + 632 + attrs = kmalloc_array(ARRAY_SIZE(paicrypt_ctrnames) + 1, sizeof(*attrs), 633 + GFP_KERNEL); 634 + if (!attrs) 635 + return -ENOMEM; 636 + for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) { 637 + ret = attr_event_init_one(attrs, i); 638 + if (ret) { 639 + attr_event_free(attrs, i - 1); 640 + return ret; 641 + } 642 + } 643 + attrs[i] = NULL; 644 + paicrypt_events_group.attrs = attrs; 645 + return 0; 646 + } 647 + 648 + static int __init paicrypt_init(void) 649 + { 650 + struct qpaci_info_block ib; 651 + int rc; 652 + 653 + if (!test_facility(196)) 654 + return 0; 655 + 656 + qpaci(&ib); 657 + paicrypt_cnt = ib.num_cc; 658 + if (paicrypt_cnt == 0) 659 + return 0; 660 + if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR) 661 + paicrypt_cnt = PAI_CRYPTO_MAXCTR - 1; 662 + 663 + rc = attr_event_init(); /* Export known PAI crypto events */ 664 + if (rc) { 665 + pr_err("Creation of PMU pai_crypto /sysfs failed\n"); 666 + return rc; 667 + } 668 + 669 + /* Setup s390dbf facility */ 670 + cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128); 671 + if (!cfm_dbg) { 672 + pr_err("Registration of s390dbf pai_crypto failed\n"); 673 + return -ENOMEM; 674 + } 675 + debug_register_view(cfm_dbg, &debug_sprintf_view); 676 + 677 + rc = perf_pmu_register(&paicrypt, "pai_crypto", -1); 678 + if (rc) { 679 + pr_err("Registering the pai_crypto PMU failed with rc=%i\n", 680 + rc); 681 + debug_unregister_view(cfm_dbg, &debug_sprintf_view); 682 + debug_unregister(cfm_dbg); 683 + return rc; 684 + } 685 + return 0; 686 + } 687 + 688 + device_initcall(paicrypt_init);
+2 -1
arch/s390/kernel/relocate_kernel.S
··· 14 14 * moves the new kernel to its destination... 15 15 * %r2 = pointer to first kimage_entry_t 16 16 * %r3 = start address - where to jump to after the job is done... 17 + * %r4 = subcode 17 18 * 18 19 * %r5 will be used as temp. storage 19 20 * %r6 holds the destination address ··· 57 56 jo 0b 58 57 j .base 59 58 .done: 60 - sgr %r0,%r0 # clear register r0 59 + lgr %r0,%r4 # subcode 61 60 cghi %r3,0 62 61 je .diag 63 62 la %r4,load_psw-.base(%r13) # load psw-address into the register
+1 -1
arch/s390/kernel/setup.c
··· 494 494 lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); 495 495 lc->preempt_count = PREEMPT_DISABLED; 496 496 497 - set_prefix((u32)(unsigned long) lc); 497 + set_prefix(__pa(lc)); 498 498 lowcore_ptr[0] = lc; 499 499 } 500 500
+5 -7
arch/s390/kernel/time.c
··· 364 364 * Apply clock delta to the global data structures. 365 365 * This is called once on the CPU that performed the clock sync. 366 366 */ 367 - static void clock_sync_global(unsigned long delta) 367 + static void clock_sync_global(long delta) 368 368 { 369 369 unsigned long now, adj; 370 370 struct ptff_qto qto; ··· 400 400 * Apply clock delta to the per-CPU data structures of this CPU. 401 401 * This is called for each online CPU after the call to clock_sync_global. 402 402 */ 403 - static void clock_sync_local(unsigned long delta) 403 + static void clock_sync_local(long delta) 404 404 { 405 405 /* Add the delta to the clock comparator. */ 406 406 if (S390_lowcore.clock_comparator != clock_comparator_max) { ··· 424 424 struct clock_sync_data { 425 425 atomic_t cpus; 426 426 int in_sync; 427 - unsigned long clock_delta; 427 + long clock_delta; 428 428 }; 429 429 430 430 /* ··· 544 544 static int stp_sync_clock(void *data) 545 545 { 546 546 struct clock_sync_data *sync = data; 547 - u64 clock_delta, flags; 547 + long clock_delta, flags; 548 548 static int first; 549 549 int rc; 550 550 ··· 554 554 while (atomic_read(&sync->cpus) != 0) 555 555 cpu_relax(); 556 556 rc = 0; 557 - if (stp_info.todoff[0] || stp_info.todoff[1] || 558 - stp_info.todoff[2] || stp_info.todoff[3] || 559 - stp_info.tmd != 2) { 557 + if (stp_info.todoff || stp_info.tmd != 2) { 560 558 flags = vdso_update_begin(); 561 559 rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, 562 560 &clock_delta);
+50 -5
arch/s390/kernel/vdso.c
··· 16 16 #include <linux/slab.h> 17 17 #include <linux/smp.h> 18 18 #include <linux/time_namespace.h> 19 + #include <linux/random.h> 19 20 #include <vdso/datapage.h> 20 21 #include <asm/vdso.h> 21 22 ··· 161 160 } 162 161 early_initcall(vdso_getcpu_init); /* Must be called before SMP init */ 163 162 164 - int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) 163 + static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len) 165 164 { 166 - unsigned long vdso_text_len, vdso_mapping_len; 167 - unsigned long vvar_start, vdso_text_start; 165 + unsigned long vvar_start, vdso_text_start, vdso_text_len; 168 166 struct vm_special_mapping *vdso_mapping; 169 167 struct mm_struct *mm = current->mm; 170 168 struct vm_area_struct *vma; ··· 180 180 vdso_text_len = vdso64_end - vdso64_start; 181 181 vdso_mapping = &vdso64_mapping; 182 182 } 183 - vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE; 184 - vvar_start = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); 183 + vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0); 185 184 rc = vvar_start; 186 185 if (IS_ERR_VALUE(vvar_start)) 187 186 goto out; ··· 207 208 out: 208 209 mmap_write_unlock(mm); 209 210 return rc; 211 + } 212 + 213 + static unsigned long vdso_addr(unsigned long start, unsigned long len) 214 + { 215 + unsigned long addr, end, offset; 216 + 217 + /* 218 + * Round up the start address. It can start out unaligned as a result 219 + * of stack start randomization. 220 + */ 221 + start = PAGE_ALIGN(start); 222 + 223 + /* Round the lowest possible end address up to a PMD boundary. 
*/ 224 + end = (start + len + PMD_SIZE - 1) & PMD_MASK; 225 + if (end >= VDSO_BASE) 226 + end = VDSO_BASE; 227 + end -= len; 228 + 229 + if (end > start) { 230 + offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1); 231 + addr = start + (offset << PAGE_SHIFT); 232 + } else { 233 + addr = start; 234 + } 235 + return addr; 236 + } 237 + 238 + unsigned long vdso_size(void) 239 + { 240 + unsigned long size = VVAR_NR_PAGES * PAGE_SIZE; 241 + 242 + if (is_compat_task()) 243 + size += vdso32_end - vdso32_start; 244 + else 245 + size += vdso64_end - vdso64_start; 246 + return PAGE_ALIGN(size); 247 + } 248 + 249 + int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) 250 + { 251 + unsigned long addr = VDSO_BASE; 252 + unsigned long size = vdso_size(); 253 + 254 + if (current->flags & PF_RANDOMIZE) 255 + addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size); 256 + return map_vdso(addr, size); 210 257 } 211 258 212 259 static struct page ** __init vdso_setup_pages(void *start, void *end)
-1
arch/s390/kvm/priv.c
··· 11 11 #include <linux/kvm.h> 12 12 #include <linux/gfp.h> 13 13 #include <linux/errno.h> 14 - #include <linux/compat.h> 15 14 #include <linux/mm_types.h> 16 15 #include <linux/pgtable.h> 17 16
+2 -2
arch/s390/lib/spinlock.c
··· 75 75 int owner; 76 76 77 77 asm_inline volatile( 78 - ALTERNATIVE("", ".insn rre,0xb2fa0000,4,0", 49) /* NIAI 4 */ 78 + ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", 49) /* NIAI 4 */ 79 79 " l %0,%1\n" 80 80 : "=d" (owner) : "Q" (*lock) : "memory"); 81 81 return owner; ··· 86 86 int expected = old; 87 87 88 88 asm_inline volatile( 89 - ALTERNATIVE("", ".insn rre,0xb2fa0000,8,0", 49) /* NIAI 8 */ 89 + ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", 49) /* NIAI 8 */ 90 90 " cs %0,%3,%1\n" 91 91 : "=d" (old), "=Q" (*lock) 92 92 : "0" (old), "d" (new), "Q" (*lock)
+2 -2
arch/s390/mm/mmap.c
··· 58 58 59 59 /* 60 60 * Top of mmap area (just below the process stack). 61 - * Leave at least a ~32 MB hole. 61 + * Leave at least a ~128 MB hole. 62 62 */ 63 - gap_min = 32 * 1024 * 1024UL; 63 + gap_min = SZ_128M; 64 64 gap_max = (STACK_TOP / 6) * 5; 65 65 66 66 if (gap < gap_min)
+1 -1
arch/s390/pci/pci.c
··· 799 799 struct zpci_dev *zdev; 800 800 int rc; 801 801 802 - zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state); 802 + zpci_dbg(1, "add fid:%x, fh:%x, c:%d\n", fid, fh, state); 803 803 zdev = kzalloc(sizeof(*zdev), GFP_KERNEL); 804 804 if (!zdev) 805 805 return ERR_PTR(-ENOMEM);
+1 -1
arch/s390/pci/pci_clp.c
··· 30 30 void update_uid_checking(bool new) 31 31 { 32 32 if (zpci_unique_uid != new) 33 - zpci_dbg(1, "uid checking:%d\n", new); 33 + zpci_dbg(3, "uid checking:%d\n", new); 34 34 35 35 zpci_unique_uid = new; 36 36 }
+1 -1
arch/s390/pci/pci_debug.c
··· 196 196 if (!pci_debug_err_id) 197 197 return -EINVAL; 198 198 debug_register_view(pci_debug_err_id, &debug_hex_ascii_view); 199 - debug_set_level(pci_debug_err_id, 6); 199 + debug_set_level(pci_debug_err_id, 3); 200 200 201 201 debugfs_root = debugfs_create_dir("pci", NULL); 202 202 return 0;
-3
arch/s390/pci/pci_event.c
··· 321 321 322 322 zpci_dbg(3, "avl fid:%x, fh:%x, pec:%x\n", 323 323 ccdf->fid, ccdf->fh, ccdf->pec); 324 - zpci_err("avail CCDF:\n"); 325 - zpci_err_hex(ccdf, sizeof(*ccdf)); 326 - 327 324 switch (ccdf->pec) { 328 325 case 0x0301: /* Reserved|Standby -> Configured */ 329 326 if (!zdev) {
+86 -22
arch/s390/pci/pci_insn.c
··· 18 18 19 19 #define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */ 20 20 21 - static inline void zpci_err_insn(u8 cc, u8 status, u64 req, u64 offset) 22 - { 23 - struct { 24 - u64 req; 25 - u64 offset; 26 - u8 cc; 27 - u8 status; 28 - } __packed data = {req, offset, cc, status}; 21 + struct zpci_err_insn_data { 22 + u8 insn; 23 + u8 cc; 24 + u8 status; 25 + union { 26 + struct { 27 + u64 req; 28 + u64 offset; 29 + }; 30 + struct { 31 + u64 addr; 32 + u64 len; 33 + }; 34 + }; 35 + } __packed; 29 36 30 - zpci_err_hex(&data, sizeof(data)); 37 + static inline void zpci_err_insn_req(int lvl, u8 insn, u8 cc, u8 status, 38 + u64 req, u64 offset) 39 + { 40 + struct zpci_err_insn_data data = { 41 + .insn = insn, .cc = cc, .status = status, 42 + .req = req, .offset = offset}; 43 + 44 + zpci_err_hex_level(lvl, &data, sizeof(data)); 45 + } 46 + 47 + static inline void zpci_err_insn_addr(int lvl, u8 insn, u8 cc, u8 status, 48 + u64 addr, u64 len) 49 + { 50 + struct zpci_err_insn_data data = { 51 + .insn = insn, .cc = cc, .status = status, 52 + .addr = addr, .len = len}; 53 + 54 + zpci_err_hex_level(lvl, &data, sizeof(data)); 31 55 } 32 56 33 57 /* Modify PCI Function Controls */ ··· 71 47 72 48 u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status) 73 49 { 50 + bool retried = false; 74 51 u8 cc; 75 52 76 53 do { 77 54 cc = __mpcifc(req, fib, status); 78 - if (cc == 2) 55 + if (cc == 2) { 79 56 msleep(ZPCI_INSN_BUSY_DELAY); 57 + if (!retried) { 58 + zpci_err_insn_req(1, 'M', cc, *status, req, 0); 59 + retried = true; 60 + } 61 + } 80 62 } while (cc == 2); 81 63 82 64 if (cc) 83 - zpci_err_insn(cc, *status, req, 0); 65 + zpci_err_insn_req(0, 'M', cc, *status, req, 0); 66 + else if (retried) 67 + zpci_err_insn_req(1, 'M', cc, *status, req, 0); 84 68 85 69 return cc; 86 70 } ··· 112 80 113 81 int zpci_refresh_trans(u64 fn, u64 addr, u64 range) 114 82 { 83 + bool retried = false; 115 84 u8 cc, status; 116 85 117 86 do { 118 87 cc = __rpcit(fn, addr, range, &status); 119 - if (cc 
== 2) 88 + if (cc == 2) { 120 89 udelay(ZPCI_INSN_BUSY_DELAY); 90 + if (!retried) { 91 + zpci_err_insn_addr(1, 'R', cc, status, addr, range); 92 + retried = true; 93 + } 94 + } 121 95 } while (cc == 2); 122 96 123 97 if (cc) 124 - zpci_err_insn(cc, status, addr, range); 98 + zpci_err_insn_addr(0, 'R', cc, status, addr, range); 99 + else if (retried) 100 + zpci_err_insn_addr(1, 'R', cc, status, addr, range); 125 101 126 102 if (cc == 1 && (status == 4 || status == 16)) 127 103 return -ENOMEM; ··· 184 144 185 145 int __zpci_load(u64 *data, u64 req, u64 offset) 186 146 { 147 + bool retried = false; 187 148 u8 status; 188 149 int cc; 189 150 190 151 do { 191 152 cc = __pcilg(data, req, offset, &status); 192 - if (cc == 2) 153 + if (cc == 2) { 193 154 udelay(ZPCI_INSN_BUSY_DELAY); 155 + if (!retried) { 156 + zpci_err_insn_req(1, 'l', cc, status, req, offset); 157 + retried = true; 158 + } 159 + } 194 160 } while (cc == 2); 195 161 196 162 if (cc) 197 - zpci_err_insn(cc, status, req, offset); 163 + zpci_err_insn_req(0, 'l', cc, status, req, offset); 164 + else if (retried) 165 + zpci_err_insn_req(1, 'l', cc, status, req, offset); 198 166 199 167 return (cc > 0) ? -EIO : cc; 200 168 } ··· 246 198 247 199 cc = __pcilg_mio(data, (__force u64) addr, len, &status); 248 200 if (cc) 249 - zpci_err_insn(cc, status, 0, (__force u64) addr); 201 + zpci_err_insn_addr(0, 'L', cc, status, (__force u64) addr, len); 250 202 251 203 return (cc > 0) ? 
-EIO : cc; 252 204 } ··· 273 225 274 226 int __zpci_store(u64 data, u64 req, u64 offset) 275 227 { 228 + bool retried = false; 276 229 u8 status; 277 230 int cc; 278 231 279 232 do { 280 233 cc = __pcistg(data, req, offset, &status); 281 - if (cc == 2) 234 + if (cc == 2) { 282 235 udelay(ZPCI_INSN_BUSY_DELAY); 236 + if (!retried) { 237 + zpci_err_insn_req(1, 's', cc, status, req, offset); 238 + retried = true; 239 + } 240 + } 283 241 } while (cc == 2); 284 242 285 243 if (cc) 286 - zpci_err_insn(cc, status, req, offset); 244 + zpci_err_insn_req(0, 's', cc, status, req, offset); 245 + else if (retried) 246 + zpci_err_insn_req(1, 's', cc, status, req, offset); 287 247 288 248 return (cc > 0) ? -EIO : cc; 289 249 } ··· 334 278 335 279 cc = __pcistg_mio(data, (__force u64) addr, len, &status); 336 280 if (cc) 337 - zpci_err_insn(cc, status, 0, (__force u64) addr); 281 + zpci_err_insn_addr(0, 'S', cc, status, (__force u64) addr, len); 338 282 339 283 return (cc > 0) ? -EIO : cc; 340 284 } ··· 360 304 361 305 int __zpci_store_block(const u64 *data, u64 req, u64 offset) 362 306 { 307 + bool retried = false; 363 308 u8 status; 364 309 int cc; 365 310 366 311 do { 367 312 cc = __pcistb(data, req, offset, &status); 368 - if (cc == 2) 313 + if (cc == 2) { 369 314 udelay(ZPCI_INSN_BUSY_DELAY); 315 + if (!retried) { 316 + zpci_err_insn_req(0, 'b', cc, status, req, offset); 317 + retried = true; 318 + } 319 + } 370 320 } while (cc == 2); 371 321 372 322 if (cc) 373 - zpci_err_insn(cc, status, req, offset); 323 + zpci_err_insn_req(0, 'b', cc, status, req, offset); 324 + else if (retried) 325 + zpci_err_insn_req(1, 'b', cc, status, req, offset); 374 326 375 327 return (cc > 0) ? -EIO : cc; 376 328 } ··· 422 358 423 359 cc = __pcistb_mio(src, (__force u64) dst, len, &status); 424 360 if (cc) 425 - zpci_err_insn(cc, status, 0, (__force u64) dst); 361 + zpci_err_insn_addr(0, 'B', cc, status, (__force u64) dst, len); 426 362 427 363 return (cc > 0) ? -EIO : cc; 428 364 }
+23 -7
arch/s390/purgatory/head.S
··· 44 44 .endm 45 45 46 46 .macro MEMSWAP dst,src,buf,len 47 - 10: cghi \len,bufsz 47 + 10: larl %r0,purgatory_end 48 + larl %r1,stack 49 + slgr %r0,%r1 50 + cgr \len,%r0 48 51 jh 11f 49 52 lgr %r4,\len 50 53 j 12f 51 - 11: lghi %r4,bufsz 54 + 11: lgr %r4,%r0 52 55 53 56 12: MEMCPY \buf,\dst,%r4 54 57 MEMCPY \dst,\src,%r4 ··· 138 135 139 136 .start_crash_kernel: 140 137 /* Location of purgatory_start in crash memory */ 138 + larl %r0,.base_crash 139 + larl %r1,purgatory_start 140 + slgr %r0,%r1 141 141 lgr %r8,%r13 142 - aghi %r8,-(.base_crash-purgatory_start) 142 + sgr %r8,%r0 143 143 144 144 /* Destination for this code i.e. end of memory to be swapped. */ 145 + larl %r0,purgatory_end 146 + larl %r1,purgatory_start 147 + slgr %r0,%r1 145 148 lg %r9,crash_size-.base_crash(%r13) 146 - aghi %r9,-(purgatory_end-purgatory_start) 149 + sgr %r9,%r0 147 150 148 151 /* Destination in crash memory, i.e. same as r9 but in crash memory. */ 149 152 lg %r10,crash_start-.base_crash(%r13) ··· 158 149 /* Buffer location (in crash memory) and size. As the purgatory is 159 150 * behind the point of no return it can re-use the stack as buffer. 160 151 */ 161 - lghi %r11,bufsz 152 + larl %r11,purgatory_end 162 153 larl %r12,stack 154 + slgr %r11,%r12 163 155 164 156 MEMCPY %r12,%r9,%r11 /* dst -> (crash) buf */ 165 157 MEMCPY %r9,%r8,%r11 /* self -> dst */ 166 158 167 159 /* Jump to new location. */ 168 160 lgr %r7,%r9 169 - aghi %r7,.jump_to_dst-purgatory_start 161 + larl %r0,.jump_to_dst 162 + larl %r1,purgatory_start 163 + slgr %r0,%r1 164 + agr %r7,%r0 170 165 br %r7 171 166 172 167 .jump_to_dst: ··· 182 169 183 170 /* Load new buffer location after jump */ 184 171 larl %r7,stack 185 - aghi %r10,stack-purgatory_start 172 + lgr %r0,%r7 173 + larl %r1,purgatory_start 174 + slgr %r0,%r1 175 + agr %r10,%r0 186 176 MEMCPY %r10,%r7,%r11 /* (new) buf -> (crash) buf */ 187 177 188 178 /* Now the code is set up to run from its designated location. Start
+2 -2
arch/x86/include/asm/entry-common.h
··· 10 10 #include <asm/fpu/api.h> 11 11 12 12 /* Check that the stack and regs on entry from user mode are sane. */ 13 - static __always_inline void arch_check_user_regs(struct pt_regs *regs) 13 + static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) 14 14 { 15 15 if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) { 16 16 /* ··· 42 42 WARN_ON_ONCE(regs != task_pt_regs(current)); 43 43 } 44 44 } 45 - #define arch_check_user_regs arch_check_user_regs 45 + #define arch_enter_from_user_mode arch_enter_from_user_mode 46 46 47 47 static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, 48 48 unsigned long ti_work)
+13 -12
drivers/s390/char/con3215.c
··· 771 771 } 772 772 773 773 /* 774 - * panic() calls con3215_flush through a panic_notifier 775 - * before the system enters a disabled, endless loop. 774 + * The below function is called as a panic/reboot notifier before the 775 + * system enters a disabled, endless loop. 776 + * 777 + * Notice we must use the spin_trylock() alternative, to prevent lockups 778 + * in atomic context (panic routine runs with secondary CPUs, local IRQs 779 + * and preemption disabled). 776 780 */ 777 - static void con3215_flush(void) 781 + static int con3215_notify(struct notifier_block *self, 782 + unsigned long event, void *data) 778 783 { 779 784 struct raw3215_info *raw; 780 785 unsigned long flags; 781 786 782 787 raw = raw3215[0]; /* console 3215 is the first one */ 783 - spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); 788 + if (!spin_trylock_irqsave(get_ccwdev_lock(raw->cdev), flags)) 789 + return NOTIFY_DONE; 784 790 raw3215_make_room(raw, RAW3215_BUFFER_SIZE); 785 791 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); 786 - } 787 792 788 - static int con3215_notify(struct notifier_block *self, 789 - unsigned long event, void *data) 790 - { 791 - con3215_flush(); 792 - return NOTIFY_OK; 793 + return NOTIFY_DONE; 793 794 } 794 795 795 796 static struct notifier_block on_panic_nb = { 796 797 .notifier_call = con3215_notify, 797 - .priority = 0, 798 + .priority = INT_MIN + 1, /* run the callback late */ 798 799 }; 799 800 800 801 static struct notifier_block on_reboot_nb = { 801 802 .notifier_call = con3215_notify, 802 - .priority = 0, 803 + .priority = INT_MIN + 1, /* run the callback late */ 803 804 }; 804 805 805 806 /*
+16 -15
drivers/s390/char/con3270.c
··· 535 535 } 536 536 537 537 /* 538 - * panic() calls con3270_flush through a panic_notifier 539 - * before the system enters a disabled, endless loop. 538 + * The below function is called as a panic/reboot notifier before the 539 + * system enters a disabled, endless loop. 540 + * 541 + * Notice we must use the spin_trylock() alternative, to prevent lockups 542 + * in atomic context (panic routine runs with secondary CPUs, local IRQs 543 + * and preemption disabled). 540 544 */ 541 - static void 542 - con3270_flush(void) 545 + static int con3270_notify(struct notifier_block *self, 546 + unsigned long event, void *data) 543 547 { 544 548 struct con3270 *cp; 545 549 unsigned long flags; 546 550 547 551 cp = condev; 548 552 if (!cp->view.dev) 549 - return; 550 - raw3270_activate_view(&cp->view); 551 - spin_lock_irqsave(&cp->view.lock, flags); 553 + return NOTIFY_DONE; 554 + if (!raw3270_view_lock_unavailable(&cp->view)) 555 + raw3270_activate_view(&cp->view); 556 + if (!spin_trylock_irqsave(&cp->view.lock, flags)) 557 + return NOTIFY_DONE; 552 558 con3270_wait_write(cp); 553 559 cp->nr_up = 0; 554 560 con3270_rebuild_update(cp); ··· 566 560 con3270_wait_write(cp); 567 561 } 568 562 spin_unlock_irqrestore(&cp->view.lock, flags); 569 - } 570 563 571 - static int con3270_notify(struct notifier_block *self, 572 - unsigned long event, void *data) 573 - { 574 - con3270_flush(); 575 - return NOTIFY_OK; 564 + return NOTIFY_DONE; 576 565 } 577 566 578 567 static struct notifier_block on_panic_nb = { 579 568 .notifier_call = con3270_notify, 580 - .priority = 0, 569 + .priority = INT_MIN + 1, /* run the callback late */ 581 570 }; 582 571 583 572 static struct notifier_block on_reboot_nb = { 584 573 .notifier_call = con3270_notify, 585 - .priority = 0, 574 + .priority = INT_MIN + 1, /* run the callback late */ 586 575 }; 587 576 588 577 /*
+15
drivers/s390/char/raw3270.c
··· 831 831 } 832 832 833 833 /* 834 + * This helper just validates that it is safe to activate a 835 + * view in the panic() context, due to locking restrictions. 836 + */ 837 + int raw3270_view_lock_unavailable(struct raw3270_view *view) 838 + { 839 + struct raw3270 *rp = view->dev; 840 + 841 + if (!rp) 842 + return -ENODEV; 843 + if (spin_is_locked(get_ccwdev_lock(rp->cdev))) 844 + return -EBUSY; 845 + return 0; 846 + } 847 + 848 + /* 834 849 * Activate a view. 835 850 */ 836 851 int
+1
drivers/s390/char/raw3270.h
··· 160 160 }; 161 161 162 162 int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int); 163 + int raw3270_view_lock_unavailable(struct raw3270_view *view); 163 164 int raw3270_activate_view(struct raw3270_view *); 164 165 void raw3270_del_view(struct raw3270_view *); 165 166 void raw3270_deactivate_view(struct raw3270_view *);
+16 -12
drivers/s390/char/sclp_con.c
··· 220 220 } 221 221 222 222 /* 223 - * Make sure that all buffers will be flushed to the SCLP. 223 + * This panic/reboot notifier makes sure that all buffers 224 + * will be flushed to the SCLP. 224 225 */ 225 - static void 226 - sclp_console_flush(void) 227 - { 228 - sclp_conbuf_emit(); 229 - sclp_console_sync_queue(); 230 - } 231 - 232 226 static int sclp_console_notify(struct notifier_block *self, 233 227 unsigned long event, void *data) 234 228 { 235 - sclp_console_flush(); 236 - return NOTIFY_OK; 229 + /* 230 + * Perform the lock check before effectively getting the 231 + * lock on sclp_conbuf_emit() / sclp_console_sync_queue() 232 + * to prevent potential lockups in atomic context. 233 + */ 234 + if (spin_is_locked(&sclp_con_lock)) 235 + return NOTIFY_DONE; 236 + 237 + sclp_conbuf_emit(); 238 + sclp_console_sync_queue(); 239 + 240 + return NOTIFY_DONE; 237 241 } 238 242 239 243 static struct notifier_block on_panic_nb = { 240 244 .notifier_call = sclp_console_notify, 241 - .priority = 1, 245 + .priority = INT_MIN + 1, /* run the callback late */ 242 246 }; 243 247 244 248 static struct notifier_block on_reboot_nb = { 245 249 .notifier_call = sclp_console_notify, 246 - .priority = 1, 250 + .priority = INT_MIN + 1, /* run the callback late */ 247 251 }; 248 252 249 253 /*
+3 -1
drivers/s390/char/sclp_early.c
··· 49 49 S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; 50 50 if (sccb->fac91 & 0x40) 51 51 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST; 52 - if (sccb->cpuoff > 134) 52 + if (sccb->cpuoff > 134) { 53 53 sclp.has_diag318 = !!(sccb->byte_134 & 0x80); 54 + sclp.has_iplcc = !!(sccb->byte_134 & 0x02); 55 + } 54 56 if (sccb->cpuoff > 137) 55 57 sclp.has_sipl = !!(sccb->cbl & 0x4000); 56 58 sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
+23 -19
drivers/s390/char/sclp_vt220.c
··· 769 769 770 770 #ifdef CONFIG_SCLP_VT220_CONSOLE 771 771 772 - static void __sclp_vt220_flush_buffer(void) 773 - { 774 - unsigned long flags; 775 - 776 - sclp_vt220_emit_current(); 777 - spin_lock_irqsave(&sclp_vt220_lock, flags); 778 - del_timer(&sclp_vt220_timer); 779 - while (sclp_vt220_queue_running) { 780 - spin_unlock_irqrestore(&sclp_vt220_lock, flags); 781 - sclp_sync_wait(); 782 - spin_lock_irqsave(&sclp_vt220_lock, flags); 783 - } 784 - spin_unlock_irqrestore(&sclp_vt220_lock, flags); 785 - } 786 - 787 772 static void 788 773 sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count) 789 774 { ··· 782 797 return sclp_vt220_driver; 783 798 } 784 799 800 + /* 801 + * This panic/reboot notifier runs in atomic context, so 802 + * locking restrictions apply to prevent potential lockups. 803 + */ 785 804 static int 786 805 sclp_vt220_notify(struct notifier_block *self, 787 806 unsigned long event, void *data) 788 807 { 789 - __sclp_vt220_flush_buffer(); 790 - return NOTIFY_OK; 808 + unsigned long flags; 809 + 810 + if (spin_is_locked(&sclp_vt220_lock)) 811 + return NOTIFY_DONE; 812 + 813 + sclp_vt220_emit_current(); 814 + 815 + spin_lock_irqsave(&sclp_vt220_lock, flags); 816 + del_timer(&sclp_vt220_timer); 817 + while (sclp_vt220_queue_running) { 818 + spin_unlock_irqrestore(&sclp_vt220_lock, flags); 819 + sclp_sync_wait(); 820 + spin_lock_irqsave(&sclp_vt220_lock, flags); 821 + } 822 + spin_unlock_irqrestore(&sclp_vt220_lock, flags); 823 + 824 + return NOTIFY_DONE; 791 825 } 792 826 793 827 static struct notifier_block on_panic_nb = { 794 828 .notifier_call = sclp_vt220_notify, 795 - .priority = 1, 829 + .priority = INT_MIN + 1, /* run the callback late */ 796 830 }; 797 831 798 832 static struct notifier_block on_reboot_nb = { 799 833 .notifier_call = sclp_vt220_notify, 800 - .priority = 1, 834 + .priority = INT_MIN + 1, /* run the callback late */ 801 835 }; 802 836 803 837 /* Structure needed to register with printk */
+2 -2
drivers/s390/cio/chsc.c
··· 1255 1255 EXPORT_SYMBOL_GPL(css_general_characteristics); 1256 1256 EXPORT_SYMBOL_GPL(css_chsc_characteristics); 1257 1257 1258 - int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta) 1258 + int chsc_sstpc(void *page, unsigned int op, u16 ctrl, long *clock_delta) 1259 1259 { 1260 1260 struct { 1261 1261 struct chsc_header request; ··· 1266 1266 unsigned int rsvd2[5]; 1267 1267 struct chsc_header response; 1268 1268 unsigned int rsvd3[3]; 1269 - u64 clock_delta; 1269 + s64 clock_delta; 1270 1270 unsigned int rsvd4[2]; 1271 1271 } *rr; 1272 1272 int rc;
+63 -33
drivers/s390/crypto/ap_bus.c
··· 179 179 * ap_apft_available(): Test if AP facilities test (APFT) 180 180 * facility is available. 181 181 * 182 - * Returns 1 if APFT is is available. 182 + * Returns 1 if APFT is available. 183 183 */ 184 184 static int ap_apft_available(void) 185 185 { ··· 693 693 } 694 694 EXPORT_SYMBOL(ap_send_online_uevent); 695 695 696 + static void ap_send_mask_changed_uevent(unsigned long *newapm, 697 + unsigned long *newaqm) 698 + { 699 + char buf[100]; 700 + char *envp[] = { buf, NULL }; 701 + 702 + if (newapm) 703 + snprintf(buf, sizeof(buf), 704 + "APMASK=0x%016lx%016lx%016lx%016lx\n", 705 + newapm[0], newapm[1], newapm[2], newapm[3]); 706 + else 707 + snprintf(buf, sizeof(buf), 708 + "AQMASK=0x%016lx%016lx%016lx%016lx\n", 709 + newaqm[0], newaqm[1], newaqm[2], newaqm[3]); 710 + 711 + kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp); 712 + } 713 + 696 714 /* 697 715 * calc # of bound APQNs 698 716 */ ··· 722 704 723 705 static int __ap_calc_helper(struct device *dev, void *arg) 724 706 { 725 - struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *) arg; 707 + struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *)arg; 726 708 727 709 if (is_queue_dev(dev)) { 728 710 pctrs->apqns++; ··· 738 720 struct __ap_calc_ctrs ctrs; 739 721 740 722 memset(&ctrs, 0, sizeof(ctrs)); 741 - bus_for_each_dev(&ap_bus_type, NULL, (void *) &ctrs, __ap_calc_helper); 723 + bus_for_each_dev(&ap_bus_type, NULL, (void *)&ctrs, __ap_calc_helper); 742 724 743 725 *apqns = ctrs.apqns; 744 726 *bound = ctrs.bound; ··· 799 781 static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data) 800 782 { 801 783 if (is_queue_dev(dev) && 802 - AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long) data) 784 + AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long)data) 803 785 device_unregister(dev); 804 786 return 0; 805 787 } ··· 812 794 card = AP_QID_CARD(to_ap_queue(dev)->qid); 813 795 queue = AP_QID_QUEUE(to_ap_queue(dev)->qid); 814 796 mutex_lock(&ap_perms_mutex); 815 - devres 
= test_bit_inv(card, ap_perms.apm) 816 - && test_bit_inv(queue, ap_perms.aqm); 797 + devres = test_bit_inv(card, ap_perms.apm) && 798 + test_bit_inv(queue, ap_perms.aqm); 817 799 mutex_unlock(&ap_perms_mutex); 818 800 drvres = to_ap_drv(dev->driver)->flags 819 801 & AP_DRIVER_FLAG_DEFAULT; ··· 844 826 845 827 mutex_lock(&ap_perms_mutex); 846 828 847 - if (test_bit_inv(card, ap_perms.apm) 848 - && test_bit_inv(queue, ap_perms.aqm)) 829 + if (test_bit_inv(card, ap_perms.apm) && 830 + test_bit_inv(queue, ap_perms.aqm)) 849 831 rc = 1; 850 832 851 833 mutex_unlock(&ap_perms_mutex); ··· 894 876 card = AP_QID_CARD(to_ap_queue(dev)->qid); 895 877 queue = AP_QID_QUEUE(to_ap_queue(dev)->qid); 896 878 mutex_lock(&ap_perms_mutex); 897 - devres = test_bit_inv(card, ap_perms.apm) 898 - && test_bit_inv(queue, ap_perms.aqm); 879 + devres = test_bit_inv(card, ap_perms.apm) && 880 + test_bit_inv(queue, ap_perms.aqm); 899 881 mutex_unlock(&ap_perms_mutex); 900 882 drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; 901 883 if (!!devres != !!drvres) ··· 916 898 if (is_queue_dev(dev)) 917 899 hash_del(&to_ap_queue(dev)->hnode); 918 900 spin_unlock_bh(&ap_queues_lock); 919 - } else 901 + } else { 920 902 ap_check_bindings_complete(); 903 + } 921 904 922 905 out: 923 906 if (rc) ··· 999 980 EXPORT_SYMBOL(ap_bus_force_rescan); 1000 981 1001 982 /* 1002 - * A config change has happened, force an ap bus rescan. 1003 - */ 983 + * A config change has happened, force an ap bus rescan. 
984 + */ 1004 985 void ap_bus_cfg_chg(void) 1005 986 { 1006 987 AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__); ··· 1124 1105 if (bits & 0x07) 1125 1106 return -EINVAL; 1126 1107 1127 - size = BITS_TO_LONGS(bits)*sizeof(unsigned long); 1108 + size = BITS_TO_LONGS(bits) * sizeof(unsigned long); 1128 1109 newmap = kmalloc(size, GFP_KERNEL); 1129 1110 if (!newmap) 1130 1111 return -ENOMEM; ··· 1260 1241 rc = ap_poll_thread_start(); 1261 1242 if (rc) 1262 1243 count = rc; 1263 - } else 1244 + } else { 1264 1245 ap_poll_thread_stop(); 1246 + } 1265 1247 return count; 1266 1248 } 1267 1249 ··· 1375 1355 static ssize_t apmask_store(struct bus_type *bus, const char *buf, 1376 1356 size_t count) 1377 1357 { 1378 - int rc; 1358 + int rc, changes = 0; 1379 1359 DECLARE_BITMAP(newapm, AP_DEVICES); 1380 1360 1381 1361 if (mutex_lock_interruptible(&ap_perms_mutex)) ··· 1385 1365 if (rc) 1386 1366 goto done; 1387 1367 1388 - rc = apmask_commit(newapm); 1368 + changes = memcmp(ap_perms.apm, newapm, APMASKSIZE); 1369 + if (changes) 1370 + rc = apmask_commit(newapm); 1389 1371 1390 1372 done: 1391 1373 mutex_unlock(&ap_perms_mutex); 1392 1374 if (rc) 1393 1375 return rc; 1394 1376 1395 - ap_bus_revise_bindings(); 1377 + if (changes) { 1378 + ap_bus_revise_bindings(); 1379 + ap_send_mask_changed_uevent(newapm, NULL); 1380 + } 1396 1381 1397 1382 return count; 1398 1383 } ··· 1468 1443 static ssize_t aqmask_store(struct bus_type *bus, const char *buf, 1469 1444 size_t count) 1470 1445 { 1471 - int rc; 1446 + int rc, changes = 0; 1472 1447 DECLARE_BITMAP(newaqm, AP_DOMAINS); 1473 1448 1474 1449 if (mutex_lock_interruptible(&ap_perms_mutex)) ··· 1478 1453 if (rc) 1479 1454 goto done; 1480 1455 1481 - rc = aqmask_commit(newaqm); 1456 + changes = memcmp(ap_perms.aqm, newaqm, APMASKSIZE); 1457 + if (changes) 1458 + rc = aqmask_commit(newaqm); 1482 1459 1483 1460 done: 1484 1461 mutex_unlock(&ap_perms_mutex); 1485 1462 if (rc) 1486 1463 return rc; 1487 1464 1488 - 
ap_bus_revise_bindings(); 1465 + if (changes) { 1466 + ap_bus_revise_bindings(); 1467 + ap_send_mask_changed_uevent(NULL, newaqm); 1468 + } 1489 1469 1490 1470 return count; 1491 1471 } ··· 1635 1605 apinfo.mode = (func >> 26) & 0x07; 1636 1606 apinfo.cat = AP_DEVICE_TYPE_CEX8; 1637 1607 status = ap_qact(qid, 0, &apinfo); 1638 - if (status.response_code == AP_RESPONSE_NORMAL 1639 - && apinfo.cat >= AP_DEVICE_TYPE_CEX2A 1640 - && apinfo.cat <= AP_DEVICE_TYPE_CEX8) 1608 + if (status.response_code == AP_RESPONSE_NORMAL && 1609 + apinfo.cat >= AP_DEVICE_TYPE_CEX2A && 1610 + apinfo.cat <= AP_DEVICE_TYPE_CEX8) 1641 1611 comp_type = apinfo.cat; 1642 1612 } 1643 1613 if (!comp_type) ··· 1657 1627 */ 1658 1628 static int __match_card_device_with_id(struct device *dev, const void *data) 1659 1629 { 1660 - return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *) data; 1630 + return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *)data; 1661 1631 } 1662 1632 1663 1633 /* ··· 1666 1636 */ 1667 1637 static int __match_queue_device_with_qid(struct device *dev, const void *data) 1668 1638 { 1669 - return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data; 1639 + return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long)data; 1670 1640 } 1671 1641 1672 1642 /* ··· 1675 1645 */ 1676 1646 static int __match_queue_device_with_queue_id(struct device *dev, const void *data) 1677 1647 { 1678 - return is_queue_dev(dev) 1679 - && AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long) data; 1648 + return is_queue_dev(dev) && 1649 + AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long)data; 1680 1650 } 1681 1651 1682 1652 /* Helper function for notify_config_changed */ ··· 1729 1699 static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac) 1730 1700 { 1731 1701 bus_for_each_dev(&ap_bus_type, NULL, 1732 - (void *)(long) ac->id, 1702 + (void *)(long)ac->id, 1733 1703 __ap_queue_devices_with_id_unregister); 1734 1704 
device_unregister(&ac->ap_dev.device); 1735 1705 } ··· 1757 1727 for (dom = 0; dom <= ap_max_domain_id; dom++) { 1758 1728 qid = AP_MKQID(ac->id, dom); 1759 1729 dev = bus_find_device(&ap_bus_type, NULL, 1760 - (void *)(long) qid, 1730 + (void *)(long)qid, 1761 1731 __match_queue_device_with_qid); 1762 1732 aq = dev ? to_ap_queue(dev) : NULL; 1763 1733 if (!ap_test_config_usage_domain(dom)) { ··· 1903 1873 1904 1874 /* Is there currently a card device for this adapter ? */ 1905 1875 dev = bus_find_device(&ap_bus_type, NULL, 1906 - (void *)(long) ap, 1876 + (void *)(long)ap, 1907 1877 __match_card_device_with_id); 1908 1878 ac = dev ? to_ap_card(dev) : NULL; 1909 1879 ··· 2104 2074 if (ap_domain_index >= 0) { 2105 2075 struct device *dev = 2106 2076 bus_find_device(&ap_bus_type, NULL, 2107 - (void *)(long) ap_domain_index, 2077 + (void *)(long)ap_domain_index, 2108 2078 __match_queue_device_with_queue_id); 2109 2079 if (dev) 2110 2080 put_device(dev); ··· 2139 2109 2140 2110 static void __init ap_perms_init(void) 2141 2111 { 2142 - /* all resources useable if no kernel parameter string given */ 2112 + /* all resources usable if no kernel parameter string given */ 2143 2113 memset(&ap_perms.ioctlm, 0xFF, sizeof(ap_perms.ioctlm)); 2144 2114 memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm)); 2145 2115 memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm));
+1
drivers/s390/crypto/ap_bus.h
··· 317 317 unsigned long aqm[BITS_TO_LONGS(AP_DOMAINS)]; 318 318 unsigned long adm[BITS_TO_LONGS(AP_DOMAINS)]; 319 319 }; 320 + 320 321 extern struct ap_perms ap_perms; 321 322 extern struct mutex ap_perms_mutex; 322 323
+4 -3
drivers/s390/crypto/ap_queue.c
··· 99 99 { 100 100 struct ap_queue_status status; 101 101 102 - if (msg == NULL) 102 + if (!msg) 103 103 return -EINVAL; 104 104 status = ap_dqap(qid, psmid, msg, length, NULL, NULL); 105 105 switch (status.response_code) { ··· 603 603 static DEVICE_ATTR_RO(interrupt); 604 604 605 605 static ssize_t config_show(struct device *dev, 606 - struct device_attribute *attr, char *buf) 606 + struct device_attribute *attr, char *buf) 607 607 { 608 608 struct ap_queue *aq = to_ap_queue(dev); 609 609 int rc; ··· 827 827 aq->requestq_count++; 828 828 aq->total_request_count++; 829 829 atomic64_inc(&aq->card->total_request_count); 830 - } else 830 + } else { 831 831 rc = -ENODEV; 832 + } 832 833 833 834 /* Send/receive as many request from the queue as possible. */ 834 835 ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
+78 -71
drivers/s390/crypto/pkey_api.c
··· 232 232 int i, rc; 233 233 u16 card, dom; 234 234 u32 nr_apqns, *apqns = NULL; 235 - struct ep11keyblob *kb = (struct ep11keyblob *) key; 235 + struct ep11keyblob *kb = (struct ep11keyblob *)key; 236 236 237 237 zcrypt_wait_api_operational(); 238 238 ··· 267 267 u16 *pcardnr, u16 *pdomain, 268 268 u16 *pkeysize, u32 *pattributes) 269 269 { 270 - struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; 270 + struct secaeskeytoken *t = (struct secaeskeytoken *)seckey; 271 271 u16 cardnr, domain; 272 272 int rc; 273 273 274 274 /* check the secure key for valid AES secure key */ 275 - rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *) seckey, 0); 275 + rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *)seckey, 0); 276 276 if (rc) 277 277 goto out; 278 278 if (pattributes) ··· 425 425 t = (struct clearaeskeytoken *)key; 426 426 if (keylen != sizeof(*t) + t->len) 427 427 goto out; 428 - if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16) 429 - || (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24) 430 - || (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32)) 428 + if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16) || 429 + (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24) || 430 + (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32)) 431 431 memcpy(ckey.clrkey, t->clearkey, t->len); 432 432 else 433 433 goto out; ··· 541 541 542 542 DEBUG_DBG("%s rc=%d\n", __func__, rc); 543 543 return rc; 544 - 545 544 } 546 545 EXPORT_SYMBOL(pkey_keyblob2pkey); 547 546 ··· 587 588 } else if (ktype == PKEY_TYPE_CCA_DATA) { 588 589 rc = cca_genseckey(card, dom, ksize, keybuf); 589 590 *keybufsize = (rc ? 0 : SECKEYBLOBSIZE); 590 - } else /* TOKVER_CCA_VLSC */ 591 + } else { 592 + /* TOKVER_CCA_VLSC */ 591 593 rc = cca_gencipherkey(card, dom, ksize, kflags, 592 594 keybuf, keybufsize); 595 + } 593 596 if (rc == 0) 594 597 break; 595 598 } ··· 646 645 rc = cca_clr2seckey(card, dom, ksize, 647 646 clrkey, keybuf); 648 647 *keybufsize = (rc ? 
0 : SECKEYBLOBSIZE); 649 - } else /* TOKVER_CCA_VLSC */ 648 + } else { 649 + /* TOKVER_CCA_VLSC */ 650 650 rc = cca_clr2cipherkey(card, dom, ksize, kflags, 651 651 clrkey, keybuf, keybufsize); 652 + } 652 653 if (rc == 0) 653 654 break; 654 655 } ··· 670 667 if (keylen < sizeof(struct keytoken_header)) 671 668 return -EINVAL; 672 669 673 - if (hdr->type == TOKTYPE_CCA_INTERNAL 674 - && hdr->version == TOKVER_CCA_AES) { 670 + if (hdr->type == TOKTYPE_CCA_INTERNAL && 671 + hdr->version == TOKVER_CCA_AES) { 675 672 struct secaeskeytoken *t = (struct secaeskeytoken *)key; 676 673 677 674 rc = cca_check_secaeskeytoken(debug_info, 3, key, 0); ··· 680 677 if (ktype) 681 678 *ktype = PKEY_TYPE_CCA_DATA; 682 679 if (ksize) 683 - *ksize = (enum pkey_key_size) t->bitsize; 680 + *ksize = (enum pkey_key_size)t->bitsize; 684 681 685 682 rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, 686 683 ZCRYPT_CEX3C, AES_MK_SET, t->mkvp, 0, 1); ··· 700 697 *cardnr = ((struct pkey_apqn *)_apqns)->card; 701 698 *domain = ((struct pkey_apqn *)_apqns)->domain; 702 699 703 - } else if (hdr->type == TOKTYPE_CCA_INTERNAL 704 - && hdr->version == TOKVER_CCA_VLSC) { 700 + } else if (hdr->type == TOKTYPE_CCA_INTERNAL && 701 + hdr->version == TOKVER_CCA_VLSC) { 705 702 struct cipherkeytoken *t = (struct cipherkeytoken *)key; 706 703 707 704 rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1); ··· 737 734 *cardnr = ((struct pkey_apqn *)_apqns)->card; 738 735 *domain = ((struct pkey_apqn *)_apqns)->domain; 739 736 740 - } else if (hdr->type == TOKTYPE_NON_CCA 741 - && hdr->version == TOKVER_EP11_AES) { 737 + } else if (hdr->type == TOKTYPE_NON_CCA && 738 + hdr->version == TOKVER_EP11_AES) { 742 739 struct ep11keyblob *kb = (struct ep11keyblob *)key; 743 740 744 741 rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1); ··· 760 757 *cardnr = ((struct pkey_apqn *)_apqns)->card; 761 758 *domain = ((struct pkey_apqn *)_apqns)->domain; 762 759 763 - } else 760 + } else { 764 761 rc = -EINVAL; 
762 + } 765 763 766 764 out: 767 765 kfree(_apqns); ··· 820 816 for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { 821 817 card = apqns[i].card; 822 818 dom = apqns[i].domain; 823 - if (hdr->type == TOKTYPE_CCA_INTERNAL 824 - && hdr->version == TOKVER_CCA_AES) 819 + if (hdr->type == TOKTYPE_CCA_INTERNAL && 820 + hdr->version == TOKVER_CCA_AES) { 825 821 rc = cca_sec2protkey(card, dom, key, pkey->protkey, 826 822 &pkey->len, &pkey->type); 827 - else if (hdr->type == TOKTYPE_CCA_INTERNAL 828 - && hdr->version == TOKVER_CCA_VLSC) 823 + } else if (hdr->type == TOKTYPE_CCA_INTERNAL && 824 + hdr->version == TOKVER_CCA_VLSC) { 829 825 rc = cca_cipher2protkey(card, dom, key, pkey->protkey, 830 826 &pkey->len, &pkey->type); 831 - else { /* EP11 AES secure key blob */ 832 - struct ep11keyblob *kb = (struct ep11keyblob *) key; 827 + } else { 828 + /* EP11 AES secure key blob */ 829 + struct ep11keyblob *kb = (struct ep11keyblob *)key; 833 830 834 831 pkey->len = sizeof(pkey->protkey); 835 832 rc = ep11_kblob2protkey(card, dom, key, kb->head.len, ··· 856 851 857 852 zcrypt_wait_api_operational(); 858 853 859 - if (hdr->type == TOKTYPE_NON_CCA 860 - && (hdr->version == TOKVER_EP11_AES_WITH_HEADER 861 - || hdr->version == TOKVER_EP11_ECC_WITH_HEADER) 862 - && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 854 + if (hdr->type == TOKTYPE_NON_CCA && 855 + (hdr->version == TOKVER_EP11_AES_WITH_HEADER || 856 + hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && 857 + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 863 858 int minhwtype = 0, api = 0; 864 859 struct ep11keyblob *kb = (struct ep11keyblob *) 865 860 (key + sizeof(struct ep11kblob_header)); ··· 874 869 minhwtype, api, kb->wkvp); 875 870 if (rc) 876 871 goto out; 877 - } else if (hdr->type == TOKTYPE_NON_CCA 878 - && hdr->version == TOKVER_EP11_AES 879 - && is_ep11_keyblob(key)) { 872 + } else if (hdr->type == TOKTYPE_NON_CCA && 873 + hdr->version == TOKVER_EP11_AES && 874 + is_ep11_keyblob(key)) { 880 875 int 
minhwtype = 0, api = 0; 881 - struct ep11keyblob *kb = (struct ep11keyblob *) key; 876 + struct ep11keyblob *kb = (struct ep11keyblob *)key; 882 877 883 878 if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) 884 879 return -EINVAL; ··· 936 931 cur_mkvp, old_mkvp, 1); 937 932 if (rc) 938 933 goto out; 939 - } else 934 + } else { 940 935 return -EINVAL; 936 + } 941 937 942 938 if (apqns) { 943 939 if (*nr_apqns < _nr_apqns) ··· 967 961 int minhwtype = ZCRYPT_CEX3C; 968 962 969 963 if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 970 - cur_mkvp = *((u64 *) cur_mkvp); 964 + cur_mkvp = *((u64 *)cur_mkvp); 971 965 if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) 972 - old_mkvp = *((u64 *) alt_mkvp); 966 + old_mkvp = *((u64 *)alt_mkvp); 973 967 if (ktype == PKEY_TYPE_CCA_CIPHER) 974 968 minhwtype = ZCRYPT_CEX6; 975 969 rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, ··· 981 975 u64 cur_mkvp = 0, old_mkvp = 0; 982 976 983 977 if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 984 - cur_mkvp = *((u64 *) cur_mkvp); 978 + cur_mkvp = *((u64 *)cur_mkvp); 985 979 if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) 986 - old_mkvp = *((u64 *) alt_mkvp); 980 + old_mkvp = *((u64 *)alt_mkvp); 987 981 rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 988 982 ZCRYPT_CEX7, APKA_MK_SET, 989 983 cur_mkvp, old_mkvp, 1); ··· 1002 996 if (rc) 1003 997 goto out; 1004 998 1005 - } else 999 + } else { 1006 1000 return -EINVAL; 1001 + } 1007 1002 1008 1003 if (apqns) { 1009 1004 if (*nr_apqns < _nr_apqns) ··· 1033 1026 if (keylen < sizeof(struct keytoken_header)) 1034 1027 return -EINVAL; 1035 1028 1036 - if (hdr->type == TOKTYPE_NON_CCA 1037 - && hdr->version == TOKVER_EP11_AES_WITH_HEADER 1038 - && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 1029 + if (hdr->type == TOKTYPE_NON_CCA && 1030 + hdr->version == TOKVER_EP11_AES_WITH_HEADER && 1031 + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 1039 1032 /* EP11 AES key blob with header */ 1040 1033 if (ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1)) 
1041 1034 return -EINVAL; 1042 - } else if (hdr->type == TOKTYPE_NON_CCA 1043 - && hdr->version == TOKVER_EP11_ECC_WITH_HEADER 1044 - && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 1035 + } else if (hdr->type == TOKTYPE_NON_CCA && 1036 + hdr->version == TOKVER_EP11_ECC_WITH_HEADER && 1037 + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 1045 1038 /* EP11 ECC key blob with header */ 1046 1039 if (ep11_check_ecc_key_with_hdr(debug_info, 3, key, keylen, 1)) 1047 1040 return -EINVAL; 1048 - } else if (hdr->type == TOKTYPE_NON_CCA 1049 - && hdr->version == TOKVER_EP11_AES 1050 - && is_ep11_keyblob(key)) { 1041 + } else if (hdr->type == TOKTYPE_NON_CCA && 1042 + hdr->version == TOKVER_EP11_AES && 1043 + is_ep11_keyblob(key)) { 1051 1044 /* EP11 AES key blob with header in session field */ 1052 1045 if (ep11_check_aes_key(debug_info, 3, key, keylen, 1)) 1053 1046 return -EINVAL; ··· 1095 1088 for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { 1096 1089 card = apqns[i].card; 1097 1090 dom = apqns[i].domain; 1098 - if (hdr->type == TOKTYPE_NON_CCA 1099 - && (hdr->version == TOKVER_EP11_AES_WITH_HEADER 1100 - || hdr->version == TOKVER_EP11_ECC_WITH_HEADER) 1101 - && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) 1091 + if (hdr->type == TOKTYPE_NON_CCA && 1092 + (hdr->version == TOKVER_EP11_AES_WITH_HEADER || 1093 + hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && 1094 + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) 1102 1095 rc = ep11_kblob2protkey(card, dom, key, hdr->len, 1103 1096 protkey, protkeylen, protkeytype); 1104 - else if (hdr->type == TOKTYPE_NON_CCA 1105 - && hdr->version == TOKVER_EP11_AES 1106 - && is_ep11_keyblob(key)) 1097 + else if (hdr->type == TOKTYPE_NON_CCA && 1098 + hdr->version == TOKVER_EP11_AES && 1099 + is_ep11_keyblob(key)) 1107 1100 rc = ep11_kblob2protkey(card, dom, key, hdr->len, 1108 1101 protkey, protkeylen, protkeytype); 1109 1102 else if (hdr->type == TOKTYPE_CCA_INTERNAL && ··· 1151 1144 1152 
1145 switch (cmd) { 1153 1146 case PKEY_GENSECK: { 1154 - struct pkey_genseck __user *ugs = (void __user *) arg; 1147 + struct pkey_genseck __user *ugs = (void __user *)arg; 1155 1148 struct pkey_genseck kgs; 1156 1149 1157 1150 if (copy_from_user(&kgs, ugs, sizeof(kgs))) ··· 1166 1159 break; 1167 1160 } 1168 1161 case PKEY_CLR2SECK: { 1169 - struct pkey_clr2seck __user *ucs = (void __user *) arg; 1162 + struct pkey_clr2seck __user *ucs = (void __user *)arg; 1170 1163 struct pkey_clr2seck kcs; 1171 1164 1172 1165 if (copy_from_user(&kcs, ucs, sizeof(kcs))) ··· 1182 1175 break; 1183 1176 } 1184 1177 case PKEY_SEC2PROTK: { 1185 - struct pkey_sec2protk __user *usp = (void __user *) arg; 1178 + struct pkey_sec2protk __user *usp = (void __user *)arg; 1186 1179 struct pkey_sec2protk ksp; 1187 1180 1188 1181 if (copy_from_user(&ksp, usp, sizeof(ksp))) ··· 1198 1191 break; 1199 1192 } 1200 1193 case PKEY_CLR2PROTK: { 1201 - struct pkey_clr2protk __user *ucp = (void __user *) arg; 1194 + struct pkey_clr2protk __user *ucp = (void __user *)arg; 1202 1195 struct pkey_clr2protk kcp; 1203 1196 1204 1197 if (copy_from_user(&kcp, ucp, sizeof(kcp))) ··· 1214 1207 break; 1215 1208 } 1216 1209 case PKEY_FINDCARD: { 1217 - struct pkey_findcard __user *ufc = (void __user *) arg; 1210 + struct pkey_findcard __user *ufc = (void __user *)arg; 1218 1211 struct pkey_findcard kfc; 1219 1212 1220 1213 if (copy_from_user(&kfc, ufc, sizeof(kfc))) ··· 1229 1222 break; 1230 1223 } 1231 1224 case PKEY_SKEY2PKEY: { 1232 - struct pkey_skey2pkey __user *usp = (void __user *) arg; 1225 + struct pkey_skey2pkey __user *usp = (void __user *)arg; 1233 1226 struct pkey_skey2pkey ksp; 1234 1227 1235 1228 if (copy_from_user(&ksp, usp, sizeof(ksp))) ··· 1243 1236 break; 1244 1237 } 1245 1238 case PKEY_VERIFYKEY: { 1246 - struct pkey_verifykey __user *uvk = (void __user *) arg; 1239 + struct pkey_verifykey __user *uvk = (void __user *)arg; 1247 1240 struct pkey_verifykey kvk; 1248 1241 1249 1242 if 
(copy_from_user(&kvk, uvk, sizeof(kvk))) ··· 1258 1251 break; 1259 1252 } 1260 1253 case PKEY_GENPROTK: { 1261 - struct pkey_genprotk __user *ugp = (void __user *) arg; 1254 + struct pkey_genprotk __user *ugp = (void __user *)arg; 1262 1255 struct pkey_genprotk kgp; 1263 1256 1264 1257 if (copy_from_user(&kgp, ugp, sizeof(kgp))) ··· 1272 1265 break; 1273 1266 } 1274 1267 case PKEY_VERIFYPROTK: { 1275 - struct pkey_verifyprotk __user *uvp = (void __user *) arg; 1268 + struct pkey_verifyprotk __user *uvp = (void __user *)arg; 1276 1269 struct pkey_verifyprotk kvp; 1277 1270 1278 1271 if (copy_from_user(&kvp, uvp, sizeof(kvp))) ··· 1282 1275 break; 1283 1276 } 1284 1277 case PKEY_KBLOB2PROTK: { 1285 - struct pkey_kblob2pkey __user *utp = (void __user *) arg; 1278 + struct pkey_kblob2pkey __user *utp = (void __user *)arg; 1286 1279 struct pkey_kblob2pkey ktp; 1287 1280 u8 *kkey; 1288 1281 ··· 1301 1294 break; 1302 1295 } 1303 1296 case PKEY_GENSECK2: { 1304 - struct pkey_genseck2 __user *ugs = (void __user *) arg; 1297 + struct pkey_genseck2 __user *ugs = (void __user *)arg; 1305 1298 struct pkey_genseck2 kgs; 1306 1299 struct pkey_apqn *apqns; 1307 1300 size_t klen = KEYBLOBBUFSIZE; ··· 1343 1336 break; 1344 1337 } 1345 1338 case PKEY_CLR2SECK2: { 1346 - struct pkey_clr2seck2 __user *ucs = (void __user *) arg; 1339 + struct pkey_clr2seck2 __user *ucs = (void __user *)arg; 1347 1340 struct pkey_clr2seck2 kcs; 1348 1341 struct pkey_apqn *apqns; 1349 1342 size_t klen = KEYBLOBBUFSIZE; ··· 1386 1379 break; 1387 1380 } 1388 1381 case PKEY_VERIFYKEY2: { 1389 - struct pkey_verifykey2 __user *uvk = (void __user *) arg; 1382 + struct pkey_verifykey2 __user *uvk = (void __user *)arg; 1390 1383 struct pkey_verifykey2 kvk; 1391 1384 u8 *kkey; 1392 1385 ··· 1407 1400 break; 1408 1401 } 1409 1402 case PKEY_KBLOB2PROTK2: { 1410 - struct pkey_kblob2pkey2 __user *utp = (void __user *) arg; 1403 + struct pkey_kblob2pkey2 __user *utp = (void __user *)arg; 1411 1404 struct 
pkey_kblob2pkey2 ktp; 1412 1405 struct pkey_apqn *apqns = NULL; 1413 1406 u8 *kkey; ··· 1434 1427 break; 1435 1428 } 1436 1429 case PKEY_APQNS4K: { 1437 - struct pkey_apqns4key __user *uak = (void __user *) arg; 1430 + struct pkey_apqns4key __user *uak = (void __user *)arg; 1438 1431 struct pkey_apqns4key kak; 1439 1432 struct pkey_apqn *apqns = NULL; 1440 1433 size_t nr_apqns, len; ··· 1483 1476 break; 1484 1477 } 1485 1478 case PKEY_APQNS4KT: { 1486 - struct pkey_apqns4keytype __user *uat = (void __user *) arg; 1479 + struct pkey_apqns4keytype __user *uat = (void __user *)arg; 1487 1480 struct pkey_apqns4keytype kat; 1488 1481 struct pkey_apqn *apqns = NULL; 1489 1482 size_t nr_apqns, len; ··· 1525 1518 break; 1526 1519 } 1527 1520 case PKEY_KBLOB2PROTK3: { 1528 - struct pkey_kblob2pkey3 __user *utp = (void __user *) arg; 1521 + struct pkey_kblob2pkey3 __user *utp = (void __user *)arg; 1529 1522 struct pkey_kblob2pkey3 ktp; 1530 1523 struct pkey_apqn *apqns = NULL; 1531 1524 u32 protkeylen = PROTKEYBLOBBUFSIZE; ··· 1715 1708 loff_t off, size_t count) 1716 1709 { 1717 1710 int rc; 1718 - struct pkey_seckey *seckey = (struct pkey_seckey *) buf; 1711 + struct pkey_seckey *seckey = (struct pkey_seckey *)buf; 1719 1712 1720 1713 if (off != 0 || count < sizeof(struct secaeskeytoken)) 1721 1714 return -EINVAL;
-2
drivers/s390/crypto/vfio_ap_drv.c
··· 46 46 { /* end of sibling */ }, 47 47 }; 48 48 49 - MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids); 50 - 51 49 static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q) 52 50 { 53 51 struct ap_matrix_mdev *matrix_mdev;
+108 -107
drivers/s390/crypto/zcrypt_api.c
··· 104 104 struct zcrypt_ops *zops; 105 105 106 106 list_for_each_entry(zops, &zcrypt_ops_list, list) 107 - if ((zops->variant == variant) && 107 + if (zops->variant == variant && 108 108 (!strncmp(zops->name, name, sizeof(zops->name)))) 109 109 return zops; 110 110 return NULL; ··· 438 438 strncpy(nodename, name, sizeof(nodename)); 439 439 else 440 440 snprintf(nodename, sizeof(nodename), 441 - ZCRYPT_NAME "_%d", (int) MINOR(devt)); 442 - nodename[sizeof(nodename)-1] = '\0'; 441 + ZCRYPT_NAME "_%d", (int)MINOR(devt)); 442 + nodename[sizeof(nodename) - 1] = '\0'; 443 443 if (dev_set_name(&zcdndev->device, nodename)) { 444 444 rc = -EINVAL; 445 445 goto unlockout; ··· 519 519 /* 520 520 * zcrypt_write(): Not allowed. 521 521 * 522 - * Write is is not allowed 522 + * Write is not allowed 523 523 */ 524 524 static ssize_t zcrypt_write(struct file *filp, const char __user *buf, 525 525 size_t count, loff_t *f_pos) ··· 549 549 perms = &zcdndev->perms; 550 550 } 551 551 #endif 552 - filp->private_data = (void *) perms; 552 + filp->private_data = (void *)perms; 553 553 554 554 atomic_inc(&zcrypt_open_count); 555 555 return stream_open(inode, filp); ··· 713 713 pref_zq = NULL; 714 714 spin_lock(&zcrypt_list_lock); 715 715 for_each_zcrypt_card(zc) { 716 - /* Check for useable accelarator or CCA card */ 716 + /* Check for usable accelarator or CCA card */ 717 717 if (!zc->online || !zc->card->config || zc->card->chkstop || 718 718 !(zc->card->functions & 0x18000000)) 719 719 continue; ··· 733 733 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 734 734 continue; 735 735 for_each_zcrypt_queue(zq, zc) { 736 - /* check if device is useable and eligible */ 736 + /* check if device is usable and eligible */ 737 737 if (!zq->online || !zq->ops->rsa_modexpo || 738 738 !zq->queue->config || zq->queue->chkstop) 739 739 continue; ··· 823 823 pref_zq = NULL; 824 824 spin_lock(&zcrypt_list_lock); 825 825 for_each_zcrypt_card(zc) { 826 - /* Check for useable accelarator or 
CCA card */ 826 + /* Check for usable accelarator or CCA card */ 827 827 if (!zc->online || !zc->card->config || zc->card->chkstop || 828 828 !(zc->card->functions & 0x18000000)) 829 829 continue; ··· 843 843 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 844 844 continue; 845 845 for_each_zcrypt_queue(zq, zc) { 846 - /* check if device is useable and eligible */ 846 + /* check if device is usable and eligible */ 847 847 if (!zq->online || !zq->ops->rsa_modexpo_crt || 848 848 !zq->queue->config || zq->queue->chkstop) 849 849 continue; ··· 893 893 894 894 static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, 895 895 struct zcrypt_track *tr, 896 - struct ica_xcRB *xcRB) 896 + struct ica_xcRB *xcrb) 897 897 { 898 898 struct zcrypt_card *zc, *pref_zc; 899 899 struct zcrypt_queue *zq, *pref_zq; ··· 904 904 int cpen, qpen, qid = 0, rc = -ENODEV; 905 905 struct module *mod; 906 906 907 - trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); 907 + trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB); 908 908 909 - xcRB->status = 0; 909 + xcrb->status = 0; 910 910 ap_init_message(&ap_msg); 911 911 912 912 #ifdef CONFIG_ZCRYPT_DEBUG ··· 915 915 if (tr && tr->fi.action == AP_FI_ACTION_CCA_AGENT_FF) { 916 916 ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid agent_ID 'FF'\n", 917 917 __func__, tr->fi.cmd); 918 - xcRB->agent_ID = 0x4646; 918 + xcrb->agent_ID = 0x4646; 919 919 } 920 920 #endif 921 921 922 - rc = prep_cca_ap_msg(userspace, xcRB, &ap_msg, &func_code, &domain); 922 + rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); 923 923 if (rc) 924 924 goto out; 925 925 ··· 948 948 pref_zq = NULL; 949 949 spin_lock(&zcrypt_list_lock); 950 950 for_each_zcrypt_card(zc) { 951 - /* Check for useable CCA card */ 951 + /* Check for usable CCA card */ 952 952 if (!zc->online || !zc->card->config || zc->card->chkstop || 953 953 !(zc->card->functions & 0x10000000)) 954 954 continue; 955 955 /* Check for user selected CCA card */ 956 - if 
(xcRB->user_defined != AUTOSELECT && 957 - xcRB->user_defined != zc->card->id) 956 + if (xcrb->user_defined != AUTOSELECT && 957 + xcrb->user_defined != zc->card->id) 958 958 continue; 959 959 /* check if request size exceeds card max msg size */ 960 960 if (ap_msg.len > zc->card->maxmsgsize) ··· 971 971 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 972 972 continue; 973 973 for_each_zcrypt_queue(zq, zc) { 974 - /* check for device useable and eligible */ 974 + /* check for device usable and eligible */ 975 975 if (!zq->online || !zq->ops->send_cprb || 976 976 !zq->queue->config || zq->queue->chkstop || 977 977 (tdom != AUTOSEL_DOM && ··· 998 998 999 999 if (!pref_zq) { 1000 1000 ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n", 1001 - __func__, xcRB->user_defined, *domain); 1001 + __func__, xcrb->user_defined, *domain); 1002 1002 rc = -ENODEV; 1003 1003 goto out; 1004 1004 } ··· 1016 1016 } 1017 1017 #endif 1018 1018 1019 - rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcRB, &ap_msg); 1019 + rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg); 1020 1020 1021 1021 spin_lock(&zcrypt_list_lock); 1022 1022 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); ··· 1028 1028 tr->last_rc = rc; 1029 1029 tr->last_qid = qid; 1030 1030 } 1031 - trace_s390_zcrypt_rep(xcRB, func_code, rc, 1031 + trace_s390_zcrypt_rep(xcrb, func_code, rc, 1032 1032 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 1033 1033 return rc; 1034 1034 } 1035 1035 1036 - long zcrypt_send_cprb(struct ica_xcRB *xcRB) 1036 + long zcrypt_send_cprb(struct ica_xcRB *xcrb) 1037 1037 { 1038 - return _zcrypt_send_cprb(false, &ap_perms, NULL, xcRB); 1038 + return _zcrypt_send_cprb(false, &ap_perms, NULL, xcrb); 1039 1039 } 1040 1040 EXPORT_SYMBOL(zcrypt_send_cprb); 1041 1041 ··· 1089 1089 ap_msg.fi.cmd = tr->fi.cmd; 1090 1090 #endif 1091 1091 1092 - target_num = (unsigned short) xcrb->targets_num; 1092 + target_num = (unsigned short)xcrb->targets_num; 1093 1093 1094 1094 /* empty list 
indicates autoselect (all available targets) */ 1095 1095 targets = NULL; ··· 1103 1103 goto out; 1104 1104 } 1105 1105 1106 - uptr = (struct ep11_target_dev __force __user *) xcrb->targets; 1106 + uptr = (struct ep11_target_dev __force __user *)xcrb->targets; 1107 1107 if (z_copy_from_user(userspace, targets, uptr, 1108 - target_num * sizeof(*targets))) { 1108 + target_num * sizeof(*targets))) { 1109 1109 func_code = 0; 1110 1110 rc = -EFAULT; 1111 1111 goto out_free; ··· 1132 1132 pref_zq = NULL; 1133 1133 spin_lock(&zcrypt_list_lock); 1134 1134 for_each_zcrypt_card(zc) { 1135 - /* Check for useable EP11 card */ 1135 + /* Check for usable EP11 card */ 1136 1136 if (!zc->online || !zc->card->config || zc->card->chkstop || 1137 1137 !(zc->card->functions & 0x04000000)) 1138 1138 continue; ··· 1155 1155 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 1156 1156 continue; 1157 1157 for_each_zcrypt_queue(zq, zc) { 1158 - /* check if device is useable and eligible */ 1158 + /* check if device is usable and eligible */ 1159 1159 if (!zq->online || !zq->ops->send_ep11_cprb || 1160 1160 !zq->queue->config || zq->queue->chkstop || 1161 1161 (targets && ··· 1184 1184 if (!pref_zq) { 1185 1185 if (targets && target_num == 1) { 1186 1186 ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n", 1187 - __func__, (int) targets->ap_id, 1188 - (int) targets->dom_id); 1187 + __func__, (int)targets->ap_id, 1188 + (int)targets->dom_id); 1189 1189 } else if (targets) { 1190 1190 ZCRYPT_DBF_DBG("%s no match for %d target addrs => ENODEV\n", 1191 - __func__, (int) target_num); 1191 + __func__, (int)target_num); 1192 1192 } else { 1193 1193 ZCRYPT_DBF_DBG("%s no match for address ff.ffff => ENODEV\n", 1194 1194 __func__); ··· 1245 1245 pref_zq = NULL; 1246 1246 spin_lock(&zcrypt_list_lock); 1247 1247 for_each_zcrypt_card(zc) { 1248 - /* Check for useable CCA card */ 1248 + /* Check for usable CCA card */ 1249 1249 if (!zc->online || !zc->card->config || 
zc->card->chkstop || 1250 1250 !(zc->card->functions & 0x10000000)) 1251 1251 continue; ··· 1254 1254 if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt)) 1255 1255 continue; 1256 1256 for_each_zcrypt_queue(zq, zc) { 1257 - /* check if device is useable and eligible */ 1257 + /* check if device is usable and eligible */ 1258 1258 if (!zq->online || !zq->ops->rng || 1259 1259 !zq->queue->config || zq->queue->chkstop) 1260 1260 continue; ··· 1270 1270 1271 1271 if (!pref_zq) { 1272 1272 ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n", 1273 - __func__); 1273 + __func__); 1274 1274 rc = -ENODEV; 1275 1275 goto out; 1276 1276 } ··· 1381 1381 for_each_zcrypt_card(zc) { 1382 1382 for_each_zcrypt_queue(zq, zc) { 1383 1383 card = AP_QID_CARD(zq->queue->qid); 1384 - if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index 1385 - || card >= max_adapters) 1384 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index || 1385 + card >= max_adapters) 1386 1386 continue; 1387 1387 status[card] = zc->online ? zc->user_space_type : 0x0d; 1388 1388 } ··· 1402 1402 for_each_zcrypt_card(zc) { 1403 1403 for_each_zcrypt_queue(zq, zc) { 1404 1404 card = AP_QID_CARD(zq->queue->qid); 1405 - if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index 1406 - || card >= max_adapters) 1405 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index || 1406 + card >= max_adapters) 1407 1407 continue; 1408 1408 spin_lock(&zq->queue->lock); 1409 1409 qdepth[card] = ··· 1429 1429 for_each_zcrypt_card(zc) { 1430 1430 for_each_zcrypt_queue(zq, zc) { 1431 1431 card = AP_QID_CARD(zq->queue->qid); 1432 - if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index 1433 - || card >= max_adapters) 1432 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index || 1433 + card >= max_adapters) 1434 1434 continue; 1435 1435 spin_lock(&zq->queue->lock); 1436 1436 cnt = zq->queue->total_request_count; 1437 1437 spin_unlock(&zq->queue->lock); 1438 - reqcnt[card] = (cnt < UINT_MAX) ? 
(u32) cnt : UINT_MAX; 1438 + reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX; 1439 1439 } 1440 1440 } 1441 1441 local_bh_enable(); ··· 1493 1493 int rc; 1494 1494 struct zcrypt_track tr; 1495 1495 struct ica_rsa_modexpo mex; 1496 - struct ica_rsa_modexpo __user *umex = (void __user *) arg; 1496 + struct ica_rsa_modexpo __user *umex = (void __user *)arg; 1497 1497 1498 1498 memset(&tr, 0, sizeof(tr)); 1499 1499 if (copy_from_user(&mex, umex, sizeof(mex))) ··· 1538 1538 int rc; 1539 1539 struct zcrypt_track tr; 1540 1540 struct ica_rsa_modexpo_crt crt; 1541 - struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg; 1541 + struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg; 1542 1542 1543 1543 memset(&tr, 0, sizeof(tr)); 1544 1544 if (copy_from_user(&crt, ucrt, sizeof(crt))) ··· 1581 1581 static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) 1582 1582 { 1583 1583 int rc; 1584 - struct ica_xcRB xcRB; 1584 + struct ica_xcRB xcrb; 1585 1585 struct zcrypt_track tr; 1586 - struct ica_xcRB __user *uxcRB = (void __user *) arg; 1586 + struct ica_xcRB __user *uxcrb = (void __user *)arg; 1587 1587 1588 1588 memset(&tr, 0, sizeof(tr)); 1589 - if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB))) 1589 + if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) 1590 1590 return -EFAULT; 1591 1591 1592 1592 #ifdef CONFIG_ZCRYPT_DEBUG 1593 - if ((xcRB.status & 0x8000FFFF) == 0x80004649 /* 'FI' */) { 1593 + if ((xcrb.status & 0x8000FFFF) == 0x80004649 /* 'FI' */) { 1594 1594 if (!capable(CAP_SYS_ADMIN)) 1595 1595 return -EPERM; 1596 - tr.fi.cmd = (u16)(xcRB.status >> 16); 1596 + tr.fi.cmd = (u16)(xcrb.status >> 16); 1597 1597 } 1598 - xcRB.status = 0; 1598 + xcrb.status = 0; 1599 1599 #endif 1600 1600 1601 1601 do { 1602 - rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB); 1602 + rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); 1603 1603 if (rc == -EAGAIN) 1604 1604 tr.again_counter++; 1605 1605 #ifdef CONFIG_ZCRYPT_DEBUG ··· 1610 1610 /* on failure: retry once 
again after a requested rescan */ 1611 1611 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1612 1612 do { 1613 - rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB); 1613 + rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); 1614 1614 if (rc == -EAGAIN) 1615 1615 tr.again_counter++; 1616 1616 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); ··· 1618 1618 rc = -EIO; 1619 1619 if (rc) 1620 1620 ZCRYPT_DBF_DBG("ioctl ZSENDCPRB rc=%d status=0x%x\n", 1621 - rc, xcRB.status); 1622 - if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) 1621 + rc, xcrb.status); 1622 + if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) 1623 1623 return -EFAULT; 1624 1624 return rc; 1625 1625 } ··· 1674 1674 { 1675 1675 int rc; 1676 1676 struct ap_perms *perms = 1677 - (struct ap_perms *) filp->private_data; 1677 + (struct ap_perms *)filp->private_data; 1678 1678 1679 1679 rc = zcrypt_check_ioctl(perms, cmd); 1680 1680 if (rc) ··· 1698 1698 if (!device_status) 1699 1699 return -ENOMEM; 1700 1700 zcrypt_device_status_mask_ext(device_status); 1701 - if (copy_to_user((char __user *) arg, device_status, 1701 + if (copy_to_user((char __user *)arg, device_status, 1702 1702 total_size)) 1703 1703 rc = -EFAULT; 1704 1704 kfree(device_status); ··· 1708 1708 char status[AP_DEVICES]; 1709 1709 1710 1710 zcrypt_status_mask(status, AP_DEVICES); 1711 - if (copy_to_user((char __user *) arg, status, sizeof(status))) 1711 + if (copy_to_user((char __user *)arg, status, sizeof(status))) 1712 1712 return -EFAULT; 1713 1713 return 0; 1714 1714 } ··· 1716 1716 char qdepth[AP_DEVICES]; 1717 1717 1718 1718 zcrypt_qdepth_mask(qdepth, AP_DEVICES); 1719 - if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth))) 1719 + if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth))) 1720 1720 return -EFAULT; 1721 1721 return 0; 1722 1722 } ··· 1727 1727 if (!reqcnt) 1728 1728 return -ENOMEM; 1729 1729 zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES); 1730 - if (copy_to_user((int __user *) arg, reqcnt, 1730 + if 
(copy_to_user((int __user *)arg, reqcnt, 1731 1731 sizeof(u32) * AP_DEVICES)) 1732 1732 rc = -EFAULT; 1733 1733 kfree(reqcnt); 1734 1734 return rc; 1735 1735 } 1736 1736 case Z90STAT_REQUESTQ_COUNT: 1737 - return put_user(zcrypt_requestq_count(), (int __user *) arg); 1737 + return put_user(zcrypt_requestq_count(), (int __user *)arg); 1738 1738 case Z90STAT_PENDINGQ_COUNT: 1739 - return put_user(zcrypt_pendingq_count(), (int __user *) arg); 1739 + return put_user(zcrypt_pendingq_count(), (int __user *)arg); 1740 1740 case Z90STAT_TOTALOPEN_COUNT: 1741 1741 return put_user(atomic_read(&zcrypt_open_count), 1742 - (int __user *) arg); 1742 + (int __user *)arg); 1743 1743 case Z90STAT_DOMAIN_INDEX: 1744 - return put_user(ap_domain_index, (int __user *) arg); 1744 + return put_user(ap_domain_index, (int __user *)arg); 1745 1745 /* 1746 1746 * Deprecated ioctls 1747 1747 */ ··· 1755 1755 if (!device_status) 1756 1756 return -ENOMEM; 1757 1757 zcrypt_device_status_mask(device_status); 1758 - if (copy_to_user((char __user *) arg, device_status, 1758 + if (copy_to_user((char __user *)arg, device_status, 1759 1759 total_size)) 1760 1760 rc = -EFAULT; 1761 1761 kfree(device_status); ··· 1766 1766 char status[MAX_ZDEV_CARDIDS]; 1767 1767 1768 1768 zcrypt_status_mask(status, MAX_ZDEV_CARDIDS); 1769 - if (copy_to_user((char __user *) arg, status, sizeof(status))) 1769 + if (copy_to_user((char __user *)arg, status, sizeof(status))) 1770 1770 return -EFAULT; 1771 1771 return 0; 1772 1772 } ··· 1775 1775 char qdepth[MAX_ZDEV_CARDIDS]; 1776 1776 1777 1777 zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS); 1778 - if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth))) 1778 + if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth))) 1779 1779 return -EFAULT; 1780 1780 return 0; 1781 1781 } ··· 1784 1784 u32 reqcnt[MAX_ZDEV_CARDIDS]; 1785 1785 1786 1786 zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS); 1787 - if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt))) 1787 + if 
(copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt))) 1788 1788 return -EFAULT; 1789 1789 return 0; 1790 1790 } ··· 1899 1899 &ucrt32->outputdatalength); 1900 1900 } 1901 1901 1902 - struct compat_ica_xcRB { 1902 + struct compat_ica_xcrb { 1903 1903 unsigned short agent_ID; 1904 1904 unsigned int user_defined; 1905 1905 unsigned short request_ID; ··· 1919 1919 unsigned int status; 1920 1920 } __packed; 1921 1921 1922 - static long trans_xcRB32(struct ap_perms *perms, struct file *filp, 1922 + static long trans_xcrb32(struct ap_perms *perms, struct file *filp, 1923 1923 unsigned int cmd, unsigned long arg) 1924 1924 { 1925 - struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg); 1926 - struct compat_ica_xcRB xcRB32; 1925 + struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg); 1926 + struct compat_ica_xcrb xcrb32; 1927 1927 struct zcrypt_track tr; 1928 - struct ica_xcRB xcRB64; 1928 + struct ica_xcRB xcrb64; 1929 1929 long rc; 1930 1930 1931 1931 memset(&tr, 0, sizeof(tr)); 1932 - if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32))) 1932 + if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32))) 1933 1933 return -EFAULT; 1934 - xcRB64.agent_ID = xcRB32.agent_ID; 1935 - xcRB64.user_defined = xcRB32.user_defined; 1936 - xcRB64.request_ID = xcRB32.request_ID; 1937 - xcRB64.request_control_blk_length = 1938 - xcRB32.request_control_blk_length; 1939 - xcRB64.request_control_blk_addr = 1940 - compat_ptr(xcRB32.request_control_blk_addr); 1941 - xcRB64.request_data_length = 1942 - xcRB32.request_data_length; 1943 - xcRB64.request_data_address = 1944 - compat_ptr(xcRB32.request_data_address); 1945 - xcRB64.reply_control_blk_length = 1946 - xcRB32.reply_control_blk_length; 1947 - xcRB64.reply_control_blk_addr = 1948 - compat_ptr(xcRB32.reply_control_blk_addr); 1949 - xcRB64.reply_data_length = xcRB32.reply_data_length; 1950 - xcRB64.reply_data_addr = 1951 - compat_ptr(xcRB32.reply_data_addr); 1952 - xcRB64.priority_window = xcRB32.priority_window; 1953 - xcRB64.status 
= xcRB32.status; 1934 + xcrb64.agent_ID = xcrb32.agent_ID; 1935 + xcrb64.user_defined = xcrb32.user_defined; 1936 + xcrb64.request_ID = xcrb32.request_ID; 1937 + xcrb64.request_control_blk_length = 1938 + xcrb32.request_control_blk_length; 1939 + xcrb64.request_control_blk_addr = 1940 + compat_ptr(xcrb32.request_control_blk_addr); 1941 + xcrb64.request_data_length = 1942 + xcrb32.request_data_length; 1943 + xcrb64.request_data_address = 1944 + compat_ptr(xcrb32.request_data_address); 1945 + xcrb64.reply_control_blk_length = 1946 + xcrb32.reply_control_blk_length; 1947 + xcrb64.reply_control_blk_addr = 1948 + compat_ptr(xcrb32.reply_control_blk_addr); 1949 + xcrb64.reply_data_length = xcrb32.reply_data_length; 1950 + xcrb64.reply_data_addr = 1951 + compat_ptr(xcrb32.reply_data_addr); 1952 + xcrb64.priority_window = xcrb32.priority_window; 1953 + xcrb64.status = xcrb32.status; 1954 1954 do { 1955 - rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64); 1955 + rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); 1956 1956 if (rc == -EAGAIN) 1957 1957 tr.again_counter++; 1958 1958 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1959 1959 /* on failure: retry once again after a requested rescan */ 1960 1960 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1961 1961 do { 1962 - rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64); 1962 + rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); 1963 1963 if (rc == -EAGAIN) 1964 1964 tr.again_counter++; 1965 1965 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1966 1966 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1967 1967 rc = -EIO; 1968 - xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; 1969 - xcRB32.reply_data_length = xcRB64.reply_data_length; 1970 - xcRB32.status = xcRB64.status; 1971 - if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32))) 1968 + xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length; 1969 + xcrb32.reply_data_length = xcrb64.reply_data_length; 
1970 + xcrb32.status = xcrb64.status; 1971 + if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32))) 1972 1972 return -EFAULT; 1973 1973 return rc; 1974 1974 } 1975 1975 1976 1976 static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, 1977 - unsigned long arg) 1977 + unsigned long arg) 1978 1978 { 1979 1979 int rc; 1980 1980 struct ap_perms *perms = 1981 - (struct ap_perms *) filp->private_data; 1981 + (struct ap_perms *)filp->private_data; 1982 1982 1983 1983 rc = zcrypt_check_ioctl(perms, cmd); 1984 1984 if (rc) ··· 1989 1989 if (cmd == ICARSACRT) 1990 1990 return trans_modexpo_crt32(perms, filp, cmd, arg); 1991 1991 if (cmd == ZSECSENDCPRB) 1992 - return trans_xcRB32(perms, filp, cmd, arg); 1992 + return trans_xcrb32(perms, filp, cmd, arg); 1993 1993 return zcrypt_unlocked_ioctl(filp, cmd, arg); 1994 1994 } 1995 1995 #endif ··· 2033 2033 * read method calls. 2034 2034 */ 2035 2035 if (zcrypt_rng_buffer_index == 0) { 2036 - rc = zcrypt_rng((char *) zcrypt_rng_buffer); 2036 + rc = zcrypt_rng((char *)zcrypt_rng_buffer); 2037 2037 /* on failure: retry once again after a requested rescan */ 2038 2038 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 2039 - rc = zcrypt_rng((char *) zcrypt_rng_buffer); 2039 + rc = zcrypt_rng((char *)zcrypt_rng_buffer); 2040 2040 if (rc < 0) 2041 2041 return -EIO; 2042 2042 zcrypt_rng_buffer_index = rc / sizeof(*data); ··· 2057 2057 2058 2058 mutex_lock(&zcrypt_rng_mutex); 2059 2059 if (zcrypt_rng_device_count == 0) { 2060 - zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL); 2060 + zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL); 2061 2061 if (!zcrypt_rng_buffer) { 2062 2062 rc = -ENOMEM; 2063 2063 goto out; ··· 2069 2069 if (rc) 2070 2070 goto out_free; 2071 2071 zcrypt_rng_device_count = 1; 2072 - } else 2072 + } else { 2073 2073 zcrypt_rng_device_count++; 2074 + } 2074 2075 mutex_unlock(&zcrypt_rng_mutex); 2075 2076 return 0; 2076 2077 2077 2078 out_free: 2078 - free_page((unsigned long) zcrypt_rng_buffer); 
2079 + free_page((unsigned long)zcrypt_rng_buffer); 2079 2080 out: 2080 2081 mutex_unlock(&zcrypt_rng_mutex); 2081 2082 return rc; ··· 2088 2087 zcrypt_rng_device_count--; 2089 2088 if (zcrypt_rng_device_count == 0) { 2090 2089 hwrng_unregister(&zcrypt_rng_dev); 2091 - free_page((unsigned long) zcrypt_rng_buffer); 2090 + free_page((unsigned long)zcrypt_rng_buffer); 2092 2091 } 2093 2092 mutex_unlock(&zcrypt_rng_mutex); 2094 2093 }
+2 -2
drivers/s390/crypto/zcrypt_api.h
··· 170 170 { 171 171 if (likely(userspace)) 172 172 return copy_from_user(to, from, n); 173 - memcpy(to, (void __force *) from, n); 173 + memcpy(to, (void __force *)from, n); 174 174 return 0; 175 175 } 176 176 ··· 181 181 { 182 182 if (likely(userspace)) 183 183 return copy_to_user(to, from, n); 184 - memcpy((void __force *) to, from, n); 184 + memcpy((void __force *)to, from, n); 185 185 return 0; 186 186 } 187 187
+1 -1
drivers/s390/crypto/zcrypt_card.c
··· 138 138 { 139 139 struct zcrypt_card *zc; 140 140 141 - zc = kzalloc(sizeof(struct zcrypt_card), GFP_KERNEL); 141 + zc = kzalloc(sizeof(*zc), GFP_KERNEL); 142 142 if (!zc) 143 143 return NULL; 144 144 INIT_LIST_HEAD(&zc->list);
+29 -29
drivers/s390/crypto/zcrypt_cca_key.h
··· 11 11 #ifndef _ZCRYPT_CCA_KEY_H_ 12 12 #define _ZCRYPT_CCA_KEY_H_ 13 13 14 - struct T6_keyBlock_hdr { 14 + struct t6_keyblock_hdr { 15 15 unsigned short blen; 16 16 unsigned short ulen; 17 17 unsigned short flags; ··· 63 63 * complement of the residue modulo 8 of the sum of 64 64 * (p_len + q_len + dp_len + dq_len + u_len). 65 65 */ 66 - struct cca_pvt_ext_CRT_sec { 66 + struct cca_pvt_ext_crt_sec { 67 67 unsigned char section_identifier; 68 68 unsigned char version; 69 69 unsigned short section_length; ··· 108 108 .section_identifier = 0x04, 109 109 }; 110 110 struct { 111 - struct T6_keyBlock_hdr t6_hdr; 112 - struct cca_token_hdr pubHdr; 113 - struct cca_public_sec pubSec; 111 + struct t6_keyblock_hdr t6_hdr; 112 + struct cca_token_hdr pubhdr; 113 + struct cca_public_sec pubsec; 114 114 char exponent[0]; 115 115 } __packed *key = p; 116 116 unsigned char *temp; ··· 127 127 128 128 memset(key, 0, sizeof(*key)); 129 129 130 - key->pubHdr = static_pub_hdr; 131 - key->pubSec = static_pub_sec; 130 + key->pubhdr = static_pub_hdr; 131 + key->pubsec = static_pub_sec; 132 132 133 133 /* key parameter block */ 134 134 temp = key->exponent; ··· 146 146 if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength)) 147 147 return -EFAULT; 148 148 149 - key->pubSec.modulus_bit_len = 8 * mex->inputdatalength; 150 - key->pubSec.modulus_byte_len = mex->inputdatalength; 151 - key->pubSec.exponent_len = mex->inputdatalength - i; 152 - key->pubSec.section_length = sizeof(key->pubSec) + 153 - 2*mex->inputdatalength - i; 154 - key->pubHdr.token_length = 155 - key->pubSec.section_length + sizeof(key->pubHdr); 156 - key->t6_hdr.ulen = key->pubHdr.token_length + 4; 157 - key->t6_hdr.blen = key->pubHdr.token_length + 6; 158 - return sizeof(*key) + 2*mex->inputdatalength - i; 149 + key->pubsec.modulus_bit_len = 8 * mex->inputdatalength; 150 + key->pubsec.modulus_byte_len = mex->inputdatalength; 151 + key->pubsec.exponent_len = mex->inputdatalength - i; 152 + 
key->pubsec.section_length = sizeof(key->pubsec) + 153 + 2 * mex->inputdatalength - i; 154 + key->pubhdr.token_length = 155 + key->pubsec.section_length + sizeof(key->pubhdr); 156 + key->t6_hdr.ulen = key->pubhdr.token_length + 4; 157 + key->t6_hdr.blen = key->pubhdr.token_length + 6; 158 + return sizeof(*key) + 2 * mex->inputdatalength - i; 159 159 } 160 160 161 161 /** ··· 177 177 }; 178 178 static char pk_exponent[3] = { 0x01, 0x00, 0x01 }; 179 179 struct { 180 - struct T6_keyBlock_hdr t6_hdr; 180 + struct t6_keyblock_hdr t6_hdr; 181 181 struct cca_token_hdr token; 182 - struct cca_pvt_ext_CRT_sec pvt; 182 + struct cca_pvt_ext_crt_sec pvt; 183 183 char key_parts[0]; 184 184 } __packed *key = p; 185 185 struct cca_public_sec *pub; ··· 198 198 199 199 short_len = (crt->inputdatalength + 1) / 2; 200 200 long_len = short_len + 8; 201 - pad_len = -(3*long_len + 2*short_len) & 7; 202 - key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength; 201 + pad_len = -(3 * long_len + 2 * short_len) & 7; 202 + key_len = 3 * long_len + 2 * short_len + pad_len + crt->inputdatalength; 203 203 size = sizeof(*key) + key_len + sizeof(*pub) + 3; 204 204 205 205 /* parameter block.key block */ ··· 223 223 /* key parts */ 224 224 if (copy_from_user(key->key_parts, crt->np_prime, long_len) || 225 225 copy_from_user(key->key_parts + long_len, 226 - crt->nq_prime, short_len) || 226 + crt->nq_prime, short_len) || 227 227 copy_from_user(key->key_parts + long_len + short_len, 228 - crt->bp_key, long_len) || 229 - copy_from_user(key->key_parts + 2*long_len + short_len, 230 - crt->bq_key, short_len) || 231 - copy_from_user(key->key_parts + 2*long_len + 2*short_len, 232 - crt->u_mult_inv, long_len)) 228 + crt->bp_key, long_len) || 229 + copy_from_user(key->key_parts + 2 * long_len + short_len, 230 + crt->bq_key, short_len) || 231 + copy_from_user(key->key_parts + 2 * long_len + 2 * short_len, 232 + crt->u_mult_inv, long_len)) 233 233 return -EFAULT; 234 - memset(key->key_parts + 
3*long_len + 2*short_len + pad_len, 234 + memset(key->key_parts + 3 * long_len + 2 * short_len + pad_len, 235 235 0xff, crt->inputdatalength); 236 236 pub = (struct cca_public_sec *)(key->key_parts + key_len); 237 237 *pub = static_cca_pub_sec; ··· 241 241 * section. So, an arbitrary public exponent of 0x010001 will be 242 242 * used. 243 243 */ 244 - memcpy((char *) (pub + 1), pk_exponent, 3); 244 + memcpy((char *)(pub + 1), pk_exponent, 3); 245 245 return size; 246 246 } 247 247
+141 -131
drivers/s390/crypto/zcrypt_ccamisc.c
··· 53 53 int cca_check_secaeskeytoken(debug_info_t *dbg, int dbflvl, 54 54 const u8 *token, int keybitsize) 55 55 { 56 - struct secaeskeytoken *t = (struct secaeskeytoken *) token; 56 + struct secaeskeytoken *t = (struct secaeskeytoken *)token; 57 57 58 58 #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) 59 59 60 60 if (t->type != TOKTYPE_CCA_INTERNAL) { 61 61 if (dbg) 62 62 DBF("%s token check failed, type 0x%02x != 0x%02x\n", 63 - __func__, (int) t->type, TOKTYPE_CCA_INTERNAL); 63 + __func__, (int)t->type, TOKTYPE_CCA_INTERNAL); 64 64 return -EINVAL; 65 65 } 66 66 if (t->version != TOKVER_CCA_AES) { 67 67 if (dbg) 68 68 DBF("%s token check failed, version 0x%02x != 0x%02x\n", 69 - __func__, (int) t->version, TOKVER_CCA_AES); 69 + __func__, (int)t->version, TOKVER_CCA_AES); 70 70 return -EINVAL; 71 71 } 72 72 if (keybitsize > 0 && t->bitsize != keybitsize) { 73 73 if (dbg) 74 74 DBF("%s token check failed, bitsize %d != %d\n", 75 - __func__, (int) t->bitsize, keybitsize); 75 + __func__, (int)t->bitsize, keybitsize); 76 76 return -EINVAL; 77 77 } 78 78 ··· 93 93 const u8 *token, int keybitsize, 94 94 int checkcpacfexport) 95 95 { 96 - struct cipherkeytoken *t = (struct cipherkeytoken *) token; 96 + struct cipherkeytoken *t = (struct cipherkeytoken *)token; 97 97 bool keybitsizeok = true; 98 98 99 99 #define DBF(...) 
debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) ··· 101 101 if (t->type != TOKTYPE_CCA_INTERNAL) { 102 102 if (dbg) 103 103 DBF("%s token check failed, type 0x%02x != 0x%02x\n", 104 - __func__, (int) t->type, TOKTYPE_CCA_INTERNAL); 104 + __func__, (int)t->type, TOKTYPE_CCA_INTERNAL); 105 105 return -EINVAL; 106 106 } 107 107 if (t->version != TOKVER_CCA_VLSC) { 108 108 if (dbg) 109 109 DBF("%s token check failed, version 0x%02x != 0x%02x\n", 110 - __func__, (int) t->version, TOKVER_CCA_VLSC); 110 + __func__, (int)t->version, TOKVER_CCA_VLSC); 111 111 return -EINVAL; 112 112 } 113 113 if (t->algtype != 0x02) { 114 114 if (dbg) 115 115 DBF("%s token check failed, algtype 0x%02x != 0x02\n", 116 - __func__, (int) t->algtype); 116 + __func__, (int)t->algtype); 117 117 return -EINVAL; 118 118 } 119 119 if (t->keytype != 0x0001) { 120 120 if (dbg) 121 121 DBF("%s token check failed, keytype 0x%04x != 0x0001\n", 122 - __func__, (int) t->keytype); 122 + __func__, (int)t->keytype); 123 123 return -EINVAL; 124 124 } 125 125 if (t->plfver != 0x00 && t->plfver != 0x01) { 126 126 if (dbg) 127 127 DBF("%s token check failed, unknown plfver 0x%02x\n", 128 - __func__, (int) t->plfver); 128 + __func__, (int)t->plfver); 129 129 return -EINVAL; 130 130 } 131 131 if (t->wpllen != 512 && t->wpllen != 576 && t->wpllen != 640) { 132 132 if (dbg) 133 133 DBF("%s token check failed, unknown wpllen %d\n", 134 - __func__, (int) t->wpllen); 134 + __func__, (int)t->wpllen); 135 135 return -EINVAL; 136 136 } 137 137 if (keybitsize > 0) { ··· 180 180 const u8 *token, size_t keysize, 181 181 int checkcpacfexport) 182 182 { 183 - struct eccprivkeytoken *t = (struct eccprivkeytoken *) token; 183 + struct eccprivkeytoken *t = (struct eccprivkeytoken *)token; 184 184 185 185 #define DBF(...) 
debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) 186 186 187 187 if (t->type != TOKTYPE_CCA_INTERNAL_PKA) { 188 188 if (dbg) 189 189 DBF("%s token check failed, type 0x%02x != 0x%02x\n", 190 - __func__, (int) t->type, TOKTYPE_CCA_INTERNAL_PKA); 190 + __func__, (int)t->type, TOKTYPE_CCA_INTERNAL_PKA); 191 191 return -EINVAL; 192 192 } 193 193 if (t->len > keysize) { 194 194 if (dbg) 195 195 DBF("%s token check failed, len %d > keysize %zu\n", 196 - __func__, (int) t->len, keysize); 196 + __func__, (int)t->len, keysize); 197 197 return -EINVAL; 198 198 } 199 199 if (t->secid != 0x20) { 200 200 if (dbg) 201 201 DBF("%s token check failed, secid 0x%02x != 0x20\n", 202 - __func__, (int) t->secid); 202 + __func__, (int)t->secid); 203 203 return -EINVAL; 204 204 } 205 205 if (checkcpacfexport && !(t->kutc & 0x01)) { ··· 222 222 * on failure. 223 223 */ 224 224 static int alloc_and_prep_cprbmem(size_t paramblen, 225 - u8 **pcprbmem, 226 - struct CPRBX **preqCPRB, 227 - struct CPRBX **prepCPRB) 225 + u8 **p_cprb_mem, 226 + struct CPRBX **p_req_cprb, 227 + struct CPRBX **p_rep_cprb) 228 228 { 229 229 u8 *cprbmem; 230 230 size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen; ··· 238 238 if (!cprbmem) 239 239 return -ENOMEM; 240 240 241 - preqcblk = (struct CPRBX *) cprbmem; 242 - prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen); 241 + preqcblk = (struct CPRBX *)cprbmem; 242 + prepcblk = (struct CPRBX *)(cprbmem + cprbplusparamblen); 243 243 244 244 /* fill request cprb struct */ 245 245 preqcblk->cprb_len = sizeof(struct CPRBX); ··· 248 248 preqcblk->rpl_msgbl = cprbplusparamblen; 249 249 if (paramblen) { 250 250 preqcblk->req_parmb = 251 - ((u8 __user *) preqcblk) + sizeof(struct CPRBX); 251 + ((u8 __user *)preqcblk) + sizeof(struct CPRBX); 252 252 preqcblk->rpl_parmb = 253 - ((u8 __user *) prepcblk) + sizeof(struct CPRBX); 253 + ((u8 __user *)prepcblk) + sizeof(struct CPRBX); 254 254 } 255 255 256 - *pcprbmem = cprbmem; 257 - *preqCPRB = preqcblk; 258 - 
*prepCPRB = prepcblk; 256 + *p_cprb_mem = cprbmem; 257 + *p_req_cprb = preqcblk; 258 + *p_rep_cprb = prepcblk; 259 259 260 260 return 0; 261 261 } ··· 286 286 pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr); 287 287 pxcrb->request_control_blk_length = 288 288 preqcblk->cprb_len + preqcblk->req_parml; 289 - pxcrb->request_control_blk_addr = (void __user *) preqcblk; 289 + pxcrb->request_control_blk_addr = (void __user *)preqcblk; 290 290 pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl; 291 - pxcrb->reply_control_blk_addr = (void __user *) prepcblk; 291 + pxcrb->reply_control_blk_addr = (void __user *)prepcblk; 292 292 } 293 293 294 294 /* ··· 345 345 preqcblk->domain = domain; 346 346 347 347 /* fill request cprb param block with KG request */ 348 - preqparm = (struct kgreqparm __force *) preqcblk->req_parmb; 348 + preqparm = (struct kgreqparm __force *)preqcblk->req_parmb; 349 349 memcpy(preqparm->subfunc_code, "KG", 2); 350 350 preqparm->rule_array_len = sizeof(preqparm->rule_array_len); 351 351 preqparm->lv1.len = sizeof(struct lv1); ··· 387 387 rc = zcrypt_send_cprb(&xcrb); 388 388 if (rc) { 389 389 DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n", 390 - __func__, (int) cardnr, (int) domain, rc); 390 + __func__, (int)cardnr, (int)domain, rc); 391 391 goto out; 392 392 } 393 393 ··· 395 395 if (prepcblk->ccp_rtcode != 0) { 396 396 DEBUG_ERR("%s secure key generate failure, card response %d/%d\n", 397 397 __func__, 398 - (int) prepcblk->ccp_rtcode, 399 - (int) prepcblk->ccp_rscode); 398 + (int)prepcblk->ccp_rtcode, 399 + (int)prepcblk->ccp_rscode); 400 400 rc = -EIO; 401 401 goto out; 402 402 } 403 403 404 404 /* process response cprb param block */ 405 - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); 406 - prepcblk->rpl_parmb = (u8 __user *) ptr; 407 - prepparm = (struct kgrepparm *) ptr; 405 + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); 406 + prepcblk->rpl_parmb = (u8 __user *)ptr; 407 + prepparm = (struct 
kgrepparm *)ptr; 408 408 409 409 /* check length of the returned secure key token */ 410 410 seckeysize = prepparm->lv3.keyblock.toklen ··· 419 419 420 420 /* check secure key token */ 421 421 rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR, 422 - prepparm->lv3.keyblock.tok, 8*keysize); 422 + prepparm->lv3.keyblock.tok, 8 * keysize); 423 423 if (rc) { 424 424 rc = -EIO; 425 425 goto out; ··· 486 486 preqcblk->domain = domain; 487 487 488 488 /* fill request cprb param block with CM request */ 489 - preqparm = (struct cmreqparm __force *) preqcblk->req_parmb; 489 + preqparm = (struct cmreqparm __force *)preqcblk->req_parmb; 490 490 memcpy(preqparm->subfunc_code, "CM", 2); 491 491 memcpy(preqparm->rule_array, "AES ", 8); 492 492 preqparm->rule_array_len = ··· 512 512 } 513 513 preqparm->lv1.len = sizeof(struct lv1) + keysize; 514 514 memcpy(preqparm->lv1.clrkey, clrkey, keysize); 515 - plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize); 515 + plv2 = (struct lv2 *)(((u8 *)&preqparm->lv2) + keysize); 516 516 plv2->len = sizeof(struct lv2); 517 517 plv2->keyid.len = sizeof(struct keyid); 518 518 plv2->keyid.attr = 0x30; ··· 525 525 rc = zcrypt_send_cprb(&xcrb); 526 526 if (rc) { 527 527 DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 528 - __func__, (int) cardnr, (int) domain, rc); 528 + __func__, (int)cardnr, (int)domain, rc); 529 529 goto out; 530 530 } 531 531 ··· 533 533 if (prepcblk->ccp_rtcode != 0) { 534 534 DEBUG_ERR("%s clear key import failure, card response %d/%d\n", 535 535 __func__, 536 - (int) prepcblk->ccp_rtcode, 537 - (int) prepcblk->ccp_rscode); 536 + (int)prepcblk->ccp_rtcode, 537 + (int)prepcblk->ccp_rscode); 538 538 rc = -EIO; 539 539 goto out; 540 540 } 541 541 542 542 /* process response cprb param block */ 543 - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); 544 - prepcblk->rpl_parmb = (u8 __user *) ptr; 545 - prepparm = (struct cmrepparm *) ptr; 543 + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); 544 + 
prepcblk->rpl_parmb = (u8 __user *)ptr; 545 + prepparm = (struct cmrepparm *)ptr; 546 546 547 547 /* check length of the returned secure key token */ 548 548 seckeysize = prepparm->lv3.keyblock.toklen ··· 557 557 558 558 /* check secure key token */ 559 559 rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR, 560 - prepparm->lv3.keyblock.tok, 8*keysize); 560 + prepparm->lv3.keyblock.tok, 8 * keysize); 561 561 if (rc) { 562 562 rc = -EIO; 563 563 goto out; ··· 632 632 preqcblk->domain = domain; 633 633 634 634 /* fill request cprb param block with USK request */ 635 - preqparm = (struct uskreqparm __force *) preqcblk->req_parmb; 635 + preqparm = (struct uskreqparm __force *)preqcblk->req_parmb; 636 636 memcpy(preqparm->subfunc_code, "US", 2); 637 637 preqparm->rule_array_len = sizeof(preqparm->rule_array_len); 638 638 preqparm->lv1.len = sizeof(struct lv1); ··· 652 652 rc = zcrypt_send_cprb(&xcrb); 653 653 if (rc) { 654 654 DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 655 - __func__, (int) cardnr, (int) domain, rc); 655 + __func__, (int)cardnr, (int)domain, rc); 656 656 goto out; 657 657 } 658 658 ··· 660 660 if (prepcblk->ccp_rtcode != 0) { 661 661 DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n", 662 662 __func__, 663 - (int) prepcblk->ccp_rtcode, 664 - (int) prepcblk->ccp_rscode); 663 + (int)prepcblk->ccp_rtcode, 664 + (int)prepcblk->ccp_rscode); 665 665 if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) 666 666 rc = -EAGAIN; 667 667 else ··· 671 671 if (prepcblk->ccp_rscode != 0) { 672 672 DEBUG_WARN("%s unwrap secure key warning, card response %d/%d\n", 673 673 __func__, 674 - (int) prepcblk->ccp_rtcode, 675 - (int) prepcblk->ccp_rscode); 674 + (int)prepcblk->ccp_rtcode, 675 + (int)prepcblk->ccp_rscode); 676 676 } 677 677 678 678 /* process response cprb param block */ 679 - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); 680 - prepcblk->rpl_parmb = (u8 __user *) ptr; 681 - prepparm = (struct uskrepparm *) 
ptr; 679 + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); 680 + prepcblk->rpl_parmb = (u8 __user *)ptr; 681 + prepparm = (struct uskrepparm *)ptr; 682 682 683 683 /* check the returned keyblock */ 684 684 if (prepparm->lv3.ckb.version != 0x01 && 685 685 prepparm->lv3.ckb.version != 0x02) { 686 686 DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n", 687 - __func__, (int) prepparm->lv3.ckb.version); 687 + __func__, (int)prepparm->lv3.ckb.version); 688 688 rc = -EIO; 689 689 goto out; 690 690 } 691 691 692 692 /* copy the tanslated protected key */ 693 693 switch (prepparm->lv3.ckb.len) { 694 - case 16+32: 694 + case 16 + 32: 695 695 /* AES 128 protected key */ 696 696 if (protkeytype) 697 697 *protkeytype = PKEY_KEYTYPE_AES_128; 698 698 break; 699 - case 24+32: 699 + case 24 + 32: 700 700 /* AES 192 protected key */ 701 701 if (protkeytype) 702 702 *protkeytype = PKEY_KEYTYPE_AES_192; 703 703 break; 704 - case 32+32: 704 + case 32 + 32: 705 705 /* AES 256 protected key */ 706 706 if (protkeytype) 707 707 *protkeytype = PKEY_KEYTYPE_AES_256; ··· 751 751 struct gkreqparm { 752 752 u8 subfunc_code[2]; 753 753 u16 rule_array_len; 754 - char rule_array[2*8]; 754 + char rule_array[2 * 8]; 755 755 struct { 756 756 u16 len; 757 757 u8 key_type_1[8]; ··· 827 827 preqcblk->req_parml = sizeof(struct gkreqparm); 828 828 829 829 /* prepare request param block with GK request */ 830 - preqparm = (struct gkreqparm __force *) preqcblk->req_parmb; 830 + preqparm = (struct gkreqparm __force *)preqcblk->req_parmb; 831 831 memcpy(preqparm->subfunc_code, "GK", 2); 832 832 preqparm->rule_array_len = sizeof(uint16_t) + 2 * 8; 833 - memcpy(preqparm->rule_array, "AES OP ", 2*8); 833 + memcpy(preqparm->rule_array, "AES OP ", 2 * 8); 834 834 835 835 /* prepare vud block */ 836 836 preqparm->vud.len = sizeof(preqparm->vud); ··· 869 869 870 870 /* patch the skeleton key token export flags inside the kb block */ 871 871 if (keygenflags) { 872 - t = (struct cipherkeytoken *) 
preqparm->kb.tlv3.gen_key_id_1; 873 - t->kmf1 |= (u16) (keygenflags & 0x0000FF00); 874 - t->kmf1 &= (u16) ~(keygenflags & 0x000000FF); 872 + t = (struct cipherkeytoken *)preqparm->kb.tlv3.gen_key_id_1; 873 + t->kmf1 |= (u16)(keygenflags & 0x0000FF00); 874 + t->kmf1 &= (u16)~(keygenflags & 0x000000FF); 875 875 } 876 876 877 877 /* prepare xcrb struct */ ··· 882 882 if (rc) { 883 883 DEBUG_ERR( 884 884 "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 885 - __func__, (int) cardnr, (int) domain, rc); 885 + __func__, (int)cardnr, (int)domain, rc); 886 886 goto out; 887 887 } 888 888 ··· 891 891 DEBUG_ERR( 892 892 "%s cipher key generate failure, card response %d/%d\n", 893 893 __func__, 894 - (int) prepcblk->ccp_rtcode, 895 - (int) prepcblk->ccp_rscode); 894 + (int)prepcblk->ccp_rtcode, 895 + (int)prepcblk->ccp_rscode); 896 896 rc = -EIO; 897 897 goto out; 898 898 } 899 899 900 900 /* process response cprb param block */ 901 - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); 902 - prepcblk->rpl_parmb = (u8 __user *) ptr; 903 - prepparm = (struct gkrepparm *) ptr; 901 + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); 902 + prepcblk->rpl_parmb = (u8 __user *)ptr; 903 + prepparm = (struct gkrepparm *)ptr; 904 904 905 905 /* do some plausibility checks on the key block */ 906 906 if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) || ··· 921 921 } 922 922 923 923 /* copy the generated vlsc key token */ 924 - t = (struct cipherkeytoken *) prepparm->kb.tlv1.gen_key; 924 + t = (struct cipherkeytoken *)prepparm->kb.tlv1.gen_key; 925 925 if (keybuf) { 926 926 if (*keybufsize >= t->len) 927 927 memcpy(keybuf, t, t->len); ··· 1006 1006 preqcblk->req_parml = 0; 1007 1007 1008 1008 /* prepare request param block with IP request */ 1009 - preq_ra_block = (struct rule_array_block __force *) preqcblk->req_parmb; 1009 + preq_ra_block = (struct rule_array_block __force *)preqcblk->req_parmb; 1010 1010 memcpy(preq_ra_block->subfunc_code, "IP", 2); 1011 1011 
preq_ra_block->rule_array_len = sizeof(uint16_t) + 2 * 8; 1012 1012 memcpy(preq_ra_block->rule_array, rule_array_1, 8); ··· 1050 1050 if (rc) { 1051 1051 DEBUG_ERR( 1052 1052 "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 1053 - __func__, (int) cardnr, (int) domain, rc); 1053 + __func__, (int)cardnr, (int)domain, rc); 1054 1054 goto out; 1055 1055 } 1056 1056 ··· 1059 1059 DEBUG_ERR( 1060 1060 "%s CSNBKPI2 failure, card response %d/%d\n", 1061 1061 __func__, 1062 - (int) prepcblk->ccp_rtcode, 1063 - (int) prepcblk->ccp_rscode); 1062 + (int)prepcblk->ccp_rtcode, 1063 + (int)prepcblk->ccp_rscode); 1064 1064 rc = -EIO; 1065 1065 goto out; 1066 1066 } 1067 1067 1068 1068 /* process response cprb param block */ 1069 - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); 1070 - prepcblk->rpl_parmb = (u8 __user *) ptr; 1071 - prepparm = (struct iprepparm *) ptr; 1069 + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); 1070 + prepcblk->rpl_parmb = (u8 __user *)ptr; 1071 + prepparm = (struct iprepparm *)ptr; 1072 1072 1073 1073 /* do some plausibility checks on the key block */ 1074 1074 if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) || ··· 1082 1082 /* do not check the key here, it may be incomplete */ 1083 1083 1084 1084 /* copy the vlsc key token back */ 1085 - t = (struct cipherkeytoken *) prepparm->kb.tlv1.key_token; 1085 + t = (struct cipherkeytoken *)prepparm->kb.tlv1.key_token; 1086 1086 memcpy(key_token, t, t->len); 1087 1087 *key_token_size = t->len; 1088 1088 ··· 1117 1117 1118 1118 /* patch the skeleton key token export flags */ 1119 1119 if (keygenflags) { 1120 - t = (struct cipherkeytoken *) token; 1121 - t->kmf1 |= (u16) (keygenflags & 0x0000FF00); 1122 - t->kmf1 &= (u16) ~(keygenflags & 0x000000FF); 1120 + t = (struct cipherkeytoken *)token; 1121 + t->kmf1 |= (u16)(keygenflags & 0x0000FF00); 1122 + t->kmf1 &= (u16)~(keygenflags & 0x000000FF); 1123 1123 } 1124 1124 1125 1125 /* ··· 1241 1241 preqcblk->domain = domain; 1242 1242 1243 1243 /* fill 
request cprb param block with AU request */ 1244 - preqparm = (struct aureqparm __force *) preqcblk->req_parmb; 1244 + preqparm = (struct aureqparm __force *)preqcblk->req_parmb; 1245 1245 memcpy(preqparm->subfunc_code, "AU", 2); 1246 1246 preqparm->rule_array_len = 1247 1247 sizeof(preqparm->rule_array_len) ··· 1267 1267 if (rc) { 1268 1268 DEBUG_ERR( 1269 1269 "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 1270 - __func__, (int) cardnr, (int) domain, rc); 1270 + __func__, (int)cardnr, (int)domain, rc); 1271 1271 goto out; 1272 1272 } 1273 1273 ··· 1276 1276 DEBUG_ERR( 1277 1277 "%s unwrap secure key failure, card response %d/%d\n", 1278 1278 __func__, 1279 - (int) prepcblk->ccp_rtcode, 1280 - (int) prepcblk->ccp_rscode); 1279 + (int)prepcblk->ccp_rtcode, 1280 + (int)prepcblk->ccp_rscode); 1281 1281 if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) 1282 1282 rc = -EAGAIN; 1283 1283 else ··· 1288 1288 DEBUG_WARN( 1289 1289 "%s unwrap secure key warning, card response %d/%d\n", 1290 1290 __func__, 1291 - (int) prepcblk->ccp_rtcode, 1292 - (int) prepcblk->ccp_rscode); 1291 + (int)prepcblk->ccp_rtcode, 1292 + (int)prepcblk->ccp_rscode); 1293 1293 } 1294 1294 1295 1295 /* process response cprb param block */ 1296 - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); 1297 - prepcblk->rpl_parmb = (u8 __user *) ptr; 1298 - prepparm = (struct aurepparm *) ptr; 1296 + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); 1297 + prepcblk->rpl_parmb = (u8 __user *)ptr; 1298 + prepparm = (struct aurepparm *)ptr; 1299 1299 1300 1300 /* check the returned keyblock */ 1301 1301 if (prepparm->vud.ckb.version != 0x01 && 1302 1302 prepparm->vud.ckb.version != 0x02) { 1303 1303 DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n", 1304 - __func__, (int) prepparm->vud.ckb.version); 1304 + __func__, (int)prepparm->vud.ckb.version); 1305 1305 rc = -EIO; 1306 1306 goto out; 1307 1307 } 1308 1308 if (prepparm->vud.ckb.algo != 0x02) { 1309 1309 DEBUG_ERR( 1310 1310 
"%s reply param keyblock algo mismatch 0x%02x != 0x02\n", 1311 - __func__, (int) prepparm->vud.ckb.algo); 1311 + __func__, (int)prepparm->vud.ckb.algo); 1312 1312 rc = -EIO; 1313 1313 goto out; 1314 1314 } 1315 1315 1316 1316 /* copy the translated protected key */ 1317 1317 switch (prepparm->vud.ckb.keylen) { 1318 - case 16+32: 1318 + case 16 + 32: 1319 1319 /* AES 128 protected key */ 1320 1320 if (protkeytype) 1321 1321 *protkeytype = PKEY_KEYTYPE_AES_128; 1322 1322 break; 1323 - case 24+32: 1323 + case 24 + 32: 1324 1324 /* AES 192 protected key */ 1325 1325 if (protkeytype) 1326 1326 *protkeytype = PKEY_KEYTYPE_AES_192; 1327 1327 break; 1328 - case 32+32: 1328 + case 32 + 32: 1329 1329 /* AES 256 protected key */ 1330 1330 if (protkeytype) 1331 1331 *protkeytype = PKEY_KEYTYPE_AES_256; ··· 1410 1410 preqcblk->domain = domain; 1411 1411 1412 1412 /* fill request cprb param block with AU request */ 1413 - preqparm = (struct aureqparm __force *) preqcblk->req_parmb; 1413 + preqparm = (struct aureqparm __force *)preqcblk->req_parmb; 1414 1414 memcpy(preqparm->subfunc_code, "AU", 2); 1415 1415 preqparm->rule_array_len = 1416 1416 sizeof(preqparm->rule_array_len) ··· 1436 1436 if (rc) { 1437 1437 DEBUG_ERR( 1438 1438 "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 1439 - __func__, (int) cardnr, (int) domain, rc); 1439 + __func__, (int)cardnr, (int)domain, rc); 1440 1440 goto out; 1441 1441 } 1442 1442 ··· 1445 1445 DEBUG_ERR( 1446 1446 "%s unwrap secure key failure, card response %d/%d\n", 1447 1447 __func__, 1448 - (int) prepcblk->ccp_rtcode, 1449 - (int) prepcblk->ccp_rscode); 1448 + (int)prepcblk->ccp_rtcode, 1449 + (int)prepcblk->ccp_rscode); 1450 1450 if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) 1451 1451 rc = -EAGAIN; 1452 1452 else ··· 1457 1457 DEBUG_WARN( 1458 1458 "%s unwrap secure key warning, card response %d/%d\n", 1459 1459 __func__, 1460 - (int) prepcblk->ccp_rtcode, 1461 - (int) prepcblk->ccp_rscode); 1460 + 
(int)prepcblk->ccp_rtcode, 1461 + (int)prepcblk->ccp_rscode); 1462 1462 } 1463 1463 1464 1464 /* process response cprb param block */ 1465 - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); 1466 - prepcblk->rpl_parmb = (u8 __user *) ptr; 1467 - prepparm = (struct aurepparm *) ptr; 1465 + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); 1466 + prepcblk->rpl_parmb = (u8 __user *)ptr; 1467 + prepparm = (struct aurepparm *)ptr; 1468 1468 1469 1469 /* check the returned keyblock */ 1470 1470 if (prepparm->vud.ckb.version != 0x02) { 1471 1471 DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n", 1472 - __func__, (int) prepparm->vud.ckb.version); 1472 + __func__, (int)prepparm->vud.ckb.version); 1473 1473 rc = -EIO; 1474 1474 goto out; 1475 1475 } 1476 1476 if (prepparm->vud.ckb.algo != 0x81) { 1477 1477 DEBUG_ERR( 1478 1478 "%s reply param keyblock algo mismatch 0x%02x != 0x81\n", 1479 - __func__, (int) prepparm->vud.ckb.algo); 1479 + __func__, (int)prepparm->vud.ckb.algo); 1480 1480 rc = -EIO; 1481 1481 goto out; 1482 1482 } ··· 1537 1537 preqcblk->domain = domain; 1538 1538 1539 1539 /* fill request cprb param block with FQ request */ 1540 - preqparm = (struct fqreqparm __force *) preqcblk->req_parmb; 1540 + preqparm = (struct fqreqparm __force *)preqcblk->req_parmb; 1541 1541 memcpy(preqparm->subfunc_code, "FQ", 2); 1542 1542 memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array)); 1543 1543 preqparm->rule_array_len = ··· 1553 1553 rc = zcrypt_send_cprb(&xcrb); 1554 1554 if (rc) { 1555 1555 DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 1556 - __func__, (int) cardnr, (int) domain, rc); 1556 + __func__, (int)cardnr, (int)domain, rc); 1557 1557 goto out; 1558 1558 } 1559 1559 ··· 1561 1561 if (prepcblk->ccp_rtcode != 0) { 1562 1562 DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n", 1563 1563 __func__, 1564 - (int) prepcblk->ccp_rtcode, 1565 - (int) prepcblk->ccp_rscode); 1564 + (int)prepcblk->ccp_rtcode, 
1565 + (int)prepcblk->ccp_rscode); 1566 1566 rc = -EIO; 1567 1567 goto out; 1568 1568 } 1569 1569 1570 1570 /* process response cprb param block */ 1571 - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); 1572 - prepcblk->rpl_parmb = (u8 __user *) ptr; 1573 - prepparm = (struct fqrepparm *) ptr; 1571 + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); 1572 + prepcblk->rpl_parmb = (u8 __user *)ptr; 1573 + prepparm = (struct fqrepparm *)ptr; 1574 1574 ptr = prepparm->lvdata; 1575 1575 1576 1576 /* check and possibly copy reply rule array */ 1577 - len = *((u16 *) ptr); 1577 + len = *((u16 *)ptr); 1578 1578 if (len > sizeof(u16)) { 1579 1579 ptr += sizeof(u16); 1580 1580 len -= sizeof(u16); ··· 1585 1585 ptr += len; 1586 1586 } 1587 1587 /* check and possible copy reply var array */ 1588 - len = *((u16 *) ptr); 1588 + len = *((u16 *)ptr); 1589 1589 if (len > sizeof(u16)) { 1590 1590 ptr += sizeof(u16); 1591 1591 len -= sizeof(u16); ··· 1696 1696 ci->hwtype = devstat.hwtype; 1697 1697 1698 1698 /* prep page for rule array and var array use */ 1699 - pg = (u8 *) __get_free_page(GFP_KERNEL); 1699 + pg = (u8 *)__get_free_page(GFP_KERNEL); 1700 1700 if (!pg) 1701 1701 return -ENOMEM; 1702 1702 rarray = pg; 1703 - varray = pg + PAGE_SIZE/2; 1704 - rlen = vlen = PAGE_SIZE/2; 1703 + varray = pg + PAGE_SIZE / 2; 1704 + rlen = vlen = PAGE_SIZE / 2; 1705 1705 1706 1706 /* QF for this card/domain */ 1707 1707 rc = cca_query_crypto_facility(cardnr, domain, "STATICSA", 1708 1708 rarray, &rlen, varray, &vlen); 1709 - if (rc == 0 && rlen >= 10*8 && vlen >= 204) { 1709 + if (rc == 0 && rlen >= 10 * 8 && vlen >= 204) { 1710 1710 memcpy(ci->serial, rarray, 8); 1711 - ci->new_aes_mk_state = (char) rarray[7*8]; 1712 - ci->cur_aes_mk_state = (char) rarray[8*8]; 1713 - ci->old_aes_mk_state = (char) rarray[9*8]; 1711 + ci->new_asym_mk_state = (char)rarray[4 * 8]; 1712 + ci->cur_asym_mk_state = (char)rarray[5 * 8]; 1713 + ci->old_asym_mk_state = (char)rarray[6 * 8]; 1714 + if 
(ci->old_asym_mk_state == '2') 1715 + memcpy(ci->old_asym_mkvp, varray + 64, 16); 1716 + if (ci->cur_asym_mk_state == '2') 1717 + memcpy(ci->cur_asym_mkvp, varray + 84, 16); 1718 + if (ci->new_asym_mk_state == '3') 1719 + memcpy(ci->new_asym_mkvp, varray + 104, 16); 1720 + ci->new_aes_mk_state = (char)rarray[7 * 8]; 1721 + ci->cur_aes_mk_state = (char)rarray[8 * 8]; 1722 + ci->old_aes_mk_state = (char)rarray[9 * 8]; 1714 1723 if (ci->old_aes_mk_state == '2') 1715 1724 memcpy(&ci->old_aes_mkvp, varray + 172, 8); 1716 1725 if (ci->cur_aes_mk_state == '2') ··· 1730 1721 } 1731 1722 if (!found) 1732 1723 goto out; 1733 - rlen = vlen = PAGE_SIZE/2; 1724 + rlen = vlen = PAGE_SIZE / 2; 1734 1725 rc = cca_query_crypto_facility(cardnr, domain, "STATICSB", 1735 1726 rarray, &rlen, varray, &vlen); 1736 - if (rc == 0 && rlen >= 13*8 && vlen >= 240) { 1737 - ci->new_apka_mk_state = (char) rarray[10*8]; 1738 - ci->cur_apka_mk_state = (char) rarray[11*8]; 1739 - ci->old_apka_mk_state = (char) rarray[12*8]; 1727 + if (rc == 0 && rlen >= 13 * 8 && vlen >= 240) { 1728 + ci->new_apka_mk_state = (char)rarray[10 * 8]; 1729 + ci->cur_apka_mk_state = (char)rarray[11 * 8]; 1730 + ci->old_apka_mk_state = (char)rarray[12 * 8]; 1740 1731 if (ci->old_apka_mk_state == '2') 1741 1732 memcpy(&ci->old_apka_mkvp, varray + 208, 8); 1742 1733 if (ci->cur_apka_mk_state == '2') ··· 1747 1738 } 1748 1739 1749 1740 out: 1750 - free_page((unsigned long) pg); 1741 + free_page((unsigned long)pg); 1751 1742 return found == 2 ? 0 : -ENOENT; 1752 1743 } 1753 1744 ··· 1855 1846 if (pdomain) 1856 1847 *pdomain = dom; 1857 1848 rc = (i < MAX_ZDEV_ENTRIES_EXT ? 
0 : 1); 1858 - } else 1849 + } else { 1859 1850 rc = -ENODEV; 1851 + } 1860 1852 1861 1853 kvfree(device_status); 1862 1854 return rc; ··· 1871 1861 { 1872 1862 u64 mkvp; 1873 1863 int minhwtype = 0; 1874 - const struct keytoken_header *hdr = (struct keytoken_header *) key; 1864 + const struct keytoken_header *hdr = (struct keytoken_header *)key; 1875 1865 1876 1866 if (hdr->type != TOKTYPE_CCA_INTERNAL) 1877 1867 return -EINVAL; ··· 1964 1954 } 1965 1955 /* apqn passed all filtering criterons, add to the array */ 1966 1956 if (_nr_apqns < 256) 1967 - _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom); 1957 + _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); 1968 1958 } 1969 1959 1970 1960 /* nothing found ? */
+6
drivers/s390/crypto/zcrypt_ccamisc.h
··· 251 251 char new_apka_mk_state; /* '1' empty, '2' partially full, '3' full */ 252 252 char cur_apka_mk_state; /* '1' invalid, '2' valid */ 253 253 char old_apka_mk_state; /* '1' invalid, '2' valid */ 254 + char new_asym_mk_state; /* '1' empty, '2' partially full, '3' full */ 255 + char cur_asym_mk_state; /* '1' invalid, '2' valid */ 256 + char old_asym_mk_state; /* '1' invalid, '2' valid */ 254 257 u64 new_aes_mkvp; /* truncated sha256 of new aes master key */ 255 258 u64 cur_aes_mkvp; /* truncated sha256 of current aes master key */ 256 259 u64 old_aes_mkvp; /* truncated sha256 of old aes master key */ 257 260 u64 new_apka_mkvp; /* truncated sha256 of new apka master key */ 258 261 u64 cur_apka_mkvp; /* truncated sha256 of current apka mk */ 259 262 u64 old_apka_mkvp; /* truncated sha256 of old apka mk */ 263 + u8 new_asym_mkvp[16]; /* verify pattern of new asym master key */ 264 + u8 cur_asym_mkvp[16]; /* verify pattern of current asym master key */ 265 + u8 old_asym_mkvp[16]; /* verify pattern of old asym master key */ 260 266 char serial[9]; /* serial number (8 ascii numbers + 0x00) */ 261 267 }; 262 268
+5 -6
drivers/s390/crypto/zcrypt_cex2a.c
··· 34 34 35 35 #define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 bit modulus 36 36 * (max outputdatalength) + 37 - * type80_hdr*/ 37 + * type80_hdr 38 + */ 38 39 #define CEX3A_MAX_MESSAGE_SIZE sizeof(struct type50_crb3_msg) 39 40 40 - #define CEX2A_CLEANUP_TIME (15*HZ) 41 + #define CEX2A_CLEANUP_TIME (15 * HZ) 41 42 #define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME 42 43 43 44 MODULE_AUTHOR("IBM Corporation"); ··· 118 117 zc->online = 1; 119 118 120 119 rc = zcrypt_card_register(zc); 121 - if (rc) { 120 + if (rc) 122 121 zcrypt_card_free(zc); 123 - } 124 122 125 123 return rc; 126 124 } ··· 176 176 aq->request_timeout = CEX2A_CLEANUP_TIME; 177 177 dev_set_drvdata(&ap_dev->device, zq); 178 178 rc = zcrypt_queue_register(zq); 179 - if (rc) { 179 + if (rc) 180 180 zcrypt_queue_free(zq); 181 - } 182 181 183 182 return rc; 184 183 }
+5 -5
drivers/s390/crypto/zcrypt_cex2c.c
··· 31 31 #define CEX2C_MAX_MOD_SIZE 256 /* 2048 bits */ 32 32 #define CEX3C_MIN_MOD_SIZE 16 /* 128 bits */ 33 33 #define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */ 34 - #define CEX2C_MAX_XCRB_MESSAGE_SIZE (12*1024) 35 - #define CEX2C_CLEANUP_TIME (15*HZ) 34 + #define CEX2C_MAX_XCRB_MESSAGE_SIZE (12 * 1024) 35 + #define CEX2C_CLEANUP_TIME (15 * HZ) 36 36 37 37 MODULE_AUTHOR("IBM Corporation"); 38 38 MODULE_DESCRIPTION("CEX2C/CEX3C Cryptographic Coprocessor device driver, " \ ··· 200 200 int rc, i; 201 201 202 202 ap_init_message(&ap_msg); 203 - ap_msg.msg = (void *) get_zeroed_page(GFP_KERNEL); 203 + ap_msg.msg = (void *)get_zeroed_page(GFP_KERNEL); 204 204 if (!ap_msg.msg) 205 205 return -ENOMEM; 206 206 207 - rng_type6CPRB_msgX(&ap_msg, 4, &domain); 207 + rng_type6cprb_msgx(&ap_msg, 4, &domain); 208 208 209 209 msg = ap_msg.msg; 210 210 msg->cprbx.domain = AP_QID_QUEUE(aq->qid); ··· 233 233 else 234 234 rc = 0; 235 235 out_free: 236 - free_page((unsigned long) ap_msg.msg); 236 + free_page((unsigned long)ap_msg.msg); 237 237 return rc; 238 238 } 239 239
+37 -7
drivers/s390/crypto/zcrypt_cex4.c
··· 33 33 * But the maximum time limit managed by the stomper code is set to 60sec. 34 34 * Hence we have to wait at least that time period. 35 35 */ 36 - #define CEX4_CLEANUP_TIME (900*HZ) 36 + #define CEX4_CLEANUP_TIME (900 * HZ) 37 37 38 38 MODULE_AUTHOR("IBM Corporation"); 39 39 MODULE_DESCRIPTION("CEX[45678] Cryptographic Card device driver, " \ ··· 123 123 &ci, zq->online); 124 124 125 125 if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3') 126 - n = scnprintf(buf, PAGE_SIZE, "AES NEW: %s 0x%016llx\n", 127 - new_state[ci.new_aes_mk_state - '1'], 128 - ci.new_aes_mkvp); 126 + n += scnprintf(buf + n, PAGE_SIZE, 127 + "AES NEW: %s 0x%016llx\n", 128 + new_state[ci.new_aes_mk_state - '1'], 129 + ci.new_aes_mkvp); 129 130 else 130 - n = scnprintf(buf, PAGE_SIZE, "AES NEW: - -\n"); 131 + n += scnprintf(buf + n, PAGE_SIZE, "AES NEW: - -\n"); 131 132 132 133 if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2') 133 134 n += scnprintf(buf + n, PAGE_SIZE - n, ··· 169 168 ci.old_apka_mkvp); 170 169 else 171 170 n += scnprintf(buf + n, PAGE_SIZE - n, "APKA OLD: - -\n"); 171 + 172 + if (ci.new_asym_mk_state >= '1' && ci.new_asym_mk_state <= '3') 173 + n += scnprintf(buf + n, PAGE_SIZE, 174 + "ASYM NEW: %s 0x%016llx%016llx\n", 175 + new_state[ci.new_asym_mk_state - '1'], 176 + *((u64 *)(ci.new_asym_mkvp)), 177 + *((u64 *)(ci.new_asym_mkvp + sizeof(u64)))); 178 + else 179 + n += scnprintf(buf + n, PAGE_SIZE, "ASYM NEW: - -\n"); 180 + 181 + if (ci.cur_asym_mk_state >= '1' && ci.cur_asym_mk_state <= '2') 182 + n += scnprintf(buf + n, PAGE_SIZE - n, 183 + "ASYM CUR: %s 0x%016llx%016llx\n", 184 + cao_state[ci.cur_asym_mk_state - '1'], 185 + *((u64 *)(ci.cur_asym_mkvp)), 186 + *((u64 *)(ci.cur_asym_mkvp + sizeof(u64)))); 187 + else 188 + n += scnprintf(buf + n, PAGE_SIZE - n, "ASYM CUR: - -\n"); 189 + 190 + if (ci.old_asym_mk_state >= '1' && ci.old_asym_mk_state <= '2') 191 + n += scnprintf(buf + n, PAGE_SIZE - n, 192 + "ASYM OLD: %s 0x%016llx%016llx\n", 193 + 
cao_state[ci.old_asym_mk_state - '1'], 194 + *((u64 *)(ci.old_asym_mkvp)), 195 + *((u64 *)(ci.old_asym_mkvp + sizeof(u64)))); 196 + else 197 + n += scnprintf(buf + n, PAGE_SIZE - n, "ASYM OLD: - -\n"); 172 198 173 199 return n; 174 200 } ··· 364 336 bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp)); 365 337 n += 2 * sizeof(di.cur_wkvp); 366 338 n += scnprintf(buf + n, PAGE_SIZE - n, "\n"); 367 - } else 339 + } else { 368 340 n = scnprintf(buf, PAGE_SIZE, "WK CUR: - -\n"); 341 + } 369 342 370 343 if (di.new_wk_state == '0') { 371 344 n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n", ··· 377 348 bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp)); 378 349 n += 2 * sizeof(di.new_wkvp); 379 350 n += scnprintf(buf + n, PAGE_SIZE - n, "\n"); 380 - } else 351 + } else { 381 352 n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n"); 353 + } 382 354 383 355 return n; 384 356 }
+84 -84
drivers/s390/crypto/zcrypt_ep11misc.c
··· 119 119 int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, 120 120 const u8 *key, size_t keylen, int checkcpacfexp) 121 121 { 122 - struct ep11kblob_header *hdr = (struct ep11kblob_header *) key; 123 - struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr)); 122 + struct ep11kblob_header *hdr = (struct ep11kblob_header *)key; 123 + struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr)); 124 124 125 125 #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) 126 126 ··· 133 133 if (hdr->type != TOKTYPE_NON_CCA) { 134 134 if (dbg) 135 135 DBF("%s key check failed, type 0x%02x != 0x%02x\n", 136 - __func__, (int) hdr->type, TOKTYPE_NON_CCA); 136 + __func__, (int)hdr->type, TOKTYPE_NON_CCA); 137 137 return -EINVAL; 138 138 } 139 139 if (hdr->hver != 0x00) { 140 140 if (dbg) 141 141 DBF("%s key check failed, header version 0x%02x != 0x00\n", 142 - __func__, (int) hdr->hver); 142 + __func__, (int)hdr->hver); 143 143 return -EINVAL; 144 144 } 145 145 if (hdr->version != TOKVER_EP11_AES_WITH_HEADER) { 146 146 if (dbg) 147 147 DBF("%s key check failed, version 0x%02x != 0x%02x\n", 148 - __func__, (int) hdr->version, TOKVER_EP11_AES_WITH_HEADER); 148 + __func__, (int)hdr->version, TOKVER_EP11_AES_WITH_HEADER); 149 149 return -EINVAL; 150 150 } 151 151 if (hdr->len > keylen) { 152 152 if (dbg) 153 153 DBF("%s key check failed, header len %d keylen %zu mismatch\n", 154 - __func__, (int) hdr->len, keylen); 154 + __func__, (int)hdr->len, keylen); 155 155 return -EINVAL; 156 156 } 157 157 if (hdr->len < sizeof(*hdr) + sizeof(*kb)) { 158 158 if (dbg) 159 159 DBF("%s key check failed, header len %d < %zu\n", 160 - __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb)); 160 + __func__, (int)hdr->len, sizeof(*hdr) + sizeof(*kb)); 161 161 return -EINVAL; 162 162 } 163 163 164 164 if (kb->version != EP11_STRUCT_MAGIC) { 165 165 if (dbg) 166 166 DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n", 167 - __func__, (int) kb->version, 
EP11_STRUCT_MAGIC); 167 + __func__, (int)kb->version, EP11_STRUCT_MAGIC); 168 168 return -EINVAL; 169 169 } 170 170 if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) { ··· 186 186 int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, 187 187 const u8 *key, size_t keylen, int checkcpacfexp) 188 188 { 189 - struct ep11kblob_header *hdr = (struct ep11kblob_header *) key; 190 - struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr)); 189 + struct ep11kblob_header *hdr = (struct ep11kblob_header *)key; 190 + struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr)); 191 191 192 192 #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) 193 193 ··· 200 200 if (hdr->type != TOKTYPE_NON_CCA) { 201 201 if (dbg) 202 202 DBF("%s key check failed, type 0x%02x != 0x%02x\n", 203 - __func__, (int) hdr->type, TOKTYPE_NON_CCA); 203 + __func__, (int)hdr->type, TOKTYPE_NON_CCA); 204 204 return -EINVAL; 205 205 } 206 206 if (hdr->hver != 0x00) { 207 207 if (dbg) 208 208 DBF("%s key check failed, header version 0x%02x != 0x00\n", 209 - __func__, (int) hdr->hver); 209 + __func__, (int)hdr->hver); 210 210 return -EINVAL; 211 211 } 212 212 if (hdr->version != TOKVER_EP11_ECC_WITH_HEADER) { 213 213 if (dbg) 214 214 DBF("%s key check failed, version 0x%02x != 0x%02x\n", 215 - __func__, (int) hdr->version, TOKVER_EP11_ECC_WITH_HEADER); 215 + __func__, (int)hdr->version, TOKVER_EP11_ECC_WITH_HEADER); 216 216 return -EINVAL; 217 217 } 218 218 if (hdr->len > keylen) { 219 219 if (dbg) 220 220 DBF("%s key check failed, header len %d keylen %zu mismatch\n", 221 - __func__, (int) hdr->len, keylen); 221 + __func__, (int)hdr->len, keylen); 222 222 return -EINVAL; 223 223 } 224 224 if (hdr->len < sizeof(*hdr) + sizeof(*kb)) { 225 225 if (dbg) 226 226 DBF("%s key check failed, header len %d < %zu\n", 227 - __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb)); 227 + __func__, (int)hdr->len, sizeof(*hdr) + sizeof(*kb)); 228 228 return -EINVAL; 229 
229 } 230 230 231 231 if (kb->version != EP11_STRUCT_MAGIC) { 232 232 if (dbg) 233 233 DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n", 234 - __func__, (int) kb->version, EP11_STRUCT_MAGIC); 234 + __func__, (int)kb->version, EP11_STRUCT_MAGIC); 235 235 return -EINVAL; 236 236 } 237 237 if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) { ··· 254 254 int ep11_check_aes_key(debug_info_t *dbg, int dbflvl, 255 255 const u8 *key, size_t keylen, int checkcpacfexp) 256 256 { 257 - struct ep11keyblob *kb = (struct ep11keyblob *) key; 257 + struct ep11keyblob *kb = (struct ep11keyblob *)key; 258 258 259 259 #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) 260 260 ··· 267 267 if (kb->head.type != TOKTYPE_NON_CCA) { 268 268 if (dbg) 269 269 DBF("%s key check failed, type 0x%02x != 0x%02x\n", 270 - __func__, (int) kb->head.type, TOKTYPE_NON_CCA); 270 + __func__, (int)kb->head.type, TOKTYPE_NON_CCA); 271 271 return -EINVAL; 272 272 } 273 273 if (kb->head.version != TOKVER_EP11_AES) { 274 274 if (dbg) 275 275 DBF("%s key check failed, version 0x%02x != 0x%02x\n", 276 - __func__, (int) kb->head.version, TOKVER_EP11_AES); 276 + __func__, (int)kb->head.version, TOKVER_EP11_AES); 277 277 return -EINVAL; 278 278 } 279 279 if (kb->head.len > keylen) { 280 280 if (dbg) 281 281 DBF("%s key check failed, header len %d keylen %zu mismatch\n", 282 - __func__, (int) kb->head.len, keylen); 282 + __func__, (int)kb->head.len, keylen); 283 283 return -EINVAL; 284 284 } 285 285 if (kb->head.len < sizeof(*kb)) { 286 286 if (dbg) 287 287 DBF("%s key check failed, header len %d < %zu\n", 288 - __func__, (int) kb->head.len, sizeof(*kb)); 288 + __func__, (int)kb->head.len, sizeof(*kb)); 289 289 return -EINVAL; 290 290 } 291 291 292 292 if (kb->version != EP11_STRUCT_MAGIC) { 293 293 if (dbg) 294 294 DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n", 295 - __func__, (int) kb->version, EP11_STRUCT_MAGIC); 295 + __func__, (int)kb->version, EP11_STRUCT_MAGIC); 
296 296 return -EINVAL; 297 297 } 298 298 if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) { ··· 347 347 } 348 348 if (valuelen > 127) { 349 349 ptr[1] = 0x81; 350 - ptr[2] = (u8) valuelen; 350 + ptr[2] = (u8)valuelen; 351 351 memcpy(ptr + 3, pvalue, valuelen); 352 352 return 3 + valuelen; 353 353 } 354 - ptr[1] = (u8) valuelen; 354 + ptr[1] = (u8)valuelen; 355 355 memcpy(ptr + 2, pvalue, valuelen); 356 356 return 2 + valuelen; 357 357 } ··· 389 389 struct ep11_cprb *req, size_t req_len, 390 390 struct ep11_cprb *rep, size_t rep_len) 391 391 { 392 - u->targets = (u8 __user *) t; 392 + u->targets = (u8 __user *)t; 393 393 u->targets_num = nt; 394 - u->req = (u8 __user *) req; 394 + u->req = (u8 __user *)req; 395 395 u->req_len = req_len; 396 - u->resp = (u8 __user *) rep; 396 + u->resp = (u8 __user *)rep; 397 397 u->resp_len = rep_len; 398 398 } 399 399 ··· 462 462 return 0; 463 463 } 464 464 465 - 466 465 /* 467 466 * Helper function which does an ep11 query with given query type. 
468 467 */ ··· 495 496 req = alloc_cprb(sizeof(struct ep11_info_req_pl)); 496 497 if (!req) 497 498 goto out; 498 - req_pl = (struct ep11_info_req_pl *) (((u8 *) req) + sizeof(*req)); 499 + req_pl = (struct ep11_info_req_pl *)(((u8 *)req) + sizeof(*req)); 499 500 prep_head(&req_pl->head, sizeof(*req_pl), api, 38); /* get xcp info */ 500 501 req_pl->query_type_tag = 0x04; 501 502 req_pl->query_type_len = sizeof(u32); ··· 507 508 rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen); 508 509 if (!rep) 509 510 goto out; 510 - rep_pl = (struct ep11_info_rep_pl *) (((u8 *) rep) + sizeof(*rep)); 511 + rep_pl = (struct ep11_info_rep_pl *)(((u8 *)rep) + sizeof(*rep)); 511 512 512 513 /* urb and target */ 513 - urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); 514 + urb = kmalloc(sizeof(*urb), GFP_KERNEL); 514 515 if (!urb) 515 516 goto out; 516 517 target.ap_id = cardnr; ··· 523 524 if (rc) { 524 525 DEBUG_ERR( 525 526 "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", 526 - __func__, (int) cardnr, (int) domain, rc); 527 + __func__, (int)cardnr, (int)domain, rc); 527 528 goto out; 528 529 } 529 530 ··· 542 543 goto out; 543 544 } 544 545 545 - memcpy(buf, ((u8 *) rep_pl) + sizeof(*rep_pl), rep_pl->data_len); 546 + memcpy(buf, ((u8 *)rep_pl) + sizeof(*rep_pl), rep_pl->data_len); 546 547 547 548 out: 548 549 kfree(req); ··· 591 592 return -ENOMEM; 592 593 rc = ep11_query_info(card, AUTOSEL_DOM, 593 594 0x01 /* module info query */, 594 - sizeof(*pmqi), (u8 *) pmqi); 595 + sizeof(*pmqi), (u8 *)pmqi); 595 596 if (rc) { 596 597 if (rc == -ENODEV) 597 598 card_cache_scrub(card); ··· 631 632 return -ENOMEM; 632 633 633 634 rc = ep11_query_info(card, domain, 0x03 /* domain info query */, 634 - sizeof(*p_dom_info), (u8 *) p_dom_info); 635 + sizeof(*p_dom_info), (u8 *)p_dom_info); 635 636 if (rc) 636 637 goto out; 637 638 ··· 643 644 info->cur_wk_state = '1'; 644 645 memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32); 645 646 } 646 - if (p_dom_info->dom_flags & 0x04 
/* new wk present */ 647 - || p_dom_info->dom_flags & 0x08 /* new wk committed */) { 647 + if (p_dom_info->dom_flags & 0x04 || /* new wk present */ 648 + p_dom_info->dom_flags & 0x08 /* new wk committed */) { 648 649 info->new_wk_state = 649 650 p_dom_info->dom_flags & 0x08 ? '2' : '1'; 650 651 memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32); ··· 721 722 req = alloc_cprb(sizeof(struct keygen_req_pl)); 722 723 if (!req) 723 724 goto out; 724 - req_pl = (struct keygen_req_pl *) (((u8 *) req) + sizeof(*req)); 725 + req_pl = (struct keygen_req_pl *)(((u8 *)req) + sizeof(*req)); 725 726 api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1; 726 727 prep_head(&req_pl->head, sizeof(*req_pl), api, 21); /* GenerateKey */ 727 728 req_pl->var_tag = 0x04; ··· 745 746 rep = alloc_cprb(sizeof(struct keygen_rep_pl)); 746 747 if (!rep) 747 748 goto out; 748 - rep_pl = (struct keygen_rep_pl *) (((u8 *) rep) + sizeof(*rep)); 749 + rep_pl = (struct keygen_rep_pl *)(((u8 *)rep) + sizeof(*rep)); 749 750 750 751 /* urb and target */ 751 - urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); 752 + urb = kmalloc(sizeof(*urb), GFP_KERNEL); 752 753 if (!urb) 753 754 goto out; 754 755 target.ap_id = card; ··· 761 762 if (rc) { 762 763 DEBUG_ERR( 763 764 "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", 764 - __func__, (int) card, (int) domain, rc); 765 + __func__, (int)card, (int)domain, rc); 765 766 goto out; 766 767 } 767 768 ··· 783 784 /* copy key blob and set header values */ 784 785 memcpy(keybuf, rep_pl->data, rep_pl->data_len); 785 786 *keybufsize = rep_pl->data_len; 786 - kb = (struct ep11keyblob *) keybuf; 787 + kb = (struct ep11keyblob *)keybuf; 787 788 kb->head.type = TOKTYPE_NON_CCA; 788 789 kb->head.len = rep_pl->data_len; 789 790 kb->head.version = TOKVER_EP11_AES; ··· 843 844 req = alloc_cprb(req_pl_size); 844 845 if (!req) 845 846 goto out; 846 - req_pl = (struct crypt_req_pl *) (((u8 *) req) + sizeof(*req)); 847 + req_pl = (struct crypt_req_pl *)(((u8 *)req) + 
sizeof(*req)); 847 848 prep_head(&req_pl->head, req_pl_size, api, (mode ? 20 : 19)); 848 849 req_pl->var_tag = 0x04; 849 850 req_pl->var_len = sizeof(u32); ··· 851 852 req_pl->mech_tag = 0x04; 852 853 req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0); 853 854 req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */ 854 - p = ((u8 *) req_pl) + sizeof(*req_pl); 855 + p = ((u8 *)req_pl) + sizeof(*req_pl); 855 856 if (iv) { 856 857 memcpy(p, iv, 16); 857 858 p += 16; ··· 865 866 rep = alloc_cprb(rep_pl_size); 866 867 if (!rep) 867 868 goto out; 868 - rep_pl = (struct crypt_rep_pl *) (((u8 *) rep) + sizeof(*rep)); 869 + rep_pl = (struct crypt_rep_pl *)(((u8 *)rep) + sizeof(*rep)); 869 870 870 871 /* urb and target */ 871 - urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); 872 + urb = kmalloc(sizeof(*urb), GFP_KERNEL); 872 873 if (!urb) 873 874 goto out; 874 875 target.ap_id = card; ··· 881 882 if (rc) { 882 883 DEBUG_ERR( 883 884 "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", 884 - __func__, (int) card, (int) domain, rc); 885 + __func__, (int)card, (int)domain, rc); 885 886 goto out; 886 887 } 887 888 ··· 893 894 rc = -EIO; 894 895 goto out; 895 896 } 896 - p = ((u8 *) rep_pl) + sizeof(*rep_pl); 897 - if (rep_pl->data_lenfmt <= 127) 897 + p = ((u8 *)rep_pl) + sizeof(*rep_pl); 898 + if (rep_pl->data_lenfmt <= 127) { 898 899 n = rep_pl->data_lenfmt; 899 - else if (rep_pl->data_lenfmt == 0x81) 900 + } else if (rep_pl->data_lenfmt == 0x81) { 900 901 n = *p++; 901 - else if (rep_pl->data_lenfmt == 0x82) { 902 - n = *((u16 *) p); 902 + } else if (rep_pl->data_lenfmt == 0x82) { 903 + n = *((u16 *)p); 903 904 p += 2; 904 905 } else { 905 906 DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n", ··· 977 978 req = alloc_cprb(req_pl_size); 978 979 if (!req) 979 980 goto out; 980 - req_pl = (struct uw_req_pl *) (((u8 *) req) + sizeof(*req)); 981 + req_pl = (struct uw_req_pl *)(((u8 *)req) + sizeof(*req)); 981 982 api = (!keygenflags || keygenflags & 
0x00200000) ? 4 : 1; 982 983 prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */ 983 984 req_pl->attr_tag = 0x04; ··· 993 994 req_pl->mech_tag = 0x04; 994 995 req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0); 995 996 req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */ 996 - p = ((u8 *) req_pl) + sizeof(*req_pl); 997 + p = ((u8 *)req_pl) + sizeof(*req_pl); 997 998 if (iv) { 998 999 memcpy(p, iv, 16); 999 1000 p += 16; ··· 1013 1014 rep = alloc_cprb(sizeof(struct uw_rep_pl)); 1014 1015 if (!rep) 1015 1016 goto out; 1016 - rep_pl = (struct uw_rep_pl *) (((u8 *) rep) + sizeof(*rep)); 1017 + rep_pl = (struct uw_rep_pl *)(((u8 *)rep) + sizeof(*rep)); 1017 1018 1018 1019 /* urb and target */ 1019 - urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); 1020 + urb = kmalloc(sizeof(*urb), GFP_KERNEL); 1020 1021 if (!urb) 1021 1022 goto out; 1022 1023 target.ap_id = card; ··· 1029 1030 if (rc) { 1030 1031 DEBUG_ERR( 1031 1032 "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", 1032 - __func__, (int) card, (int) domain, rc); 1033 + __func__, (int)card, (int)domain, rc); 1033 1034 goto out; 1034 1035 } 1035 1036 ··· 1051 1052 /* copy key blob and set header values */ 1052 1053 memcpy(keybuf, rep_pl->data, rep_pl->data_len); 1053 1054 *keybufsize = rep_pl->data_len; 1054 - kb = (struct ep11keyblob *) keybuf; 1055 + kb = (struct ep11keyblob *)keybuf; 1055 1056 kb->head.type = TOKTYPE_NON_CCA; 1056 1057 kb->head.len = rep_pl->data_len; 1057 1058 kb->head.version = TOKVER_EP11_AES; ··· 1104 1105 u8 *p; 1105 1106 1106 1107 /* maybe the session field holds a header with key info */ 1107 - kb = (struct ep11keyblob *) key; 1108 + kb = (struct ep11keyblob *)key; 1108 1109 if (kb->head.type == TOKTYPE_NON_CCA && 1109 1110 kb->head.version == TOKVER_EP11_AES) { 1110 1111 has_header = true; ··· 1119 1120 goto out; 1120 1121 if (!mech || mech == 0x80060001) 1121 1122 req->flags |= 0x20; /* CPACF_WRAP needs special bit */ 1122 - req_pl = (struct wk_req_pl *) 
(((u8 *) req) + sizeof(*req)); 1123 + req_pl = (struct wk_req_pl *)(((u8 *)req) + sizeof(*req)); 1123 1124 api = (!mech || mech == 0x80060001) ? 4 : 1; /* CKM_IBM_CPACF_WRAP */ 1124 1125 prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */ 1125 1126 req_pl->var_tag = 0x04; ··· 1128 1129 req_pl->mech_tag = 0x04; 1129 1130 req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0); 1130 1131 req_pl->mech = (mech ? mech : 0x80060001); /* CKM_IBM_CPACF_WRAP */ 1131 - p = ((u8 *) req_pl) + sizeof(*req_pl); 1132 + p = ((u8 *)req_pl) + sizeof(*req_pl); 1132 1133 if (iv) { 1133 1134 memcpy(p, iv, 16); 1134 1135 p += 16; ··· 1151 1152 rep = alloc_cprb(sizeof(struct wk_rep_pl)); 1152 1153 if (!rep) 1153 1154 goto out; 1154 - rep_pl = (struct wk_rep_pl *) (((u8 *) rep) + sizeof(*rep)); 1155 + rep_pl = (struct wk_rep_pl *)(((u8 *)rep) + sizeof(*rep)); 1155 1156 1156 1157 /* urb and target */ 1157 - urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); 1158 + urb = kmalloc(sizeof(*urb), GFP_KERNEL); 1158 1159 if (!urb) 1159 1160 goto out; 1160 1161 target.ap_id = card; ··· 1167 1168 if (rc) { 1168 1169 DEBUG_ERR( 1169 1170 "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", 1170 - __func__, (int) card, (int) domain, rc); 1171 + __func__, (int)card, (int)domain, rc); 1171 1172 goto out; 1172 1173 } 1173 1174 ··· 1205 1206 u8 encbuf[64], *kek = NULL; 1206 1207 size_t clrkeylen, keklen, encbuflen = sizeof(encbuf); 1207 1208 1208 - if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) 1209 + if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) { 1209 1210 clrkeylen = keybitsize / 8; 1210 - else { 1211 + } else { 1211 1212 DEBUG_ERR( 1212 1213 "%s unknown/unsupported keybitsize %d\n", 1213 1214 __func__, keybitsize); ··· 1232 1233 __func__, rc); 1233 1234 goto out; 1234 1235 } 1235 - kb = (struct ep11keyblob *) kek; 1236 + kb = (struct ep11keyblob *)kek; 1236 1237 memset(&kb->head, 0, sizeof(kb->head)); 1237 1238 1238 1239 /* Step 2: encrypt clear key value 
with the kek key */ ··· 1281 1282 struct ep11kblob_header *hdr; 1282 1283 1283 1284 /* key with or without header ? */ 1284 - hdr = (struct ep11kblob_header *) keyblob; 1285 - if (hdr->type == TOKTYPE_NON_CCA 1286 - && (hdr->version == TOKVER_EP11_AES_WITH_HEADER 1287 - || hdr->version == TOKVER_EP11_ECC_WITH_HEADER) 1288 - && is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) { 1285 + hdr = (struct ep11kblob_header *)keyblob; 1286 + if (hdr->type == TOKTYPE_NON_CCA && 1287 + (hdr->version == TOKVER_EP11_AES_WITH_HEADER || 1288 + hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && 1289 + is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) { 1289 1290 /* EP11 AES or ECC key with header */ 1290 1291 key = keyblob + sizeof(struct ep11kblob_header); 1291 1292 keylen = hdr->len - sizeof(struct ep11kblob_header); 1292 - } else if (hdr->type == TOKTYPE_NON_CCA 1293 - && hdr->version == TOKVER_EP11_AES 1294 - && is_ep11_keyblob(keyblob)) { 1293 + } else if (hdr->type == TOKTYPE_NON_CCA && 1294 + hdr->version == TOKVER_EP11_AES && 1295 + is_ep11_keyblob(keyblob)) { 1295 1296 /* EP11 AES key (old style) */ 1296 1297 key = keyblob; 1297 1298 keylen = hdr->len; ··· 1299 1300 /* raw EP11 key blob */ 1300 1301 key = keyblob; 1301 1302 keylen = keybloblen; 1302 - } else 1303 + } else { 1303 1304 return -EINVAL; 1305 + } 1304 1306 1305 1307 /* alloc temp working buffer */ 1306 1308 wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1)); ··· 1318 1318 __func__, rc); 1319 1319 goto out; 1320 1320 } 1321 - wki = (struct wk_info *) wkbuf; 1321 + wki = (struct wk_info *)wkbuf; 1322 1322 1323 1323 /* check struct version and pkey type */ 1324 1324 if (wki->version != 1 || wki->pkeytype < 1 || wki->pkeytype > 5) { 1325 1325 DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n", 1326 - __func__, (int) wki->version, (int) wki->pkeytype); 1326 + __func__, (int)wki->version, (int)wki->pkeytype); 1327 1327 rc = -EIO; 1328 1328 goto out; 1329 1329 } ··· 1332 1332 
switch (wki->pkeytype) { 1333 1333 case 1: /* AES */ 1334 1334 switch (wki->pkeysize) { 1335 - case 16+32: 1335 + case 16 + 32: 1336 1336 /* AES 128 protected key */ 1337 1337 if (protkeytype) 1338 1338 *protkeytype = PKEY_KEYTYPE_AES_128; 1339 1339 break; 1340 - case 24+32: 1340 + case 24 + 32: 1341 1341 /* AES 192 protected key */ 1342 1342 if (protkeytype) 1343 1343 *protkeytype = PKEY_KEYTYPE_AES_192; 1344 1344 break; 1345 - case 32+32: 1345 + case 32 + 32: 1346 1346 /* AES 256 protected key */ 1347 1347 if (protkeytype) 1348 1348 *protkeytype = PKEY_KEYTYPE_AES_256; 1349 1349 break; 1350 1350 default: 1351 1351 DEBUG_ERR("%s unknown/unsupported AES pkeysize %d\n", 1352 - __func__, (int) wki->pkeysize); 1352 + __func__, (int)wki->pkeysize); 1353 1353 rc = -EIO; 1354 1354 goto out; 1355 1355 } ··· 1363 1363 case 2: /* TDES */ 1364 1364 default: 1365 1365 DEBUG_ERR("%s unknown/unsupported key type %d\n", 1366 - __func__, (int) wki->pkeytype); 1366 + __func__, (int)wki->pkeytype); 1367 1367 rc = -EIO; 1368 1368 goto out; 1369 1369 } ··· 1445 1445 } 1446 1446 /* apqn passed all filtering criterons, add to the array */ 1447 1447 if (_nr_apqns < 256) 1448 - _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom); 1448 + _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); 1449 1449 } 1450 1450 1451 1451 /* nothing found ? */
+1 -1
drivers/s390/crypto/zcrypt_ep11misc.h
··· 50 50 /* check ep11 key magic to find out if this is an ep11 key blob */ 51 51 static inline bool is_ep11_keyblob(const u8 *key) 52 52 { 53 - struct ep11keyblob *kb = (struct ep11keyblob *) key; 53 + struct ep11keyblob *kb = (struct ep11keyblob *)key; 54 54 55 55 return (kb->version == EP11_STRUCT_MAGIC); 56 56 }
+2 -1
drivers/s390/crypto/zcrypt_error.h
··· 121 121 ZCRYPT_DBF_WARN( 122 122 "%s dev=%02x.%04x RY=0x%02x apfs=0x%x => bus rescan, rc=EAGAIN\n", 123 123 __func__, card, queue, ehdr->reply_code, apfs); 124 - } else 124 + } else { 125 125 ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x => bus rescan, rc=EAGAIN\n", 126 126 __func__, card, queue, 127 127 ehdr->reply_code); 128 + } 128 129 return -EAGAIN; 129 130 default: 130 131 /* Assume request is valid and a retry will be worth it */
+18 -13
drivers/s390/crypto/zcrypt_msgtype50.c
··· 158 158 159 159 int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode) 160 160 { 161 - 162 161 if (!mex->inputdatalength) 163 162 return -EINVAL; 164 163 ··· 173 174 174 175 int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode) 175 176 { 176 - 177 177 if (!crt->inputdatalength) 178 178 return -EINVAL; 179 179 ··· 237 239 mod = meb3->modulus + sizeof(meb3->modulus) - mod_len; 238 240 exp = meb3->exponent + sizeof(meb3->exponent) - mod_len; 239 241 inp = meb3->message + sizeof(meb3->message) - mod_len; 240 - } else 242 + } else { 241 243 return -EINVAL; 244 + } 242 245 243 246 if (copy_from_user(mod, mex->n_modulus, mod_len) || 244 247 copy_from_user(exp, mex->b_key, mod_len) || ··· 322 323 dq = crb3->dq + sizeof(crb3->dq) - short_len; 323 324 u = crb3->u + sizeof(crb3->u) - short_len; 324 325 inp = crb3->message + sizeof(crb3->message) - mod_len; 325 - } else 326 + } else { 326 327 return -EINVAL; 328 + } 327 329 328 330 /* 329 331 * correct the offset of p, bp and mult_inv according zcrypt.h ··· 392 392 unsigned int outputdatalength) 393 393 { 394 394 /* Response type byte is the second byte in the response. 
*/ 395 - unsigned char rtype = ((unsigned char *) reply->msg)[1]; 395 + unsigned char rtype = ((unsigned char *)reply->msg)[1]; 396 396 397 397 switch (rtype) { 398 398 case TYPE82_RSP_CODE: ··· 406 406 pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 407 407 AP_QID_CARD(zq->queue->qid), 408 408 AP_QID_QUEUE(zq->queue->qid), 409 - (int) rtype); 409 + (int)rtype); 410 410 ZCRYPT_DBF_ERR( 411 411 "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 412 412 __func__, AP_QID_CARD(zq->queue->qid), 413 - AP_QID_QUEUE(zq->queue->qid), (int) rtype); 413 + AP_QID_QUEUE(zq->queue->qid), (int)rtype); 414 414 ap_send_online_uevent(&zq->queue->ap_dev, zq->online); 415 415 return -EAGAIN; 416 416 } ··· 447 447 memcpy(msg->msg, reply->msg, len); 448 448 msg->len = len; 449 449 } 450 - } else 450 + } else { 451 451 memcpy(msg->msg, reply->msg, sizeof(error_reply)); 452 + } 452 453 out: 453 - complete((struct completion *) msg->private); 454 + complete((struct completion *)msg->private); 454 455 } 455 456 456 457 static atomic_t zcrypt_step = ATOMIC_INIT(0); ··· 476 475 if (!ap_msg->msg) 477 476 return -ENOMEM; 478 477 ap_msg->receive = zcrypt_cex2a_receive; 479 - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 478 + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + 480 479 atomic_inc_return(&zcrypt_step); 481 480 ap_msg->private = &work; 482 481 rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex); ··· 493 492 rc = convert_response_cex2a(zq, ap_msg, 494 493 mex->outputdata, 495 494 mex->outputdatalength); 496 - } else 495 + } else { 497 496 /* Signal pending. 
*/ 498 497 ap_cancel_message(zq->queue, ap_msg); 498 + } 499 + 499 500 out: 500 501 ap_msg->private = NULL; 501 502 if (rc) ··· 527 524 if (!ap_msg->msg) 528 525 return -ENOMEM; 529 526 ap_msg->receive = zcrypt_cex2a_receive; 530 - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 527 + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + 531 528 atomic_inc_return(&zcrypt_step); 532 529 ap_msg->private = &work; 533 530 rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt); ··· 544 541 rc = convert_response_cex2a(zq, ap_msg, 545 542 crt->outputdata, 546 543 crt->outputdatalength); 547 - } else 544 + } else { 548 545 /* Signal pending. */ 549 546 ap_cancel_message(zq->queue, ap_msg); 547 + } 548 + 550 549 out: 551 550 ap_msg->private = NULL; 552 551 if (rc)
+147 -193
drivers/s390/crypto/zcrypt_msgtype6.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0+ 2 2 /* 3 - * Copyright IBM Corp. 2001, 2012 3 + * Copyright IBM Corp. 2001, 2022 4 4 * Author(s): Robert Burroughs 5 5 * Eric Rossman (edrossma@us.ibm.com) 6 6 * ··· 29 29 30 30 #define CEXXC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ 31 31 32 - #define CEIL4(x) ((((x)+3)/4)*4) 32 + #define CEIL4(x) ((((x) + 3) / 4) * 4) 33 33 34 34 struct response_type { 35 35 struct completion work; 36 36 int type; 37 37 }; 38 + 38 39 #define CEXXC_RESPONSE_TYPE_ICA 0 39 40 #define CEXXC_RESPONSE_TYPE_XCRB 1 40 41 #define CEXXC_RESPONSE_TYPE_EP11 2 ··· 44 43 MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \ 45 44 "Copyright IBM Corp. 2001, 2012"); 46 45 MODULE_LICENSE("GPL"); 47 - 48 - /* 49 - * CPRB 50 - * Note that all shorts, ints and longs are little-endian. 51 - * All pointer fields are 32-bits long, and mean nothing 52 - * 53 - * A request CPRB is followed by a request_parameter_block. 54 - * 55 - * The request (or reply) parameter block is organized thus: 56 - * function code 57 - * VUD block 58 - * key block 59 - */ 60 - struct CPRB { 61 - unsigned short cprb_len; /* CPRB length */ 62 - unsigned char cprb_ver_id; /* CPRB version id. */ 63 - unsigned char pad_000; /* Alignment pad byte. */ 64 - unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */ 65 - unsigned char srpi_verb; /* SRPI verb type */ 66 - unsigned char flags; /* flags */ 67 - unsigned char func_id[2]; /* function id */ 68 - unsigned char checkpoint_flag; /* */ 69 - unsigned char resv2; /* reserved */ 70 - unsigned short req_parml; /* request parameter buffer */ 71 - /* length 16-bit little endian */ 72 - unsigned char req_parmp[4]; /* request parameter buffer * 73 - * pointer (means nothing: the * 74 - * parameter buffer follows * 75 - * the CPRB). 
*/ 76 - unsigned char req_datal[4]; /* request data buffer */ 77 - /* length ULELONG */ 78 - unsigned char req_datap[4]; /* request data buffer */ 79 - /* pointer */ 80 - unsigned short rpl_parml; /* reply parameter buffer */ 81 - /* length 16-bit little endian */ 82 - unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */ 83 - unsigned char rpl_parmp[4]; /* reply parameter buffer * 84 - * pointer (means nothing: the * 85 - * parameter buffer follows * 86 - * the CPRB). */ 87 - unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */ 88 - unsigned char rpl_datap[4]; /* reply data buffer */ 89 - /* pointer */ 90 - unsigned short ccp_rscode; /* server reason code ULESHORT */ 91 - unsigned short ccp_rtcode; /* server return code ULESHORT */ 92 - unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/ 93 - unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */ 94 - unsigned char repd_datal[4]; /* replied data length ULELONG */ 95 - unsigned char req_pc[2]; /* PC identifier */ 96 - unsigned char res_origin[8]; /* resource origin */ 97 - unsigned char mac_value[8]; /* Mac Value */ 98 - unsigned char logon_id[8]; /* Logon Identifier */ 99 - unsigned char usage_domain[2]; /* cdx */ 100 - unsigned char resv3[18]; /* reserved for requestor */ 101 - unsigned short svr_namel; /* server name length ULESHORT */ 102 - unsigned char svr_name[8]; /* server name */ 103 - } __packed; 104 46 105 47 struct function_and_rules_block { 106 48 unsigned char function_code[2]; ··· 179 235 } 180 236 } 181 237 182 - 183 238 /* 184 239 * Convert a ICAMEX message to a type6 MEX message. 185 240 * ··· 188 245 * 189 246 * Returns 0 on success or negative errno value. 
190 247 */ 191 - static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq, 248 + static int icamex_msg_to_type6mex_msgx(struct zcrypt_queue *zq, 192 249 struct ap_message *ap_msg, 193 250 struct ica_rsa_modexpo *mex) 194 251 { ··· 226 283 return -EFAULT; 227 284 228 285 /* Set up key which is located after the variable length text. */ 229 - size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength); 286 + size = zcrypt_type6_mex_key_en(mex, msg->text + mex->inputdatalength); 230 287 if (size < 0) 231 288 return size; 232 289 size += sizeof(*msg) + mex->inputdatalength; 233 290 234 291 /* message header, cprbx and f&r */ 235 292 msg->hdr = static_type6_hdrX; 236 - msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); 237 - msg->hdr.FromCardLen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 293 + msg->hdr.tocardlen1 = size - sizeof(msg->hdr); 294 + msg->hdr.fromcardlen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 238 295 239 296 msg->cprbx = static_cprbx; 240 297 msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); 241 - msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1; 298 + msg->cprbx.rpl_msgbl = msg->hdr.fromcardlen1; 242 299 243 300 msg->fr = static_pke_fnr; 244 301 ··· 257 314 * 258 315 * Returns 0 on success or negative errno value. 
259 316 */ 260 - static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq, 317 + static int icacrt_msg_to_type6crt_msgx(struct zcrypt_queue *zq, 261 318 struct ap_message *ap_msg, 262 319 struct ica_rsa_modexpo_crt *crt) 263 320 { ··· 303 360 304 361 /* message header, cprbx and f&r */ 305 362 msg->hdr = static_type6_hdrX; 306 - msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); 307 - msg->hdr.FromCardLen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 363 + msg->hdr.tocardlen1 = size - sizeof(msg->hdr); 364 + msg->hdr.fromcardlen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 308 365 309 366 msg->cprbx = static_cprbx; 310 367 msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); ··· 331 388 struct type86_fmt2_ext fmt2; 332 389 } __packed; 333 390 334 - static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg, 335 - struct ica_xcRB *xcRB, 391 + static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg, 392 + struct ica_xcRB *xcrb, 336 393 unsigned int *fcode, 337 394 unsigned short **dom) 338 395 { ··· 345 402 struct CPRBX cprbx; 346 403 } __packed * msg = ap_msg->msg; 347 404 348 - int rcblen = CEIL4(xcRB->request_control_blk_length); 405 + int rcblen = CEIL4(xcrb->request_control_blk_length); 349 406 int req_sumlen, resp_sumlen; 350 407 char *req_data = ap_msg->msg + sizeof(struct type6_hdr) + rcblen; 351 408 char *function_code; 352 409 353 - if (CEIL4(xcRB->request_control_blk_length) < 354 - xcRB->request_control_blk_length) 410 + if (CEIL4(xcrb->request_control_blk_length) < 411 + xcrb->request_control_blk_length) 355 412 return -EINVAL; /* overflow after alignment*/ 356 413 357 414 /* length checks */ 358 415 ap_msg->len = sizeof(struct type6_hdr) + 359 - CEIL4(xcRB->request_control_blk_length) + 360 - xcRB->request_data_length; 416 + CEIL4(xcrb->request_control_blk_length) + 417 + xcrb->request_data_length; 361 418 if (ap_msg->len > ap_msg->bufsize) 362 419 return -EINVAL; 363 420 ··· 365 422 * Overflow 
check 366 423 * sum must be greater (or equal) than the largest operand 367 424 */ 368 - req_sumlen = CEIL4(xcRB->request_control_blk_length) + 369 - xcRB->request_data_length; 370 - if ((CEIL4(xcRB->request_control_blk_length) <= 371 - xcRB->request_data_length) ? 372 - (req_sumlen < xcRB->request_data_length) : 373 - (req_sumlen < CEIL4(xcRB->request_control_blk_length))) { 425 + req_sumlen = CEIL4(xcrb->request_control_blk_length) + 426 + xcrb->request_data_length; 427 + if ((CEIL4(xcrb->request_control_blk_length) <= 428 + xcrb->request_data_length) ? 429 + req_sumlen < xcrb->request_data_length : 430 + req_sumlen < CEIL4(xcrb->request_control_blk_length)) { 374 431 return -EINVAL; 375 432 } 376 433 377 - if (CEIL4(xcRB->reply_control_blk_length) < 378 - xcRB->reply_control_blk_length) 434 + if (CEIL4(xcrb->reply_control_blk_length) < 435 + xcrb->reply_control_blk_length) 379 436 return -EINVAL; /* overflow after alignment*/ 380 437 381 438 /* 382 439 * Overflow check 383 440 * sum must be greater (or equal) than the largest operand 384 441 */ 385 - resp_sumlen = CEIL4(xcRB->reply_control_blk_length) + 386 - xcRB->reply_data_length; 387 - if ((CEIL4(xcRB->reply_control_blk_length) <= xcRB->reply_data_length) ? 388 - (resp_sumlen < xcRB->reply_data_length) : 389 - (resp_sumlen < CEIL4(xcRB->reply_control_blk_length))) { 442 + resp_sumlen = CEIL4(xcrb->reply_control_blk_length) + 443 + xcrb->reply_data_length; 444 + if ((CEIL4(xcrb->reply_control_blk_length) <= 445 + xcrb->reply_data_length) ? 
446 + resp_sumlen < xcrb->reply_data_length : 447 + resp_sumlen < CEIL4(xcrb->reply_control_blk_length)) { 390 448 return -EINVAL; 391 449 } 392 450 393 451 /* prepare type6 header */ 394 452 msg->hdr = static_type6_hdrX; 395 - memcpy(msg->hdr.agent_id, &(xcRB->agent_ID), sizeof(xcRB->agent_ID)); 396 - msg->hdr.ToCardLen1 = xcRB->request_control_blk_length; 397 - if (xcRB->request_data_length) { 453 + memcpy(msg->hdr.agent_id, &xcrb->agent_ID, sizeof(xcrb->agent_ID)); 454 + msg->hdr.tocardlen1 = xcrb->request_control_blk_length; 455 + if (xcrb->request_data_length) { 398 456 msg->hdr.offset2 = msg->hdr.offset1 + rcblen; 399 - msg->hdr.ToCardLen2 = xcRB->request_data_length; 457 + msg->hdr.tocardlen2 = xcrb->request_data_length; 400 458 } 401 - msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length; 402 - msg->hdr.FromCardLen2 = xcRB->reply_data_length; 459 + msg->hdr.fromcardlen1 = xcrb->reply_control_blk_length; 460 + msg->hdr.fromcardlen2 = xcrb->reply_data_length; 403 461 404 462 /* prepare CPRB */ 405 - if (z_copy_from_user(userspace, &(msg->cprbx), xcRB->request_control_blk_addr, 406 - xcRB->request_control_blk_length)) 463 + if (z_copy_from_user(userspace, &msg->cprbx, xcrb->request_control_blk_addr, 464 + xcrb->request_control_blk_length)) 407 465 return -EFAULT; 408 466 if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > 409 - xcRB->request_control_blk_length) 467 + xcrb->request_control_blk_length) 410 468 return -EINVAL; 411 469 function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len; 412 470 memcpy(msg->hdr.function_code, function_code, ··· 417 473 *dom = (unsigned short *)&msg->cprbx.domain; 418 474 419 475 /* check subfunction, US and AU need special flag with NQAP */ 420 - if (memcmp(function_code, "US", 2) == 0 421 - || memcmp(function_code, "AU", 2) == 0) 476 + if (memcmp(function_code, "US", 2) == 0 || 477 + memcmp(function_code, "AU", 2) == 0) 422 478 ap_msg->flags |= AP_MSG_FLAG_SPECIAL; 423 479 424 480 #ifdef 
CONFIG_ZCRYPT_DEBUG ··· 444 500 } 445 501 446 502 /* copy data block */ 447 - if (xcRB->request_data_length && 448 - z_copy_from_user(userspace, req_data, xcRB->request_data_address, 449 - xcRB->request_data_length)) 503 + if (xcrb->request_data_length && 504 + z_copy_from_user(userspace, req_data, xcrb->request_data_address, 505 + xcrb->request_data_length)) 450 506 return -EFAULT; 451 507 452 508 return 0; 453 509 } 454 510 455 511 static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap_msg, 456 - struct ep11_urb *xcRB, 512 + struct ep11_urb *xcrb, 457 513 unsigned int *fcode, 458 514 unsigned int *domain) 459 515 { ··· 483 539 unsigned int dom_val; /* domain id */ 484 540 } __packed * payload_hdr = NULL; 485 541 486 - if (CEIL4(xcRB->req_len) < xcRB->req_len) 542 + if (CEIL4(xcrb->req_len) < xcrb->req_len) 487 543 return -EINVAL; /* overflow after alignment*/ 488 544 489 545 /* length checks */ 490 - ap_msg->len = sizeof(struct type6_hdr) + CEIL4(xcRB->req_len); 546 + ap_msg->len = sizeof(struct type6_hdr) + CEIL4(xcrb->req_len); 491 547 if (ap_msg->len > ap_msg->bufsize) 492 548 return -EINVAL; 493 549 494 - if (CEIL4(xcRB->resp_len) < xcRB->resp_len) 550 + if (CEIL4(xcrb->resp_len) < xcrb->resp_len) 495 551 return -EINVAL; /* overflow after alignment*/ 496 552 497 553 /* prepare type6 header */ 498 554 msg->hdr = static_type6_ep11_hdr; 499 - msg->hdr.ToCardLen1 = xcRB->req_len; 500 - msg->hdr.FromCardLen1 = xcRB->resp_len; 555 + msg->hdr.tocardlen1 = xcrb->req_len; 556 + msg->hdr.fromcardlen1 = xcrb->resp_len; 501 557 502 558 /* Import CPRB data from the ioctl input parameter */ 503 - if (z_copy_from_user(userspace, &(msg->cprbx.cprb_len), 504 - (char __force __user *)xcRB->req, xcRB->req_len)) { 559 + if (z_copy_from_user(userspace, &msg->cprbx.cprb_len, 560 + (char __force __user *)xcrb->req, xcrb->req_len)) { 505 561 return -EFAULT; 506 562 } 507 563 ··· 519 575 } else { 520 576 lfmt = 1; /* length format #1 */ 521 577 } 522 - 
payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); 578 + payload_hdr = (struct pld_hdr *)((&msg->pld_lenfmt) + lfmt); 523 579 *fcode = payload_hdr->func_val & 0xFFFF; 524 580 525 581 /* enable special processing based on the cprbs flags special bit */ ··· 568 624 } __packed; 569 625 570 626 static int convert_type86_ica(struct zcrypt_queue *zq, 571 - struct ap_message *reply, 572 - char __user *outputdata, 573 - unsigned int outputdatalength) 627 + struct ap_message *reply, 628 + char __user *outputdata, 629 + unsigned int outputdatalength) 574 630 { 575 631 static unsigned char static_pad[] = { 576 632 0x00, 0x02, ··· 623 679 ZCRYPT_DBF_WARN("%s dev=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n", 624 680 __func__, AP_QID_CARD(zq->queue->qid), 625 681 AP_QID_QUEUE(zq->queue->qid), 626 - (int) service_rc, (int) service_rs); 682 + (int)service_rc, (int)service_rs); 627 683 return -EINVAL; 628 684 } 629 685 zq->online = 0; 630 686 pr_err("Crypto dev=%02x.%04x rc/rs=%d/%d online=0 rc=EAGAIN\n", 631 687 AP_QID_CARD(zq->queue->qid), 632 688 AP_QID_QUEUE(zq->queue->qid), 633 - (int) service_rc, (int) service_rs); 689 + (int)service_rc, (int)service_rs); 634 690 ZCRYPT_DBF_ERR("%s dev=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n", 635 691 __func__, AP_QID_CARD(zq->queue->qid), 636 692 AP_QID_QUEUE(zq->queue->qid), 637 - (int) service_rc, (int) service_rs); 693 + (int)service_rc, (int)service_rs); 638 694 ap_send_online_uevent(&zq->queue->ap_dev, zq->online); 639 695 return -EAGAIN; 640 696 } ··· 673 729 * 674 730 * @zq: crypto device pointer 675 731 * @reply: reply AP message. 676 - * @xcRB: pointer to XCRB 732 + * @xcrb: pointer to XCRB 677 733 * 678 734 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. 
679 735 */ 680 736 static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq, 681 737 struct ap_message *reply, 682 - struct ica_xcRB *xcRB) 738 + struct ica_xcRB *xcrb) 683 739 { 684 740 struct type86_fmt2_msg *msg = reply->msg; 685 741 char *data = reply->msg; 686 742 687 743 /* Copy CPRB to user */ 688 - if (xcRB->reply_control_blk_length < msg->fmt2.count1) { 744 + if (xcrb->reply_control_blk_length < msg->fmt2.count1) { 689 745 ZCRYPT_DBF_DBG("%s reply_control_blk_length %u < required %u => EMSGSIZE\n", 690 - __func__, xcRB->reply_control_blk_length, 746 + __func__, xcrb->reply_control_blk_length, 691 747 msg->fmt2.count1); 692 748 return -EMSGSIZE; 693 749 } 694 - if (z_copy_to_user(userspace, xcRB->reply_control_blk_addr, 750 + if (z_copy_to_user(userspace, xcrb->reply_control_blk_addr, 695 751 data + msg->fmt2.offset1, msg->fmt2.count1)) 696 752 return -EFAULT; 697 - xcRB->reply_control_blk_length = msg->fmt2.count1; 753 + xcrb->reply_control_blk_length = msg->fmt2.count1; 698 754 699 755 /* Copy data buffer to user */ 700 756 if (msg->fmt2.count2) { 701 - if (xcRB->reply_data_length < msg->fmt2.count2) { 757 + if (xcrb->reply_data_length < msg->fmt2.count2) { 702 758 ZCRYPT_DBF_DBG("%s reply_data_length %u < required %u => EMSGSIZE\n", 703 - __func__, xcRB->reply_data_length, 759 + __func__, xcrb->reply_data_length, 704 760 msg->fmt2.count2); 705 761 return -EMSGSIZE; 706 762 } 707 - if (z_copy_to_user(userspace, xcRB->reply_data_addr, 763 + if (z_copy_to_user(userspace, xcrb->reply_data_addr, 708 764 data + msg->fmt2.offset2, msg->fmt2.count2)) 709 765 return -EFAULT; 710 766 } 711 - xcRB->reply_data_length = msg->fmt2.count2; 767 + xcrb->reply_data_length = msg->fmt2.count2; 712 768 713 769 return 0; 714 770 } ··· 718 774 * 719 775 * @zq: crypto device pointer 720 776 * @reply: reply AP message. 
721 - * @xcRB: pointer to EP11 user request block 777 + * @xcrb: pointer to EP11 user request block 722 778 * 723 779 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. 724 780 */ 725 781 static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq, 726 782 struct ap_message *reply, 727 - struct ep11_urb *xcRB) 783 + struct ep11_urb *xcrb) 728 784 { 729 785 struct type86_fmt2_msg *msg = reply->msg; 730 786 char *data = reply->msg; 731 787 732 - if (xcRB->resp_len < msg->fmt2.count1) { 788 + if (xcrb->resp_len < msg->fmt2.count1) { 733 789 ZCRYPT_DBF_DBG("%s resp_len %u < required %u => EMSGSIZE\n", 734 - __func__, (unsigned int)xcRB->resp_len, 790 + __func__, (unsigned int)xcrb->resp_len, 735 791 msg->fmt2.count1); 736 792 return -EMSGSIZE; 737 793 } 738 794 739 795 /* Copy response CPRB to user */ 740 - if (z_copy_to_user(userspace, (char __force __user *)xcRB->resp, 796 + if (z_copy_to_user(userspace, (char __force __user *)xcrb->resp, 741 797 data + msg->fmt2.offset1, msg->fmt2.count1)) 742 798 return -EFAULT; 743 - xcRB->resp_len = msg->fmt2.count1; 799 + xcrb->resp_len = msg->fmt2.count1; 744 800 return 0; 745 801 } 746 802 747 803 static int convert_type86_rng(struct zcrypt_queue *zq, 748 - struct ap_message *reply, 749 - char *buffer) 804 + struct ap_message *reply, 805 + char *buffer) 750 806 { 751 807 struct { 752 808 struct type86_hdr hdr; ··· 762 818 } 763 819 764 820 static int convert_response_ica(struct zcrypt_queue *zq, 765 - struct ap_message *reply, 766 - char __user *outputdata, 767 - unsigned int outputdatalength) 821 + struct ap_message *reply, 822 + char __user *outputdata, 823 + unsigned int outputdatalength) 768 824 { 769 825 struct type86x_reply *msg = reply->msg; 770 826 ··· 774 830 return convert_error(zq, reply); 775 831 case TYPE86_RSP_CODE: 776 832 if (msg->cprbx.ccp_rtcode && 777 - (msg->cprbx.ccp_rscode == 0x14f) && 778 - (outputdatalength > 256)) { 833 + msg->cprbx.ccp_rscode == 0x14f && 834 + 
outputdatalength > 256) { 779 835 if (zq->zcard->max_exp_bit_length <= 17) { 780 836 zq->zcard->max_exp_bit_length = 17; 781 837 return -EAGAIN; 782 - } else 838 + } else { 783 839 return -EINVAL; 840 + } 784 841 } 785 842 if (msg->hdr.reply_code) 786 843 return convert_error(zq, reply); ··· 795 850 pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 796 851 AP_QID_CARD(zq->queue->qid), 797 852 AP_QID_QUEUE(zq->queue->qid), 798 - (int) msg->hdr.type); 853 + (int)msg->hdr.type); 799 854 ZCRYPT_DBF_ERR( 800 855 "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 801 856 __func__, AP_QID_CARD(zq->queue->qid), 802 - AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type); 857 + AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type); 803 858 ap_send_online_uevent(&zq->queue->ap_dev, zq->online); 804 859 return -EAGAIN; 805 860 } ··· 807 862 808 863 static int convert_response_xcrb(bool userspace, struct zcrypt_queue *zq, 809 864 struct ap_message *reply, 810 - struct ica_xcRB *xcRB) 865 + struct ica_xcRB *xcrb) 811 866 { 812 867 struct type86x_reply *msg = reply->msg; 813 868 814 869 switch (msg->hdr.type) { 815 870 case TYPE82_RSP_CODE: 816 871 case TYPE88_RSP_CODE: 817 - xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ 872 + xcrb->status = 0x0008044DL; /* HDD_InvalidParm */ 818 873 return convert_error(zq, reply); 819 874 case TYPE86_RSP_CODE: 820 875 if (msg->hdr.reply_code) { 821 - memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32)); 876 + memcpy(&xcrb->status, msg->fmt2.apfs, sizeof(u32)); 822 877 return convert_error(zq, reply); 823 878 } 824 879 if (msg->cprbx.cprb_ver_id == 0x02) 825 - return convert_type86_xcrb(userspace, zq, reply, xcRB); 880 + return convert_type86_xcrb(userspace, zq, reply, xcrb); 826 881 fallthrough; /* wrong cprb version is an unknown response */ 827 882 default: /* Unknown response type, this should NEVER EVER happen */ 828 - xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ 883 + xcrb->status 
= 0x0008044DL; /* HDD_InvalidParm */ 829 884 zq->online = 0; 830 885 pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 831 886 AP_QID_CARD(zq->queue->qid), 832 887 AP_QID_QUEUE(zq->queue->qid), 833 - (int) msg->hdr.type); 888 + (int)msg->hdr.type); 834 889 ZCRYPT_DBF_ERR( 835 890 "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 836 891 __func__, AP_QID_CARD(zq->queue->qid), 837 - AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type); 892 + AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type); 838 893 ap_send_online_uevent(&zq->queue->ap_dev, zq->online); 839 894 return -EAGAIN; 840 895 } 841 896 } 842 897 843 898 static int convert_response_ep11_xcrb(bool userspace, struct zcrypt_queue *zq, 844 - struct ap_message *reply, struct ep11_urb *xcRB) 899 + struct ap_message *reply, struct ep11_urb *xcrb) 845 900 { 846 901 struct type86_ep11_reply *msg = reply->msg; 847 902 ··· 853 908 if (msg->hdr.reply_code) 854 909 return convert_error(zq, reply); 855 910 if (msg->cprbx.cprb_ver_id == 0x04) 856 - return convert_type86_ep11_xcrb(userspace, zq, reply, xcRB); 911 + return convert_type86_ep11_xcrb(userspace, zq, reply, xcrb); 857 912 fallthrough; /* wrong cprb version is an unknown resp */ 858 913 default: /* Unknown response type, this should NEVER EVER happen */ 859 914 zq->online = 0; 860 915 pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 861 916 AP_QID_CARD(zq->queue->qid), 862 917 AP_QID_QUEUE(zq->queue->qid), 863 - (int) msg->hdr.type); 918 + (int)msg->hdr.type); 864 919 ZCRYPT_DBF_ERR( 865 920 "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 866 921 __func__, AP_QID_CARD(zq->queue->qid), 867 - AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type); 922 + AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type); 868 923 ap_send_online_uevent(&zq->queue->ap_dev, zq->online); 869 924 return -EAGAIN; 870 925 } 871 926 } 872 927 873 928 static int 
convert_response_rng(struct zcrypt_queue *zq, 874 - struct ap_message *reply, 875 - char *data) 929 + struct ap_message *reply, 930 + char *data) 876 931 { 877 932 struct type86x_reply *msg = reply->msg; 878 933 ··· 891 946 pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 892 947 AP_QID_CARD(zq->queue->qid), 893 948 AP_QID_QUEUE(zq->queue->qid), 894 - (int) msg->hdr.type); 949 + (int)msg->hdr.type); 895 950 ZCRYPT_DBF_ERR( 896 951 "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 897 952 __func__, AP_QID_CARD(zq->queue->qid), 898 - AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type); 953 + AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type); 899 954 ap_send_online_uevent(&zq->queue->ap_dev, zq->online); 900 955 return -EAGAIN; 901 956 } ··· 910 965 * @reply: pointer to the AP reply message 911 966 */ 912 967 static void zcrypt_msgtype6_receive(struct ap_queue *aq, 913 - struct ap_message *msg, 914 - struct ap_message *reply) 968 + struct ap_message *msg, 969 + struct ap_message *reply) 915 970 { 916 971 static struct error_hdr error_reply = { 917 972 .type = TYPE82_RSP_CODE, 918 973 .reply_code = REP82_ERROR_MACHINE_FAILURE, 919 974 }; 920 975 struct response_type *resp_type = 921 - (struct response_type *) msg->private; 976 + (struct response_type *)msg->private; 922 977 struct type86x_reply *t86r; 923 978 int len; 924 979 ··· 927 982 goto out; /* ap_msg->rc indicates the error */ 928 983 t86r = reply->msg; 929 984 if (t86r->hdr.type == TYPE86_RSP_CODE && 930 - t86r->cprbx.cprb_ver_id == 0x02) { 985 + t86r->cprbx.cprb_ver_id == 0x02) { 931 986 switch (resp_type->type) { 932 987 case CEXXC_RESPONSE_TYPE_ICA: 933 988 len = sizeof(struct type86x_reply) + t86r->length - 2; ··· 950 1005 default: 951 1006 memcpy(msg->msg, &error_reply, sizeof(error_reply)); 952 1007 } 953 - } else 1008 + } else { 954 1009 memcpy(msg->msg, reply->msg, sizeof(error_reply)); 1010 + } 955 1011 out: 956 - complete(&(resp_type->work)); 
1012 + complete(&resp_type->work); 957 1013 } 958 1014 959 1015 /* ··· 1001 1055 memcpy(msg->msg, reply->msg, sizeof(error_reply)); 1002 1056 } 1003 1057 out: 1004 - complete(&(resp_type->work)); 1058 + complete(&resp_type->work); 1005 1059 } 1006 1060 1007 1061 static atomic_t zcrypt_step = ATOMIC_INIT(0); ··· 1022 1076 }; 1023 1077 int rc; 1024 1078 1025 - ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL); 1079 + ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); 1026 1080 if (!ap_msg->msg) 1027 1081 return -ENOMEM; 1028 1082 ap_msg->bufsize = PAGE_SIZE; 1029 1083 ap_msg->receive = zcrypt_msgtype6_receive; 1030 - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 1084 + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + 1031 1085 atomic_inc_return(&zcrypt_step); 1032 1086 ap_msg->private = &resp_type; 1033 - rc = ICAMEX_msg_to_type6MEX_msgX(zq, ap_msg, mex); 1087 + rc = icamex_msg_to_type6mex_msgx(zq, ap_msg, mex); 1034 1088 if (rc) 1035 1089 goto out_free; 1036 1090 init_completion(&resp_type.work); ··· 1044 1098 rc = convert_response_ica(zq, ap_msg, 1045 1099 mex->outputdata, 1046 1100 mex->outputdatalength); 1047 - } else 1101 + } else { 1048 1102 /* Signal pending. 
*/ 1049 1103 ap_cancel_message(zq->queue, ap_msg); 1104 + } 1105 + 1050 1106 out_free: 1051 - free_page((unsigned long) ap_msg->msg); 1107 + free_page((unsigned long)ap_msg->msg); 1052 1108 ap_msg->private = NULL; 1053 1109 ap_msg->msg = NULL; 1054 1110 return rc; ··· 1072 1124 }; 1073 1125 int rc; 1074 1126 1075 - ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL); 1127 + ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); 1076 1128 if (!ap_msg->msg) 1077 1129 return -ENOMEM; 1078 1130 ap_msg->bufsize = PAGE_SIZE; 1079 1131 ap_msg->receive = zcrypt_msgtype6_receive; 1080 - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 1132 + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + 1081 1133 atomic_inc_return(&zcrypt_step); 1082 1134 ap_msg->private = &resp_type; 1083 - rc = ICACRT_msg_to_type6CRT_msgX(zq, ap_msg, crt); 1135 + rc = icacrt_msg_to_type6crt_msgx(zq, ap_msg, crt); 1084 1136 if (rc) 1085 1137 goto out_free; 1086 1138 init_completion(&resp_type.work); ··· 1098 1150 /* Signal pending. */ 1099 1151 ap_cancel_message(zq->queue, ap_msg); 1100 1152 } 1153 + 1101 1154 out_free: 1102 - free_page((unsigned long) ap_msg->msg); 1155 + free_page((unsigned long)ap_msg->msg); 1103 1156 ap_msg->private = NULL; 1104 1157 ap_msg->msg = NULL; 1105 1158 return rc; ··· 1115 1166 * by the caller with ap_init_message(). Also the caller has to 1116 1167 * make sure ap_release_message() is always called even on failure. 
1117 1168 */ 1118 - int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcRB, 1169 + int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcrb, 1119 1170 struct ap_message *ap_msg, 1120 1171 unsigned int *func_code, unsigned short **dom) 1121 1172 { ··· 1128 1179 if (!ap_msg->msg) 1129 1180 return -ENOMEM; 1130 1181 ap_msg->receive = zcrypt_msgtype6_receive; 1131 - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 1182 + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + 1132 1183 atomic_inc_return(&zcrypt_step); 1133 1184 ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); 1134 1185 if (!ap_msg->private) 1135 1186 return -ENOMEM; 1136 - return XCRB_msg_to_type6CPRB_msgX(userspace, ap_msg, xcRB, func_code, dom); 1187 + return xcrb_msg_to_type6cprb_msgx(userspace, ap_msg, xcrb, func_code, dom); 1137 1188 } 1138 1189 1139 1190 /* ··· 1141 1192 * device to handle a send_cprb request. 1142 1193 * @zq: pointer to zcrypt_queue structure that identifies the 1143 1194 * CEXxC device to the request distributor 1144 - * @xcRB: pointer to the send_cprb request buffer 1195 + * @xcrb: pointer to the send_cprb request buffer 1145 1196 */ 1146 1197 static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, 1147 - struct ica_xcRB *xcRB, 1198 + struct ica_xcRB *xcrb, 1148 1199 struct ap_message *ap_msg) 1149 1200 { 1150 1201 int rc; ··· 1159 1210 * Set the queue's reply buffer length minus 128 byte padding 1160 1211 * as reply limit for the card firmware. 
1161 1212 */ 1162 - msg->hdr.FromCardLen1 = min_t(unsigned int, msg->hdr.FromCardLen1, 1213 + msg->hdr.fromcardlen1 = min_t(unsigned int, msg->hdr.fromcardlen1, 1163 1214 zq->reply.bufsize - 128); 1164 - if (msg->hdr.FromCardLen2) 1165 - msg->hdr.FromCardLen2 = 1166 - zq->reply.bufsize - msg->hdr.FromCardLen1 - 128; 1215 + if (msg->hdr.fromcardlen2) 1216 + msg->hdr.fromcardlen2 = 1217 + zq->reply.bufsize - msg->hdr.fromcardlen1 - 128; 1167 1218 1168 1219 init_completion(&rtype->work); 1169 1220 rc = ap_queue_message(zq->queue, ap_msg); ··· 1173 1224 if (rc == 0) { 1174 1225 rc = ap_msg->rc; 1175 1226 if (rc == 0) 1176 - rc = convert_response_xcrb(userspace, zq, ap_msg, xcRB); 1177 - } else 1227 + rc = convert_response_xcrb(userspace, zq, ap_msg, xcrb); 1228 + } else { 1178 1229 /* Signal pending. */ 1179 1230 ap_cancel_message(zq->queue, ap_msg); 1231 + } 1232 + 1180 1233 out: 1181 1234 if (rc) 1182 1235 ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n", ··· 1209 1258 if (!ap_msg->msg) 1210 1259 return -ENOMEM; 1211 1260 ap_msg->receive = zcrypt_msgtype6_receive_ep11; 1212 - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 1261 + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + 1213 1262 atomic_inc_return(&zcrypt_step); 1214 1263 ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); 1215 1264 if (!ap_msg->private) ··· 1223 1272 * device to handle a send_ep11_cprb request. 
1224 1273 * @zq: pointer to zcrypt_queue structure that identifies the 1225 1274 * CEX4P device to the request distributor 1226 - * @xcRB: pointer to the ep11 user request block 1275 + * @xcrb: pointer to the ep11 user request block 1227 1276 */ 1228 1277 static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *zq, 1229 1278 struct ep11_urb *xcrb, ··· 1273 1322 } else { 1274 1323 lfmt = 1; /* length format #1 */ 1275 1324 } 1276 - payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); 1325 + payload_hdr = (struct pld_hdr *)((&msg->pld_lenfmt) + lfmt); 1277 1326 payload_hdr->dom_val = (unsigned int) 1278 1327 AP_QID_QUEUE(zq->queue->qid); 1279 1328 } ··· 1282 1331 * Set the queue's reply buffer length minus the two prepend headers 1283 1332 * as reply limit for the card firmware. 1284 1333 */ 1285 - msg->hdr.FromCardLen1 = zq->reply.bufsize - 1334 + msg->hdr.fromcardlen1 = zq->reply.bufsize - 1286 1335 sizeof(struct type86_hdr) - sizeof(struct type86_fmt2_ext); 1287 1336 1288 1337 init_completion(&rtype->work); ··· 1294 1343 rc = ap_msg->rc; 1295 1344 if (rc == 0) 1296 1345 rc = convert_response_ep11_xcrb(userspace, zq, ap_msg, xcrb); 1297 - } else 1346 + } else { 1298 1347 /* Signal pending. 
*/ 1299 1348 ap_cancel_message(zq->queue, ap_msg); 1349 + } 1350 + 1300 1351 out: 1301 1352 if (rc) 1302 1353 ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n", ··· 1319 1366 if (!ap_msg->msg) 1320 1367 return -ENOMEM; 1321 1368 ap_msg->receive = zcrypt_msgtype6_receive; 1322 - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 1369 + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + 1323 1370 atomic_inc_return(&zcrypt_step); 1324 1371 ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); 1325 1372 if (!ap_msg->private) 1326 1373 return -ENOMEM; 1327 1374 1328 - rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain); 1375 + rng_type6cprb_msgx(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain); 1329 1376 1330 1377 *func_code = HWRNG; 1331 1378 return 0; ··· 1364 1411 rc = ap_msg->rc; 1365 1412 if (rc == 0) 1366 1413 rc = convert_response_rng(zq, ap_msg, buffer); 1367 - } else 1414 + } else { 1368 1415 /* Signal pending. */ 1369 1416 ap_cancel_message(zq->queue, ap_msg); 1417 + } 1370 1418 out: 1371 1419 return rc; 1372 1420 }
+13 -13
drivers/s390/crypto/zcrypt_msgtype6.h
··· 45 45 unsigned char reserved5[2]; /* 0x0000 */ 46 46 unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */ 47 47 unsigned char reserved6[2]; /* 0x0000 */ 48 - unsigned int ToCardLen1; /* (request CPRB len + 3) & -4 */ 49 - unsigned int ToCardLen2; /* db len 0x00000000 for PKD */ 50 - unsigned int ToCardLen3; /* 0x00000000 */ 51 - unsigned int ToCardLen4; /* 0x00000000 */ 52 - unsigned int FromCardLen1; /* response buffer length */ 53 - unsigned int FromCardLen2; /* db len 0x00000000 for PKD */ 54 - unsigned int FromCardLen3; /* 0x00000000 */ 55 - unsigned int FromCardLen4; /* 0x00000000 */ 48 + unsigned int tocardlen1; /* (request CPRB len + 3) & -4 */ 49 + unsigned int tocardlen2; /* db len 0x00000000 for PKD */ 50 + unsigned int tocardlen3; /* 0x00000000 */ 51 + unsigned int tocardlen4; /* 0x00000000 */ 52 + unsigned int fromcardlen1; /* response buffer length */ 53 + unsigned int fromcardlen2; /* db len 0x00000000 for PKD */ 54 + unsigned int fromcardlen3; /* 0x00000000 */ 55 + unsigned int fromcardlen4; /* 0x00000000 */ 56 56 } __packed; 57 57 58 58 /** ··· 116 116 * @ap_dev: AP device pointer 117 117 * @ap_msg: pointer to AP message 118 118 */ 119 - static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg, 119 + static inline void rng_type6cprb_msgx(struct ap_message *ap_msg, 120 120 unsigned int random_number_length, 121 121 unsigned int *domain) 122 122 { ··· 134 134 .offset1 = 0x00000058, 135 135 .agent_id = {'C', 'A'}, 136 136 .function_code = {'R', 'L'}, 137 - .ToCardLen1 = sizeof(*msg) - sizeof(msg->hdr), 138 - .FromCardLen1 = sizeof(*msg) - sizeof(msg->hdr), 137 + .tocardlen1 = sizeof(*msg) - sizeof(msg->hdr), 138 + .fromcardlen1 = sizeof(*msg) - sizeof(msg->hdr), 139 139 }; 140 140 static struct CPRBX local_cprbx = { 141 141 .cprb_len = 0x00dc, ··· 147 147 }; 148 148 149 149 msg->hdr = static_type6_hdrX; 150 - msg->hdr.FromCardLen2 = random_number_length, 150 + msg->hdr.fromcardlen2 = random_number_length; 151 151 msg->cprbx = 
local_cprbx; 152 - msg->cprbx.rpl_datal = random_number_length, 152 + msg->cprbx.rpl_datal = random_number_length; 153 153 memcpy(msg->function_code, msg->hdr.function_code, 0x02); 154 154 msg->rule_length = 0x0a; 155 155 memcpy(msg->rule, "RANDOM ", 8);
+1 -1
drivers/s390/crypto/zcrypt_queue.c
··· 114 114 { 115 115 struct zcrypt_queue *zq; 116 116 117 - zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL); 117 + zq = kzalloc(sizeof(*zq), GFP_KERNEL); 118 118 if (!zq) 119 119 return NULL; 120 120 zq->reply.msg = kmalloc(reply_buf_size, GFP_KERNEL);
+4 -4
include/linux/entry-common.h
··· 63 63 ARCH_EXIT_TO_USER_MODE_WORK) 64 64 65 65 /** 66 - * arch_check_user_regs - Architecture specific sanity check for user mode regs 66 + * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs 67 67 * @regs: Pointer to currents pt_regs 68 68 * 69 69 * Defaults to an empty implementation. Can be replaced by architecture ··· 73 73 * section. Use __always_inline so the compiler cannot push it out of line 74 74 * and make it instrumentable. 75 75 */ 76 - static __always_inline void arch_check_user_regs(struct pt_regs *regs); 76 + static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs); 77 77 78 - #ifndef arch_check_user_regs 79 - static __always_inline void arch_check_user_regs(struct pt_regs *regs) {} 78 + #ifndef arch_enter_from_user_mode 79 + static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {} 80 80 #endif 81 81 82 82 /**
+1 -1
kernel/entry/common.c
··· 17 17 /* See comment for enter_from_user_mode() in entry-common.h */ 18 18 static __always_inline void __enter_from_user_mode(struct pt_regs *regs) 19 19 { 20 - arch_check_user_regs(regs); 20 + arch_enter_from_user_mode(regs); 21 21 lockdep_hardirqs_off(CALLER_ADDR0); 22 22 23 23 CT_WARN_ON(ct_state() != CONTEXT_USER);
+1 -2
scripts/min-tool-version.sh
··· 24 24 echo 16.0.3 25 25 ;; 26 26 llvm) 27 - # https://lore.kernel.org/r/YMtib5hKVyNknZt3@osiris/ 28 27 if [ "$SRCARCH" = s390 ]; then 29 - echo 13.0.0 28 + echo 14.0.0 30 29 else 31 30 echo 11.0.0 32 31 fi