Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-6.5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Alexander Gordeev:

- Fix virtual vs physical address confusion in vmem_add_range() and
vmem_remove_range() functions

- Include <linux/io.h> instead of <asm/io.h> and <asm-generic/io.h>
throughout s390 code

- Make all PSW related defines also available for assembler files.
Remove PSW_DEFAULT_KEY define from uapi for that

- When adding an undefined symbol the build still succeeds, but
userspace crashes trying to execute VDSO, because the symbol is not
resolved. Add undefined symbols check to prevent that

- Use kvmalloc_array() instead of kzalloc() for allocation of 256k
memory when executing s390 crypto adapter IOCTL

- Add -fPIE flag to prevent decompressor misaligned symbol build error
with clang

- Use .balign instead of .align everywhere. This is a no-op for s390,
but with this there is no mix in using .align and .balign anymore

- Filter out -mno-pic-data-is-text-relative flag when compiling kernel
to prevent VDSO build error

- Rework entering of DAT-on mode on CPU restart to use PSW_KERNEL_BITS
mask directly

- Do not retry administrative requests to some s390 crypto cards, since
the firmware assumes replay attacks

- Remove most of the debug code, which is built in when kernel config
option CONFIG_ZCRYPT_DEBUG is enabled

- Remove CONFIG_ZCRYPT_MULTIDEVNODES kernel config option and switch
off the multiple devices support for the s390 zcrypt device driver

- With the conversion to generic entry machine checks are accounted to
the current context instead of irq time. As a result, the STCKF
instruction at the beginning of the machine check handler and the
lowcore member are no longer required, therefore remove it

- Fix various typos found with codespell

- Minor cleanups to CPU-measurement Counter and Sampling Facilities
code

- Revert patch that removes VMEM_MAX_PHYS macro, since it causes a
regression

* tag 's390-6.5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (25 commits)
Revert "s390/mm: get rid of VMEM_MAX_PHYS macro"
s390/cpum_sf: remove check on CPU being online
s390/cpum_sf: handle casts consistently
s390/cpum_sf: remove unnecessary debug statement
s390/cpum_sf: remove parameter in call to pr_err
s390/cpum_sf: simplify function setup_pmu_cpu
s390/cpum_cf: remove unneeded debug statements
s390/entry: remove mcck clock
s390: fix various typos
s390/zcrypt: remove ZCRYPT_MULTIDEVNODES kernel config option
s390/zcrypt: do not retry administrative requests
s390/zcrypt: cleanup some debug code
s390/entry: rework entering DAT-on mode on CPU restart
s390/mm: fence off VM macros from asm and linker
s390: include linux/io.h instead of asm/io.h
s390/ptrace: make all psw related defines also available for asm
s390/ptrace: remove PSW_DEFAULT_KEY from uapi
s390/vdso: filter out mno-pic-data-is-text-relative cflag
s390: consistently use .balign instead of .align
s390/decompressor: fix misaligned symbol build error
...

+235 -428
+1
arch/s390/Makefile
··· 27 27 KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables 28 28 KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding 29 29 KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector 30 + KBUILD_CFLAGS_DECOMPRESSOR += -fPIE 30 31 KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member) 31 32 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g) 32 33 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
+2 -2
arch/s390/appldata/appldata_base.c
··· 26 26 #include <linux/notifier.h> 27 27 #include <linux/cpu.h> 28 28 #include <linux/workqueue.h> 29 + #include <linux/uaccess.h> 30 + #include <linux/io.h> 29 31 #include <asm/appldata.h> 30 32 #include <asm/vtimer.h> 31 - #include <linux/uaccess.h> 32 - #include <asm/io.h> 33 33 #include <asm/smp.h> 34 34 35 35 #include "appldata.h"
+1 -1
arch/s390/appldata/appldata_mem.c
··· 15 15 #include <linux/pagemap.h> 16 16 #include <linux/swap.h> 17 17 #include <linux/slab.h> 18 - #include <asm/io.h> 18 + #include <linux/io.h> 19 19 20 20 #include "appldata.h" 21 21
+8 -8
arch/s390/boot/head.S
··· 67 67 jz .Lagain1 # skip dataset header 68 68 larl %r13,.L_eof 69 69 clc 0(3,%r4),0(%r13) # if it is EOFx 70 - jz .Lagain1 # skip dateset trailer 70 + jz .Lagain1 # skip data set trailer 71 71 lgr %r5,%r2 72 72 la %r6,COMMAND_LINE-PARMAREA(%r12) 73 73 lgr %r7,%r2 ··· 185 185 larl %r13,.Lcrash 186 186 lpsw 0(%r13) 187 187 188 - .align 8 188 + .balign 8 189 189 .Lwaitpsw: 190 190 .quad 0x0202000180000000,.Lioint 191 191 .Lnewpswmask: 192 192 .quad 0x0000000180000000 193 - .align 8 193 + .balign 8 194 194 .Lorb: .long 0x00000000,0x0080ff00,.Lccws 195 195 .Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 196 - .align 8 196 + .balign 8 197 197 .Lcr6: .quad 0x00000000ff000000 198 - .align 8 198 + .balign 8 199 199 .Lcrash:.long 0x000a0000,0x00000000 200 - .align 8 200 + .balign 8 201 201 .Lccws: .rept 19 202 202 .long 0x02600050,0x00000000 203 203 .endr ··· 207 207 .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold" 208 208 .L_eof: .long 0xc5d6c600 /* C'EOF' */ 209 209 .L_hdr: .long 0xc8c4d900 /* C'HDR' */ 210 - .align 8 210 + .balign 8 211 211 .Lcpuid:.fill 8,1,0 212 212 213 213 # ··· 265 265 brasl %r14,startup_kernel 266 266 SYM_CODE_END(startup_normal) 267 267 268 - .align 8 268 + .balign 8 269 269 6: .long 0x7fffffff,0xffffffff 270 270 .Lext_new_psw: 271 271 .quad 0x0002000180000000,0x1b0 # disabled wait
+3 -3
arch/s390/boot/head_kdump.S
··· 82 82 # 83 83 # Startup of kdump (relocated new kernel) 84 84 # 85 - .align 2 85 + .balign 2 86 86 startup_kdump_relocated: 87 87 basr %r13,0 88 88 0: lpswe .Lrestart_psw-0b(%r13) # Start new kernel... 89 89 SYM_CODE_END(startup_kdump) 90 - .align 8 90 + .balign 8 91 91 .Lrestart_psw: 92 92 .quad 0x0000000080000000,0x0000000000000000 + startup 93 93 #else ··· 95 95 larl %r13,startup_kdump_crash 96 96 lpswe 0(%r13) 97 97 SYM_CODE_END(startup_kdump) 98 - .align 8 98 + .balign 8 99 99 startup_kdump_crash: 100 100 .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash 101 101 #endif /* CONFIG_CRASH_DUMP */
+1 -1
arch/s390/crypto/crc32be-vx.S
··· 48 48 * 49 49 * Note that the constant definitions below are extended in order to compute 50 50 * intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction. 51 - * The righmost doubleword can be 0 to prevent contribution to the result or 51 + * The rightmost doubleword can be 0 to prevent contribution to the result or 52 52 * can be multiplied by 1 to perform an XOR without the need for a separate 53 53 * VECTOR EXCLUSIVE OR instruction. 54 54 *
+1 -1
arch/s390/include/asm/ap.h
··· 333 333 }; 334 334 335 335 /** 336 - * ap_qact(): Query AP combatibility type. 336 + * ap_qact(): Query AP compatibility type. 337 337 * @qid: The AP queue number 338 338 * @apinfo: On input the info about the AP queue. On output the 339 339 * alternate AP queue info provided by the qact function
+1 -1
arch/s390/include/asm/appldata.h
··· 8 8 #ifndef _ASM_S390_APPLDATA_H 9 9 #define _ASM_S390_APPLDATA_H 10 10 11 + #include <linux/io.h> 11 12 #include <asm/diag.h> 12 - #include <asm/io.h> 13 13 14 14 #define APPLDATA_START_INTERVAL_REC 0x80 15 15 #define APPLDATA_STOP_REC 0x81
+2 -2
arch/s390/include/asm/asm-extable.h
··· 25 25 26 26 #define __EX_TABLE(_section, _fault, _target, _type) \ 27 27 stringify_in_c(.section _section,"a";) \ 28 - stringify_in_c(.align 4;) \ 28 + stringify_in_c(.balign 4;) \ 29 29 stringify_in_c(.long (_fault) - .;) \ 30 30 stringify_in_c(.long (_target) - .;) \ 31 31 stringify_in_c(.short (_type);) \ ··· 34 34 35 35 #define __EX_TABLE_UA(_section, _fault, _target, _type, _regerr, _regaddr, _len)\ 36 36 stringify_in_c(.section _section,"a";) \ 37 - stringify_in_c(.align 4;) \ 37 + stringify_in_c(.balign 4;) \ 38 38 stringify_in_c(.long (_fault) - .;) \ 39 39 stringify_in_c(.long (_target) - .;) \ 40 40 stringify_in_c(.short (_type);) \
+1 -1
arch/s390/include/asm/dma.h
··· 2 2 #ifndef _ASM_S390_DMA_H 3 3 #define _ASM_S390_DMA_H 4 4 5 - #include <asm/io.h> 5 + #include <linux/io.h> 6 6 7 7 /* 8 8 * MAX_DMA_ADDRESS is ambiguous because on s390 its completely unrelated
+2 -2
arch/s390/include/asm/lowcore.h
··· 118 118 __u64 avg_steal_timer; /* 0x0300 */ 119 119 __u64 last_update_timer; /* 0x0308 */ 120 120 __u64 last_update_clock; /* 0x0310 */ 121 - __u64 int_clock; /* 0x0318*/ 122 - __u64 mcck_clock; /* 0x0320 */ 121 + __u64 int_clock; /* 0x0318 */ 122 + __u8 pad_0x0320[0x0328-0x0320]; /* 0x0320 */ 123 123 __u64 clock_comparator; /* 0x0328 */ 124 124 __u64 boot_clock[2]; /* 0x0330 */ 125 125
+3 -3
arch/s390/include/asm/page.h
··· 19 19 #define PAGE_SHIFT _PAGE_SHIFT 20 20 #define PAGE_SIZE _PAGE_SIZE 21 21 #define PAGE_MASK _PAGE_MASK 22 - #define PAGE_DEFAULT_ACC 0 22 + #define PAGE_DEFAULT_ACC _AC(0, UL) 23 23 /* storage-protection override */ 24 24 #define PAGE_SPO_ACC 9 25 25 #define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4) ··· 179 179 #define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE 180 180 #endif 181 181 182 - #endif /* !__ASSEMBLY__ */ 183 - 184 182 #define __PAGE_OFFSET 0x0UL 185 183 #define PAGE_OFFSET 0x0UL 186 184 ··· 201 203 #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) 202 204 203 205 #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC 206 + 207 + #endif /* !__ASSEMBLY__ */ 204 208 205 209 #include <asm-generic/memory_model.h> 206 210 #include <asm-generic/getorder.h>
+27 -25
arch/s390/include/asm/ptrace.h
··· 23 23 #define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT) 24 24 #define _PIF_FTRACE_FULL_REGS BIT(PIF_FTRACE_FULL_REGS) 25 25 26 - #ifndef __ASSEMBLY__ 26 + #define PSW32_MASK_PER _AC(0x40000000, UL) 27 + #define PSW32_MASK_DAT _AC(0x04000000, UL) 28 + #define PSW32_MASK_IO _AC(0x02000000, UL) 29 + #define PSW32_MASK_EXT _AC(0x01000000, UL) 30 + #define PSW32_MASK_KEY _AC(0x00F00000, UL) 31 + #define PSW32_MASK_BASE _AC(0x00080000, UL) /* Always one */ 32 + #define PSW32_MASK_MCHECK _AC(0x00040000, UL) 33 + #define PSW32_MASK_WAIT _AC(0x00020000, UL) 34 + #define PSW32_MASK_PSTATE _AC(0x00010000, UL) 35 + #define PSW32_MASK_ASC _AC(0x0000C000, UL) 36 + #define PSW32_MASK_CC _AC(0x00003000, UL) 37 + #define PSW32_MASK_PM _AC(0x00000f00, UL) 38 + #define PSW32_MASK_RI _AC(0x00000080, UL) 39 + 40 + #define PSW32_ADDR_AMODE _AC(0x80000000, UL) 41 + #define PSW32_ADDR_INSN _AC(0x7FFFFFFF, UL) 42 + 43 + #define PSW32_DEFAULT_KEY ((PAGE_DEFAULT_ACC) << 20) 44 + 45 + #define PSW32_ASC_PRIMARY _AC(0x00000000, UL) 46 + #define PSW32_ASC_ACCREG _AC(0x00004000, UL) 47 + #define PSW32_ASC_SECONDARY _AC(0x00008000, UL) 48 + #define PSW32_ASC_HOME _AC(0x0000C000, UL) 49 + 50 + #define PSW_DEFAULT_KEY ((PAGE_DEFAULT_ACC) << 52) 27 51 28 52 #define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \ 29 53 PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_DAT) 30 54 #define PSW_USER_BITS (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \ 31 55 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \ 32 56 PSW_MASK_PSTATE | PSW_ASC_PRIMARY) 57 + 58 + #ifndef __ASSEMBLY__ 33 59 34 60 struct psw_bits { 35 61 unsigned long : 1; ··· 96 70 typecheck(psw_t, __psw); \ 97 71 &(*(struct psw_bits *)(&(__psw))); \ 98 72 })) 99 - 100 - #define PSW32_MASK_PER 0x40000000UL 101 - #define PSW32_MASK_DAT 0x04000000UL 102 - #define PSW32_MASK_IO 0x02000000UL 103 - #define PSW32_MASK_EXT 0x01000000UL 104 - #define PSW32_MASK_KEY 0x00F00000UL 105 - #define PSW32_MASK_BASE 0x00080000UL /* Always one */ 106 - 
#define PSW32_MASK_MCHECK 0x00040000UL 107 - #define PSW32_MASK_WAIT 0x00020000UL 108 - #define PSW32_MASK_PSTATE 0x00010000UL 109 - #define PSW32_MASK_ASC 0x0000C000UL 110 - #define PSW32_MASK_CC 0x00003000UL 111 - #define PSW32_MASK_PM 0x00000f00UL 112 - #define PSW32_MASK_RI 0x00000080UL 113 - 114 - #define PSW32_ADDR_AMODE 0x80000000UL 115 - #define PSW32_ADDR_INSN 0x7FFFFFFFUL 116 - 117 - #define PSW32_DEFAULT_KEY (((u32)PAGE_DEFAULT_ACC) << 20) 118 - 119 - #define PSW32_ASC_PRIMARY 0x00000000UL 120 - #define PSW32_ASC_ACCREG 0x00004000UL 121 - #define PSW32_ASC_SECONDARY 0x00008000UL 122 - #define PSW32_ASC_HOME 0x0000C000UL 123 73 124 74 typedef struct { 125 75 unsigned int mask;
+1 -1
arch/s390/include/uapi/asm/cmb.h
··· 31 31 struct cmbdata { 32 32 __u64 size; 33 33 __u64 elapsed_time; 34 - /* basic and exended format: */ 34 + /* basic and extended format: */ 35 35 __u64 ssch_rsch_count; 36 36 __u64 sample_count; 37 37 __u64 device_connect_time;
+1 -1
arch/s390/include/uapi/asm/dasd.h
··· 24 24 /* 25 25 * struct dasd_information2_t 26 26 * represents any data about the device, which is visible to userspace. 27 - * including foramt and featueres. 27 + * including format and featueres. 28 28 */ 29 29 typedef struct dasd_information2_t { 30 30 unsigned int devno; /* S/390 devno */
+3 -3
arch/s390/include/uapi/asm/pkey.h
··· 353 353 * Is able to find out which type of secure key is given (CCA AES secure 354 354 * key, CCA AES cipher key, CCA ECC private key, EP11 AES key, EP11 ECC private 355 355 * key) and tries to find all matching crypto cards based on the MKVP and maybe 356 - * other criterias (like CCA AES cipher keys need a CEX5C or higher, EP11 keys 356 + * other criteria (like CCA AES cipher keys need a CEX5C or higher, EP11 keys 357 357 * with BLOB_PKEY_EXTRACTABLE need a CEX7 and EP11 api version 4). The list of 358 358 * APQNs is further filtered by the key's mkvp which needs to match to either 359 359 * the current mkvp (CCA and EP11) or the alternate mkvp (old mkvp, CCA adapters ··· 370 370 * is empty (apqn_entries is 0) the apqn_entries field is updated to the number 371 371 * of apqn targets found and the ioctl returns with 0. If apqn_entries is > 0 372 372 * but the number of apqn targets does not fit into the list, the apqn_targets 373 - * field is updatedd with the number of reqired entries but there are no apqn 373 + * field is updated with the number of required entries but there are no apqn 374 374 * values stored in the list and the ioctl returns with ENOSPC. If no matching 375 375 * APQN is found, the ioctl returns with 0 but the apqn_entries value is 0. 376 376 */ ··· 408 408 * is empty (apqn_entries is 0) the apqn_entries field is updated to the number 409 409 * of apqn targets found and the ioctl returns with 0. If apqn_entries is > 0 410 410 * but the number of apqn targets does not fit into the list, the apqn_targets 411 - * field is updatedd with the number of reqired entries but there are no apqn 411 + * field is updated with the number of required entries but there are no apqn 412 412 * values stored in the list and the ioctl returns with ENOSPC. If no matching 413 413 * APQN is found, the ioctl returns with 0 but the apqn_entries value is 0. 414 414 */
+58 -63
arch/s390/include/uapi/asm/ptrace.h
··· 166 166 167 167 #endif /* __s390x__ */ 168 168 169 + #ifndef __s390x__ 170 + 171 + #define PSW_MASK_PER _AC(0x40000000, UL) 172 + #define PSW_MASK_DAT _AC(0x04000000, UL) 173 + #define PSW_MASK_IO _AC(0x02000000, UL) 174 + #define PSW_MASK_EXT _AC(0x01000000, UL) 175 + #define PSW_MASK_KEY _AC(0x00F00000, UL) 176 + #define PSW_MASK_BASE _AC(0x00080000, UL) /* always one */ 177 + #define PSW_MASK_MCHECK _AC(0x00040000, UL) 178 + #define PSW_MASK_WAIT _AC(0x00020000, UL) 179 + #define PSW_MASK_PSTATE _AC(0x00010000, UL) 180 + #define PSW_MASK_ASC _AC(0x0000C000, UL) 181 + #define PSW_MASK_CC _AC(0x00003000, UL) 182 + #define PSW_MASK_PM _AC(0x00000F00, UL) 183 + #define PSW_MASK_RI _AC(0x00000000, UL) 184 + #define PSW_MASK_EA _AC(0x00000000, UL) 185 + #define PSW_MASK_BA _AC(0x00000000, UL) 186 + 187 + #define PSW_MASK_USER _AC(0x0000FF00, UL) 188 + 189 + #define PSW_ADDR_AMODE _AC(0x80000000, UL) 190 + #define PSW_ADDR_INSN _AC(0x7FFFFFFF, UL) 191 + 192 + #define PSW_ASC_PRIMARY _AC(0x00000000, UL) 193 + #define PSW_ASC_ACCREG _AC(0x00004000, UL) 194 + #define PSW_ASC_SECONDARY _AC(0x00008000, UL) 195 + #define PSW_ASC_HOME _AC(0x0000C000, UL) 196 + 197 + #else /* __s390x__ */ 198 + 199 + #define PSW_MASK_PER _AC(0x4000000000000000, UL) 200 + #define PSW_MASK_DAT _AC(0x0400000000000000, UL) 201 + #define PSW_MASK_IO _AC(0x0200000000000000, UL) 202 + #define PSW_MASK_EXT _AC(0x0100000000000000, UL) 203 + #define PSW_MASK_BASE _AC(0x0000000000000000, UL) 204 + #define PSW_MASK_KEY _AC(0x00F0000000000000, UL) 205 + #define PSW_MASK_MCHECK _AC(0x0004000000000000, UL) 206 + #define PSW_MASK_WAIT _AC(0x0002000000000000, UL) 207 + #define PSW_MASK_PSTATE _AC(0x0001000000000000, UL) 208 + #define PSW_MASK_ASC _AC(0x0000C00000000000, UL) 209 + #define PSW_MASK_CC _AC(0x0000300000000000, UL) 210 + #define PSW_MASK_PM _AC(0x00000F0000000000, UL) 211 + #define PSW_MASK_RI _AC(0x0000008000000000, UL) 212 + #define PSW_MASK_EA _AC(0x0000000100000000, UL) 213 + #define 
PSW_MASK_BA _AC(0x0000000080000000, UL) 214 + 215 + #define PSW_MASK_USER _AC(0x0000FF0180000000, UL) 216 + 217 + #define PSW_ADDR_AMODE _AC(0x0000000000000000, UL) 218 + #define PSW_ADDR_INSN _AC(0xFFFFFFFFFFFFFFFF, UL) 219 + 220 + #define PSW_ASC_PRIMARY _AC(0x0000000000000000, UL) 221 + #define PSW_ASC_ACCREG _AC(0x0000400000000000, UL) 222 + #define PSW_ASC_SECONDARY _AC(0x0000800000000000, UL) 223 + #define PSW_ASC_HOME _AC(0x0000C00000000000, UL) 224 + 225 + #endif /* __s390x__ */ 226 + 169 227 #define NUM_GPRS 16 170 228 #define NUM_FPRS 16 171 229 #define NUM_CRS 16 ··· 271 213 unsigned long mask; 272 214 unsigned long addr; 273 215 } __attribute__ ((aligned(8))) psw_t; 274 - 275 - #ifndef __s390x__ 276 - 277 - #define PSW_MASK_PER 0x40000000UL 278 - #define PSW_MASK_DAT 0x04000000UL 279 - #define PSW_MASK_IO 0x02000000UL 280 - #define PSW_MASK_EXT 0x01000000UL 281 - #define PSW_MASK_KEY 0x00F00000UL 282 - #define PSW_MASK_BASE 0x00080000UL /* always one */ 283 - #define PSW_MASK_MCHECK 0x00040000UL 284 - #define PSW_MASK_WAIT 0x00020000UL 285 - #define PSW_MASK_PSTATE 0x00010000UL 286 - #define PSW_MASK_ASC 0x0000C000UL 287 - #define PSW_MASK_CC 0x00003000UL 288 - #define PSW_MASK_PM 0x00000F00UL 289 - #define PSW_MASK_RI 0x00000000UL 290 - #define PSW_MASK_EA 0x00000000UL 291 - #define PSW_MASK_BA 0x00000000UL 292 - 293 - #define PSW_MASK_USER 0x0000FF00UL 294 - 295 - #define PSW_ADDR_AMODE 0x80000000UL 296 - #define PSW_ADDR_INSN 0x7FFFFFFFUL 297 - 298 - #define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 20) 299 - 300 - #define PSW_ASC_PRIMARY 0x00000000UL 301 - #define PSW_ASC_ACCREG 0x00004000UL 302 - #define PSW_ASC_SECONDARY 0x00008000UL 303 - #define PSW_ASC_HOME 0x0000C000UL 304 - 305 - #else /* __s390x__ */ 306 - 307 - #define PSW_MASK_PER 0x4000000000000000UL 308 - #define PSW_MASK_DAT 0x0400000000000000UL 309 - #define PSW_MASK_IO 0x0200000000000000UL 310 - #define PSW_MASK_EXT 0x0100000000000000UL 311 - #define PSW_MASK_BASE 
0x0000000000000000UL 312 - #define PSW_MASK_KEY 0x00F0000000000000UL 313 - #define PSW_MASK_MCHECK 0x0004000000000000UL 314 - #define PSW_MASK_WAIT 0x0002000000000000UL 315 - #define PSW_MASK_PSTATE 0x0001000000000000UL 316 - #define PSW_MASK_ASC 0x0000C00000000000UL 317 - #define PSW_MASK_CC 0x0000300000000000UL 318 - #define PSW_MASK_PM 0x00000F0000000000UL 319 - #define PSW_MASK_RI 0x0000008000000000UL 320 - #define PSW_MASK_EA 0x0000000100000000UL 321 - #define PSW_MASK_BA 0x0000000080000000UL 322 - 323 - #define PSW_MASK_USER 0x0000FF0180000000UL 324 - 325 - #define PSW_ADDR_AMODE 0x0000000000000000UL 326 - #define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL 327 - 328 - #define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 52) 329 - 330 - #define PSW_ASC_PRIMARY 0x0000000000000000UL 331 - #define PSW_ASC_ACCREG 0x0000400000000000UL 332 - #define PSW_ASC_SECONDARY 0x0000800000000000UL 333 - #define PSW_ASC_HOME 0x0000C00000000000UL 334 - 335 - #endif /* __s390x__ */ 336 - 337 216 338 217 /* 339 218 * The s390_regs structure is used to define the elf_gregset_t.
-1
arch/s390/kernel/asm-offsets.c
··· 122 122 OFFSET(__LC_LAST_UPDATE_TIMER, lowcore, last_update_timer); 123 123 OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock); 124 124 OFFSET(__LC_INT_CLOCK, lowcore, int_clock); 125 - OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock); 126 125 OFFSET(__LC_BOOT_CLOCK, lowcore, boot_clock); 127 126 OFFSET(__LC_CURRENT, lowcore, current_task); 128 127 OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
+1 -1
arch/s390/kernel/cpcmd.c
··· 16 16 #include <linux/stddef.h> 17 17 #include <linux/string.h> 18 18 #include <linux/mm.h> 19 + #include <linux/io.h> 19 20 #include <asm/diag.h> 20 21 #include <asm/ebcdic.h> 21 22 #include <asm/cpcmd.h> 22 - #include <asm/io.h> 23 23 24 24 static DEFINE_SPINLOCK(cpcmd_lock); 25 25 static char cpcmd_buf[241];
+2 -2
arch/s390/kernel/dis.c
··· 24 24 #include <linux/kdebug.h> 25 25 #include <linux/uaccess.h> 26 26 #include <linux/atomic.h> 27 + #include <linux/io.h> 27 28 #include <asm/dis.h> 28 - #include <asm/io.h> 29 29 #include <asm/cpcmd.h> 30 30 #include <asm/lowcore.h> 31 31 #include <asm/debug.h> ··· 516 516 if (copy_from_regs(regs, code + end, (void *)addr, 2)) 517 517 break; 518 518 } 519 - /* Code snapshot useable ? */ 519 + /* Code snapshot usable ? */ 520 520 if ((regs->psw.addr & 1) || start >= end) { 521 521 printk("%s Code: Bad PSW.\n", mode); 522 522 return;
+9 -5
arch/s390/kernel/entry.S
··· 136 136 clgfrl %r14,.Lrange_size\@ 137 137 jhe \outside_label 138 138 .section .rodata, "a" 139 - .align 4 139 + .balign 4 140 140 .Lrange_size\@: 141 141 .long \end - \start 142 142 .previous ··· 488 488 * Machine check handler routines 489 489 */ 490 490 SYM_CODE_START(mcck_int_handler) 491 - stckf __LC_MCCK_CLOCK 492 491 BPOFF 493 492 la %r1,4095 # validate r1 494 493 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer ··· 597 598 TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4 598 599 jz 0f 599 600 lctlg %c0,%c15,__LC_CREGS_SAVE_AREA 600 - 0: larl %r15,stosm_tmp 601 - stosm 0(%r15),0x04 # turn dat on, keep irqs off 601 + 0: larl %r15,daton_psw 602 + lpswe 0(%r15) # turn dat on, keep irqs off 603 + .Ldaton: 602 604 lg %r15,__LC_RESTART_STACK 603 605 xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15) 604 606 stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15) ··· 646 646 .balign 4 647 647 SYM_DATA_LOCAL(stop_lock, .long 0) 648 648 SYM_DATA_LOCAL(this_cpu, .short 0) 649 - SYM_DATA_LOCAL(stosm_tmp, .byte 0) 649 + .balign 8 650 + SYM_DATA_START_LOCAL(daton_psw) 651 + .quad PSW_KERNEL_BITS 652 + .quad .Ldaton 653 + SYM_DATA_END(daton_psw) 650 654 651 655 .section .rodata, "a" 652 656 #define SYSCALL(esame,emu) .quad __s390x_ ## esame
+1 -1
arch/s390/kernel/head64.S
··· 36 36 lpswe dw_psw-.(%r13) # load disabled wait psw 37 37 SYM_CODE_END(startup_continue) 38 38 39 - .align 16 39 + .balign 16 40 40 SYM_DATA_LOCAL(dw_psw, .quad 0x0002000180000000,0x0000000000000000)
+1 -1
arch/s390/kernel/kprobes_insn_page.S
··· 13 13 * would be in the data section instead. 14 14 */ 15 15 .section .kprobes.text, "ax" 16 - .align 4096 16 + .balign 4096 17 17 SYM_CODE_START(kprobes_insn_page) 18 18 .rept 2048 19 19 .word 0x07fe
+1 -1
arch/s390/kernel/nospec-branch.c
··· 14 14 return rc; 15 15 if (enabled && test_facility(82)) { 16 16 /* 17 - * The user explicitely requested nobp=1, enable it and 17 + * The user explicitly requested nobp=1, enable it and 18 18 * disable the expoline support. 19 19 */ 20 20 __set_facility(82, alt_stfle_fac_list);
+2 -30
arch/s390/kernel/perf_cpum_cf.c
··· 172 172 cpu_cf_root.cfptr = NULL; 173 173 irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); 174 174 on_each_cpu(cpum_cf_reset_cpu, NULL, 1); 175 - debug_sprintf_event(cf_dbg, 4, "%s2 root.refcnt %u cfptr %px\n", 175 + debug_sprintf_event(cf_dbg, 4, "%s root.refcnt %u cfptr %d\n", 176 176 __func__, refcount_read(&cpu_cf_root.refcnt), 177 - cpu_cf_root.cfptr); 177 + !cpu_cf_root.cfptr); 178 178 } 179 179 180 180 /* ··· 975 975 } 976 976 977 977 overflow = perf_event_overflow(event, &data, &regs); 978 - debug_sprintf_event(cf_dbg, 3, 979 - "%s event %#llx sample_type %#llx raw %d ov %d\n", 980 - __func__, event->hw.config, 981 - event->attr.sample_type, raw.size, overflow); 982 978 if (overflow) 983 979 event->pmu->stop(event, 0); 984 980 ··· 1101 1105 { 1102 1106 int rc = 0; 1103 1107 1104 - debug_sprintf_event(cf_dbg, 4, "%s cpu %d root.refcnt %d " 1105 - "opencnt %d\n", __func__, cpu, 1106 - refcount_read(&cpu_cf_root.refcnt), 1107 - refcount_read(&cfset_opencnt)); 1108 1108 /* 1109 1109 * Ignore notification for perf_event_open(). 1110 1110 * Handle only /dev/hwctr device sessions. 
··· 1119 1127 1120 1128 static int cpum_cf_offline_cpu(unsigned int cpu) 1121 1129 { 1122 - debug_sprintf_event(cf_dbg, 4, "%s cpu %d root.refcnt %d opencnt %d\n", 1123 - __func__, cpu, refcount_read(&cpu_cf_root.refcnt), 1124 - refcount_read(&cfset_opencnt)); 1125 1130 /* 1126 1131 * During task exit processing of grouped perf events triggered by CPU 1127 1132 * hotplug processing, pmu_disable() is called as part of perf context ··· 1326 1337 cpuhw->state, S390_HWCTR_DEVICE, rc); 1327 1338 if (!cpuhw->dev_state) 1328 1339 cpuhw->flags &= ~PMU_F_IN_USE; 1329 - debug_sprintf_event(cf_dbg, 4, "%s rc %d state %#llx dev_state %#llx\n", 1330 - __func__, rc, cpuhw->state, cpuhw->dev_state); 1331 1340 } 1332 1341 1333 1342 /* Start counter sets on particular CPU */ ··· 1347 1360 else 1348 1361 pr_err("Counter set start %#llx of /dev/%s failed rc=%i\n", 1349 1362 cpuhw->dev_state | cpuhw->state, S390_HWCTR_DEVICE, rc); 1350 - debug_sprintf_event(cf_dbg, 4, "%s rc %d state %#llx dev_state %#llx\n", 1351 - __func__, rc, cpuhw->state, cpuhw->dev_state); 1352 1363 } 1353 1364 1354 1365 static void cfset_release_cpu(void *p) ··· 1354 1369 struct cpu_cf_events *cpuhw = this_cpu_cfhw(); 1355 1370 int rc; 1356 1371 1357 - debug_sprintf_event(cf_dbg, 4, "%s state %#llx dev_state %#llx\n", 1358 - __func__, cpuhw->state, cpuhw->dev_state); 1359 1372 cpuhw->dev_state = 0; 1360 1373 rc = lcctl(cpuhw->state); /* Keep perf_event_open counter sets */ 1361 1374 if (rc) ··· 1442 1459 if (atomic_read(&p.cpus_ack) != cpumask_weight(mask)) { 1443 1460 on_each_cpu_mask(mask, cfset_ioctl_off, &p, 1); 1444 1461 rc = -EIO; 1445 - debug_sprintf_event(cf_dbg, 4, "%s CPUs missing", __func__); 1446 1462 } 1447 1463 free_cpumask_var(mask); 1448 1464 return rc; ··· 1498 1516 if (put_user(cpus, &ctrset_read->no_cpus)) 1499 1517 rc = -EFAULT; 1500 1518 out: 1501 - debug_sprintf_event(cf_dbg, 4, "%s rc %d copied %ld\n", __func__, rc, 1502 - uptr - (void __user *)ctrset_read->data); 1503 1519 return rc; 
1504 1520 } 1505 1521 ··· 1545 1565 cpuhw->used += space; 1546 1566 cpuhw->sets += 1; 1547 1567 } 1548 - debug_sprintf_event(cf_dbg, 4, "%s sets %d used %zd\n", __func__, 1549 - cpuhw->sets, cpuhw->used); 1550 1568 } 1551 1569 } 1552 1570 ··· 1639 1661 if (!ret) { 1640 1662 cfset_session_add(preq); 1641 1663 file->private_data = preq; 1642 - debug_sprintf_event(cf_dbg, 4, "%s set %#lx need %ld ret %d\n", 1643 - __func__, preq->ctrset, need, ret); 1644 1664 } else { 1645 1665 kfree(preq); 1646 1666 } ··· 1737 1761 1738 1762 static void cfdiag_read(struct perf_event *event) 1739 1763 { 1740 - debug_sprintf_event(cf_dbg, 3, "%s event %#llx count %ld\n", __func__, 1741 - event->attr.config, local64_read(&event->count)); 1742 1764 } 1743 1765 1744 1766 static int get_authctrsets(void) ··· 1781 1807 if (!event->hw.config_base) 1782 1808 err = -EINVAL; 1783 1809 1784 - debug_sprintf_event(cf_dbg, 5, "%s err %d config_base %#lx\n", 1785 - __func__, err, event->hw.config_base); 1786 1810 return err; 1787 1811 } 1788 1812
+22 -36
arch/s390/kernel/perf_cpum_sf.c
··· 22 22 #include <asm/irq.h> 23 23 #include <asm/debug.h> 24 24 #include <asm/timex.h> 25 - #include <asm-generic/io.h> 25 + #include <linux/io.h> 26 26 27 27 /* Minimum number of sample-data-block-tables: 28 28 * At least one table is required for the sampling buffer structure. ··· 43 43 #define CPUM_SF_SDBT_TL_OFFSET (CPUM_SF_SDB_PER_TABLE * 8) 44 44 static inline int require_table_link(const void *sdbt) 45 45 { 46 - return ((unsigned long) sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET; 46 + return ((unsigned long)sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET; 47 47 } 48 48 49 49 /* Minimum and maximum sampling buffer sizes: ··· 192 192 if (is_link_entry(curr)) { 193 193 curr = get_next_sdbt(curr); 194 194 if (sdbt) 195 - free_page((unsigned long) sdbt); 195 + free_page((unsigned long)sdbt); 196 196 197 197 /* If the origin is reached, sampling buffer is freed */ 198 198 if (curr == sfb->sdbt) ··· 278 278 for (i = 0; i < num_sdb; i++) { 279 279 /* Allocate a new SDB-table if it is full. 
*/ 280 280 if (require_table_link(tail)) { 281 - new = (unsigned long *) get_zeroed_page(gfp_flags); 281 + new = (unsigned long *)get_zeroed_page(gfp_flags); 282 282 if (!new) { 283 283 rc = -ENOMEM; 284 284 break; ··· 304 304 */ 305 305 if (tail_prev) { 306 306 sfb->num_sdbt--; 307 - free_page((unsigned long) new); 307 + free_page((unsigned long)new); 308 308 tail = tail_prev; 309 309 } 310 310 break; ··· 343 343 return -EINVAL; 344 344 345 345 /* Allocate the sample-data-block-table origin */ 346 - sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL); 346 + sfb->sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL); 347 347 if (!sfb->sdbt) 348 348 return -ENOMEM; 349 349 sfb->num_sdb = 0; ··· 594 594 #define PMC_FAILURE 2 595 595 static void setup_pmc_cpu(void *flags) 596 596 { 597 - int err; 598 597 struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf); 598 + int err = 0; 599 599 600 - err = 0; 601 - switch (*((int *) flags)) { 600 + switch (*((int *)flags)) { 602 601 case PMC_INIT: 603 602 memset(cpusf, 0, sizeof(*cpusf)); 604 603 err = qsi(&cpusf->qsi); ··· 605 606 break; 606 607 cpusf->flags |= PMU_F_RESERVED; 607 608 err = sf_disable(); 608 - if (err) 609 - pr_err("Switching off the sampling facility failed " 610 - "with rc %i\n", err); 611 609 break; 612 610 case PMC_RELEASE: 613 611 cpusf->flags &= ~PMU_F_RESERVED; 614 612 err = sf_disable(); 615 - if (err) { 616 - pr_err("Switching off the sampling facility failed " 617 - "with rc %i\n", err); 618 - } else 613 + if (!err) 619 614 deallocate_buffers(cpusf); 620 615 break; 621 616 } 622 - if (err) 623 - *((int *) flags) |= PMC_FAILURE; 617 + if (err) { 618 + *((int *)flags) |= PMC_FAILURE; 619 + pr_err("Switching off the sampling facility failed with rc %i\n", err); 620 + } 624 621 } 625 622 626 623 static void release_pmc_hardware(void) ··· 958 963 return -ENOENT; 959 964 } 960 965 961 - /* Check online status of the CPU to which the event is pinned */ 962 - if (event->cpu >= 0 && !cpu_online(event->cpu)) 
963 - return -ENODEV; 964 - 965 966 /* Force reset of idle/hv excludes regardless of what the 966 967 * user requested. 967 968 */ ··· 1017 1026 err = lsctl(&cpuhw->lsctl); 1018 1027 if (err) { 1019 1028 cpuhw->flags &= ~PMU_F_ENABLED; 1020 - pr_err("Loading sampling controls failed: op %i err %i\n", 1021 - 1, err); 1029 + pr_err("Loading sampling controls failed: op 1 err %i\n", err); 1022 1030 return; 1023 1031 } 1024 1032 ··· 1051 1061 1052 1062 err = lsctl(&inactive); 1053 1063 if (err) { 1054 - pr_err("Loading sampling controls failed: op %i err %i\n", 1055 - 2, err); 1064 + pr_err("Loading sampling controls failed: op 2 err %i\n", err); 1056 1065 return; 1057 1066 } 1058 1067 ··· 1210 1221 1211 1222 te = trailer_entry_ptr((unsigned long)sdbt); 1212 1223 sample = (struct hws_basic_entry *)sdbt; 1213 - while ((unsigned long *) sample < (unsigned long *) te) { 1224 + while ((unsigned long *)sample < (unsigned long *)te) { 1214 1225 /* Check for an empty sample */ 1215 1226 if (!sample->def || sample->LS) 1216 1227 break; ··· 1287 1298 if (SAMPL_DIAG_MODE(&event->hw)) 1288 1299 return; 1289 1300 1290 - sdbt = (unsigned long *) TEAR_REG(hwc); 1301 + sdbt = (unsigned long *)TEAR_REG(hwc); 1291 1302 done = event_overflow = sampl_overflow = num_sdb = 0; 1292 1303 while (!done) { 1293 1304 /* Get the trailer entry of the sample-data-block */ ··· 1659 1670 pr_err("The AUX buffer with %lu pages for the " 1660 1671 "diagnostic-sampling mode is full\n", 1661 1672 num_sdb); 1662 - debug_sprintf_event(sfdbg, 1, 1663 - "%s: AUX buffer used up\n", 1664 - __func__); 1665 1673 break; 1666 1674 } 1667 1675 if (WARN_ON_ONCE(!aux)) ··· 1790 1804 1791 1805 /* Allocate the first SDBT */ 1792 1806 sfb->num_sdbt = 0; 1793 - sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL); 1807 + sfb->sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL); 1794 1808 if (!sfb->sdbt) 1795 1809 goto no_sdbt; 1796 1810 aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)sfb->sdbt; ··· 1802 1816 */ 
1803 1817 for (i = 0; i < nr_pages; i++, tail++) { 1804 1818 if (require_table_link(tail)) { 1805 - new = (unsigned long *) get_zeroed_page(GFP_KERNEL); 1819 + new = (unsigned long *)get_zeroed_page(GFP_KERNEL); 1806 1820 if (!new) 1807 1821 goto no_sdbt; 1808 1822 aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)new; ··· 1851 1865 /* Nothing to do ... updates are interrupt-driven */ 1852 1866 } 1853 1867 1854 - /* Check if the new sampling period/freqeuncy is appropriate. 1868 + /* Check if the new sampling period/frequency is appropriate. 1855 1869 * 1856 1870 * Return non-zero on error and zero on passed checks. 1857 1871 */ ··· 1959 1973 cpuhw->lsctl.interval = SAMPL_RATE(&event->hw); 1960 1974 if (!SAMPL_DIAG_MODE(&event->hw)) { 1961 1975 cpuhw->lsctl.tear = virt_to_phys(cpuhw->sfb.sdbt); 1962 - cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt; 1963 - TEAR_REG(&event->hw) = (unsigned long) cpuhw->sfb.sdbt; 1976 + cpuhw->lsctl.dear = *(unsigned long *)cpuhw->sfb.sdbt; 1977 + TEAR_REG(&event->hw) = (unsigned long)cpuhw->sfb.sdbt; 1964 1978 } 1965 1979 1966 1980 /* Ensure sampling functions are in the disabled state. If disabled,
+2 -2
arch/s390/kernel/perf_pai_ext.c
··· 84 84 /* The memory is already zeroed. */ 85 85 paiext_root.mapptr = alloc_percpu(struct paiext_mapptr); 86 86 if (!paiext_root.mapptr) { 87 - /* Returing without refcnt adjustment is ok. The 87 + /* Returning without refcnt adjustment is ok. The 88 88 * error code is handled by paiext_alloc() which 89 89 * decrements refcnt when an event can not be 90 90 * created. ··· 190 190 cpump->mode = a->sample_period ? PAI_MODE_SAMPLING 191 191 : PAI_MODE_COUNTING; 192 192 } else { 193 - /* Multiple invocation, check whats active. 193 + /* Multiple invocation, check what is active. 194 194 * Supported are multiple counter events or only one sampling 195 195 * event concurrently at any one time. 196 196 */
+1 -1
arch/s390/kernel/process.c
··· 30 30 #include <linux/export.h> 31 31 #include <linux/init_task.h> 32 32 #include <linux/entry-common.h> 33 + #include <linux/io.h> 33 34 #include <asm/cpu_mf.h> 34 - #include <asm/io.h> 35 35 #include <asm/processor.h> 36 36 #include <asm/vtimer.h> 37 37 #include <asm/exec.h>
+1 -1
arch/s390/kernel/setup.c
··· 529 529 res->start = start; 530 530 /* 531 531 * In memblock, end points to the first byte after the 532 - * range while in resourses, end points to the last byte in 532 + * range while in resources, end points to the last byte in 533 533 * the range. 534 534 */ 535 535 res->end = end - 1;
+1 -1
arch/s390/kernel/smp.c
··· 113 113 114 114 /* 115 115 * The smp_cpu_state_mutex must be held when changing the state or polarization 116 - * member of a pcpu data structure within the pcpu_devices arreay. 116 + * member of a pcpu data structure within the pcpu_devices array. 117 117 */ 118 118 DEFINE_MUTEX(smp_cpu_state_mutex); 119 119
+1 -1
arch/s390/kernel/time.c
··· 702 702 703 703 if (!check_sync_clock()) 704 704 /* 705 - * There is a usable clock but the synchonization failed. 705 + * There is a usable clock but the synchronization failed. 706 706 * Retry after a second. 707 707 */ 708 708 mod_timer(&stp_timer, jiffies + msecs_to_jiffies(MSEC_PER_SEC));
+5 -1
arch/s390/kernel/vdso32/Makefile
··· 19 19 KBUILD_AFLAGS_32 += -m31 -s 20 20 21 21 KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS)) 22 + KBUILD_CFLAGS_32 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_32)) 22 23 KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin 23 24 24 25 LDFLAGS_vdso32.so.dbg += -fPIC -shared -soname=linux-vdso32.so.1 \ ··· 41 40 # Force dependency (incbin is bad) 42 41 $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so 43 42 43 + quiet_cmd_vdso_and_check = VDSO $@ 44 + cmd_vdso_and_check = $(cmd_ld); $(cmd_vdso_check) 45 + 44 46 $(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE 45 - $(call if_changed,ld) 47 + $(call if_changed,vdso_and_check) 46 48 47 49 # strip rule for the .so file 48 50 $(obj)/%.so: OBJCOPYFLAGS := -S
+5 -1
arch/s390/kernel/vdso64/Makefile
··· 24 24 KBUILD_AFLAGS_64 += -m64 25 25 26 26 KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS)) 27 + KBUILD_CFLAGS_64 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_64)) 27 28 KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin 28 29 ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \ 29 30 --hash-style=both --build-id=sha1 -T ··· 45 44 # Force dependency (incbin is bad) 46 45 $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so 47 46 47 + quiet_cmd_vdso_and_check = VDSO $@ 48 + cmd_vdso_and_check = $(cmd_ld); $(cmd_vdso_check) 49 + 48 50 # link rule for the .so file, .lds has to be first 49 51 $(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) FORCE 50 - $(call if_changed,ld) 52 + $(call if_changed,vdso_and_check) 51 53 52 54 # strip rule for the .so file 53 55 $(obj)/%.so: OBJCOPYFLAGS := -S
+2 -2
arch/s390/kvm/gaccess.c
··· 478 478 }; 479 479 480 480 enum { 481 - FSI_UNKNOWN = 0, /* Unknown wether fetch or store */ 481 + FSI_UNKNOWN = 0, /* Unknown whether fetch or store */ 482 482 FSI_STORE = 1, /* Exception was due to store operation */ 483 483 FSI_FETCH = 2 /* Exception was due to fetch operation */ 484 484 }; ··· 625 625 * Returns: - zero on success; @gpa contains the resulting absolute address 626 626 * - a negative value if guest access failed due to e.g. broken 627 627 * guest mapping 628 - * - a positve value if an access exception happened. In this case 628 + * - a positive value if an access exception happened. In this case 629 629 * the returned value is the program interruption code as defined 630 630 * by the architecture 631 631 */
+1 -1
arch/s390/kvm/intercept.c
··· 630 630 return -EOPNOTSUPP; 631 631 } 632 632 633 - /* process PER, also if the instrution is processed in user space */ 633 + /* process PER, also if the instruction is processed in user space */ 634 634 if (vcpu->arch.sie_block->icptstatus & 0x02 && 635 635 (!rc || rc == -EOPNOTSUPP)) 636 636 per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
+1 -1
arch/s390/kvm/kvm-s390.c
··· 4161 4161 vcpu->run->s.regs.fpc = 0; 4162 4162 /* 4163 4163 * Do not reset these registers in the protected case, as some of 4164 - * them are overlayed and they are not accessible in this case 4164 + * them are overlaid and they are not accessible in this case 4165 4165 * anyway. 4166 4166 */ 4167 4167 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
+1 -1
arch/s390/kvm/pci.c
··· 427 427 428 428 429 429 /* 430 - * Register device with the specified KVM. If interpetation facilities are 430 + * Register device with the specified KVM. If interpretation facilities are 431 431 * available, enable them and let userspace indicate whether or not they will 432 432 * be used (specify SHM bit to disable). 433 433 */
+1 -2
arch/s390/kvm/priv.c
··· 13 13 #include <linux/errno.h> 14 14 #include <linux/mm_types.h> 15 15 #include <linux/pgtable.h> 16 - 16 + #include <linux/io.h> 17 17 #include <asm/asm-offsets.h> 18 18 #include <asm/facility.h> 19 19 #include <asm/current.h> ··· 22 22 #include <asm/sysinfo.h> 23 23 #include <asm/page-states.h> 24 24 #include <asm/gmap.h> 25 - #include <asm/io.h> 26 25 #include <asm/ptrace.h> 27 26 #include <asm/sclp.h> 28 27 #include <asm/ap.h>
+1 -1
arch/s390/kvm/pv.c
··· 273 273 uvcb.header.rc, uvcb.header.rrc); 274 274 WARN_ONCE(cc, "protvirt destroy vm fast failed handle %llx rc %x rrc %x", 275 275 kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc); 276 - /* Inteded memory leak on "impossible" error */ 276 + /* Intended memory leak on "impossible" error */ 277 277 if (!cc) 278 278 kvm_s390_pv_dealloc_vm(kvm); 279 279 return cc ? -EIO : 0;
+1 -1
arch/s390/kvm/sigp.c
··· 469 469 * 470 470 * This interception will occur at the source cpu when a source cpu sends an 471 471 * external call to a target cpu and the target cpu has the WAIT bit set in 472 - * its cpuflags. Interception will occurr after the interrupt indicator bits at 472 + * its cpuflags. Interception will occur after the interrupt indicator bits at 473 473 * the target cpu have been set. All error cases will lead to instruction 474 474 * interception, therefore nothing is to be checked or prepared. 475 475 */
+2 -2
arch/s390/kvm/vsie.c
··· 504 504 scb_s->mso = new_mso; 505 505 scb_s->prefix = new_prefix; 506 506 507 - /* We have to definetly flush the tlb if this scb never ran */ 507 + /* We have to definitely flush the tlb if this scb never ran */ 508 508 if (scb_s->ihcpu != 0xffffU) 509 509 scb_s->ihcpu = scb_o->ihcpu; 510 510 ··· 901 901 (vaddr & 0xfffffffffffff000UL) | 902 902 /* 52-53: store / fetch */ 903 903 (((unsigned int) !write_flag) + 1) << 10, 904 - /* 62-63: asce id (alway primary == 0) */ 904 + /* 62-63: asce id (always primary == 0) */ 905 905 .exc_access_id = 0, /* always primary */ 906 906 .op_access_id = 0, /* not MVPG */ 907 907 };
+1 -1
arch/s390/lib/spinlock.c
··· 13 13 #include <linux/init.h> 14 14 #include <linux/smp.h> 15 15 #include <linux/percpu.h> 16 + #include <linux/io.h> 16 17 #include <asm/alternative.h> 17 - #include <asm/io.h> 18 18 19 19 int spin_retry = -1; 20 20
+1 -1
arch/s390/mm/gmap.c
··· 1740 1740 * The r2t parameter specifies the address of the source table. The 1741 1741 * four pages of the source table are made read-only in the parent gmap 1742 1742 * address space. A write to the source table area @r2t will automatically 1743 - * remove the shadow r2 table and all of its decendents. 1743 + * remove the shadow r2 table and all of its descendants. 1744 1744 * 1745 1745 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the 1746 1746 * shadow table structure is incomplete, -ENOMEM if out of memory and
+1 -1
arch/s390/mm/maccess.c
··· 13 13 #include <linux/gfp.h> 14 14 #include <linux/cpu.h> 15 15 #include <linux/uio.h> 16 + #include <linux/io.h> 16 17 #include <asm/asm-extable.h> 17 18 #include <asm/ctl_reg.h> 18 - #include <asm/io.h> 19 19 #include <asm/abs_lowcore.h> 20 20 #include <asm/stacktrace.h> 21 21 #include <asm/maccess.h>
+3 -1
arch/s390/mm/vmem.c
··· 481 481 */ 482 482 static int vmem_add_range(unsigned long start, unsigned long size) 483 483 { 484 + start = (unsigned long)__va(start); 484 485 return add_pagetable(start, start + size, true); 485 486 } 486 487 ··· 490 489 */ 491 490 static void vmem_remove_range(unsigned long start, unsigned long size) 492 491 { 492 + start = (unsigned long)__va(start); 493 493 remove_pagetable(start, start + size, true); 494 494 } 495 495 ··· 558 556 * to any physical address. If missing, allocate segment- and region- 559 557 * table entries along. Meeting a large segment- or region-table entry 560 558 * while traversing is an error, since the function is expected to be 561 - * called against virtual regions reserverd for 4KB mappings only. 559 + * called against virtual regions reserved for 4KB mappings only. 562 560 */ 563 561 pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc) 564 562 {
+2 -2
arch/s390/net/bpf_jit_comp.c
··· 523 523 #define BPF_PLT_SIZE 32 524 524 asm( 525 525 ".pushsection .rodata\n" 526 - " .align 8\n" 526 + " .balign 8\n" 527 527 "bpf_plt:\n" 528 528 " lgrl %r0,bpf_plt_ret\n" 529 529 " lgrl %r1,bpf_plt_target\n" 530 530 " br %r1\n" 531 - " .align 8\n" 531 + " .balign 8\n" 532 532 "bpf_plt_ret: .quad 0\n" 533 533 "bpf_plt_target: .quad 0\n" 534 534 "bpf_plt_end:\n"
+3 -3
arch/s390/pci/pci_irq.c
··· 163 163 if (!rescan || irqs_on++) 164 164 /* End of second scan with interrupts on. */ 165 165 break; 166 - /* First scan complete, reenable interrupts. */ 166 + /* First scan complete, re-enable interrupts. */ 167 167 if (zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC, &iib)) 168 168 break; 169 169 bit = 0; ··· 202 202 if (irqs_on++) 203 203 /* End of second scan with interrupts on. */ 204 204 break; 205 - /* First scan complete, reenable interrupts. */ 205 + /* First scan complete, re-enable interrupts. */ 206 206 if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib)) 207 207 break; 208 208 cpu = 0; ··· 247 247 if (irqs_on++) 248 248 /* End of second scan with interrupts on. */ 249 249 break; 250 - /* First scan complete, reenable interrupts. */ 250 + /* First scan complete, re-enable interrupts. */ 251 251 if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC, &iib)) 252 252 break; 253 253 si = 0;
+1 -1
arch/s390/purgatory/head.S
··· 100 100 * checksum verification only (%r2 = 0 -> verification only). 101 101 * 102 102 * Check now and preserve over C function call by storing in 103 - * %r10 whith 103 + * %r10 with 104 104 * 1 -> checksum verification only 105 105 * 0 -> load new kernel 106 106 */
-11
drivers/crypto/Kconfig
··· 92 92 93 93 If unsure, say N. 94 94 95 - config ZCRYPT_MULTIDEVNODES 96 - bool "Support for multiple zcrypt device nodes" 97 - default y 98 - depends on S390 99 - depends on ZCRYPT 100 - help 101 - With this option enabled the zcrypt device driver can 102 - provide multiple devices nodes in /dev. Each device 103 - node can get customized to limit access and narrow 104 - down the use of the available crypto hardware. 105 - 106 95 config PKEY 107 96 tristate "Kernel API for protected key handling" 108 97 depends on S390
+1 -1
drivers/s390/block/dasd_diag.c
··· 24 24 #include <asm/debug.h> 25 25 #include <asm/diag.h> 26 26 #include <asm/ebcdic.h> 27 - #include <asm/io.h> 27 + #include <linux/io.h> 28 28 #include <asm/irq.h> 29 29 #include <asm/vtoc.h> 30 30
+2 -2
drivers/s390/block/dasd_eckd.c
··· 21 21 #include <linux/compat.h> 22 22 #include <linux/init.h> 23 23 #include <linux/seq_file.h> 24 + #include <linux/uaccess.h> 25 + #include <linux/io.h> 24 26 25 27 #include <asm/css_chars.h> 26 28 #include <asm/debug.h> 27 29 #include <asm/idals.h> 28 30 #include <asm/ebcdic.h> 29 - #include <asm/io.h> 30 - #include <linux/uaccess.h> 31 31 #include <asm/cio.h> 32 32 #include <asm/ccwdev.h> 33 33 #include <asm/itcw.h>
+1 -1
drivers/s390/block/dasd_fba.c
··· 16 16 #include <linux/bio.h> 17 17 #include <linux/module.h> 18 18 #include <linux/init.h> 19 + #include <linux/io.h> 19 20 20 21 #include <asm/idals.h> 21 22 #include <asm/ebcdic.h> 22 - #include <asm/io.h> 23 23 #include <asm/ccwdev.h> 24 24 25 25 #include "dasd_int.h"
+1 -1
drivers/s390/block/dcssblk.c
··· 20 20 #include <linux/pfn_t.h> 21 21 #include <linux/uio.h> 22 22 #include <linux/dax.h> 23 + #include <linux/io.h> 23 24 #include <asm/extmem.h> 24 - #include <asm/io.h> 25 25 26 26 #define DCSSBLK_NAME "dcssblk" 27 27 #define DCSSBLK_MINORS_PER_DISK 1
+1 -1
drivers/s390/char/con3215.c
··· 25 25 #include <linux/slab.h> 26 26 #include <asm/ccwdev.h> 27 27 #include <asm/cio.h> 28 - #include <asm/io.h> 28 + #include <linux/io.h> 29 29 #include <asm/ebcdic.h> 30 30 #include <linux/uaccess.h> 31 31 #include <asm/delay.h>
+1 -1
drivers/s390/char/monwriter.c
··· 22 22 #include <linux/mutex.h> 23 23 #include <linux/slab.h> 24 24 #include <linux/uaccess.h> 25 + #include <linux/io.h> 25 26 #include <asm/ebcdic.h> 26 - #include <asm/io.h> 27 27 #include <asm/appldata.h> 28 28 #include <asm/monwriter.h> 29 29
+1 -1
drivers/s390/cio/ccwgroup.c
··· 152 152 153 153 /* 154 154 * Provide an 'ungroup' attribute so the user can remove group devices no 155 - * longer needed or accidentially created. Saves memory :) 155 + * longer needed or accidentally created. Saves memory :) 156 156 */ 157 157 static void ccwgroup_ungroup(struct ccwgroup_device *gdev) 158 158 {
+1 -1
drivers/s390/cio/device.c
··· 943 943 cdev->private->dev_id.devno, sch->schid.ssid, 944 944 sch->schib.pmcw.dev, rc); 945 945 if (old_enabled) { 946 - /* Try to reenable the old subchannel. */ 946 + /* Try to re-enable the old subchannel. */ 947 947 spin_lock_irq(old_sch->lock); 948 948 cio_enable_subchannel(old_sch, (u32)virt_to_phys(old_sch)); 949 949 spin_unlock_irq(old_sch->lock);
+2 -2
drivers/s390/cio/device_fsm.c
··· 310 310 struct subchannel *sch = to_subchannel(cdev->dev.parent); 311 311 312 312 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) { 313 - /* Reenable channel measurements, if needed. */ 313 + /* Re-enable channel measurements, if needed. */ 314 314 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF); 315 315 /* Save indication for new paths. */ 316 316 cdev->private->path_new_mask = sch->vpm; ··· 947 947 */ 948 948 sch->lpm = sch->schib.pmcw.pam & sch->opm; 949 949 /* 950 - * Use the initial configuration since we can't be shure that the old 950 + * Use the initial configuration since we can't be sure that the old 951 951 * paths are valid. 952 952 */ 953 953 io_subchannel_init_config(sch);
+2 -2
drivers/s390/cio/vfio_ccw_cp.c
··· 672 672 /* 673 673 * Fetch one ccw. 674 674 * To reduce memory copy, we'll pin the cda page in memory, 675 - * and to get rid of the cda 2G limitiaion of ccw1, we'll translate 675 + * and to get rid of the cda 2G limitation of ccw1, we'll translate 676 676 * direct ccws to idal ccws. 677 677 */ 678 678 static int ccwchain_fetch_one(struct ccw1 *ccw, ··· 787 787 * program. 788 788 * 789 789 * These APIs will copy the ccws into kernel-space buffers, and update 790 - * the guest phsical addresses with their corresponding host physical 790 + * the guest physical addresses with their corresponding host physical 791 791 * addresses. Then channel I/O device drivers could issue the 792 792 * translated channel program to real devices to perform an I/O 793 793 * operation.
+2 -2
drivers/s390/crypto/ap_bus.c
··· 497 497 enum ap_sm_wait wait = AP_SM_WAIT_NONE; 498 498 499 499 /* Reset the indicator if interrupts are used. Thus new interrupts can 500 - * be received. Doing it in the beginning of the tasklet is therefor 500 + * be received. Doing it in the beginning of the tasklet is therefore 501 501 * important that no requests on any AP get lost. 502 502 */ 503 503 if (ap_irq_flag) ··· 2289 2289 timer_setup(&ap_config_timer, ap_config_timeout, 0); 2290 2290 2291 2291 /* 2292 - * Setup the high resultion poll timer. 2292 + * Setup the high resolution poll timer. 2293 2293 * If we are running under z/VM adjust polling to z/VM polling rate. 2294 2294 */ 2295 2295 if (MACHINE_IS_VM)
+1 -26
drivers/s390/crypto/ap_bus.h
··· 233 233 234 234 typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue); 235 235 236 - /* failure injection cmd struct */ 237 - struct ap_fi { 238 - union { 239 - u16 cmd; /* fi flags + action */ 240 - struct { 241 - u8 flags; /* fi flags only */ 242 - u8 action; /* fi action only */ 243 - }; 244 - }; 245 - }; 246 - 247 - /* all currently known fi actions */ 248 - enum ap_fi_actions { 249 - AP_FI_ACTION_CCA_AGENT_FF = 0x01, 250 - AP_FI_ACTION_CCA_DOM_INVAL = 0x02, 251 - AP_FI_ACTION_NQAP_QID_INVAL = 0x03, 252 - }; 253 - 254 - /* all currently known fi flags */ 255 - enum ap_fi_flags { 256 - AP_FI_FLAG_NO_RETRY = 0x01, 257 - AP_FI_FLAG_TOGGLE_SPECIAL = 0x02, 258 - }; 259 - 260 236 struct ap_message { 261 237 struct list_head list; /* Request queueing. */ 262 238 unsigned long psmid; /* Message id. */ ··· 240 264 size_t len; /* actual msg len in msg buffer */ 241 265 size_t bufsize; /* allocated msg buffer size */ 242 266 u16 flags; /* Flags, see AP_MSG_FLAG_xxx */ 243 - struct ap_fi fi; /* Failure Injection cmd */ 244 267 int rc; /* Return code for this message */ 245 268 void *private; /* ap driver private pointer. */ 246 269 /* receive is called from tasklet context */ ··· 359 384 * like "+1-16,-32,-0x40,+128" where only single bits or ranges of 360 385 * bits are cleared or set. Distinction is done based on the very 361 386 * first character which may be '+' or '-' for the relative string 362 - * and othewise assume to be an absolute value string. If parsing fails 387 + * and otherwise assume to be an absolute value string. If parsing fails 363 388 * a negative errno value is returned. All arguments and bitmaps are 364 389 * big endian order. 365 390 */
-7
drivers/s390/crypto/ap_queue.c
··· 274 274 275 275 /* Start the next request on the queue. */ 276 276 ap_msg = list_entry(aq->requestq.next, struct ap_message, list); 277 - #ifdef CONFIG_ZCRYPT_DEBUG 278 - if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) { 279 - AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n", 280 - __func__, ap_msg->fi.cmd); 281 - qid = 0xFF00; 282 - } 283 - #endif 284 277 status = __ap_send(qid, ap_msg->psmid, 285 278 ap_msg->msg, ap_msg->len, 286 279 ap_msg->flags & AP_MSG_FLAG_SPECIAL);
+2 -2
drivers/s390/crypto/vfio_ap_ops.c
··· 445 445 q->saved_isc = isc; 446 446 break; 447 447 case AP_RESPONSE_OTHERWISE_CHANGED: 448 - /* We could not modify IRQ setings: clear new configuration */ 448 + /* We could not modify IRQ settings: clear new configuration */ 449 449 vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1); 450 450 kvm_s390_gisc_unregister(kvm, isc); 451 451 break; ··· 524 524 * Response.status may be set to following Response Code: 525 525 * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available 526 526 * - AP_RESPONSE_DECONFIGURED: if the queue is not configured 527 - * - AP_RESPONSE_NORMAL (0) : in case of successs 527 + * - AP_RESPONSE_NORMAL (0) : in case of success 528 528 * Check vfio_ap_setirq() and vfio_ap_clrirq() for other possible RC. 529 529 * We take the matrix_dev lock to ensure serialization on queues and 530 530 * mediated device access.
+6 -107
drivers/s390/crypto/zcrypt_api.c
··· 111 111 * Multi device nodes extension functions. 112 112 */ 113 113 114 - #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 115 - 116 114 struct zcdn_device; 117 115 118 116 static struct class *zcrypt_class; ··· 475 477 mutex_unlock(&ap_perms_mutex); 476 478 } 477 479 478 - #endif 479 - 480 480 /* 481 481 * zcrypt_read (): Not supported beyond zcrypt 1.3.1. 482 482 * ··· 506 510 { 507 511 struct ap_perms *perms = &ap_perms; 508 512 509 - #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 510 513 if (filp->f_inode->i_cdev == &zcrypt_cdev) { 511 514 struct zcdn_device *zcdndev; 512 515 ··· 517 522 if (zcdndev) 518 523 perms = &zcdndev->perms; 519 524 } 520 - #endif 521 525 filp->private_data = (void *)perms; 522 526 523 527 atomic_inc(&zcrypt_open_count); ··· 530 536 */ 531 537 static int zcrypt_release(struct inode *inode, struct file *filp) 532 538 { 533 - #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 534 539 if (filp->f_inode->i_cdev == &zcrypt_cdev) { 535 540 struct zcdn_device *zcdndev; 536 541 ··· 542 549 put_device(&zcdndev->device); 543 550 } 544 551 } 545 - #endif 546 552 547 553 atomic_dec(&zcrypt_open_count); 548 554 return 0; ··· 653 661 654 662 ap_init_message(&ap_msg); 655 663 656 - #ifdef CONFIG_ZCRYPT_DEBUG 657 - if (tr && tr->fi.cmd) 658 - ap_msg.fi.cmd = tr->fi.cmd; 659 - #endif 660 - 661 664 if (mex->outputdatalength < mex->inputdatalength) { 662 665 func_code = 0; 663 666 rc = -EINVAL; ··· 674 687 pref_zq = NULL; 675 688 spin_lock(&zcrypt_list_lock); 676 689 for_each_zcrypt_card(zc) { 677 - /* Check for usable accelarator or CCA card */ 690 + /* Check for usable accelerator or CCA card */ 678 691 if (!zc->online || !zc->card->config || zc->card->chkstop || 679 692 !(zc->card->functions & 0x18000000)) 680 693 continue; ··· 758 771 759 772 ap_init_message(&ap_msg); 760 773 761 - #ifdef CONFIG_ZCRYPT_DEBUG 762 - if (tr && tr->fi.cmd) 763 - ap_msg.fi.cmd = tr->fi.cmd; 764 - #endif 765 - 766 774 if (crt->outputdatalength < crt->inputdatalength) { 767 775 func_code = 0; 768 776 rc = 
-EINVAL; ··· 779 797 pref_zq = NULL; 780 798 spin_lock(&zcrypt_list_lock); 781 799 for_each_zcrypt_card(zc) { 782 - /* Check for usable accelarator or CCA card */ 800 + /* Check for usable accelerator or CCA card */ 783 801 if (!zc->online || !zc->card->config || zc->card->chkstop || 784 802 !(zc->card->functions & 0x18000000)) 785 803 continue; ··· 864 882 865 883 xcrb->status = 0; 866 884 ap_init_message(&ap_msg); 867 - 868 - #ifdef CONFIG_ZCRYPT_DEBUG 869 - if (tr && tr->fi.cmd) 870 - ap_msg.fi.cmd = tr->fi.cmd; 871 - if (tr && tr->fi.action == AP_FI_ACTION_CCA_AGENT_FF) { 872 - ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid agent_ID 'FF'\n", 873 - __func__, tr->fi.cmd); 874 - xcrb->agent_ID = 0x4646; 875 - } 876 - #endif 877 885 878 886 rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); 879 887 if (rc) ··· 954 982 if (*domain == AUTOSEL_DOM) 955 983 *domain = AP_QID_QUEUE(qid); 956 984 957 - #ifdef CONFIG_ZCRYPT_DEBUG 958 - if (tr && tr->fi.action == AP_FI_ACTION_CCA_DOM_INVAL) { 959 - ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid domain\n", 960 - __func__, tr->fi.cmd); 961 - *domain = 99; 962 - } 963 - #endif 964 - 965 985 rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg); 966 986 967 987 spin_lock(&zcrypt_list_lock); ··· 1021 1057 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); 1022 1058 1023 1059 ap_init_message(&ap_msg); 1024 - 1025 - #ifdef CONFIG_ZCRYPT_DEBUG 1026 - if (tr && tr->fi.cmd) 1027 - ap_msg.fi.cmd = tr->fi.cmd; 1028 - #endif 1029 1060 1030 1061 target_num = (unsigned short)xcrb->targets_num; 1031 1062 ··· 1432 1473 if (copy_from_user(&mex, umex, sizeof(mex))) 1433 1474 return -EFAULT; 1434 1475 1435 - #ifdef CONFIG_ZCRYPT_DEBUG 1436 - if (mex.inputdatalength & (1U << 31)) { 1437 - if (!capable(CAP_SYS_ADMIN)) 1438 - return -EPERM; 1439 - tr.fi.cmd = (u16)(mex.inputdatalength >> 16); 1440 - } 1441 - mex.inputdatalength &= 0x0000FFFF; 1442 - #endif 1443 - 1444 1476 do { 1445 1477 rc = 
zcrypt_rsa_modexpo(perms, &tr, &mex); 1446 1478 if (rc == -EAGAIN) 1447 1479 tr.again_counter++; 1448 - #ifdef CONFIG_ZCRYPT_DEBUG 1449 - if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY)) 1450 - break; 1451 - #endif 1452 1480 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1453 1481 /* on failure: retry once again after a requested rescan */ 1454 1482 if ((rc == -ENODEV) && (zcrypt_process_rescan())) ··· 1464 1518 if (copy_from_user(&crt, ucrt, sizeof(crt))) 1465 1519 return -EFAULT; 1466 1520 1467 - #ifdef CONFIG_ZCRYPT_DEBUG 1468 - if (crt.inputdatalength & (1U << 31)) { 1469 - if (!capable(CAP_SYS_ADMIN)) 1470 - return -EPERM; 1471 - tr.fi.cmd = (u16)(crt.inputdatalength >> 16); 1472 - } 1473 - crt.inputdatalength &= 0x0000FFFF; 1474 - #endif 1475 - 1476 1521 do { 1477 1522 rc = zcrypt_rsa_crt(perms, &tr, &crt); 1478 1523 if (rc == -EAGAIN) 1479 1524 tr.again_counter++; 1480 - #ifdef CONFIG_ZCRYPT_DEBUG 1481 - if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY)) 1482 - break; 1483 - #endif 1484 1525 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1485 1526 /* on failure: retry once again after a requested rescan */ 1486 1527 if ((rc == -ENODEV) && (zcrypt_process_rescan())) ··· 1496 1563 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) 1497 1564 return -EFAULT; 1498 1565 1499 - #ifdef CONFIG_ZCRYPT_DEBUG 1500 - if ((xcrb.status & 0x8000FFFF) == 0x80004649 /* 'FI' */) { 1501 - if (!capable(CAP_SYS_ADMIN)) 1502 - return -EPERM; 1503 - tr.fi.cmd = (u16)(xcrb.status >> 16); 1504 - } 1505 - xcrb.status = 0; 1506 - #endif 1507 - 1508 1566 do { 1509 1567 rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); 1510 1568 if (rc == -EAGAIN) 1511 1569 tr.again_counter++; 1512 - #ifdef CONFIG_ZCRYPT_DEBUG 1513 - if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY)) 1514 - break; 1515 - #endif 1516 1570 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1517 1571 /* on failure: retry once again after a requested rescan */ 
1518 1572 if ((rc == -ENODEV) && (zcrypt_process_rescan())) ··· 1529 1609 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) 1530 1610 return -EFAULT; 1531 1611 1532 - #ifdef CONFIG_ZCRYPT_DEBUG 1533 - if (xcrb.req_len & (1ULL << 63)) { 1534 - if (!capable(CAP_SYS_ADMIN)) 1535 - return -EPERM; 1536 - tr.fi.cmd = (u16)(xcrb.req_len >> 48); 1537 - } 1538 - xcrb.req_len &= 0x0000FFFFFFFFFFFFULL; 1539 - #endif 1540 - 1541 1612 do { 1542 1613 rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); 1543 1614 if (rc == -EAGAIN) 1544 1615 tr.again_counter++; 1545 - #ifdef CONFIG_ZCRYPT_DEBUG 1546 - if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY)) 1547 - break; 1548 - #endif 1549 1616 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1550 1617 /* on failure: retry once again after a requested rescan */ 1551 1618 if ((rc == -ENODEV) && (zcrypt_process_rescan())) ··· 1575 1668 size_t total_size = MAX_ZDEV_ENTRIES_EXT 1576 1669 * sizeof(struct zcrypt_device_status_ext); 1577 1670 1578 - device_status = kzalloc(total_size, GFP_KERNEL); 1671 + device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT, 1672 + sizeof(struct zcrypt_device_status_ext), 1673 + GFP_KERNEL); 1579 1674 if (!device_status) 1580 1675 return -ENOMEM; 1581 1676 zcrypt_device_status_mask_ext(device_status); 1582 1677 if (copy_to_user((char __user *)arg, device_status, 1583 1678 total_size)) 1584 1679 rc = -EFAULT; 1585 - kfree(device_status); 1680 + kvfree(device_status); 1586 1681 return rc; 1587 1682 } 1588 1683 case ZCRYPT_STATUS_MASK: { ··· 2053 2144 debug_unregister(zcrypt_dbf_info); 2054 2145 } 2055 2146 2056 - #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 2057 - 2058 2147 static int __init zcdn_init(void) 2059 2148 { 2060 2149 int rc; ··· 2110 2203 class_destroy(zcrypt_class); 2111 2204 } 2112 2205 2113 - #endif 2114 - 2115 2206 /* 2116 2207 * zcrypt_api_init(): Module initialization. 
2117 2208 * ··· 2123 2218 if (rc) 2124 2219 goto out; 2125 2220 2126 - #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 2127 2221 rc = zcdn_init(); 2128 2222 if (rc) 2129 2223 goto out; 2130 - #endif 2131 2224 2132 2225 /* Register the request sprayer. */ 2133 2226 rc = misc_register(&zcrypt_misc_device); ··· 2138 2235 return 0; 2139 2236 2140 2237 out_misc_register_failed: 2141 - #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 2142 2238 zcdn_exit(); 2143 - #endif 2144 2239 zcrypt_debug_exit(); 2145 2240 out: 2146 2241 return rc; ··· 2151 2250 */ 2152 2251 void __exit zcrypt_api_exit(void) 2153 2252 { 2154 - #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 2155 2253 zcdn_exit(); 2156 - #endif 2157 2254 misc_deregister(&zcrypt_misc_device); 2158 2255 zcrypt_msgtype6_exit(); 2159 2256 zcrypt_msgtype50_exit();
-3
drivers/s390/crypto/zcrypt_api.h
··· 60 60 int again_counter; /* retry attempts counter */ 61 61 int last_qid; /* last qid used */ 62 62 int last_rc; /* last return code */ 63 - #ifdef CONFIG_ZCRYPT_DEBUG 64 - struct ap_fi fi; /* failure injection cmd */ 65 - #endif 66 63 }; 67 64 68 65 /* defines related to message tracking */
+1 -1
drivers/s390/crypto/zcrypt_ccamisc.c
··· 689 689 goto out; 690 690 } 691 691 692 - /* copy the tanslated protected key */ 692 + /* copy the translated protected key */ 693 693 switch (prepparm->lv3.ckb.len) { 694 694 case 16 + 32: 695 695 /* AES 128 protected key */
+2 -2
drivers/s390/crypto/zcrypt_ccamisc.h
··· 115 115 u64 mkvp; /* master key verification pattern */ 116 116 u8 opk[48]; /* encrypted object protection key data */ 117 117 u16 adatalen; /* associated data length in bytes */ 118 - u16 fseclen; /* formated section length in bytes */ 118 + u16 fseclen; /* formatted section length in bytes */ 119 119 u8 more_data[]; /* more data follows */ 120 120 } __packed; 121 121 ··· 232 232 * the number of apqns stored into the list is returned in *nr_apqns. One apqn 233 233 * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and 234 234 * may be casted to struct pkey_apqn. The return value is either 0 for success 235 - * or a negative errno value. If no apqn meeting the criterias is found, 235 + * or a negative errno value. If no apqn meeting the criteria is found, 236 236 * -ENODEV is returned. 237 237 */ 238 238 int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+1 -1
drivers/s390/crypto/zcrypt_ep11misc.c
··· 1368 1368 goto out; 1369 1369 } 1370 1370 1371 - /* copy the tanslated protected key */ 1371 + /* copy the translated protected key */ 1372 1372 if (wki->pkeysize > *protkeylen) { 1373 1373 DEBUG_ERR("%s wk info pkeysize %llu > protkeysize %u\n", 1374 1374 __func__, wki->pkeysize, *protkeylen);
+2 -2
drivers/s390/crypto/zcrypt_ep11misc.h
··· 131 131 * - if minapi > 0 only apqns with API_ord_nr >= minapi 132 132 * - if wkvp != NULL only apqns where the wkvp (EP11_WKVPLEN bytes) matches 133 133 * to the first EP11_WKVPLEN bytes of the wkvp of the current wrapping 134 - * key for this domain. When a wkvp is given there will aways be a re-fetch 134 + * key for this domain. When a wkvp is given there will always be a re-fetch 135 135 * of the domain info for the potential apqn - so this triggers an request 136 136 * reply to each apqn eligible. 137 137 * The array of apqn entries is allocated with kmalloc and returned in *apqns; 138 138 * the number of apqns stored into the list is returned in *nr_apqns. One apqn 139 139 * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and 140 140 * may be casted to struct pkey_apqn. The return value is either 0 for success 141 - * or a negative errno value. If no apqn meeting the criterias is found, 141 + * or a negative errno value. If no apqn meeting the criteria is found, 142 142 * -ENODEV is returned. 143 143 */ 144 144 int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
-10
drivers/s390/crypto/zcrypt_msgtype50.c
··· 246 246 copy_from_user(inp, mex->inputdata, mod_len)) 247 247 return -EFAULT; 248 248 249 - #ifdef CONFIG_ZCRYPT_DEBUG 250 - if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL) 251 - ap_msg->flags ^= AP_MSG_FLAG_SPECIAL; 252 - #endif 253 - 254 249 return 0; 255 250 } 256 251 ··· 332 337 copy_from_user(u, crt->u_mult_inv + MSGTYPE_ADJUSTMENT, short_len) || 333 338 copy_from_user(inp, crt->inputdata, mod_len)) 334 339 return -EFAULT; 335 - 336 - #ifdef CONFIG_ZCRYPT_DEBUG 337 - if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL) 338 - ap_msg->flags ^= AP_MSG_FLAG_SPECIAL; 339 - #endif 340 340 341 341 return 0; 342 342 }
+6 -10
drivers/s390/crypto/zcrypt_msgtype6.c
··· 425 425 memcmp(function_code, "AU", 2) == 0) 426 426 ap_msg->flags |= AP_MSG_FLAG_SPECIAL; 427 427 428 - #ifdef CONFIG_ZCRYPT_DEBUG 429 - if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL) 430 - ap_msg->flags ^= AP_MSG_FLAG_SPECIAL; 431 - #endif 432 - 433 428 /* check CPRB minor version, set info bits in ap_message flag field */ 434 429 switch (*(unsigned short *)(&msg->cprbx.func_id[0])) { 435 430 case 0x5432: /* "T2" */ ··· 529 534 /* enable special processing based on the cprbs flags special bit */ 530 535 if (msg->cprbx.flags & 0x20) 531 536 ap_msg->flags |= AP_MSG_FLAG_SPECIAL; 532 - 533 - #ifdef CONFIG_ZCRYPT_DEBUG 534 - if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL) 535 - ap_msg->flags ^= AP_MSG_FLAG_SPECIAL; 536 - #endif 537 537 538 538 /* set info bits in ap_message flag field */ 539 539 if (msg->cprbx.flags & 0x80) ··· 1133 1143 ap_cancel_message(zq->queue, ap_msg); 1134 1144 } 1135 1145 1146 + if (rc == -EAGAIN && ap_msg->flags & AP_MSG_FLAG_ADMIN) 1147 + rc = -EIO; /* do not retry administrative requests */ 1148 + 1136 1149 out: 1137 1150 if (rc) 1138 1151 ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n", ··· 1255 1262 /* Signal pending. */ 1256 1263 ap_cancel_message(zq->queue, ap_msg); 1257 1264 } 1265 + 1266 + if (rc == -EAGAIN && ap_msg->flags & AP_MSG_FLAG_ADMIN) 1267 + rc = -EIO; /* do not retry administrative requests */ 1258 1268 1259 1269 out: 1260 1270 if (rc)
+5 -5
drivers/s390/net/ctcm_mpc.c
··· 43 43 #include <linux/netdevice.h> 44 44 #include <net/dst.h> 45 45 46 - #include <linux/io.h> /* instead of <asm/io.h> ok ? */ 47 - #include <asm/ccwdev.h> 48 - #include <asm/ccwgroup.h> 49 - #include <linux/bitops.h> /* instead of <asm/bitops.h> ok ? */ 50 - #include <linux/uaccess.h> /* instead of <asm/uaccess.h> ok ? */ 46 + #include <linux/io.h> 47 + #include <linux/bitops.h> 48 + #include <linux/uaccess.h> 51 49 #include <linux/wait.h> 52 50 #include <linux/moduleparam.h> 51 + #include <asm/ccwdev.h> 52 + #include <asm/ccwgroup.h> 53 53 #include <asm/idals.h> 54 54 55 55 #include "ctcm_main.h"
+1 -1
drivers/s390/net/netiucv.c
··· 47 47 #include <linux/ctype.h> 48 48 #include <net/dst.h> 49 49 50 - #include <asm/io.h> 50 + #include <linux/io.h> 51 51 #include <linux/uaccess.h> 52 52 #include <asm/ebcdic.h> 53 53