Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-5.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Vasily Gorbik:

- Raise minimum supported machine generation to z10, which comes with
various cleanups and code simplifications (usercopy/spectre
mitigation/etc).

- Rework extables and get rid of anonymous out-of-line fixups.

- Page table helpers cleanup. Add set_pXd()/set_pte() helper functions.
Convert pte_val()/pXd_val() macros to functions.

- Optimize kretprobe handling by avoiding extra kprobe on
__kretprobe_trampoline.

- Add support for CEX8 crypto cards.

- Allow to trigger AP bus rescan via writing to /sys/bus/ap/scans.

- Add CONFIG_EXPOLINE_EXTERN option to build the kernel without COMDAT
group sections which simplifies kpatch support.

- Always use the packed stack layout and extend kernel unwinder tests.

- Add sanity checks for ftrace code patching.

- Add s390dbf debug log for the vfio_ap device driver.

- Various virtual vs physical address confusion fixes.

- Various small fixes and improvements all over the code.

* tag 's390-5.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (69 commits)
s390/test_unwind: add kretprobe tests
s390/kprobes: Avoid additional kprobe in kretprobe handling
s390: convert ".insn" encoding to instruction names
s390: assume stckf is always present
s390/nospec: move to single register thunks
s390: raise minimum supported machine generation to z10
s390/uaccess: Add copy_from/to_user_key functions
s390/nospec: align and size extern thunks
s390/nospec: add an option to use thunk-extern
s390/nospec: generate single register thunks if possible
s390/pci: make zpci_set_irq()/zpci_clear_irq() static
s390: remove unused expoline to BC instructions
s390/irq: use assignment instead of cast
s390/traps: get rid of magic cast for per code
s390/traps: get rid of magic cast for program interruption code
s390/signal: fix typo in comments
s390/asm-offsets: remove unused defines
s390/test_unwind: avoid build warning with W=1
s390: remove .fixup section
s390/bpf: encode register within extable entry
...

+1844 -1439
+1 -3
MAINTAINERS
··· 17019 17019 S: Supported 17020 17020 W: http://www.ibm.com/developerworks/linux/linux390/ 17021 17021 F: Documentation/s390/vfio-ap.rst 17022 - F: drivers/s390/crypto/vfio_ap_drv.c 17023 - F: drivers/s390/crypto/vfio_ap_ops.c 17024 - F: drivers/s390/crypto/vfio_ap_private.h 17022 + F: drivers/s390/crypto/vfio_ap* 17025 17023 17026 17024 S390 VFIO-CCW DRIVER 17027 17025 M: Eric Farman <farman@linux.ibm.com>
+17 -80
arch/s390/Kconfig
··· 122 122 select ARCH_WANT_IPC_PARSE_VERSION 123 123 select BUILDTIME_TABLE_SORT 124 124 select CLONE_BACKWARDS2 125 - select CPU_NO_EFFICIENT_FFS if !HAVE_MARCH_Z9_109_FEATURES 126 125 select DMA_OPS if PCI 127 126 select DYNAMIC_FTRACE if FUNCTION_TRACER 128 127 select GENERIC_ALLOCATOR ··· 156 157 select HAVE_DYNAMIC_FTRACE_WITH_ARGS 157 158 select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 158 159 select HAVE_DYNAMIC_FTRACE_WITH_REGS 159 - select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES 160 + select HAVE_EBPF_JIT if HAVE_MARCH_Z196_FEATURES 160 161 select HAVE_EFFICIENT_UNALIGNED_ACCESS 161 162 select HAVE_FAST_GUP 162 163 select HAVE_FENTRY ··· 231 232 232 233 menu "Processor type and features" 233 234 234 - config HAVE_MARCH_Z900_FEATURES 235 - def_bool n 236 - 237 - config HAVE_MARCH_Z990_FEATURES 238 - def_bool n 239 - select HAVE_MARCH_Z900_FEATURES 240 - 241 - config HAVE_MARCH_Z9_109_FEATURES 242 - def_bool n 243 - select HAVE_MARCH_Z990_FEATURES 244 - 245 235 config HAVE_MARCH_Z10_FEATURES 246 236 def_bool n 247 - select HAVE_MARCH_Z9_109_FEATURES 248 237 249 238 config HAVE_MARCH_Z196_FEATURES 250 239 def_bool n ··· 258 271 prompt "Processor type" 259 272 default MARCH_Z196 260 273 261 - config MARCH_Z900 262 - bool "IBM zSeries model z800 and z900" 263 - select HAVE_MARCH_Z900_FEATURES 264 - depends on $(cc-option,-march=z900) 265 - help 266 - Select this to enable optimizations for model z800/z900 (2064 and 267 - 2066 series). This will enable some optimizations that are not 268 - available on older ESA/390 (31 Bit) only CPUs. 269 - 270 - config MARCH_Z990 271 - bool "IBM zSeries model z890 and z990" 272 - select HAVE_MARCH_Z990_FEATURES 273 - depends on $(cc-option,-march=z990) 274 - help 275 - Select this to enable optimizations for model z890/z990 (2084 and 276 - 2086 series). The kernel will be slightly faster but will not work 277 - on older machines. 
278 - 279 - config MARCH_Z9_109 280 - bool "IBM System z9" 281 - select HAVE_MARCH_Z9_109_FEATURES 282 - depends on $(cc-option,-march=z9-109) 283 - help 284 - Select this to enable optimizations for IBM System z9 (2094 and 285 - 2096 series). The kernel will be slightly faster but will not work 286 - on older machines. 287 - 288 274 config MARCH_Z10 289 275 bool "IBM System z10" 290 276 select HAVE_MARCH_Z10_FEATURES 291 277 depends on $(cc-option,-march=z10) 292 278 help 293 - Select this to enable optimizations for IBM System z10 (2097 and 294 - 2098 series). The kernel will be slightly faster but will not work 295 - on older machines. 279 + Select this to enable optimizations for IBM System z10 (2097 and 2098 280 + series). This is the oldest machine generation currently supported. 296 281 297 282 config MARCH_Z196 298 283 bool "IBM zEnterprise 114 and 196" ··· 313 354 314 355 endchoice 315 356 316 - config MARCH_Z900_TUNE 317 - def_bool TUNE_Z900 || MARCH_Z900 && TUNE_DEFAULT 318 - 319 - config MARCH_Z990_TUNE 320 - def_bool TUNE_Z990 || MARCH_Z990 && TUNE_DEFAULT 321 - 322 - config MARCH_Z9_109_TUNE 323 - def_bool TUNE_Z9_109 || MARCH_Z9_109 && TUNE_DEFAULT 324 - 325 357 config MARCH_Z10_TUNE 326 358 def_bool TUNE_Z10 || MARCH_Z10 && TUNE_DEFAULT 327 359 ··· 348 398 Tune the generated code for the target processor for which the kernel 349 399 will be compiled. 
350 400 351 - config TUNE_Z900 352 - bool "IBM zSeries model z800 and z900" 353 - depends on $(cc-option,-mtune=z900) 354 - 355 - config TUNE_Z990 356 - bool "IBM zSeries model z890 and z990" 357 - depends on $(cc-option,-mtune=z990) 358 - 359 - config TUNE_Z9_109 360 - bool "IBM System z9" 361 - depends on $(cc-option,-mtune=z9-109) 362 - 363 401 config TUNE_Z10 364 402 bool "IBM System z10" 365 - depends on $(cc-option,-mtune=z10) 366 403 367 404 config TUNE_Z196 368 405 bool "IBM zEnterprise 114 and 196" ··· 524 587 525 588 config EXPOLINE 526 589 def_bool n 590 + depends on $(cc-option,-mindirect-branch=thunk) 527 591 prompt "Avoid speculative indirect branches in the kernel" 528 592 help 529 593 Compile the kernel with the expoline compiler options to guard ··· 532 594 branches. 533 595 Requires a compiler with -mindirect-branch=thunk support for full 534 596 protection. The kernel may run slower. 597 + 598 + If unsure, say N. 599 + 600 + config EXPOLINE_EXTERN 601 + def_bool n 602 + depends on EXPOLINE 603 + depends on CC_IS_GCC && GCC_VERSION >= 110200 604 + depends on $(success,$(srctree)/arch/s390/tools/gcc-thunk-extern.sh $(CC)) 605 + prompt "Generate expolines as extern functions." 606 + help 607 + This option is required for some tooling like kpatch. The kernel is 608 + compiled with -mindirect-branch=thunk-extern and requires a newer 609 + compiler. 535 610 536 611 If unsure, say N. 537 612 ··· 608 657 in bits. Supported is any size between 2^42 (4TB) and 2^53 (8PB). 609 658 Increasing the number of bits also increases the kernel image size. 610 659 By default 46 bits (64TB) are supported. 611 - 612 - config PACK_STACK 613 - def_bool y 614 - prompt "Pack kernel stack" 615 - help 616 - This option enables the compiler option -mkernel-backchain if it 617 - is available. If the option is available the compiler supports 618 - the new stack layout which dramatically reduces the minimum stack 619 - frame size. 
With an old compiler a non-leaf function needs a 620 - minimum of 96 bytes on 31 bit and 160 bytes on 64 bit. With 621 - -mkernel-backchain the minimum size drops to 16 byte on 31 bit 622 - and 24 byte on 64 bit. 623 - 624 - Say Y if you are unsure. 625 660 626 661 config CHECK_STACK 627 662 def_bool y
+11 -18
arch/s390/Makefile
··· 21 21 aflags_dwarf := -Wa,-gdwarf-2 22 22 KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__ 23 23 KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf)) 24 - KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 24 + KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack 25 25 KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY 26 26 KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain 27 27 KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables ··· 36 36 37 37 export LD_BFD 38 38 39 - mflags-$(CONFIG_MARCH_Z900) := -march=z900 40 - mflags-$(CONFIG_MARCH_Z990) := -march=z990 41 - mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109 42 39 mflags-$(CONFIG_MARCH_Z10) := -march=z10 43 40 mflags-$(CONFIG_MARCH_Z196) := -march=z196 44 41 mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12 ··· 48 51 aflags-y += $(mflags-y) 49 52 cflags-y += $(mflags-y) 50 53 51 - cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900 52 - cflags-$(CONFIG_MARCH_Z990_TUNE) += -mtune=z990 53 - cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109 54 54 cflags-$(CONFIG_MARCH_Z10_TUNE) += -mtune=z10 55 55 cflags-$(CONFIG_MARCH_Z196_TUNE) += -mtune=z196 56 56 cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12 ··· 62 68 # 63 69 cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls 64 70 65 - ifneq ($(call cc-option,-mpacked-stack -mbackchain -msoft-float),) 66 - cflags-$(CONFIG_PACK_STACK) += -mpacked-stack -D__PACK_STACK 67 - aflags-$(CONFIG_PACK_STACK) += -D__PACK_STACK 68 - endif 69 - 70 71 KBUILD_AFLAGS_DECOMPRESSOR += $(aflags-y) 71 72 KBUILD_CFLAGS_DECOMPRESSOR += $(cflags-y) 72 73 ··· 75 86 endif 76 87 77 88 ifdef CONFIG_EXPOLINE 78 - ifneq ($(call cc-option,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),) 89 + ifdef CONFIG_EXPOLINE_EXTERN 90 + KBUILD_LDFLAGS_MODULE += arch/s390/lib/expoline.o 91 + CC_FLAGS_EXPOLINE := -mindirect-branch=thunk-extern 92 + CC_FLAGS_EXPOLINE += 
-mfunction-return=thunk-extern 93 + else 79 94 CC_FLAGS_EXPOLINE := -mindirect-branch=thunk 80 95 CC_FLAGS_EXPOLINE += -mfunction-return=thunk 81 - CC_FLAGS_EXPOLINE += -mindirect-branch-table 82 - export CC_FLAGS_EXPOLINE 83 - cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE 84 - aflags-y += -DCC_USING_EXPOLINE 85 96 endif 97 + CC_FLAGS_EXPOLINE += -mindirect-branch-table 98 + export CC_FLAGS_EXPOLINE 99 + cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE 100 + aflags-y += -DCC_USING_EXPOLINE 86 101 endif 87 102 88 103 ifdef CONFIG_FUNCTION_TRACER ··· 104 111 # Test CFI features of binutils 105 112 cfi := $(call as-instr,.cfi_startproc\n.cfi_val_offset 15$(comma)-160\n.cfi_endproc,-DCONFIG_AS_CFI_VAL_OFFSET=1) 106 113 107 - KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y) 114 + KBUILD_CFLAGS += -mpacked-stack -mbackchain -msoft-float $(cflags-y) 108 115 KBUILD_CFLAGS += -pipe -Wno-sign-compare 109 116 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables $(cfi) 110 117 KBUILD_AFLAGS += $(aflags-y) $(cfi)
-1
arch/s390/boot/head.S
··· 5 5 * Author(s): Hartmut Penner <hp@de.ibm.com> 6 6 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 7 * Rob van der Heij <rvdhei@iae.nl> 8 - * Heiko Carstens <heiko.carstens@de.ibm.com> 9 8 * 10 9 * There are 5 different IPL methods 11 10 * 1) load the image directly into ram at address 0 and do an PSW restart
+10 -10
arch/s390/crypto/chacha-s390.S
··· 312 312 VPERM XC0,XC0,XC0,BEPERM 313 313 VPERM XD0,XD0,XD0,BEPERM 314 314 315 - .insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40 315 + clgfi LEN,0x40 316 316 jl .Ltail_4x 317 317 318 318 VLM XT0,XT3,0,INP,0 ··· 339 339 VPERM XC0,XC0,XC0,BEPERM 340 340 VPERM XD0,XD0,XD0,BEPERM 341 341 342 - .insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40 342 + clgfi LEN,0x40 343 343 jl .Ltail_4x 344 344 345 345 VLM XT0,XT3,0,INP,0 ··· 366 366 VPERM XC0,XC0,XC0,BEPERM 367 367 VPERM XD0,XD0,XD0,BEPERM 368 368 369 - .insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40 369 + clgfi LEN,0x40 370 370 jl .Ltail_4x 371 371 372 372 VLM XT0,XT3,0,INP,0 ··· 472 472 #define T3 %v30 473 473 474 474 ENTRY(chacha20_vx) 475 - .insn rilu,0xc20e00000000,LEN,256 # clgfi LEN,256 475 + clgfi LEN,256 476 476 jle chacha20_vx_4x 477 477 stmg %r6,%r7,6*8(SP) 478 478 ··· 725 725 VPERM C0,C0,C0,BEPERM 726 726 VPERM D0,D0,D0,BEPERM 727 727 728 - .insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40 728 + clgfi LEN,0x40 729 729 jl .Ltail_vx 730 730 731 731 VAF D2,D2,T2 # +K[3]+2 ··· 754 754 VPERM C0,C1,C1,BEPERM 755 755 VPERM D0,D1,D1,BEPERM 756 756 757 - .insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40 757 + clgfi LEN,0x40 758 758 jl .Ltail_vx 759 759 760 760 VLM A1,D1,0,INP,0 ··· 780 780 VPERM C0,C2,C2,BEPERM 781 781 VPERM D0,D2,D2,BEPERM 782 782 783 - .insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40 783 + clgfi LEN,0x40 784 784 jl .Ltail_vx 785 785 786 786 VLM A1,D1,0,INP,0 ··· 807 807 VPERM C0,C3,C3,BEPERM 808 808 VPERM D0,D3,D3,BEPERM 809 809 810 - .insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40 810 + clgfi LEN,0x40 811 811 jl .Ltail_vx 812 812 813 813 VAF D3,D2,T1 # K[3]+4 ··· 837 837 VPERM C0,C4,C4,BEPERM 838 838 VPERM D0,D4,D4,BEPERM 839 839 840 - .insn rilu,0xc20e00000000,LEN,0x40 # clgfi LEN,0x40 840 + clgfi LEN,0x40 841 841 jl .Ltail_vx 842 842 843 843 VLM A1,D1,0,INP,0 ··· 864 864 VPERM C0,C5,C5,BEPERM 865 865 VPERM D0,D5,D5,BEPERM 866 866 867 - .insn rilu,0xc20e00000000,LEN,0x40 # 
clgfi LEN,0x40 867 + clgfi LEN,0x40 868 868 jl .Ltail_vx 869 869 870 870 VLM A1,D1,0,INP,0
+1
arch/s390/hypfs/hypfs_vm.c
··· 10 10 #include <linux/errno.h> 11 11 #include <linux/string.h> 12 12 #include <linux/vmalloc.h> 13 + #include <asm/extable.h> 13 14 #include <asm/diag.h> 14 15 #include <asm/ebcdic.h> 15 16 #include <asm/timex.h>
+1
arch/s390/include/asm/ap.h
··· 13 13 #define _ASM_S390_AP_H_ 14 14 15 15 #include <linux/io.h> 16 + #include <asm/asm-extable.h> 16 17 17 18 /** 18 19 * The ap_qid_t identifier of an ap queue.
+53
arch/s390/include/asm/asm-extable.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __ASM_EXTABLE_H 3 + #define __ASM_EXTABLE_H 4 + 5 + #include <linux/stringify.h> 6 + #include <asm/asm-const.h> 7 + 8 + #define EX_TYPE_NONE 0 9 + #define EX_TYPE_FIXUP 1 10 + #define EX_TYPE_BPF 2 11 + #define EX_TYPE_UACCESS 3 12 + 13 + #define __EX_TABLE(_section, _fault, _target, _type) \ 14 + stringify_in_c(.section _section,"a";) \ 15 + stringify_in_c(.align 4;) \ 16 + stringify_in_c(.long (_fault) - .;) \ 17 + stringify_in_c(.long (_target) - .;) \ 18 + stringify_in_c(.short (_type);) \ 19 + stringify_in_c(.short 0;) \ 20 + stringify_in_c(.previous) 21 + 22 + #define __EX_TABLE_UA(_section, _fault, _target, _type, _reg) \ 23 + stringify_in_c(.section _section,"a";) \ 24 + stringify_in_c(.align 4;) \ 25 + stringify_in_c(.long (_fault) - .;) \ 26 + stringify_in_c(.long (_target) - .;) \ 27 + stringify_in_c(.short (_type);) \ 28 + stringify_in_c(.macro extable_reg reg;) \ 29 + stringify_in_c(.set found, 0;) \ 30 + stringify_in_c(.set regnr, 0;) \ 31 + stringify_in_c(.irp rs,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15;) \ 32 + stringify_in_c(.ifc "\reg", "%%\rs";) \ 33 + stringify_in_c(.set found, 1;) \ 34 + stringify_in_c(.short regnr;) \ 35 + stringify_in_c(.endif;) \ 36 + stringify_in_c(.set regnr, regnr+1;) \ 37 + stringify_in_c(.endr;) \ 38 + stringify_in_c(.ifne (found != 1);) \ 39 + stringify_in_c(.error "extable_reg: bad register argument";) \ 40 + stringify_in_c(.endif;) \ 41 + stringify_in_c(.endm;) \ 42 + stringify_in_c(extable_reg _reg;) \ 43 + stringify_in_c(.purgem extable_reg;) \ 44 + stringify_in_c(.previous) 45 + 46 + #define EX_TABLE(_fault, _target) \ 47 + __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FIXUP) 48 + #define EX_TABLE_AMODE31(_fault, _target) \ 49 + __EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP) 50 + #define EX_TABLE_UA(_fault, _target, _reg) \ 51 + __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UACCESS, _reg) 52 + 53 + #endif /* 
__ASM_EXTABLE_H */
-12
arch/s390/include/asm/bitops.h
··· 256 256 return test_bit(nr ^ (BITS_PER_LONG - 1), ptr); 257 257 } 258 258 259 - #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES 260 - 261 259 /** 262 260 * __flogr - find leftmost one 263 261 * @word - The word to search ··· 373 375 { 374 376 return fls64(word); 375 377 } 376 - 377 - #else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */ 378 - 379 - #include <asm-generic/bitops/__ffs.h> 380 - #include <asm-generic/bitops/ffs.h> 381 - #include <asm-generic/bitops/__fls.h> 382 - #include <asm-generic/bitops/fls.h> 383 - #include <asm-generic/bitops/fls64.h> 384 - 385 - #endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */ 386 378 387 379 #include <asm-generic/bitops/ffz.h> 388 380 #include <asm-generic/bitops/hweight.h>
+7 -6
arch/s390/include/asm/cpu_mf.h
··· 10 10 #define _ASM_S390_CPU_MF_H 11 11 12 12 #include <linux/errno.h> 13 + #include <asm/asm-extable.h> 13 14 #include <asm/facility.h> 14 15 15 16 asm(".include \"asm/cpu_mf-insn.h\"\n"); ··· 160 159 /* Load program parameter */ 161 160 static inline void lpp(void *pp) 162 161 { 163 - asm volatile(".insn s,0xb2800000,0(%0)\n":: "a" (pp) : "memory"); 162 + asm volatile("lpp 0(%0)\n" :: "a" (pp) : "memory"); 164 163 } 165 164 166 165 /* Query counter information */ ··· 169 168 int rc = -EINVAL; 170 169 171 170 asm volatile ( 172 - "0: .insn s,0xb28e0000,%1\n" 171 + "0: qctri %1\n" 173 172 "1: lhi %0,0\n" 174 173 "2:\n" 175 174 EX_TABLE(1b, 2b) ··· 183 182 int cc; 184 183 185 184 asm volatile ( 186 - " .insn s,0xb2840000,%1\n" 185 + " lcctl %1\n" 187 186 " ipm %0\n" 188 187 " srl %0,28\n" 189 188 : "=d" (cc) : "Q" (ctl) : "cc"); ··· 197 196 int cc; 198 197 199 198 asm volatile ( 200 - " .insn rre,0xb2e40000,%0,%2\n" 199 + " ecctr %0,%2\n" 201 200 " ipm %1\n" 202 201 " srl %1,28\n" 203 202 : "=d" (_content), "=d" (cc) : "d" (ctr) : "cc"); ··· 247 246 int cc = 1; 248 247 249 248 asm volatile( 250 - "0: .insn s,0xb2860000,%1\n" 249 + "0: qsi %1\n" 251 250 "1: lhi %0,0\n" 252 251 "2:\n" 253 252 EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) ··· 262 261 263 262 cc = 1; 264 263 asm volatile( 265 - "0: .insn s,0xb2870000,0(%1)\n" 264 + "0: lsctl 0(%1)\n" 266 265 "1: ipm %0\n" 267 266 " srl %0,28\n" 268 267 "2:\n"
-1
arch/s390/include/asm/crw.h
··· 5 5 * Author(s): Ingo Adlung <adlung@de.ibm.com>, 6 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>, 7 7 * Cornelia Huck <cornelia.huck@de.ibm.com>, 8 - * Heiko Carstens <heiko.carstens@de.ibm.com>, 9 8 */ 10 9 11 10 #ifndef _ASM_S390_CRW_H
+1
arch/s390/include/asm/diag.h
··· 11 11 12 12 #include <linux/if_ether.h> 13 13 #include <linux/percpu.h> 14 + #include <asm/asm-extable.h> 14 15 15 16 enum diag_stat_enum { 16 17 DIAG_STAT_X008,
+20 -29
arch/s390/include/asm/extable.h
··· 25 25 struct exception_table_entry 26 26 { 27 27 int insn, fixup; 28 - long handler; 28 + short type, data; 29 29 }; 30 30 31 31 extern struct exception_table_entry *__start_amode31_ex_table; ··· 38 38 return (unsigned long)&x->fixup + x->fixup; 39 39 } 40 40 41 - typedef bool (*ex_handler_t)(const struct exception_table_entry *, 42 - struct pt_regs *); 43 - 44 - static inline ex_handler_t 45 - ex_fixup_handler(const struct exception_table_entry *x) 46 - { 47 - if (likely(!x->handler)) 48 - return NULL; 49 - return (ex_handler_t)((unsigned long)&x->handler + x->handler); 50 - } 51 - 52 - static inline bool ex_handle(const struct exception_table_entry *x, 53 - struct pt_regs *regs) 54 - { 55 - ex_handler_t handler = ex_fixup_handler(x); 56 - 57 - if (unlikely(handler)) 58 - return handler(x, regs); 59 - regs->psw.addr = extable_fixup(x); 60 - return true; 61 - } 62 - 63 41 #define ARCH_HAS_RELATIVE_EXTABLE 64 42 65 43 static inline void swap_ex_entry_fixup(struct exception_table_entry *a, ··· 47 69 { 48 70 a->fixup = b->fixup + delta; 49 71 b->fixup = tmp.fixup - delta; 50 - a->handler = b->handler; 51 - if (a->handler) 52 - a->handler += delta; 53 - b->handler = tmp.handler; 54 - if (b->handler) 55 - b->handler -= delta; 72 + a->type = b->type; 73 + b->type = tmp.type; 74 + a->data = b->data; 75 + b->data = tmp.data; 56 76 } 57 77 #define swap_ex_entry_fixup swap_ex_entry_fixup 78 + 79 + #ifdef CONFIG_BPF_JIT 80 + 81 + bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs); 82 + 83 + #else /* !CONFIG_BPF_JIT */ 84 + 85 + static inline bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs) 86 + { 87 + return false; 88 + } 89 + 90 + #endif /* CONFIG_BPF_JIT */ 91 + 92 + bool fixup_exception(struct pt_regs *regs); 58 93 59 94 #endif
+1
arch/s390/include/asm/fpu/api.h
··· 45 45 #define _ASM_S390_FPU_API_H 46 46 47 47 #include <linux/preempt.h> 48 + #include <asm/asm-extable.h> 48 49 49 50 void save_fpu_regs(void); 50 51 void load_fpu_regs(void);
+1
arch/s390/include/asm/futex.h
··· 4 4 5 5 #include <linux/uaccess.h> 6 6 #include <linux/futex.h> 7 + #include <asm/asm-extable.h> 7 8 #include <asm/mmu_context.h> 8 9 #include <asm/errno.h> 9 10
+2 -2
arch/s390/include/asm/hugetlb.h
··· 45 45 pte_t *ptep, unsigned long sz) 46 46 { 47 47 if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 48 - pte_val(*ptep) = _REGION3_ENTRY_EMPTY; 48 + set_pte(ptep, __pte(_REGION3_ENTRY_EMPTY)); 49 49 else 50 - pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY; 50 + set_pte(ptep, __pte(_SEGMENT_ENTRY_EMPTY)); 51 51 } 52 52 53 53 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+7 -2
arch/s390/include/asm/irq.h
··· 81 81 } 82 82 83 83 struct ext_code { 84 - unsigned short subcode; 85 - unsigned short code; 84 + union { 85 + struct { 86 + unsigned short subcode; 87 + unsigned short code; 88 + }; 89 + unsigned int int_code; 90 + }; 86 91 }; 87 92 88 93 typedef void (*ext_int_handler_t)(struct ext_code, unsigned int, unsigned long);
+1
arch/s390/include/asm/kprobes.h
··· 71 71 72 72 void arch_remove_kprobe(struct kprobe *p); 73 73 void __kretprobe_trampoline(void); 74 + void trampoline_probe_handler(struct pt_regs *regs); 74 75 75 76 int kprobe_fault_handler(struct pt_regs *regs, int trapnr); 76 77 int kprobe_exceptions_notify(struct notifier_block *self,
-18
arch/s390/include/asm/linkage.h
··· 2 2 #ifndef __ASM_LINKAGE_H 3 3 #define __ASM_LINKAGE_H 4 4 5 - #include <asm/asm-const.h> 6 5 #include <linux/stringify.h> 7 6 8 7 #define __ALIGN .align 16, 0x07 9 8 #define __ALIGN_STR __stringify(__ALIGN) 10 - 11 - /* 12 - * Helper macro for exception table entries 13 - */ 14 - 15 - #define __EX_TABLE(_section, _fault, _target) \ 16 - stringify_in_c(.section _section,"a";) \ 17 - stringify_in_c(.align 8;) \ 18 - stringify_in_c(.long (_fault) - .;) \ 19 - stringify_in_c(.long (_target) - .;) \ 20 - stringify_in_c(.quad 0;) \ 21 - stringify_in_c(.previous) 22 - 23 - #define EX_TABLE(_fault, _target) \ 24 - __EX_TABLE(__ex_table, _fault, _target) 25 - #define EX_TABLE_AMODE31(_fault, _target) \ 26 - __EX_TABLE(.amode31.ex_table, _fault, _target) 27 9 28 10 #endif
+17 -9
arch/s390/include/asm/lowcore.h
··· 34 34 __u32 ext_int_code_addr; 35 35 }; 36 36 __u32 svc_int_code; /* 0x0088 */ 37 - __u16 pgm_ilc; /* 0x008c */ 38 - __u16 pgm_code; /* 0x008e */ 37 + union { 38 + struct { 39 + __u16 pgm_ilc; /* 0x008c */ 40 + __u16 pgm_code; /* 0x008e */ 41 + }; 42 + __u32 pgm_int_code; 43 + }; 39 44 __u32 data_exc_code; /* 0x0090 */ 40 45 __u16 mon_class_num; /* 0x0094 */ 41 - __u8 per_code; /* 0x0096 */ 42 - __u8 per_atmid; /* 0x0097 */ 46 + union { 47 + struct { 48 + __u8 per_code; /* 0x0096 */ 49 + __u8 per_atmid; /* 0x0097 */ 50 + }; 51 + __u16 per_code_combined; 52 + }; 43 53 __u64 per_address; /* 0x0098 */ 44 54 __u8 exc_access_id; /* 0x00a0 */ 45 55 __u8 per_access_id; /* 0x00a1 */ ··· 163 153 __u64 gmap; /* 0x03d0 */ 164 154 __u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */ 165 155 166 - /* br %r1 trampoline */ 167 - __u16 br_r1_trampoline; /* 0x0400 */ 168 - __u32 return_lpswe; /* 0x0402 */ 169 - __u32 return_mcck_lpswe; /* 0x0406 */ 170 - __u8 pad_0x040a[0x0e00-0x040a]; /* 0x040a */ 156 + __u32 return_lpswe; /* 0x0400 */ 157 + __u32 return_mcck_lpswe; /* 0x0404 */ 158 + __u8 pad_0x040a[0x0e00-0x0408]; /* 0x0408 */ 171 159 172 160 /* 173 161 * 0xe00 contains the address of the IPL Parameter Information
+1
arch/s390/include/asm/mmu.h
··· 4 4 5 5 #include <linux/cpumask.h> 6 6 #include <linux/errno.h> 7 + #include <asm/asm-extable.h> 7 8 8 9 typedef struct { 9 10 spinlock_t lock;
-1
arch/s390/include/asm/nmi.h
··· 6 6 * Author(s): Ingo Adlung <adlung@de.ibm.com>, 7 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>, 8 8 * Cornelia Huck <cornelia.huck@de.ibm.com>, 9 - * Heiko Carstens <heiko.carstens@de.ibm.com>, 10 9 */ 11 10 12 11 #ifndef _ASM_S390_NMI_H
+48 -111
arch/s390/include/asm/nospec-insn.h
··· 10 10 11 11 #ifdef CC_USING_EXPOLINE 12 12 13 - _LC_BR_R1 = __LC_BR_R1 14 - 15 13 /* 16 14 * The expoline macros are used to create thunks in the same format 17 15 * as gcc generates them. The 'comdat' section flag makes sure that 18 16 * the various thunks are merged into a single copy. 19 17 */ 20 18 .macro __THUNK_PROLOG_NAME name 19 + #ifdef CONFIG_EXPOLINE_EXTERN 20 + .pushsection .text,"ax",@progbits 21 + .align 16,0x07 22 + #else 21 23 .pushsection .text.\name,"axG",@progbits,\name,comdat 24 + #endif 22 25 .globl \name 23 26 .hidden \name 24 27 .type \name,@function ··· 29 26 CFI_STARTPROC 30 27 .endm 31 28 32 - .macro __THUNK_EPILOG 29 + .macro __THUNK_EPILOG_NAME name 33 30 CFI_ENDPROC 31 + #ifdef CONFIG_EXPOLINE_EXTERN 32 + .size \name, .-\name 33 + #endif 34 34 .popsection 35 35 .endm 36 36 37 - .macro __THUNK_PROLOG_BR r1,r2 38 - __THUNK_PROLOG_NAME __s390_indirect_jump_r\r2\()use_r\r1 37 + .macro __THUNK_PROLOG_BR r1 38 + __THUNK_PROLOG_NAME __s390_indirect_jump_r\r1 39 39 .endm 40 40 41 - .macro __THUNK_PROLOG_BC d0,r1,r2 42 - __THUNK_PROLOG_NAME __s390_indirect_branch_\d0\()_\r2\()use_\r1 41 + .macro __THUNK_EPILOG_BR r1 42 + __THUNK_EPILOG_NAME __s390_indirect_jump_r\r1 43 43 .endm 44 44 45 - .macro __THUNK_BR r1,r2 46 - jg __s390_indirect_jump_r\r2\()use_r\r1 45 + .macro __THUNK_BR r1 46 + jg __s390_indirect_jump_r\r1 47 47 .endm 48 48 49 - .macro __THUNK_BC d0,r1,r2 50 - jg __s390_indirect_branch_\d0\()_\r2\()use_\r1 49 + .macro __THUNK_BRASL r1,r2 50 + brasl \r1,__s390_indirect_jump_r\r2 51 51 .endm 52 52 53 - .macro __THUNK_BRASL r1,r2,r3 54 - brasl \r1,__s390_indirect_jump_r\r3\()use_r\r2 55 - .endm 56 - 57 - .macro __DECODE_RR expand,reg,ruse 53 + .macro __DECODE_R expand,reg 58 54 .set __decode_fail,1 59 55 .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 60 56 .ifc \reg,%r\r1 57 + \expand \r1 58 + .set __decode_fail,0 59 + .endif 60 + .endr 61 + .if __decode_fail == 1 62 + .error "__DECODE_R failed" 63 + .endif 64 + .endm 65 + 66 + .macro 
__DECODE_RR expand,rsave,rtarget 67 + .set __decode_fail,1 68 + .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 69 + .ifc \rsave,%r\r1 61 70 .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 62 - .ifc \ruse,%r\r2 71 + .ifc \rtarget,%r\r2 63 72 \expand \r1,\r2 64 73 .set __decode_fail,0 65 74 .endif ··· 83 68 .endif 84 69 .endm 85 70 86 - .macro __DECODE_RRR expand,rsave,rtarget,ruse 87 - .set __decode_fail,1 88 - .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 89 - .ifc \rsave,%r\r1 90 - .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 91 - .ifc \rtarget,%r\r2 92 - .irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 93 - .ifc \ruse,%r\r3 94 - \expand \r1,\r2,\r3 95 - .set __decode_fail,0 96 - .endif 97 - .endr 98 - .endif 99 - .endr 100 - .endif 101 - .endr 102 - .if __decode_fail == 1 103 - .error "__DECODE_RRR failed" 104 - .endif 105 - .endm 106 - 107 - .macro __DECODE_DRR expand,disp,reg,ruse 108 - .set __decode_fail,1 109 - .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 110 - .ifc \reg,%r\r1 111 - .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 112 - .ifc \ruse,%r\r2 113 - \expand \disp,\r1,\r2 114 - .set __decode_fail,0 115 - .endif 116 - .endr 117 - .endif 118 - .endr 119 - .if __decode_fail == 1 120 - .error "__DECODE_DRR failed" 121 - .endif 122 - .endm 123 - 124 - .macro __THUNK_EX_BR reg,ruse 125 - # Be very careful when adding instructions to this macro! 126 - # The ALTERNATIVE replacement code has a .+10 which targets 127 - # the "br \reg" after the code has been patched. 128 - #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES 71 + .macro __THUNK_EX_BR reg 129 72 exrl 0,555f 130 73 j . 131 - #else 132 - .ifc \reg,%r1 133 - ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35 134 - j . 135 - .else 136 - larl \ruse,555f 137 - ex 0,0(\ruse) 138 - j . 139 - .endif 140 - #endif 141 74 555: br \reg 142 75 .endm 143 76 144 - .macro __THUNK_EX_BC disp,reg,ruse 145 - #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES 146 - exrl 0,556f 147 - j . 
77 + #ifdef CONFIG_EXPOLINE_EXTERN 78 + .macro GEN_BR_THUNK reg 79 + .endm 80 + .macro GEN_BR_THUNK_EXTERN reg 148 81 #else 149 - larl \ruse,556f 150 - ex 0,0(\ruse) 151 - j . 82 + .macro GEN_BR_THUNK reg 152 83 #endif 153 - 556: b \disp(\reg) 84 + __DECODE_R __THUNK_PROLOG_BR,\reg 85 + __THUNK_EX_BR \reg 86 + __DECODE_R __THUNK_EPILOG_BR,\reg 154 87 .endm 155 88 156 - .macro GEN_BR_THUNK reg,ruse=%r1 157 - __DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse 158 - __THUNK_EX_BR \reg,\ruse 159 - __THUNK_EPILOG 160 - .endm 161 - 162 - .macro GEN_B_THUNK disp,reg,ruse=%r1 163 - __DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse 164 - __THUNK_EX_BC \disp,\reg,\ruse 165 - __THUNK_EPILOG 166 - .endm 167 - 168 - .macro BR_EX reg,ruse=%r1 169 - 557: __DECODE_RR __THUNK_BR,\reg,\ruse 89 + .macro BR_EX reg 90 + 557: __DECODE_R __THUNK_BR,\reg 170 91 .pushsection .s390_indirect_branches,"a",@progbits 171 92 .long 557b-. 172 93 .popsection 173 94 .endm 174 95 175 - .macro B_EX disp,reg,ruse=%r1 176 - 558: __DECODE_DRR __THUNK_BC,\disp,\reg,\ruse 177 - .pushsection .s390_indirect_branches,"a",@progbits 178 - .long 558b-. 179 - .popsection 180 - .endm 181 - 182 - .macro BASR_EX rsave,rtarget,ruse=%r1 183 - 559: __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse 96 + .macro BASR_EX rsave,rtarget 97 + 559: __DECODE_RR __THUNK_BRASL,\rsave,\rtarget 184 98 .pushsection .s390_indirect_branches,"a",@progbits 185 99 .long 559b-. 186 100 .popsection 187 101 .endm 188 102 189 103 #else 190 - .macro GEN_BR_THUNK reg,ruse=%r1 104 + .macro GEN_BR_THUNK reg 191 105 .endm 192 106 193 - .macro GEN_B_THUNK disp,reg,ruse=%r1 194 - .endm 195 - 196 - .macro BR_EX reg,ruse=%r1 107 + .macro BR_EX reg 197 108 br \reg 198 109 .endm 199 110 200 - .macro B_EX disp,reg,ruse=%r1 201 - b \disp(\reg) 202 - .endm 203 - 204 - .macro BASR_EX rsave,rtarget,ruse=%r1 111 + .macro BASR_EX rsave,rtarget 205 112 basr \rsave,\rtarget 206 113 .endm 207 114 #endif /* CC_USING_EXPOLINE */
+1 -1
arch/s390/include/asm/os_info.h
··· 39 39 40 40 #ifdef CONFIG_CRASH_DUMP 41 41 void *os_info_old_entry(int nr, unsigned long *size); 42 - int copy_oldmem_kernel(void *dst, void *src, size_t count); 42 + int copy_oldmem_kernel(void *dst, unsigned long src, size_t count); 43 43 #else 44 44 static inline void *os_info_old_entry(int nr, unsigned long *size) 45 45 {
+25 -5
arch/s390/include/asm/page.h
··· 92 92 93 93 #define pgprot_val(x) ((x).pgprot) 94 94 #define pgste_val(x) ((x).pgste) 95 - #define pte_val(x) ((x).pte) 96 - #define pmd_val(x) ((x).pmd) 97 - #define pud_val(x) ((x).pud) 98 - #define p4d_val(x) ((x).p4d) 99 - #define pgd_val(x) ((x).pgd) 95 + 96 + static inline unsigned long pte_val(pte_t pte) 97 + { 98 + return pte.pte; 99 + } 100 + 101 + static inline unsigned long pmd_val(pmd_t pmd) 102 + { 103 + return pmd.pmd; 104 + } 105 + 106 + static inline unsigned long pud_val(pud_t pud) 107 + { 108 + return pud.pud; 109 + } 110 + 111 + static inline unsigned long p4d_val(p4d_t p4d) 112 + { 113 + return p4d.p4d; 114 + } 115 + 116 + static inline unsigned long pgd_val(pgd_t pgd) 117 + { 118 + return pgd.pgd; 119 + } 100 120 101 121 #define __pgste(x) ((pgste_t) { (x) } ) 102 122 #define __pte(x) ((pte_t) { (x) } )
-3
arch/s390/include/asm/pci.h
··· 283 283 int __init zpci_irq_init(void); 284 284 void __init zpci_irq_exit(void); 285 285 286 - int zpci_set_irq(struct zpci_dev *zdev); 287 - int zpci_clear_irq(struct zpci_dev *zdev); 288 - 289 286 /* FMB */ 290 287 int zpci_fmb_enable_device(struct zpci_dev *); 291 288 int zpci_fmb_disable_device(struct zpci_dev *);
+4 -4
arch/s390/include/asm/pgalloc.h
··· 103 103 104 104 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d) 105 105 { 106 - pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d); 106 + set_pgd(pgd, __pgd(_REGION1_ENTRY | __pa(p4d))); 107 107 } 108 108 109 109 static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) 110 110 { 111 - p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud); 111 + set_p4d(p4d, __p4d(_REGION2_ENTRY | __pa(pud))); 112 112 } 113 113 114 114 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) 115 115 { 116 - pud_val(*pud) = _REGION3_ENTRY | __pa(pmd); 116 + set_pud(pud, __pud(_REGION3_ENTRY | __pa(pmd))); 117 117 } 118 118 119 119 static inline pgd_t *pgd_alloc(struct mm_struct *mm) ··· 129 129 static inline void pmd_populate(struct mm_struct *mm, 130 130 pmd_t *pmd, pgtable_t pte) 131 131 { 132 - pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte); 132 + set_pmd(pmd, __pmd(_SEGMENT_ENTRY | __pa(pte))); 133 133 } 134 134 135 135 #define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
+138 -95
arch/s390/include/asm/pgtable.h
··· 538 538 return 0; 539 539 } 540 540 541 + static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) 542 + { 543 + return __pte(pte_val(pte) & ~pgprot_val(prot)); 544 + } 545 + 546 + static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) 547 + { 548 + return __pte(pte_val(pte) | pgprot_val(prot)); 549 + } 550 + 551 + static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot) 552 + { 553 + return __pmd(pmd_val(pmd) & ~pgprot_val(prot)); 554 + } 555 + 556 + static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot) 557 + { 558 + return __pmd(pmd_val(pmd) | pgprot_val(prot)); 559 + } 560 + 561 + static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot) 562 + { 563 + return __pud(pud_val(pud) & ~pgprot_val(prot)); 564 + } 565 + 566 + static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot) 567 + { 568 + return __pud(pud_val(pud) | pgprot_val(prot)); 569 + } 570 + 541 571 /* 542 572 * In the case that a guest uses storage keys 543 573 * faults should no longer be backed by zero pages ··· 600 570 unsigned long address = (unsigned long)ptr | 1; 601 571 602 572 asm volatile( 603 - " .insn rre,0xb98a0000,%[r1],%[address]" 573 + " cspg %[r1],%[address]" 604 574 : [r1] "+&d" (r1.pair), "+m" (*ptr) 605 575 : [address] "d" (address) 606 576 : "cc"); ··· 834 804 835 805 static inline pte_t pte_mksoft_dirty(pte_t pte) 836 806 { 837 - pte_val(pte) |= _PAGE_SOFT_DIRTY; 838 - return pte; 807 + return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY)); 839 808 } 840 809 #define pte_swp_mksoft_dirty pte_mksoft_dirty 841 810 842 811 static inline pte_t pte_clear_soft_dirty(pte_t pte) 843 812 { 844 - pte_val(pte) &= ~_PAGE_SOFT_DIRTY; 845 - return pte; 813 + return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY)); 846 814 } 847 815 #define pte_swp_clear_soft_dirty pte_clear_soft_dirty 848 816 ··· 851 823 852 824 static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) 853 825 { 854 - pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY; 855 - return pmd; 826 + return set_pmd_bit(pmd, 
__pgprot(_SEGMENT_ENTRY_SOFT_DIRTY)); 856 827 } 857 828 858 829 static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) 859 830 { 860 - pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY; 861 - return pmd; 831 + return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY)); 862 832 } 863 833 864 834 /* ··· 907 881 * pgd/pmd/pte modification functions 908 882 */ 909 883 884 + static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) 885 + { 886 + WRITE_ONCE(*pgdp, pgd); 887 + } 888 + 889 + static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) 890 + { 891 + WRITE_ONCE(*p4dp, p4d); 892 + } 893 + 894 + static inline void set_pud(pud_t *pudp, pud_t pud) 895 + { 896 + WRITE_ONCE(*pudp, pud); 897 + } 898 + 899 + static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) 900 + { 901 + WRITE_ONCE(*pmdp, pmd); 902 + } 903 + 904 + static inline void set_pte(pte_t *ptep, pte_t pte) 905 + { 906 + WRITE_ONCE(*ptep, pte); 907 + } 908 + 910 909 static inline void pgd_clear(pgd_t *pgd) 911 910 { 912 911 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1) 913 - pgd_val(*pgd) = _REGION1_ENTRY_EMPTY; 912 + set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY)); 914 913 } 915 914 916 915 static inline void p4d_clear(p4d_t *p4d) 917 916 { 918 917 if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 919 - p4d_val(*p4d) = _REGION2_ENTRY_EMPTY; 918 + set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY)); 920 919 } 921 920 922 921 static inline void pud_clear(pud_t *pud) 923 922 { 924 923 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 925 - pud_val(*pud) = _REGION3_ENTRY_EMPTY; 924 + set_pud(pud, __pud(_REGION3_ENTRY_EMPTY)); 926 925 } 927 926 928 927 static inline void pmd_clear(pmd_t *pmdp) 929 928 { 930 - pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; 929 + set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); 931 930 } 932 931 933 932 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 934 933 { 935 - pte_val(*ptep) = _PAGE_INVALID; 934 + set_pte(ptep, 
__pte(_PAGE_INVALID)); 936 935 } 937 936 938 937 /* ··· 966 915 */ 967 916 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 968 917 { 969 - pte_val(pte) &= _PAGE_CHG_MASK; 970 - pte_val(pte) |= pgprot_val(newprot); 918 + pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK)); 919 + pte = set_pte_bit(pte, newprot); 971 920 /* 972 921 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX 973 922 * has the invalid bit set, clear it again for readable, young pages 974 923 */ 975 924 if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ)) 976 - pte_val(pte) &= ~_PAGE_INVALID; 925 + pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID)); 977 926 /* 978 927 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page 979 928 * protection bit set, clear it again for writable, dirty pages 980 929 */ 981 930 if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE)) 982 - pte_val(pte) &= ~_PAGE_PROTECT; 931 + pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT)); 983 932 return pte; 984 933 } 985 934 986 935 static inline pte_t pte_wrprotect(pte_t pte) 987 936 { 988 - pte_val(pte) &= ~_PAGE_WRITE; 989 - pte_val(pte) |= _PAGE_PROTECT; 990 - return pte; 937 + pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE)); 938 + return set_pte_bit(pte, __pgprot(_PAGE_PROTECT)); 991 939 } 992 940 993 941 static inline pte_t pte_mkwrite(pte_t pte) 994 942 { 995 - pte_val(pte) |= _PAGE_WRITE; 943 + pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE)); 996 944 if (pte_val(pte) & _PAGE_DIRTY) 997 - pte_val(pte) &= ~_PAGE_PROTECT; 945 + pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT)); 998 946 return pte; 999 947 } 1000 948 1001 949 static inline pte_t pte_mkclean(pte_t pte) 1002 950 { 1003 - pte_val(pte) &= ~_PAGE_DIRTY; 1004 - pte_val(pte) |= _PAGE_PROTECT; 1005 - return pte; 951 + pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY)); 952 + return set_pte_bit(pte, __pgprot(_PAGE_PROTECT)); 1006 953 } 1007 954 1008 955 static inline pte_t pte_mkdirty(pte_t pte) 1009 956 { 1010 - 
pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY; 957 + pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY)); 1011 958 if (pte_val(pte) & _PAGE_WRITE) 1012 - pte_val(pte) &= ~_PAGE_PROTECT; 959 + pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT)); 1013 960 return pte; 1014 961 } 1015 962 1016 963 static inline pte_t pte_mkold(pte_t pte) 1017 964 { 1018 - pte_val(pte) &= ~_PAGE_YOUNG; 1019 - pte_val(pte) |= _PAGE_INVALID; 1020 - return pte; 965 + pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG)); 966 + return set_pte_bit(pte, __pgprot(_PAGE_INVALID)); 1021 967 } 1022 968 1023 969 static inline pte_t pte_mkyoung(pte_t pte) 1024 970 { 1025 - pte_val(pte) |= _PAGE_YOUNG; 971 + pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG)); 1026 972 if (pte_val(pte) & _PAGE_READ) 1027 - pte_val(pte) &= ~_PAGE_INVALID; 973 + pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID)); 1028 974 return pte; 1029 975 } 1030 976 1031 977 static inline pte_t pte_mkspecial(pte_t pte) 1032 978 { 1033 - pte_val(pte) |= _PAGE_SPECIAL; 1034 - return pte; 979 + return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL)); 1035 980 } 1036 981 1037 982 #ifdef CONFIG_HUGETLB_PAGE 1038 983 static inline pte_t pte_mkhuge(pte_t pte) 1039 984 { 1040 - pte_val(pte) |= _PAGE_LARGE; 1041 - return pte; 985 + return set_pte_bit(pte, __pgprot(_PAGE_LARGE)); 1042 986 } 1043 987 #endif 1044 988 ··· 1052 1006 if (__builtin_constant_p(opt) && opt == 0) { 1053 1007 /* Invalidation + TLB flush for the pte */ 1054 1008 asm volatile( 1055 - " .insn rrf,0xb2210000,%[r1],%[r2],0,%[m4]" 1009 + " ipte %[r1],%[r2],0,%[m4]" 1056 1010 : "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address), 1057 1011 [m4] "i" (local)); 1058 1012 return; ··· 1061 1015 /* Invalidate ptes with options + TLB flush of the ptes */ 1062 1016 opt = opt | (asce & _ASCE_ORIGIN); 1063 1017 asm volatile( 1064 - " .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]" 1018 + " ipte %[r1],%[r2],%[r3],%[m4]" 1065 1019 : [r2] "+a" (address), [r3] "+a" (opt) 1066 1020 : [r1] "a" (pto), 
[m4] "i" (local) : "memory"); 1067 1021 } ··· 1074 1028 /* Invalidate a range of ptes + TLB flush of the ptes */ 1075 1029 do { 1076 1030 asm volatile( 1077 - " .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]" 1031 + " ipte %[r1],%[r2],%[r3],%[m4]" 1078 1032 : [r2] "+a" (address), [r3] "+a" (nr) 1079 1033 : [r1] "a" (pto), [m4] "i" (local) : "memory"); 1080 1034 } while (nr != 255); ··· 1160 1114 1161 1115 if (full) { 1162 1116 res = *ptep; 1163 - *ptep = __pte(_PAGE_INVALID); 1117 + set_pte(ptep, __pte(_PAGE_INVALID)); 1164 1118 } else { 1165 1119 res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID)); 1166 1120 } ··· 1244 1198 pte_t *ptep, pte_t entry) 1245 1199 { 1246 1200 if (pte_present(entry)) 1247 - pte_val(entry) &= ~_PAGE_UNUSED; 1201 + entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED)); 1248 1202 if (mm_has_pgste(mm)) 1249 1203 ptep_set_pte_at(mm, addr, ptep, entry); 1250 1204 else 1251 - *ptep = entry; 1205 + set_pte(ptep, entry); 1252 1206 } 1253 1207 1254 1208 /* ··· 1259 1213 { 1260 1214 pte_t __pte; 1261 1215 1262 - pte_val(__pte) = physpage | pgprot_val(pgprot); 1216 + __pte = __pte(physpage | pgprot_val(pgprot)); 1263 1217 if (!MACHINE_HAS_NX) 1264 - pte_val(__pte) &= ~_PAGE_NOEXEC; 1218 + __pte = clear_pte_bit(__pte, __pgprot(_PAGE_NOEXEC)); 1265 1219 return pte_mkyoung(__pte); 1266 1220 } 1267 1221 ··· 1401 1355 1402 1356 static inline pmd_t pmd_wrprotect(pmd_t pmd) 1403 1357 { 1404 - pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE; 1405 - pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; 1406 - return pmd; 1358 + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE)); 1359 + return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); 1407 1360 } 1408 1361 1409 1362 static inline pmd_t pmd_mkwrite(pmd_t pmd) 1410 1363 { 1411 - pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE; 1364 + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE)); 1412 1365 if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) 1413 - pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; 1366 + pmd = clear_pmd_bit(pmd, 
__pgprot(_SEGMENT_ENTRY_PROTECT)); 1414 1367 return pmd; 1415 1368 } 1416 1369 1417 1370 static inline pmd_t pmd_mkclean(pmd_t pmd) 1418 1371 { 1419 - pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY; 1420 - pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; 1421 - return pmd; 1372 + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY)); 1373 + return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); 1422 1374 } 1423 1375 1424 1376 static inline pmd_t pmd_mkdirty(pmd_t pmd) 1425 1377 { 1426 - pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY; 1378 + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY)); 1427 1379 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) 1428 - pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; 1380 + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); 1429 1381 return pmd; 1430 1382 } 1431 1383 1432 1384 static inline pud_t pud_wrprotect(pud_t pud) 1433 1385 { 1434 - pud_val(pud) &= ~_REGION3_ENTRY_WRITE; 1435 - pud_val(pud) |= _REGION_ENTRY_PROTECT; 1436 - return pud; 1386 + pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE)); 1387 + return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT)); 1437 1388 } 1438 1389 1439 1390 static inline pud_t pud_mkwrite(pud_t pud) 1440 1391 { 1441 - pud_val(pud) |= _REGION3_ENTRY_WRITE; 1392 + pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE)); 1442 1393 if (pud_val(pud) & _REGION3_ENTRY_DIRTY) 1443 - pud_val(pud) &= ~_REGION_ENTRY_PROTECT; 1394 + pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT)); 1444 1395 return pud; 1445 1396 } 1446 1397 1447 1398 static inline pud_t pud_mkclean(pud_t pud) 1448 1399 { 1449 - pud_val(pud) &= ~_REGION3_ENTRY_DIRTY; 1450 - pud_val(pud) |= _REGION_ENTRY_PROTECT; 1451 - return pud; 1400 + pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY)); 1401 + return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT)); 1452 1402 } 1453 1403 1454 1404 static inline pud_t pud_mkdirty(pud_t pud) 1455 1405 { 1456 - pud_val(pud) |= _REGION3_ENTRY_DIRTY | 
_REGION3_ENTRY_SOFT_DIRTY; 1406 + pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY)); 1457 1407 if (pud_val(pud) & _REGION3_ENTRY_WRITE) 1458 - pud_val(pud) &= ~_REGION_ENTRY_PROTECT; 1408 + pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT)); 1459 1409 return pud; 1460 1410 } 1461 1411 ··· 1475 1433 1476 1434 static inline pmd_t pmd_mkyoung(pmd_t pmd) 1477 1435 { 1478 - pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; 1436 + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG)); 1479 1437 if (pmd_val(pmd) & _SEGMENT_ENTRY_READ) 1480 - pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID; 1438 + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID)); 1481 1439 return pmd; 1482 1440 } 1483 1441 1484 1442 static inline pmd_t pmd_mkold(pmd_t pmd) 1485 1443 { 1486 - pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG; 1487 - pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; 1488 - return pmd; 1444 + pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG)); 1445 + return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID)); 1489 1446 } 1490 1447 1491 1448 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 1492 1449 { 1493 - pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE | 1494 - _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG | 1495 - _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY; 1496 - pmd_val(pmd) |= massage_pgprot_pmd(newprot); 1450 + unsigned long mask; 1451 + 1452 + mask = _SEGMENT_ENTRY_ORIGIN_LARGE; 1453 + mask |= _SEGMENT_ENTRY_DIRTY; 1454 + mask |= _SEGMENT_ENTRY_YOUNG; 1455 + mask |= _SEGMENT_ENTRY_LARGE; 1456 + mask |= _SEGMENT_ENTRY_SOFT_DIRTY; 1457 + pmd = __pmd(pmd_val(pmd) & mask); 1458 + pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot))); 1497 1459 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) 1498 - pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; 1460 + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); 1499 1461 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)) 1500 - pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; 1462 + pmd = set_pmd_bit(pmd, 
__pgprot(_SEGMENT_ENTRY_INVALID)); 1501 1463 return pmd; 1502 1464 } 1503 1465 1504 1466 static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) 1505 1467 { 1506 - pmd_t __pmd; 1507 - pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); 1508 - return __pmd; 1468 + return __pmd(physpage + massage_pgprot_pmd(pgprot)); 1509 1469 } 1510 1470 1511 1471 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ ··· 1535 1491 if (__builtin_constant_p(opt) && opt == 0) { 1536 1492 /* flush without guest asce */ 1537 1493 asm volatile( 1538 - " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]" 1494 + " idte %[r1],0,%[r2],%[m4]" 1539 1495 : "+m" (*pmdp) 1540 1496 : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)), 1541 1497 [m4] "i" (local) ··· 1543 1499 } else { 1544 1500 /* flush with guest asce */ 1545 1501 asm volatile( 1546 - " .insn rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]" 1502 + " idte %[r1],%[r3],%[r2],%[m4]" 1547 1503 : "+m" (*pmdp) 1548 1504 : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt), 1549 1505 [r3] "a" (asce), [m4] "i" (local) ··· 1562 1518 if (__builtin_constant_p(opt) && opt == 0) { 1563 1519 /* flush without guest asce */ 1564 1520 asm volatile( 1565 - " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]" 1521 + " idte %[r1],0,%[r2],%[m4]" 1566 1522 : "+m" (*pudp) 1567 1523 : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)), 1568 1524 [m4] "i" (local) ··· 1570 1526 } else { 1571 1527 /* flush with guest asce */ 1572 1528 asm volatile( 1573 - " .insn rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]" 1529 + " idte %[r1],%[r3],%[r2],%[m4]" 1574 1530 : "+m" (*pudp) 1575 1531 : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt), 1576 1532 [r3] "a" (asce), [m4] "i" (local) ··· 1629 1585 pmd_t *pmdp, pmd_t entry) 1630 1586 { 1631 1587 if (!MACHINE_HAS_NX) 1632 - pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC; 1633 - *pmdp = entry; 1588 + entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC)); 1589 + set_pmd(pmdp, entry); 1634 1590 } 1635 1591 1636 1592 static 
inline pmd_t pmd_mkhuge(pmd_t pmd) 1637 1593 { 1638 - pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE; 1639 - pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; 1640 - pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; 1641 - return pmd; 1594 + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE)); 1595 + pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG)); 1596 + return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT)); 1642 1597 } 1643 1598 1644 1599 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR ··· 1654 1611 { 1655 1612 if (full) { 1656 1613 pmd_t pmd = *pmdp; 1657 - *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY); 1614 + set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); 1658 1615 return pmd; 1659 1616 } 1660 1617 return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); ··· 1733 1690 1734 1691 static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) 1735 1692 { 1736 - pte_t pte; 1693 + unsigned long pteval; 1737 1694 1738 - pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT; 1739 - pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT; 1740 - pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT; 1741 - return pte; 1695 + pteval = _PAGE_INVALID | _PAGE_PROTECT; 1696 + pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT; 1697 + pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT; 1698 + return __pte(pteval); 1742 1699 } 1743 1700 1744 1701 static inline unsigned long __swp_type(swp_entry_t entry)
+3 -4
arch/s390/include/asm/processor.h
··· 225 225 { 226 226 unsigned long val; 227 227 228 - asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */ 229 - : "=d" (val) : "a" (asi << 8 | parm)); 228 + asm volatile("ecag %0,0,0(%1)" : "=d" (val) : "a" (asi << 8 | parm)); 230 229 return val; 231 230 } 232 231 ··· 312 313 * Basic Program Check Handler. 313 314 */ 314 315 extern void s390_base_pgm_handler(void); 315 - extern void (*s390_base_pgm_handler_fn)(void); 316 + extern void (*s390_base_pgm_handler_fn)(struct pt_regs *regs); 316 317 317 318 #define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL 318 319 319 - extern int memcpy_real(void *, void *, size_t); 320 + extern int memcpy_real(void *, unsigned long, size_t); 320 321 extern void memcpy_absolute(void *, void *, size_t); 321 322 322 323 #define mem_assign_absolute(dest, val) do { \
-1
arch/s390/include/asm/sclp.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* 3 3 * Copyright IBM Corp. 2007 4 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> 5 4 */ 6 5 7 6 #ifndef _ASM_S390_SCLP_H
-1
arch/s390/include/asm/smp.h
··· 3 3 * Copyright IBM Corp. 1999, 2012 4 4 * Author(s): Denis Joseph Barrow, 5 5 * Martin Schwidefsky <schwidefsky@de.ibm.com>, 6 - * Heiko Carstens <heiko.carstens@de.ibm.com>, 7 6 */ 8 7 #ifndef __ASM_SMP_H 9 8 #define __ASM_SMP_H
+1 -9
arch/s390/include/asm/stacktrace.h
··· 36 36 37 37 /* 38 38 * Stack layout of a C stack frame. 39 + * Kernel uses the packed stack layout (-mpacked-stack). 39 40 */ 40 - #ifndef __PACK_STACK 41 - struct stack_frame { 42 - unsigned long back_chain; 43 - unsigned long empty1[5]; 44 - unsigned long gprs[10]; 45 - unsigned int empty2[8]; 46 - }; 47 - #else 48 41 struct stack_frame { 49 42 unsigned long empty1[5]; 50 43 unsigned int empty2[8]; 51 44 unsigned long gprs[10]; 52 45 unsigned long back_chain; 53 46 }; 54 - #endif 55 47 56 48 /* 57 49 * Unlike current_stack_pointer() which simply returns current value of %r15
+1 -5
arch/s390/include/asm/timex.h
··· 148 148 asm volatile( \ 149 149 " lgr 0,%[reg0]\n" \ 150 150 " lgr 1,%[reg1]\n" \ 151 - " .insn e,0x0104\n" \ 151 + " ptff\n" \ 152 152 " ipm %[rc]\n" \ 153 153 " srl %[rc],28\n" \ 154 154 : [rc] "=&d" (rc), "+m" (*(struct addrtype *)reg1) \ ··· 187 187 188 188 static inline unsigned long get_tod_clock_fast(void) 189 189 { 190 - #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES 191 190 unsigned long clk; 192 191 193 192 asm volatile("stckf %0" : "=Q" (clk) : : "cc"); 194 193 return clk; 195 - #else 196 - return get_tod_clock(); 197 - #endif 198 194 } 199 195 200 196 static inline cycles_t get_cycles(void)
+1 -3
arch/s390/include/asm/tlbflush.h
··· 25 25 if (MACHINE_HAS_TLB_GUEST) 26 26 opt |= IDTE_GUEST_ASCE; 27 27 /* Global TLB flush for the mm */ 28 - asm volatile( 29 - " .insn rrf,0xb98e0000,0,%0,%1,0" 30 - : : "a" (opt), "a" (asce) : "cc"); 28 + asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc"); 31 29 } 32 30 33 31 /*
+8 -37
arch/s390/include/asm/uaccess.h
··· 13 13 /* 14 14 * User space memory access functions 15 15 */ 16 + #include <asm/asm-extable.h> 16 17 #include <asm/processor.h> 17 18 #include <asm/ctl_reg.h> 18 19 #include <asm/extable.h> ··· 80 79 }; 81 80 }; 82 81 83 - #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES 84 - 85 82 #define __put_get_user_asm(to, from, size, oac_spec) \ 86 83 ({ \ 87 84 int __rc; \ ··· 89 90 "0: mvcos %[_to],%[_from],%[_size]\n" \ 90 91 "1: xr %[rc],%[rc]\n" \ 91 92 "2:\n" \ 92 - ".pushsection .fixup, \"ax\"\n" \ 93 - "3: lhi %[rc],%[retval]\n" \ 94 - " jg 2b\n" \ 95 - ".popsection\n" \ 96 - EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ 93 + EX_TABLE_UA(0b,2b,%[rc]) EX_TABLE_UA(1b,2b,%[rc]) \ 97 94 : [rc] "=&d" (__rc), [_to] "+Q" (*(to)) \ 98 95 : [_size] "d" (size), [_from] "Q" (*(from)), \ 99 - [retval] "K" (-EFAULT), [spec] "d" (oac_spec.val) \ 96 + [spec] "d" (oac_spec.val) \ 100 97 : "cc", "0"); \ 101 98 __rc; \ 102 99 }) ··· 172 177 } 173 178 return rc; 174 179 } 175 - 176 - #else /* CONFIG_HAVE_MARCH_Z10_FEATURES */ 177 - 178 - static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) 179 - { 180 - size = raw_copy_to_user(ptr, x, size); 181 - return size ? -EFAULT : 0; 182 - } 183 - 184 - static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) 185 - { 186 - size = raw_copy_from_user(x, ptr, size); 187 - return size ? -EFAULT : 0; 188 - } 189 - 190 - #endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */ 191 180 192 181 /* 193 182 * These are the main single-value transfer routines. 
They automatically ··· 268 289 return __clear_user(to, n); 269 290 } 270 291 271 - int copy_to_user_real(void __user *dest, void *src, unsigned long count); 292 + int copy_to_user_real(void __user *dest, unsigned long src, unsigned long count); 272 293 void *s390_kernel_write(void *dst, const void *src, size_t size); 273 294 274 295 int __noreturn __put_kernel_bad(void); ··· 281 302 "0: " insn " %2,%1\n" \ 282 303 "1: xr %0,%0\n" \ 283 304 "2:\n" \ 284 - ".pushsection .fixup, \"ax\"\n" \ 285 - "3: lhi %0,%3\n" \ 286 - " jg 2b\n" \ 287 - ".popsection\n" \ 288 - EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ 305 + EX_TABLE_UA(0b,2b,%0) EX_TABLE_UA(1b,2b,%0) \ 289 306 : "=d" (__rc), "+Q" (*(to)) \ 290 - : "d" (val), "K" (-EFAULT) \ 307 + : "d" (val) \ 291 308 : "cc"); \ 292 309 __rc; \ 293 310 }) ··· 324 349 "0: " insn " %1,%2\n" \ 325 350 "1: xr %0,%0\n" \ 326 351 "2:\n" \ 327 - ".pushsection .fixup, \"ax\"\n" \ 328 - "3: lhi %0,%3\n" \ 329 - " jg 2b\n" \ 330 - ".popsection\n" \ 331 - EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ 352 + EX_TABLE_UA(0b,2b,%0) EX_TABLE_UA(1b,2b,%0) \ 332 353 : "=d" (__rc), "+d" (val) \ 333 - : "Q" (*(from)), "K" (-EFAULT) \ 354 + : "Q" (*(from)) \ 334 355 : "cc"); \ 335 356 __rc; \ 336 357 })
+1 -1
arch/s390/include/uapi/asm/zcrypt.h
··· 288 288 * 0x08: CEX3A 289 289 * 0x0a: CEX4 290 290 * 0x0b: CEX5 291 - * 0x0c: CEX6 and CEX7 291 + * 0x0c: CEX6, CEX7 or CEX8 292 292 * 0x0d: device is disabled 293 293 * 294 294 * ZCRYPT_QDEPTH_MASK
+3 -1
arch/s390/kernel/Makefile
··· 57 57 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 58 58 obj-$(CONFIG_KPROBES) += kprobes.o 59 59 obj-$(CONFIG_KPROBES) += kprobes_insn_page.o 60 - obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o 60 + obj-$(CONFIG_KPROBES) += mcount.o 61 + obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o 62 + obj-$(CONFIG_FUNCTION_TRACER) += mcount.o 61 63 obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 62 64 obj-$(CONFIG_UPROBES) += uprobes.o 63 65 obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+2 -6
arch/s390/kernel/asm-offsets.c
··· 50 50 BLANK(); 51 51 /* idle data offsets */ 52 52 OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter); 53 - OFFSET(__CLOCK_IDLE_EXIT, s390_idle_data, clock_idle_exit); 54 53 OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter); 55 - OFFSET(__TIMER_IDLE_EXIT, s390_idle_data, timer_idle_exit); 56 54 OFFSET(__MT_CYCLES_ENTER, s390_idle_data, mt_cycles_enter); 57 55 BLANK(); 58 56 /* hardware defined lowcore locations 0x000 - 0x1ff */ ··· 121 123 OFFSET(__LC_USER_ASCE, lowcore, user_asce); 122 124 OFFSET(__LC_LPP, lowcore, lpp); 123 125 OFFSET(__LC_CURRENT_PID, lowcore, current_pid); 124 - OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset); 125 - OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags); 126 - OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count); 127 126 OFFSET(__LC_GMAP, lowcore, gmap); 128 - OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline); 129 127 OFFSET(__LC_LAST_BREAK, lowcore, last_break); 130 128 /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ 131 129 OFFSET(__LC_DUMP_REIPL, lowcore, ipib); 130 + OFFSET(__LC_VMCORE_INFO, lowcore, vmcore_info); 131 + OFFSET(__LC_OS_INFO, lowcore, os_info); 132 132 /* hardware defined lowcore locations 0x1000 - 0x18ff */ 133 133 OFFSET(__LC_MCESAD, lowcore, mcesad); 134 134 OFFSET(__LC_EXT_PARAMS2, lowcore, ext_params2);
+17 -8
arch/s390/kernel/base.S
··· 3 3 * arch/s390/kernel/base.S 4 4 * 5 5 * Copyright IBM Corp. 2006, 2007 6 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> 7 - * Michael Holzheu <holzheu@de.ibm.com> 6 + * Author(s): Michael Holzheu <holzheu@de.ibm.com> 8 7 */ 9 8 10 9 #include <linux/linkage.h> ··· 14 15 GEN_BR_THUNK %r9 15 16 GEN_BR_THUNK %r14 16 17 18 + __PT_R0 = __PT_GPRS 19 + __PT_R8 = __PT_GPRS + 64 20 + 17 21 ENTRY(s390_base_pgm_handler) 18 - stmg %r0,%r15,__LC_SAVE_AREA_SYNC 19 - basr %r13,0 20 - 0: aghi %r15,-STACK_FRAME_OVERHEAD 22 + stmg %r8,%r15,__LC_SAVE_AREA_SYNC 23 + aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE) 24 + la %r11,STACK_FRAME_OVERHEAD(%r15) 25 + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 26 + stmg %r0,%r7,__PT_R0(%r11) 27 + mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW 28 + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 29 + lgr %r2,%r11 21 30 larl %r1,s390_base_pgm_handler_fn 22 31 lg %r9,0(%r1) 23 32 ltgr %r9,%r9 24 33 jz 1f 25 34 BASR_EX %r14,%r9 26 - lmg %r0,%r15,__LC_SAVE_AREA_SYNC 27 - lpswe __LC_PGM_OLD_PSW 28 - 1: lpswe disabled_wait_psw-0b(%r13) 35 + mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) 36 + lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 37 + lpswe __LC_RETURN_PSW 38 + 1: larl %r13,disabled_wait_psw 39 + lpswe 0(%r13) 29 40 ENDPROC(s390_base_pgm_handler) 30 41 31 42 .align 8
-7
arch/s390/kernel/cache.c
··· 3 3 * Extract CPU cache information and expose them via sysfs. 4 4 * 5 5 * Copyright IBM Corp. 2012 6 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> 7 6 */ 8 7 9 8 #include <linux/seq_file.h> ··· 70 71 struct cacheinfo *cache; 71 72 int idx; 72 73 73 - if (!test_facility(34)) 74 - return; 75 74 this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask)); 76 75 for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) { 77 76 cache = this_cpu_ci->info_list + idx; ··· 129 132 union cache_topology ct; 130 133 enum cache_type ctype; 131 134 132 - if (!test_facility(34)) 133 - return -EOPNOTSUPP; 134 135 if (!this_cpu_ci) 135 136 return -EINVAL; 136 137 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); ··· 152 157 union cache_topology ct; 153 158 enum cache_type ctype; 154 159 155 - if (!test_facility(34)) 156 - return -EOPNOTSUPP; 157 160 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); 158 161 for (idx = 0, level = 0; level < this_cpu_ci->num_levels && 159 162 idx < this_cpu_ci->num_leaves; idx++, level++) {
+1 -1
arch/s390/kernel/compat_signal.c
··· 89 89 _sigregs32 user_sregs; 90 90 int i; 91 91 92 - /* Alwys make any pending restarted system call return -EINTR */ 92 + /* Always make any pending restarted system call return -EINTR */ 93 93 current->restart_block.fn = do_no_restart_syscall; 94 94 95 95 if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))
+28 -30
arch/s390/kernel/crash_dump.c
··· 132 132 /* 133 133 * Copy memory of the old, dumped system to a kernel space virtual address 134 134 */ 135 - int copy_oldmem_kernel(void *dst, void *src, size_t count) 135 + int copy_oldmem_kernel(void *dst, unsigned long src, size_t count) 136 136 { 137 - unsigned long from, len; 137 + unsigned long len; 138 138 void *ra; 139 139 int rc; 140 140 141 141 while (count) { 142 - from = __pa(src); 143 - if (!oldmem_data.start && from < sclp.hsa_size) { 142 + if (!oldmem_data.start && src < sclp.hsa_size) { 144 143 /* Copy from zfcp/nvme dump HSA area */ 145 - len = min(count, sclp.hsa_size - from); 146 - rc = memcpy_hsa_kernel(dst, from, len); 144 + len = min(count, sclp.hsa_size - src); 145 + rc = memcpy_hsa_kernel(dst, src, len); 147 146 if (rc) 148 147 return rc; 149 148 } else { 150 149 /* Check for swapped kdump oldmem areas */ 151 - if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) { 152 - from -= oldmem_data.start; 153 - len = min(count, oldmem_data.size - from); 154 - } else if (oldmem_data.start && from < oldmem_data.size) { 155 - len = min(count, oldmem_data.size - from); 156 - from += oldmem_data.start; 150 + if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) { 151 + src -= oldmem_data.start; 152 + len = min(count, oldmem_data.size - src); 153 + } else if (oldmem_data.start && src < oldmem_data.size) { 154 + len = min(count, oldmem_data.size - src); 155 + src += oldmem_data.start; 157 156 } else { 158 157 len = count; 159 158 } ··· 162 163 } else { 163 164 ra = dst; 164 165 } 165 - if (memcpy_real(ra, (void *) from, len)) 166 + if (memcpy_real(ra, src, len)) 166 167 return -EFAULT; 167 168 } 168 169 dst += len; ··· 175 176 /* 176 177 * Copy memory of the old, dumped system to a user space virtual address 177 178 */ 178 - static int copy_oldmem_user(void __user *dst, void *src, size_t count) 179 + static int copy_oldmem_user(void __user *dst, unsigned long src, size_t count) 179 180 { 180 - unsigned long from, len; 
181 + unsigned long len; 181 182 int rc; 182 183 183 184 while (count) { 184 - from = __pa(src); 185 - if (!oldmem_data.start && from < sclp.hsa_size) { 185 + if (!oldmem_data.start && src < sclp.hsa_size) { 186 186 /* Copy from zfcp/nvme dump HSA area */ 187 - len = min(count, sclp.hsa_size - from); 188 - rc = memcpy_hsa_user(dst, from, len); 187 + len = min(count, sclp.hsa_size - src); 188 + rc = memcpy_hsa_user(dst, src, len); 189 189 if (rc) 190 190 return rc; 191 191 } else { 192 192 /* Check for swapped kdump oldmem areas */ 193 - if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) { 194 - from -= oldmem_data.start; 195 - len = min(count, oldmem_data.size - from); 196 - } else if (oldmem_data.start && from < oldmem_data.size) { 197 - len = min(count, oldmem_data.size - from); 198 - from += oldmem_data.start; 193 + if (oldmem_data.start && src - oldmem_data.start < oldmem_data.size) { 194 + src -= oldmem_data.start; 195 + len = min(count, oldmem_data.size - src); 196 + } else if (oldmem_data.start && src < oldmem_data.size) { 197 + len = min(count, oldmem_data.size - src); 198 + src += oldmem_data.start; 199 199 } else { 200 200 len = count; 201 201 } 202 - rc = copy_to_user_real(dst, (void *) from, count); 202 + rc = copy_to_user_real(dst, src, count); 203 203 if (rc) 204 204 return rc; 205 205 } ··· 215 217 ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, 216 218 unsigned long offset, int userbuf) 217 219 { 218 - void *src; 220 + unsigned long src; 219 221 int rc; 220 222 221 223 if (!csize) 222 224 return 0; 223 - src = (void *) (pfn << PAGE_SHIFT) + offset; 225 + src = pfn_to_phys(pfn) + offset; 224 226 if (userbuf) 225 227 rc = copy_oldmem_user((void __force __user *) buf, src, csize); 226 228 else ··· 427 429 static void *get_vmcoreinfo_old(unsigned long *size) 428 430 { 429 431 char nt_name[11], *vmcoreinfo; 432 + unsigned long addr; 430 433 Elf64_Nhdr note; 431 - void *addr; 432 434 433 - if 
(copy_oldmem_kernel(&addr, &S390_lowcore.vmcore_info, sizeof(addr))) 435 + if (copy_oldmem_kernel(&addr, __LC_VMCORE_INFO, sizeof(addr))) 434 436 return NULL; 435 437 memset(nt_name, 0, sizeof(nt_name)); 436 438 if (copy_oldmem_kernel(&note, addr, sizeof(note)))
+1
arch/s390/kernel/diag.c
··· 11 11 #include <linux/cpu.h> 12 12 #include <linux/seq_file.h> 13 13 #include <linux/debugfs.h> 14 + #include <asm/asm-extable.h> 14 15 #include <asm/diag.h> 15 16 #include <asm/trace/diag.h> 16 17 #include <asm/sections.h>
+9 -15
arch/s390/kernel/early.c
··· 2 2 /* 3 3 * Copyright IBM Corp. 2007, 2009 4 4 * Author(s): Hongjie Yang <hongjie@us.ibm.com>, 5 - * Heiko Carstens <heiko.carstens@de.ibm.com> 6 5 */ 7 6 8 7 #define KMSG_COMPONENT "setup" ··· 17 18 #include <linux/pfn.h> 18 19 #include <linux/uaccess.h> 19 20 #include <linux/kernel.h> 21 + #include <asm/asm-extable.h> 20 22 #include <asm/diag.h> 21 23 #include <asm/ebcdic.h> 22 24 #include <asm/ipl.h> ··· 149 149 topology_max_mnest = max_mnest; 150 150 } 151 151 152 - static void early_pgm_check_handler(void) 152 + static void early_pgm_check_handler(struct pt_regs *regs) 153 153 { 154 - const struct exception_table_entry *fixup; 155 - unsigned long cr0, cr0_new; 156 - unsigned long addr; 157 - 158 - addr = S390_lowcore.program_old_psw.addr; 159 - fixup = s390_search_extables(addr); 160 - if (!fixup) 154 + if (!fixup_exception(regs)) 161 155 disabled_wait(); 162 - /* Disable low address protection before storing into lowcore. */ 163 - __ctl_store(cr0, 0, 0); 164 - cr0_new = cr0 & ~(1UL << 28); 165 - __ctl_load(cr0_new, 0, 0); 166 - S390_lowcore.program_old_psw.addr = extable_fixup(fixup); 167 - __ctl_load(cr0, 0, 0); 168 156 } 169 157 170 158 static noinline __init void setup_lowcore_early(void) ··· 282 294 disabled_wait(); 283 295 } 284 296 297 + static void __init sort_amode31_extable(void) 298 + { 299 + sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table); 300 + } 301 + 285 302 void __init startup_init(void) 286 303 { 287 304 sclp_early_adjust_va(); ··· 295 302 time_early_init(); 296 303 init_kernel_storage_key(); 297 304 lockdep_off(); 305 + sort_amode31_extable(); 298 306 setup_lowcore_early(); 299 307 setup_facility_list(); 300 308 detect_machine_type();
+6 -12
arch/s390/kernel/entry.S
··· 6 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 7 7 * Hartmut Penner (hp@de.ibm.com), 8 8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 9 - * Heiko Carstens <heiko.carstens@de.ibm.com> 10 9 */ 11 10 12 11 #include <linux/init.h> 13 12 #include <linux/linkage.h> 13 + #include <asm/asm-extable.h> 14 14 #include <asm/alternative-asm.h> 15 15 #include <asm/processor.h> 16 16 #include <asm/cache.h> ··· 96 96 #else 97 97 j \oklabel 98 98 #endif 99 - .endm 100 - 101 - .macro STCK savearea 102 - ALTERNATIVE ".insn s,0xb2050000,\savearea", \ 103 - ".insn s,0xb27c0000,\savearea", 25 104 99 .endm 105 100 106 101 /* ··· 186 191 #endif 187 192 188 193 GEN_BR_THUNK %r14 189 - GEN_BR_THUNK %r14,%r13 190 194 191 195 .section .kprobes.text, "ax" 192 196 .Ldummy: ··· 226 232 aghi %r3,__TASK_pid 227 233 mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next 228 234 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 229 - ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 235 + ALTERNATIVE "", "lpp _LPP_OFFSET", 40 230 236 BR_EX %r14 231 237 ENDPROC(__switch_to) 232 238 ··· 437 443 */ 438 444 .macro INT_HANDLER name,lc_old_psw,handler 439 445 ENTRY(\name) 440 - STCK __LC_INT_CLOCK 446 + stckf __LC_INT_CLOCK 441 447 stpt __LC_SYS_ENTER_TIMER 442 448 STBEAR __LC_LAST_BREAK 443 449 BPOFF ··· 509 515 .Lpsw_idle_stcctm: 510 516 oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT 511 517 BPON 512 - STCK __CLOCK_IDLE_ENTER(%r2) 518 + stckf __CLOCK_IDLE_ENTER(%r2) 513 519 stpt __TIMER_IDLE_ENTER(%r2) 514 520 lpswe __SF_EMPTY(%r15) 515 521 .globl psw_idle_exit ··· 521 527 * Machine check handler routines 522 528 */ 523 529 ENTRY(mcck_int_handler) 524 - STCK __LC_MCCK_CLOCK 530 + stckf __LC_MCCK_CLOCK 525 531 BPOFF 526 532 la %r1,4095 # validate r1 527 533 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer ··· 648 654 ENDPROC(mcck_int_handler) 649 655 650 656 ENTRY(restart_int_handler) 651 - ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 657 + 
ALTERNATIVE "", "lpp _LPP_OFFSET", 40 652 658 stg %r15,__LC_SAVE_AREA_RESTART 653 659 TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4 654 660 jz 0f
+1
arch/s390/kernel/entry.h
··· 5 5 #include <linux/percpu.h> 6 6 #include <linux/types.h> 7 7 #include <linux/signal.h> 8 + #include <asm/extable.h> 8 9 #include <asm/ptrace.h> 9 10 #include <asm/idle.h> 10 11
+31 -34
arch/s390/kernel/ftrace.c
··· 4 4 * 5 5 * Copyright IBM Corp. 2009,2014 6 6 * 7 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 8 - * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 9 8 */ 10 9 11 10 #include <linux/moduleloader.h> ··· 60 61 #ifdef CONFIG_EXPOLINE 61 62 asm( 62 63 " .align 16\n" 63 - "ftrace_shared_hotpatch_trampoline_ex:\n" 64 - " lmg %r0,%r1,2(%r1)\n" 65 - " ex %r0," __stringify(__LC_BR_R1) "(%r0)\n" 66 - " j .\n" 67 - "ftrace_shared_hotpatch_trampoline_ex_end:\n" 68 - ); 69 - 70 - asm( 71 - " .align 16\n" 72 64 "ftrace_shared_hotpatch_trampoline_exrl:\n" 73 65 " lmg %r0,%r1,2(%r1)\n" 74 - " .insn ril,0xc60000000000,%r0,0f\n" /* exrl */ 66 + " exrl %r0,0f\n" 75 67 " j .\n" 76 68 "0: br %r1\n" 77 69 "ftrace_shared_hotpatch_trampoline_exrl_end:\n" ··· 81 91 tend = ftrace_shared_hotpatch_trampoline_br_end; 82 92 #ifdef CONFIG_EXPOLINE 83 93 if (!nospec_disable) { 84 - tstart = ftrace_shared_hotpatch_trampoline_ex; 85 - tend = ftrace_shared_hotpatch_trampoline_ex_end; 86 - if (test_facility(35)) { /* exrl */ 87 - tstart = ftrace_shared_hotpatch_trampoline_exrl; 88 - tend = ftrace_shared_hotpatch_trampoline_exrl_end; 89 - } 94 + tstart = ftrace_shared_hotpatch_trampoline_exrl; 95 + tend = ftrace_shared_hotpatch_trampoline_exrl_end; 90 96 } 91 97 #endif /* CONFIG_EXPOLINE */ 92 98 if (end) ··· 180 194 return 0; 181 195 } 182 196 183 - static void brcl_disable(void *brcl) 197 + static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable) 184 198 { 185 - u8 op = 0x04; /* set mask field to zero */ 199 + u16 old; 200 + u8 op; 186 201 187 - s390_kernel_write((char *)brcl + 1, &op, sizeof(op)); 202 + if (get_kernel_nofault(old, addr)) 203 + return -EFAULT; 204 + if (old != expected) 205 + return -EINVAL; 206 + /* set mask field to all ones or zeroes */ 207 + op = enable ? 
0xf4 : 0x04; 208 + s390_kernel_write((char *)addr + 1, &op, sizeof(op)); 209 + return 0; 188 210 } 189 211 190 212 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, 191 213 unsigned long addr) 192 214 { 193 - brcl_disable((void *)rec->ip); 194 - return 0; 195 - } 196 - 197 - static void brcl_enable(void *brcl) 198 - { 199 - u8 op = 0xf4; /* set mask field to all ones */ 200 - 201 - s390_kernel_write((char *)brcl + 1, &op, sizeof(op)); 215 + /* Expect brcl 0xf,... */ 216 + return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false); 202 217 } 203 218 204 219 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) ··· 210 223 if (IS_ERR(trampoline)) 211 224 return PTR_ERR(trampoline); 212 225 s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr)); 213 - brcl_enable((void *)rec->ip); 214 - return 0; 226 + /* Expect brcl 0x0,... */ 227 + return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true); 215 228 } 216 229 217 230 int ftrace_update_ftrace_func(ftrace_func_t func) ··· 284 297 */ 285 298 int ftrace_enable_ftrace_graph_caller(void) 286 299 { 287 - brcl_disable(ftrace_graph_caller); 300 + int rc; 301 + 302 + /* Expect brc 0xf,... */ 303 + rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false); 304 + if (rc) 305 + return rc; 288 306 text_poke_sync_lock(); 289 307 return 0; 290 308 } 291 309 292 310 int ftrace_disable_ftrace_graph_caller(void) 293 311 { 294 - brcl_enable(ftrace_graph_caller); 312 + int rc; 313 + 314 + /* Expect brc 0x0,... */ 315 + rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true); 316 + if (rc) 317 + return rc; 295 318 text_poke_sync_lock(); 296 319 return 0; 297 320 }
-2
arch/s390/kernel/ftrace.h
··· 16 16 extern struct ftrace_hotpatch_trampoline __ftrace_hotpatch_trampolines_end[]; 17 17 extern const char ftrace_shared_hotpatch_trampoline_br[]; 18 18 extern const char ftrace_shared_hotpatch_trampoline_br_end[]; 19 - extern const char ftrace_shared_hotpatch_trampoline_ex[]; 20 - extern const char ftrace_shared_hotpatch_trampoline_ex_end[]; 21 19 extern const char ftrace_shared_hotpatch_trampoline_exrl[]; 22 20 extern const char ftrace_shared_hotpatch_trampoline_exrl_end[]; 23 21 extern const char ftrace_plt_template[];
-1
arch/s390/kernel/head64.S
··· 5 5 * Author(s): Hartmut Penner <hp@de.ibm.com> 6 6 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 7 * Rob van der Heij <rvdhei@iae.nl> 8 - * Heiko Carstens <heiko.carstens@de.ibm.com> 9 8 * 10 9 */ 11 10
+1 -1
arch/s390/kernel/ipl.c
··· 4 4 * 5 5 * Copyright IBM Corp. 2005, 2012 6 6 * Author(s): Michael Holzheu <holzheu@de.ibm.com> 7 - * Heiko Carstens <heiko.carstens@de.ibm.com> 8 7 * Volker Sameske <sameske@de.ibm.com> 9 8 */ 10 9 ··· 19 20 #include <linux/gfp.h> 20 21 #include <linux/crash_dump.h> 21 22 #include <linux/debug_locks.h> 23 + #include <asm/asm-extable.h> 22 24 #include <asm/diag.h> 23 25 #include <asm/ipl.h> 24 26 #include <asm/smp.h>
+1 -1
arch/s390/kernel/irq.c
··· 342 342 struct ext_int_info *p; 343 343 int index; 344 344 345 - ext_code = *(struct ext_code *) &regs->int_code; 345 + ext_code.int_code = regs->int_code; 346 346 if (ext_code.code != EXT_IRQ_CLK_COMP) 347 347 set_cpu_flag(CIF_NOHZ_DELAY); 348 348
+14 -29
arch/s390/kernel/kprobes.c
··· 372 372 } 373 373 NOKPROBE_SYMBOL(kprobe_handler); 374 374 375 - /* 376 - * Function return probe trampoline: 377 - * - init_kprobes() establishes a probepoint here 378 - * - When the probed function returns, this probe 379 - * causes the handlers to fire 380 - */ 381 - static void __used kretprobe_trampoline_holder(void) 375 + void arch_kretprobe_fixup_return(struct pt_regs *regs, 376 + kprobe_opcode_t *correct_ret_addr) 382 377 { 383 - asm volatile(".global __kretprobe_trampoline\n" 384 - "__kretprobe_trampoline: bcr 0,0\n"); 378 + /* Replace fake return address with real one. */ 379 + regs->gprs[14] = (unsigned long)correct_ret_addr; 385 380 } 381 + NOKPROBE_SYMBOL(arch_kretprobe_fixup_return); 386 382 387 383 /* 388 - * Called when the probe at kretprobe trampoline is hit 384 + * Called from __kretprobe_trampoline 389 385 */ 390 - static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) 386 + void trampoline_probe_handler(struct pt_regs *regs) 391 387 { 392 - regs->psw.addr = __kretprobe_trampoline_handler(regs, NULL); 393 - /* 394 - * By returning a non-zero value, we are telling 395 - * kprobe_handler() that we don't want the post_handler 396 - * to run (and have re-enabled preemption) 397 - */ 398 - return 1; 388 + kretprobe_trampoline_handler(regs, NULL); 399 389 } 400 390 NOKPROBE_SYMBOL(trampoline_probe_handler); 391 + 392 + /* assembler function that handles the kretprobes must not be probed itself */ 393 + NOKPROBE_SYMBOL(__kretprobe_trampoline); 401 394 402 395 /* 403 396 * Called after single-stepping. p->addr is the address of the ··· 458 465 { 459 466 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 460 467 struct kprobe *p = kprobe_running(); 461 - const struct exception_table_entry *entry; 462 468 463 469 switch(kcb->kprobe_status) { 464 470 case KPROBE_HIT_SS: ··· 479 487 * In case the user-specified fault handler returned 480 488 * zero, try to fix up. 
481 489 */ 482 - entry = s390_search_extables(regs->psw.addr); 483 - if (entry && ex_handle(entry, regs)) 490 + if (fixup_exception(regs)) 484 491 return 1; 485 - 486 492 /* 487 493 * fixup_exception() could not handle it, 488 494 * Let do_page_fault() fix it. ··· 544 554 } 545 555 NOKPROBE_SYMBOL(kprobe_exceptions_notify); 546 556 547 - static struct kprobe trampoline = { 548 - .addr = (kprobe_opcode_t *) &__kretprobe_trampoline, 549 - .pre_handler = trampoline_probe_handler 550 - }; 551 - 552 557 int __init arch_init_kprobes(void) 553 558 { 554 - return register_kprobe(&trampoline); 559 + return 0; 555 560 } 556 561 557 562 int arch_trampoline_kprobe(struct kprobe *p) 558 563 { 559 - return p->addr == (kprobe_opcode_t *) &__kretprobe_trampoline; 564 + return 0; 560 565 } 561 566 NOKPROBE_SYMBOL(arch_trampoline_kprobe);
+1 -2
arch/s390/kernel/lgr.c
··· 88 88 if (stsi(si, 2, 2, 2)) 89 89 return; 90 90 cpascii(lgr_info->name, si->name, sizeof(si->name)); 91 - memcpy(&lgr_info->lpar_number, &si->lpar_number, 92 - sizeof(lgr_info->lpar_number)); 91 + lgr_info->lpar_number = si->lpar_number; 93 92 } 94 93 95 94 /*
-1
arch/s390/kernel/machine_kexec.c
··· 3 3 * Copyright IBM Corp. 2005, 2011 4 4 * 5 5 * Author(s): Rolf Adelsberger, 6 - * Heiko Carstens <heiko.carstens@de.ibm.com> 7 6 * Michael Holzheu <holzheu@linux.vnet.ibm.com> 8 7 */ 9 8
+48 -24
arch/s390/kernel/mcount.S
··· 2 2 /* 3 3 * Copyright IBM Corp. 2008, 2009 4 4 * 5 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 6 - * 7 5 */ 8 6 9 7 #include <linux/linkage.h> ··· 10 12 #include <asm/nospec-insn.h> 11 13 #include <asm/ptrace.h> 12 14 #include <asm/export.h> 15 + 16 + 17 + #define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE) 18 + #define STACK_PTREGS (STACK_FRAME_OVERHEAD) 19 + #define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS) 20 + #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW) 21 + #define STACK_PTREGS_ORIG_GPR2 (STACK_PTREGS + __PT_ORIG_GPR2) 22 + #define STACK_PTREGS_FLAGS (STACK_PTREGS + __PT_FLAGS) 23 + /* packed stack: allocate just enough for r14, r15 and backchain */ 24 + #define TRACED_FUNC_FRAME_SIZE 24 25 + 26 + #ifdef CONFIG_FUNCTION_TRACER 13 27 14 28 GEN_BR_THUNK %r1 15 29 GEN_BR_THUNK %r14 ··· 32 22 BR_EX %r14 33 23 ENDPROC(ftrace_stub) 34 24 35 - #define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE) 36 - #define STACK_PTREGS (STACK_FRAME_OVERHEAD) 37 - #define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS) 38 - #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW) 39 - #define STACK_PTREGS_ORIG_GPR2 (STACK_PTREGS + __PT_ORIG_GPR2) 40 - #define STACK_PTREGS_FLAGS (STACK_PTREGS + __PT_FLAGS) 41 - #ifdef __PACK_STACK 42 - /* allocate just enough for r14, r15 and backchain */ 43 - #define TRACED_FUNC_FRAME_SIZE 24 44 - #else 45 - #define TRACED_FUNC_FRAME_SIZE STACK_FRAME_OVERHEAD 46 - #endif 47 - 48 25 .macro ftrace_regs_entry, allregs=0 49 26 stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller 50 27 51 28 .if \allregs == 1 52 - lghi %r14,0 # save condition code 53 - ipm %r14 # don't put any instructions 54 - sllg %r14,%r14,16 # clobbering CC before this point 29 + # save psw mask 30 + # don't put any instructions clobbering CC before this point 31 + epsw %r1,%r14 32 + risbg %r14,%r1,0,31,32 55 33 .endif 56 34 57 35 lgr %r1,%r15 ··· 55 57 56 58 .if \allregs == 1 57 59 stg %r14,(STACK_PTREGS_PSW)(%r15) 58 - stosm 
(STACK_PTREGS_PSW)(%r15),0 59 - #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES 60 60 mvghi STACK_PTREGS_FLAGS(%r15),_PIF_FTRACE_FULL_REGS 61 - #else 62 - lghi %r14,_PIF_FTRACE_FULL_REGS 63 - stg %r14,STACK_PTREGS_FLAGS(%r15) 64 - #endif 65 61 .else 66 62 xc STACK_PTREGS_FLAGS(8,%r15),STACK_PTREGS_FLAGS(%r15) 67 63 .endif ··· 133 141 SYM_FUNC_END(return_to_handler) 134 142 135 143 #endif 144 + #endif /* CONFIG_FUNCTION_TRACER */ 145 + 146 + #ifdef CONFIG_KPROBES 147 + 148 + SYM_FUNC_START(__kretprobe_trampoline) 149 + 150 + stg %r14,(__SF_GPRS+8*8)(%r15) 151 + lay %r15,-STACK_FRAME_SIZE(%r15) 152 + stmg %r0,%r14,STACK_PTREGS_GPRS(%r15) 153 + 154 + # store original stack pointer in backchain and pt_regs 155 + lay %r7,STACK_FRAME_SIZE(%r15) 156 + stg %r7,__SF_BACKCHAIN(%r15) 157 + stg %r7,STACK_PTREGS_GPRS+(15*8)(%r15) 158 + 159 + # store full psw 160 + epsw %r2,%r3 161 + risbg %r3,%r2,0,31,32 162 + stg %r3,STACK_PTREGS_PSW(%r15) 163 + larl %r1,__kretprobe_trampoline 164 + stg %r1,STACK_PTREGS_PSW+8(%r15) 165 + 166 + lay %r2,STACK_PTREGS(%r15) 167 + brasl %r14,trampoline_probe_handler 168 + 169 + mvc __SF_EMPTY(16,%r7),STACK_PTREGS_PSW(%r15) 170 + lmg %r0,%r15,STACK_PTREGS_GPRS(%r15) 171 + lpswe __SF_EMPTY(%r15) 172 + 173 + SYM_FUNC_END(__kretprobe_trampoline) 174 + 175 + #endif /* CONFIG_KPROBES */
+3 -9
arch/s390/kernel/module.c
··· 517 517 518 518 ij = me->core_layout.base + me->arch.plt_offset + 519 519 me->arch.plt_size - PLT_ENTRY_SIZE; 520 - if (test_facility(35)) { 521 - ij[0] = 0xc6000000; /* exrl %r0,.+10 */ 522 - ij[1] = 0x0005a7f4; /* j . */ 523 - ij[2] = 0x000007f1; /* br %r1 */ 524 - } else { 525 - ij[0] = 0x44000000 | (unsigned int) 526 - offsetof(struct lowcore, br_r1_trampoline); 527 - ij[1] = 0xa7f40000; /* j . */ 528 - } 520 + ij[0] = 0xc6000000; /* exrl %r0,.+10 */ 521 + ij[1] = 0x0005a7f4; /* j . */ 522 + ij[2] = 0x000007f1; /* br %r1 */ 529 523 } 530 524 531 525 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-1
arch/s390/kernel/nmi.c
··· 6 6 * Author(s): Ingo Adlung <adlung@de.ibm.com>, 7 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>, 8 8 * Cornelia Huck <cornelia.huck@de.ibm.com>, 9 - * Heiko Carstens <heiko.carstens@de.ibm.com>, 10 9 */ 11 10 12 11 #include <linux/kernel_stat.h>
+5 -26
arch/s390/kernel/nospec-branch.c
··· 105 105 s32 *epo; 106 106 107 107 /* Second part of the instruction replace is always a nop */ 108 + memcpy(insnbuf + 2, branch, sizeof(branch)); 108 109 for (epo = start; epo < end; epo++) { 109 110 instr = (u8 *) epo + *epo; 110 111 if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04) ··· 118 117 if (thunk[0] == 0xc6 && thunk[1] == 0x00) 119 118 /* exrl %r0,<target-br> */ 120 119 br = thunk + (*(int *)(thunk + 2)) * 2; 121 - else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 && 122 - thunk[6] == 0x44 && thunk[7] == 0x00 && 123 - (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 && 124 - (thunk[1] & 0xf0) == (thunk[8] & 0xf0)) 125 - /* larl %rx,<target br> + ex %r0,0(%rx) */ 126 - br = thunk + (*(int *)(thunk + 2)) * 2; 127 120 else 128 121 continue; 129 - /* Check for unconditional branch 0x07f? or 0x47f???? */ 130 - if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0) 122 + if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0) 131 123 continue; 132 - 133 - memcpy(insnbuf + 2, branch, sizeof(branch)); 134 124 switch (type) { 135 125 case BRCL_EXPOLINE: 126 + /* brcl to thunk, replace with br + nop */ 136 127 insnbuf[0] = br[0]; 137 128 insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); 138 - if (br[0] == 0x47) { 139 - /* brcl to b, replace with bc + nopr */ 140 - insnbuf[2] = br[2]; 141 - insnbuf[3] = br[3]; 142 - } else { 143 - /* brcl to br, replace with bcr + nop */ 144 - } 145 129 break; 146 130 case BRASL_EXPOLINE: 131 + /* brasl to thunk, replace with basr + nop */ 132 + insnbuf[0] = 0x0d; 147 133 insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); 148 - if (br[0] == 0x47) { 149 - /* brasl to b, replace with bas + nopr */ 150 - insnbuf[0] = 0x4d; 151 - insnbuf[2] = br[2]; 152 - insnbuf[3] = br[3]; 153 - } else { 154 - /* brasl to br, replace with basr + nop */ 155 - insnbuf[0] = 0x0d; 156 - } 157 134 break; 158 135 } 159 136
+6 -6
arch/s390/kernel/os_info.c
··· 15 15 #include <asm/checksum.h> 16 16 #include <asm/lowcore.h> 17 17 #include <asm/os_info.h> 18 + #include <asm/asm-offsets.h> 18 19 19 20 /* 20 21 * OS info structure has to be page aligned ··· 46 45 */ 47 46 void os_info_entry_add(int nr, void *ptr, u64 size) 48 47 { 49 - os_info.entry[nr].addr = (u64)(unsigned long)ptr; 48 + os_info.entry[nr].addr = __pa(ptr); 50 49 os_info.entry[nr].size = size; 51 50 os_info.entry[nr].csum = (__force u32)csum_partial(ptr, size, 0); 52 51 os_info.csum = os_info_csum(&os_info); ··· 63 62 os_info.version_minor = OS_INFO_VERSION_MINOR; 64 63 os_info.magic = OS_INFO_MAGIC; 65 64 os_info.csum = os_info_csum(&os_info); 66 - mem_assign_absolute(S390_lowcore.os_info, (unsigned long) ptr); 65 + mem_assign_absolute(S390_lowcore.os_info, __pa(ptr)); 67 66 } 68 67 69 68 #ifdef CONFIG_CRASH_DUMP ··· 91 90 goto fail; 92 91 } 93 92 buf_align = PTR_ALIGN(buf, align); 94 - if (copy_oldmem_kernel(buf_align, (void *) addr, size)) { 93 + if (copy_oldmem_kernel(buf_align, addr, size)) { 95 94 msg = "copy failed"; 96 95 goto fail_free; 97 96 } ··· 124 123 return; 125 124 if (!oldmem_data.start) 126 125 goto fail; 127 - if (copy_oldmem_kernel(&addr, &S390_lowcore.os_info, sizeof(addr))) 126 + if (copy_oldmem_kernel(&addr, __LC_OS_INFO, sizeof(addr))) 128 127 goto fail; 129 128 if (addr == 0 || addr % PAGE_SIZE) 130 129 goto fail; 131 130 os_info_old = kzalloc(sizeof(*os_info_old), GFP_KERNEL); 132 131 if (!os_info_old) 133 132 goto fail; 134 - if (copy_oldmem_kernel(os_info_old, (void *) addr, 135 - sizeof(*os_info_old))) 133 + if (copy_oldmem_kernel(os_info_old, addr, sizeof(*os_info_old))) 136 134 goto fail_free; 137 135 if (os_info_old->magic != OS_INFO_MAGIC) 138 136 goto fail_free;
+5 -6
arch/s390/kernel/perf_cpum_cf.c
··· 1451 1451 /* Get the CPU speed, try sampling facility first and CPU attributes second. */ 1452 1452 static void cfdiag_get_cpu_speed(void) 1453 1453 { 1454 + unsigned long mhz; 1455 + 1454 1456 if (cpum_sf_avail()) { /* Sampling facility first */ 1455 1457 struct hws_qsi_info_block si; 1456 1458 ··· 1466 1464 /* Fallback: CPU speed extract static part. Used in case 1467 1465 * CPU Measurement Sampling Facility is turned off. 1468 1466 */ 1469 - if (test_facility(34)) { 1470 - unsigned long mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0); 1471 - 1472 - if (mhz != -1UL) 1473 - cfdiag_cpu_speed = mhz & 0xffffffff; 1474 - } 1467 + mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0); 1468 + if (mhz != -1UL) 1469 + cfdiag_cpu_speed = mhz & 0xffffffff; 1475 1470 } 1476 1471 1477 1472 static int cfset_init(void)
+3 -19
arch/s390/kernel/processor.c
··· 172 172 static int __init setup_hwcaps(void) 173 173 { 174 174 /* instructions named N3, "backported" to esa-mode */ 175 - if (test_facility(0)) 176 - elf_hwcap |= HWCAP_ESAN3; 175 + elf_hwcap |= HWCAP_ESAN3; 177 176 178 177 /* z/Architecture mode active */ 179 178 elf_hwcap |= HWCAP_ZARCH; ··· 190 191 elf_hwcap |= HWCAP_LDISP; 191 192 192 193 /* extended-immediate */ 193 - if (test_facility(21)) 194 - elf_hwcap |= HWCAP_EIMM; 194 + elf_hwcap |= HWCAP_EIMM; 195 195 196 196 /* extended-translation facility 3 enhancement */ 197 197 if (test_facility(22) && test_facility(30)) ··· 260 262 get_cpu_id(&cpu_id); 261 263 add_device_randomness(&cpu_id, sizeof(cpu_id)); 262 264 switch (cpu_id.machine) { 263 - case 0x2064: 264 - case 0x2066: 265 - default: /* Use "z900" as default for 64 bit kernels. */ 266 - strcpy(elf_platform, "z900"); 267 - break; 268 - case 0x2084: 269 - case 0x2086: 270 - strcpy(elf_platform, "z990"); 271 - break; 272 - case 0x2094: 273 - case 0x2096: 274 - strcpy(elf_platform, "z9-109"); 275 - break; 276 - case 0x2097: 277 - case 0x2098: 265 + default: /* Use "z10" as default. */ 278 266 strcpy(elf_platform, "z10"); 279 267 break; 280 268 case 0x2817:
+76 -88
arch/s390/kernel/ptrace.c
··· 147 147 static inline unsigned long __peek_user_per(struct task_struct *child, 148 148 addr_t addr) 149 149 { 150 - struct per_struct_kernel *dummy = NULL; 151 - 152 - if (addr == (addr_t) &dummy->cr9) 150 + if (addr == offsetof(struct per_struct_kernel, cr9)) 153 151 /* Control bits of the active per set. */ 154 152 return test_thread_flag(TIF_SINGLE_STEP) ? 155 153 PER_EVENT_IFETCH : child->thread.per_user.control; 156 - else if (addr == (addr_t) &dummy->cr10) 154 + else if (addr == offsetof(struct per_struct_kernel, cr10)) 157 155 /* Start address of the active per set. */ 158 156 return test_thread_flag(TIF_SINGLE_STEP) ? 159 157 0 : child->thread.per_user.start; 160 - else if (addr == (addr_t) &dummy->cr11) 158 + else if (addr == offsetof(struct per_struct_kernel, cr11)) 161 159 /* End address of the active per set. */ 162 160 return test_thread_flag(TIF_SINGLE_STEP) ? 163 161 -1UL : child->thread.per_user.end; 164 - else if (addr == (addr_t) &dummy->bits) 162 + else if (addr == offsetof(struct per_struct_kernel, bits)) 165 163 /* Single-step bit. */ 166 164 return test_thread_flag(TIF_SINGLE_STEP) ? 167 165 (1UL << (BITS_PER_LONG - 1)) : 0; 168 - else if (addr == (addr_t) &dummy->starting_addr) 166 + else if (addr == offsetof(struct per_struct_kernel, starting_addr)) 169 167 /* Start address of the user specified per set. */ 170 168 return child->thread.per_user.start; 171 - else if (addr == (addr_t) &dummy->ending_addr) 169 + else if (addr == offsetof(struct per_struct_kernel, ending_addr)) 172 170 /* End address of the user specified per set. 
*/ 173 171 return child->thread.per_user.end; 174 - else if (addr == (addr_t) &dummy->perc_atmid) 172 + else if (addr == offsetof(struct per_struct_kernel, perc_atmid)) 175 173 /* PER code, ATMID and AI of the last PER trap */ 176 174 return (unsigned long) 177 175 child->thread.per_event.cause << (BITS_PER_LONG - 16); 178 - else if (addr == (addr_t) &dummy->address) 176 + else if (addr == offsetof(struct per_struct_kernel, address)) 179 177 /* Address of the last PER trap */ 180 178 return child->thread.per_event.address; 181 - else if (addr == (addr_t) &dummy->access_id) 179 + else if (addr == offsetof(struct per_struct_kernel, access_id)) 182 180 /* Access id of the last PER trap */ 183 181 return (unsigned long) 184 182 child->thread.per_event.paid << (BITS_PER_LONG - 8); ··· 194 196 */ 195 197 static unsigned long __peek_user(struct task_struct *child, addr_t addr) 196 198 { 197 - struct user *dummy = NULL; 198 199 addr_t offset, tmp; 199 200 200 - if (addr < (addr_t) &dummy->regs.acrs) { 201 + if (addr < offsetof(struct user, regs.acrs)) { 201 202 /* 202 203 * psw and gprs are stored on the stack 203 204 */ 204 205 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); 205 - if (addr == (addr_t) &dummy->regs.psw.mask) { 206 + if (addr == offsetof(struct user, regs.psw.mask)) { 206 207 /* Return a clean psw mask. */ 207 208 tmp &= PSW_MASK_USER | PSW_MASK_RI; 208 209 tmp |= PSW_USER_BITS; 209 210 } 210 211 211 - } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { 212 + } else if (addr < offsetof(struct user, regs.orig_gpr2)) { 212 213 /* 213 214 * access registers are stored in the thread structure 214 215 */ 215 - offset = addr - (addr_t) &dummy->regs.acrs; 216 + offset = addr - offsetof(struct user, regs.acrs); 216 217 /* 217 218 * Very special case: old & broken 64 bit gdb reading 218 219 * from acrs[15]. Result is a 64 bit value. Read the 219 220 * 32 bit acrs[15] value and shift it by 32. Sick... 
220 221 */ 221 - if (addr == (addr_t) &dummy->regs.acrs[15]) 222 + if (addr == offsetof(struct user, regs.acrs[15])) 222 223 tmp = ((unsigned long) child->thread.acrs[15]) << 32; 223 224 else 224 225 tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); 225 226 226 - } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { 227 + } else if (addr == offsetof(struct user, regs.orig_gpr2)) { 227 228 /* 228 229 * orig_gpr2 is stored on the kernel stack 229 230 */ 230 231 tmp = (addr_t) task_pt_regs(child)->orig_gpr2; 231 232 232 - } else if (addr < (addr_t) &dummy->regs.fp_regs) { 233 + } else if (addr < offsetof(struct user, regs.fp_regs)) { 233 234 /* 234 235 * prevent reads of padding hole between 235 236 * orig_gpr2 and fp_regs on s390. 236 237 */ 237 238 tmp = 0; 238 239 239 - } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) { 240 + } else if (addr == offsetof(struct user, regs.fp_regs.fpc)) { 240 241 /* 241 242 * floating point control reg. is in the thread structure 242 243 */ 243 244 tmp = child->thread.fpu.fpc; 244 245 tmp <<= BITS_PER_LONG - 32; 245 246 246 - } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { 247 + } else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) { 247 248 /* 248 249 * floating point regs. are either in child->thread.fpu 249 250 * or the child->thread.fpu.vxrs array 250 251 */ 251 - offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; 252 + offset = addr - offsetof(struct user, regs.fp_regs.fprs); 252 253 if (MACHINE_HAS_VX) 253 254 tmp = *(addr_t *) 254 255 ((addr_t) child->thread.fpu.vxrs + 2*offset); ··· 255 258 tmp = *(addr_t *) 256 259 ((addr_t) child->thread.fpu.fprs + offset); 257 260 258 - } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { 261 + } else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) { 259 262 /* 260 263 * Handle access to the per_info structure. 
261 264 */ 262 - addr -= (addr_t) &dummy->regs.per_info; 265 + addr -= offsetof(struct user, regs.per_info); 263 266 tmp = __peek_user_per(child, addr); 264 267 265 268 } else ··· 278 281 * an alignment of 4. Programmers from hell... 279 282 */ 280 283 mask = __ADDR_MASK; 281 - if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && 282 - addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) 284 + if (addr >= offsetof(struct user, regs.acrs) && 285 + addr < offsetof(struct user, regs.orig_gpr2)) 283 286 mask = 3; 284 287 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) 285 288 return -EIO; ··· 291 294 static inline void __poke_user_per(struct task_struct *child, 292 295 addr_t addr, addr_t data) 293 296 { 294 - struct per_struct_kernel *dummy = NULL; 295 - 296 297 /* 297 298 * There are only three fields in the per_info struct that the 298 299 * debugger user can write to. ··· 303 308 * addresses are used only if single stepping is not in effect. 304 309 * Writes to any other field in per_info are ignored. 305 310 */ 306 - if (addr == (addr_t) &dummy->cr9) 311 + if (addr == offsetof(struct per_struct_kernel, cr9)) 307 312 /* PER event mask of the user specified per set. */ 308 313 child->thread.per_user.control = 309 314 data & (PER_EVENT_MASK | PER_CONTROL_MASK); 310 - else if (addr == (addr_t) &dummy->starting_addr) 315 + else if (addr == offsetof(struct per_struct_kernel, starting_addr)) 311 316 /* Starting address of the user specified per set. */ 312 317 child->thread.per_user.start = data; 313 - else if (addr == (addr_t) &dummy->ending_addr) 318 + else if (addr == offsetof(struct per_struct_kernel, ending_addr)) 314 319 /* Ending address of the user specified per set. 
*/ 315 320 child->thread.per_user.end = data; 316 321 } ··· 323 328 */ 324 329 static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) 325 330 { 326 - struct user *dummy = NULL; 327 331 addr_t offset; 328 332 329 333 330 - if (addr < (addr_t) &dummy->regs.acrs) { 334 + if (addr < offsetof(struct user, regs.acrs)) { 331 335 struct pt_regs *regs = task_pt_regs(child); 332 336 /* 333 337 * psw and gprs are stored on the stack 334 338 */ 335 - if (addr == (addr_t) &dummy->regs.psw.mask) { 339 + if (addr == offsetof(struct user, regs.psw.mask)) { 336 340 unsigned long mask = PSW_MASK_USER; 337 341 338 342 mask |= is_ri_task(child) ? PSW_MASK_RI : 0; ··· 353 359 regs->int_code = 0x20000 | (data & 0xffff); 354 360 } 355 361 *(addr_t *)((addr_t) &regs->psw + addr) = data; 356 - } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { 362 + } else if (addr < offsetof(struct user, regs.orig_gpr2)) { 357 363 /* 358 364 * access registers are stored in the thread structure 359 365 */ 360 - offset = addr - (addr_t) &dummy->regs.acrs; 366 + offset = addr - offsetof(struct user, regs.acrs); 361 367 /* 362 368 * Very special case: old & broken 64 bit gdb writing 363 369 * to acrs[15] with a 64 bit value. Ignore the lower 364 370 * half of the value and write the upper 32 bit to 365 371 * acrs[15]. Sick... 
366 372 */ 367 - if (addr == (addr_t) &dummy->regs.acrs[15]) 373 + if (addr == offsetof(struct user, regs.acrs[15])) 368 374 child->thread.acrs[15] = (unsigned int) (data >> 32); 369 375 else 370 376 *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; 371 377 372 - } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { 378 + } else if (addr == offsetof(struct user, regs.orig_gpr2)) { 373 379 /* 374 380 * orig_gpr2 is stored on the kernel stack 375 381 */ 376 382 task_pt_regs(child)->orig_gpr2 = data; 377 383 378 - } else if (addr < (addr_t) &dummy->regs.fp_regs) { 384 + } else if (addr < offsetof(struct user, regs.fp_regs)) { 379 385 /* 380 386 * prevent writes of padding hole between 381 387 * orig_gpr2 and fp_regs on s390. 382 388 */ 383 389 return 0; 384 390 385 - } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) { 391 + } else if (addr == offsetof(struct user, regs.fp_regs.fpc)) { 386 392 /* 387 393 * floating point control reg. is in the thread structure 388 394 */ ··· 391 397 return -EINVAL; 392 398 child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32); 393 399 394 - } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { 400 + } else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) { 395 401 /* 396 402 * floating point regs. are either in child->thread.fpu 397 403 * or the child->thread.fpu.vxrs array 398 404 */ 399 - offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; 405 + offset = addr - offsetof(struct user, regs.fp_regs.fprs); 400 406 if (MACHINE_HAS_VX) 401 407 *(addr_t *)((addr_t) 402 408 child->thread.fpu.vxrs + 2*offset) = data; ··· 404 410 *(addr_t *)((addr_t) 405 411 child->thread.fpu.fprs + offset) = data; 406 412 407 - } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { 413 + } else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) { 408 414 /* 409 415 * Handle access to the per_info structure. 
410 416 */ 411 - addr -= (addr_t) &dummy->regs.per_info; 417 + addr -= offsetof(struct user, regs.per_info); 412 418 __poke_user_per(child, addr, data); 413 419 414 420 } ··· 425 431 * an alignment of 4. Programmers from hell indeed... 426 432 */ 427 433 mask = __ADDR_MASK; 428 - if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && 429 - addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) 434 + if (addr >= offsetof(struct user, regs.acrs) && 435 + addr < offsetof(struct user, regs.orig_gpr2)) 430 436 mask = 3; 431 437 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) 432 438 return -EIO; ··· 534 540 static inline __u32 __peek_user_per_compat(struct task_struct *child, 535 541 addr_t addr) 536 542 { 537 - struct compat_per_struct_kernel *dummy32 = NULL; 538 - 539 - if (addr == (addr_t) &dummy32->cr9) 543 + if (addr == offsetof(struct compat_per_struct_kernel, cr9)) 540 544 /* Control bits of the active per set. */ 541 545 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ? 542 546 PER_EVENT_IFETCH : child->thread.per_user.control; 543 - else if (addr == (addr_t) &dummy32->cr10) 547 + else if (addr == offsetof(struct compat_per_struct_kernel, cr10)) 544 548 /* Start address of the active per set. */ 545 549 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ? 546 550 0 : child->thread.per_user.start; 547 - else if (addr == (addr_t) &dummy32->cr11) 551 + else if (addr == offsetof(struct compat_per_struct_kernel, cr11)) 548 552 /* End address of the active per set. */ 549 553 return test_thread_flag(TIF_SINGLE_STEP) ? 550 554 PSW32_ADDR_INSN : child->thread.per_user.end; 551 - else if (addr == (addr_t) &dummy32->bits) 555 + else if (addr == offsetof(struct compat_per_struct_kernel, bits)) 552 556 /* Single-step bit. */ 553 557 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ? 
554 558 0x80000000 : 0; 555 - else if (addr == (addr_t) &dummy32->starting_addr) 559 + else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr)) 556 560 /* Start address of the user specified per set. */ 557 561 return (__u32) child->thread.per_user.start; 558 - else if (addr == (addr_t) &dummy32->ending_addr) 562 + else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr)) 559 563 /* End address of the user specified per set. */ 560 564 return (__u32) child->thread.per_user.end; 561 - else if (addr == (addr_t) &dummy32->perc_atmid) 565 + else if (addr == offsetof(struct compat_per_struct_kernel, perc_atmid)) 562 566 /* PER code, ATMID and AI of the last PER trap */ 563 567 return (__u32) child->thread.per_event.cause << 16; 564 - else if (addr == (addr_t) &dummy32->address) 568 + else if (addr == offsetof(struct compat_per_struct_kernel, address)) 565 569 /* Address of the last PER trap */ 566 570 return (__u32) child->thread.per_event.address; 567 - else if (addr == (addr_t) &dummy32->access_id) 571 + else if (addr == offsetof(struct compat_per_struct_kernel, access_id)) 568 572 /* Access id of the last PER trap */ 569 573 return (__u32) child->thread.per_event.paid << 24; 570 574 return 0; ··· 573 581 */ 574 582 static u32 __peek_user_compat(struct task_struct *child, addr_t addr) 575 583 { 576 - struct compat_user *dummy32 = NULL; 577 584 addr_t offset; 578 585 __u32 tmp; 579 586 580 - if (addr < (addr_t) &dummy32->regs.acrs) { 587 + if (addr < offsetof(struct compat_user, regs.acrs)) { 581 588 struct pt_regs *regs = task_pt_regs(child); 582 589 /* 583 590 * psw and gprs are stored on the stack 584 591 */ 585 - if (addr == (addr_t) &dummy32->regs.psw.mask) { 592 + if (addr == offsetof(struct compat_user, regs.psw.mask)) { 586 593 /* Fake a 31 bit psw mask. 
*/ 587 594 tmp = (__u32)(regs->psw.mask >> 32); 588 595 tmp &= PSW32_MASK_USER | PSW32_MASK_RI; 589 596 tmp |= PSW32_USER_BITS; 590 - } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 597 + } else if (addr == offsetof(struct compat_user, regs.psw.addr)) { 591 598 /* Fake a 31 bit psw address. */ 592 599 tmp = (__u32) regs->psw.addr | 593 600 (__u32)(regs->psw.mask & PSW_MASK_BA); ··· 594 603 /* gpr 0-15 */ 595 604 tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4); 596 605 } 597 - } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { 606 + } else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) { 598 607 /* 599 608 * access registers are stored in the thread structure 600 609 */ 601 - offset = addr - (addr_t) &dummy32->regs.acrs; 610 + offset = addr - offsetof(struct compat_user, regs.acrs); 602 611 tmp = *(__u32*)((addr_t) &child->thread.acrs + offset); 603 612 604 - } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { 613 + } else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) { 605 614 /* 606 615 * orig_gpr2 is stored on the kernel stack 607 616 */ 608 617 tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4); 609 618 610 - } else if (addr < (addr_t) &dummy32->regs.fp_regs) { 619 + } else if (addr < offsetof(struct compat_user, regs.fp_regs)) { 611 620 /* 612 621 * prevent reads of padding hole between 613 622 * orig_gpr2 and fp_regs on s390. 614 623 */ 615 624 tmp = 0; 616 625 617 - } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) { 626 + } else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) { 618 627 /* 619 628 * floating point control reg. is in the thread structure 620 629 */ 621 630 tmp = child->thread.fpu.fpc; 622 631 623 - } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { 632 + } else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) { 624 633 /* 625 634 * floating point regs. 
are either in child->thread.fpu 626 635 * or the child->thread.fpu.vxrs array 627 636 */ 628 - offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; 637 + offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs); 629 638 if (MACHINE_HAS_VX) 630 639 tmp = *(__u32 *) 631 640 ((addr_t) child->thread.fpu.vxrs + 2*offset); ··· 633 642 tmp = *(__u32 *) 634 643 ((addr_t) child->thread.fpu.fprs + offset); 635 644 636 - } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { 645 + } else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) { 637 646 /* 638 647 * Handle access to the per_info structure. 639 648 */ 640 - addr -= (addr_t) &dummy32->regs.per_info; 649 + addr -= offsetof(struct compat_user, regs.per_info); 641 650 tmp = __peek_user_per_compat(child, addr); 642 651 643 652 } else ··· 664 673 static inline void __poke_user_per_compat(struct task_struct *child, 665 674 addr_t addr, __u32 data) 666 675 { 667 - struct compat_per_struct_kernel *dummy32 = NULL; 668 - 669 - if (addr == (addr_t) &dummy32->cr9) 676 + if (addr == offsetof(struct compat_per_struct_kernel, cr9)) 670 677 /* PER event mask of the user specified per set. */ 671 678 child->thread.per_user.control = 672 679 data & (PER_EVENT_MASK | PER_CONTROL_MASK); 673 - else if (addr == (addr_t) &dummy32->starting_addr) 680 + else if (addr == offsetof(struct compat_per_struct_kernel, starting_addr)) 674 681 /* Starting address of the user specified per set. */ 675 682 child->thread.per_user.start = data; 676 - else if (addr == (addr_t) &dummy32->ending_addr) 683 + else if (addr == offsetof(struct compat_per_struct_kernel, ending_addr)) 677 684 /* Ending address of the user specified per set. 
*/ 678 685 child->thread.per_user.end = data; 679 686 } ··· 682 693 static int __poke_user_compat(struct task_struct *child, 683 694 addr_t addr, addr_t data) 684 695 { 685 - struct compat_user *dummy32 = NULL; 686 696 __u32 tmp = (__u32) data; 687 697 addr_t offset; 688 698 689 - if (addr < (addr_t) &dummy32->regs.acrs) { 699 + if (addr < offsetof(struct compat_user, regs.acrs)) { 690 700 struct pt_regs *regs = task_pt_regs(child); 691 701 /* 692 702 * psw, gprs, acrs and orig_gpr2 are stored on the stack 693 703 */ 694 - if (addr == (addr_t) &dummy32->regs.psw.mask) { 704 + if (addr == offsetof(struct compat_user, regs.psw.mask)) { 695 705 __u32 mask = PSW32_MASK_USER; 696 706 697 707 mask |= is_ri_task(child) ? PSW32_MASK_RI : 0; ··· 704 716 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 705 717 (regs->psw.mask & PSW_MASK_BA) | 706 718 (__u64)(tmp & mask) << 32; 707 - } else if (addr == (addr_t) &dummy32->regs.psw.addr) { 719 + } else if (addr == offsetof(struct compat_user, regs.psw.addr)) { 708 720 /* Build a 64 bit psw address from 31 bit address. */ 709 721 regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; 710 722 /* Transfer 31 bit amode bit to psw mask. 
*/ ··· 720 732 /* gpr 0-15 */ 721 733 *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; 722 734 } 723 - } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { 735 + } else if (addr < offsetof(struct compat_user, regs.orig_gpr2)) { 724 736 /* 725 737 * access registers are stored in the thread structure 726 738 */ 727 - offset = addr - (addr_t) &dummy32->regs.acrs; 739 + offset = addr - offsetof(struct compat_user, regs.acrs); 728 740 *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp; 729 741 730 - } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { 742 + } else if (addr == offsetof(struct compat_user, regs.orig_gpr2)) { 731 743 /* 732 744 * orig_gpr2 is stored on the kernel stack 733 745 */ 734 746 *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp; 735 747 736 - } else if (addr < (addr_t) &dummy32->regs.fp_regs) { 748 + } else if (addr < offsetof(struct compat_user, regs.fp_regs)) { 737 749 /* 738 750 * prevent writess of padding hole between 739 751 * orig_gpr2 and fp_regs on s390. 740 752 */ 741 753 return 0; 742 754 743 - } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) { 755 + } else if (addr == offsetof(struct compat_user, regs.fp_regs.fpc)) { 744 756 /* 745 757 * floating point control reg. is in the thread structure 746 758 */ ··· 748 760 return -EINVAL; 749 761 child->thread.fpu.fpc = data; 750 762 751 - } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { 763 + } else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) { 752 764 /* 753 765 * floating point regs. 
are either in child->thread.fpu 754 766 * or the child->thread.fpu.vxrs array 755 767 */ 756 - offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; 768 + offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs); 757 769 if (MACHINE_HAS_VX) 758 770 *(__u32 *)((addr_t) 759 771 child->thread.fpu.vxrs + 2*offset) = tmp; ··· 761 773 *(__u32 *)((addr_t) 762 774 child->thread.fpu.fprs + offset) = tmp; 763 775 764 - } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { 776 + } else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) { 765 777 /* 766 778 * Handle access to the per_info structure. 767 779 */ 768 - addr -= (addr_t) &dummy32->regs.per_info; 780 + addr -= offsetof(struct compat_user, regs.per_info); 769 781 __poke_user_per_compat(child, addr, data); 770 782 } 771 783
+1 -2
arch/s390/kernel/relocate_kernel.S
··· 2 2 /* 3 3 * Copyright IBM Corp. 2005 4 4 * 5 - * Author(s): Rolf Adelsberger, 6 - * Heiko Carstens <heiko.carstens@de.ibm.com> 5 + * Author(s): Rolf Adelsberger 7 6 * 8 7 */ 9 8
-1
arch/s390/kernel/setup.c
··· 490 490 lc->spinlock_lockval = arch_spin_lockval(0); 491 491 lc->spinlock_index = 0; 492 492 arch_spin_lock_setup(0); 493 - lc->br_r1_trampoline = 0x07f1; /* br %r1 */ 494 493 lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW); 495 494 lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); 496 495 lc->preempt_count = PREEMPT_DISABLED;
+1 -1
arch/s390/kernel/signal.c
··· 141 141 { 142 142 _sigregs user_sregs; 143 143 144 - /* Alwys make any pending restarted system call return -EINTR */ 144 + /* Always make any pending restarted system call return -EINTR */ 145 145 current->restart_block.fn = do_no_restart_syscall; 146 146 147 147 if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs)))
+4 -6
arch/s390/kernel/smp.c
··· 5 5 * Copyright IBM Corp. 1999, 2012 6 6 * Author(s): Denis Joseph Barrow, 7 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>, 8 - * Heiko Carstens <heiko.carstens@de.ibm.com>, 9 8 * 10 9 * based on other smp stuff by 11 10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> ··· 207 208 lc->cpu_nr = cpu; 208 209 lc->spinlock_lockval = arch_spin_lockval(cpu); 209 210 lc->spinlock_index = 0; 210 - lc->br_r1_trampoline = 0x07f1; /* br %r1 */ 211 211 lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW); 212 212 lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW); 213 213 lc->preempt_count = PREEMPT_DISABLED; ··· 669 671 bool is_boot_cpu, void *regs) 670 672 { 671 673 if (is_boot_cpu) 672 - copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512); 674 + copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512); 673 675 else 674 676 __pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(regs)); 675 677 save_area_add_regs(sa, regs); ··· 1251 1253 src.odd = sizeof(S390_lowcore); 1252 1254 dst.even = (unsigned long) lc; 1253 1255 dst.odd = sizeof(*lc); 1254 - pfx = (unsigned long) lc; 1256 + pfx = __pa(lc); 1255 1257 1256 1258 asm volatile( 1257 1259 " mvcl %[dst],%[src]\n" ··· 1291 1293 local_irq_restore(flags); 1292 1294 1293 1295 free_pages(lc_ipl->async_stack - STACK_INIT_OFFSET, THREAD_SIZE_ORDER); 1294 - memblock_free_late(lc_ipl->mcck_stack - STACK_INIT_OFFSET, THREAD_SIZE); 1295 - memblock_free_late((unsigned long) lc_ipl, sizeof(*lc_ipl)); 1296 + memblock_free_late(__pa(lc_ipl->mcck_stack - STACK_INIT_OFFSET), THREAD_SIZE); 1297 + memblock_free_late(__pa(lc_ipl), sizeof(*lc_ipl)); 1296 1298 1297 1299 return 0; 1298 1300 }
-1
arch/s390/kernel/stacktrace.c
··· 3 3 * Stack trace management functions 4 4 * 5 5 * Copyright IBM Corp. 2006 6 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> 7 6 */ 8 7 9 8 #include <linux/stacktrace.h>
+1
arch/s390/kernel/sysinfo.c
··· 14 14 #include <linux/delay.h> 15 15 #include <linux/export.h> 16 16 #include <linux/slab.h> 17 + #include <asm/asm-extable.h> 17 18 #include <asm/ebcdic.h> 18 19 #include <asm/debug.h> 19 20 #include <asm/sysinfo.h>
+1
arch/s390/kernel/text_amode31.S
··· 6 6 */ 7 7 8 8 #include <linux/linkage.h> 9 + #include <asm/asm-extable.h> 9 10 #include <asm/errno.h> 10 11 #include <asm/sigp.h> 11 12
-1
arch/s390/kernel/topology.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 3 * Copyright IBM Corp. 2007, 2011 4 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> 5 4 */ 6 5 7 6 #define KMSG_COMPONENT "cpu"
+5 -11
arch/s390/kernel/traps.c
··· 27 27 #include <linux/uaccess.h> 28 28 #include <linux/cpu.h> 29 29 #include <linux/entry-common.h> 30 + #include <asm/asm-extable.h> 30 31 #include <asm/fpu/api.h> 31 32 #include <asm/vtime.h> 32 33 #include "entry.h" ··· 54 53 force_sig_fault(si_signo, si_code, get_trap_ip(regs)); 55 54 report_user_fault(regs, si_signo, 0); 56 55 } else { 57 - const struct exception_table_entry *fixup; 58 - fixup = s390_search_extables(regs->psw.addr); 59 - if (!fixup || !ex_handle(fixup, regs)) 56 + if (!fixup_exception(regs)) 60 57 die(regs, str); 61 58 } 62 59 } ··· 243 244 244 245 static void monitor_event_exception(struct pt_regs *regs) 245 246 { 246 - const struct exception_table_entry *fixup; 247 - 248 247 if (user_mode(regs)) 249 248 return; 250 249 251 250 switch (report_bug(regs->psw.addr - (regs->int_code >> 16), regs)) { 252 251 case BUG_TRAP_TYPE_NONE: 253 - fixup = s390_search_extables(regs->psw.addr); 254 - if (fixup) 255 - ex_handle(fixup, regs); 252 + fixup_exception(regs); 256 253 break; 257 254 case BUG_TRAP_TYPE_WARN: 258 255 break; ··· 286 291 287 292 void __init trap_init(void) 288 293 { 289 - sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table); 290 294 local_mcck_enable(); 291 295 test_monitor_call(); 292 296 } ··· 297 303 unsigned int trapnr; 298 304 irqentry_state_t state; 299 305 300 - regs->int_code = *(u32 *)&S390_lowcore.pgm_ilc; 306 + regs->int_code = S390_lowcore.pgm_int_code; 301 307 regs->int_parm_long = S390_lowcore.trans_exc_code; 302 308 303 309 state = irqentry_enter(regs); ··· 322 328 323 329 set_thread_flag(TIF_PER_TRAP); 324 330 ev->address = S390_lowcore.per_address; 325 - ev->cause = *(u16 *)&S390_lowcore.per_code; 331 + ev->cause = S390_lowcore.per_code_combined; 326 332 ev->paid = S390_lowcore.per_access_id; 327 333 } else { 328 334 /* PER event in kernel is kprobes */
+3 -13
arch/s390/kernel/uprobes.c
··· 177 177 __typeof__(*(ptr)) input; \ 178 178 int __rc = 0; \ 179 179 \ 180 - if (!test_facility(34)) \ 181 - __rc = EMU_ILLEGAL_OP; \ 182 - else if ((u64 __force)ptr & mask) \ 180 + if ((u64 __force)ptr & mask) \ 183 181 __rc = EMU_SPECIFICATION; \ 184 182 else if (get_user(input, ptr)) \ 185 183 __rc = EMU_ADDRESSING; \ ··· 192 194 __typeof__(ptr) __ptr = (ptr); \ 193 195 int __rc = 0; \ 194 196 \ 195 - if (!test_facility(34)) \ 196 - __rc = EMU_ILLEGAL_OP; \ 197 - else if ((u64 __force)__ptr & mask) \ 197 + if ((u64 __force)__ptr & mask) \ 198 198 __rc = EMU_SPECIFICATION; \ 199 199 else if (put_user(*(input), __ptr)) \ 200 200 __rc = EMU_ADDRESSING; \ ··· 209 213 __typeof__(*(ptr)) input; \ 210 214 int __rc = 0; \ 211 215 \ 212 - if (!test_facility(34)) \ 213 - __rc = EMU_ILLEGAL_OP; \ 214 - else if ((u64 __force)ptr & mask) \ 216 + if ((u64 __force)ptr & mask) \ 215 217 __rc = EMU_SPECIFICATION; \ 216 218 else if (get_user(input, ptr)) \ 217 219 __rc = EMU_ADDRESSING; \ ··· 321 327 break; 322 328 case 0xc6: 323 329 switch (insn->opc1) { 324 - case 0x02: /* pfdrl */ 325 - if (!test_facility(34)) 326 - rc = EMU_ILLEGAL_OP; 327 - break; 328 330 case 0x04: /* cghrl */ 329 331 rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64); 330 332 break;
-1
arch/s390/kernel/vmlinux.lds.S
··· 49 49 SOFTIRQENTRY_TEXT 50 50 FTRACE_HOTPATCH_TRAMPOLINES_TEXT 51 51 *(.text.*_indirect_*) 52 - *(.fixup) 53 52 *(.gnu.warning) 54 53 . = ALIGN(PAGE_SIZE); 55 54 _etext = .; /* End of text section */
+6 -7
arch/s390/kernel/vtime.c
··· 128 128 129 129 timer = S390_lowcore.last_update_timer; 130 130 clock = S390_lowcore.last_update_clock; 131 - /* Use STORE CLOCK by default, STORE CLOCK FAST if available. */ 132 - alternative_io("stpt %0\n .insn s,0xb2050000,%1\n", 133 - "stpt %0\n .insn s,0xb27c0000,%1\n", 134 - 25, 135 - ASM_OUTPUT2("=Q" (S390_lowcore.last_update_timer), 136 - "=Q" (S390_lowcore.last_update_clock)), 137 - ASM_NO_INPUT_CLOBBER("cc")); 131 + asm volatile( 132 + " stpt %0\n" /* Store current cpu timer value */ 133 + " stckf %1" /* Store current tod clock value */ 134 + : "=Q" (S390_lowcore.last_update_timer), 135 + "=Q" (S390_lowcore.last_update_clock) 136 + : : "cc"); 138 137 clock = S390_lowcore.last_update_clock - clock; 139 138 timer -= S390_lowcore.last_update_timer; 140 139
-1
arch/s390/kvm/kvm-s390.c
··· 6 6 * 7 7 * Author(s): Carsten Otte <cotte@de.ibm.com> 8 8 * Christian Borntraeger <borntraeger@de.ibm.com> 9 - * Heiko Carstens <heiko.carstens@de.ibm.com> 10 9 * Christian Ehrhardt <ehrhardt@de.ibm.com> 11 10 * Jason J. Herne <jjherne@us.ibm.com> 12 11 */
+1
arch/s390/lib/Makefile
··· 7 7 obj-y += mem.o xor.o 8 8 lib-$(CONFIG_KPROBES) += probes.o 9 9 lib-$(CONFIG_UPROBES) += probes.o 10 + obj-$(CONFIG_EXPOLINE_EXTERN) += expoline.o 10 11 obj-$(CONFIG_S390_KPROBES_SANITY_TEST) += test_kprobes_s390.o 11 12 test_kprobes_s390-objs += test_kprobes_asm.o test_kprobes.o 12 13
-1
arch/s390/lib/delay.c
··· 4 4 * 5 5 * Copyright IBM Corp. 1999, 2008 6 6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, 7 - * Heiko Carstens <heiko.carstens@de.ibm.com>, 8 7 */ 9 8 10 9 #include <linux/processor.h>
+12
arch/s390/lib/expoline.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #include <asm/nospec-insn.h> 4 + #include <linux/linkage.h> 5 + 6 + .macro GEN_ALL_BR_THUNK_EXTERN 7 + .irp r1,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 8 + GEN_BR_THUNK_EXTERN %r\r1 9 + .endr 10 + .endm 11 + 12 + GEN_ALL_BR_THUNK_EXTERN
+195 -79
arch/s390/lib/test_unwind.c
··· 8 8 #include <linux/completion.h> 9 9 #include <linux/kallsyms.h> 10 10 #include <linux/kthread.h> 11 + #include <linux/ftrace.h> 11 12 #include <linux/module.h> 12 13 #include <linux/timer.h> 13 14 #include <linux/slab.h> ··· 17 16 #include <linux/wait.h> 18 17 #include <asm/irq.h> 19 18 20 - struct kunit *current_test; 19 + static struct kunit *current_test; 21 20 22 21 #define BT_BUF_SIZE (PAGE_SIZE * 4) 22 + 23 + static bool force_bt; 24 + module_param_named(backtrace, force_bt, bool, 0444); 25 + MODULE_PARM_DESC(backtrace, "print backtraces for all tests"); 23 26 24 27 /* 25 28 * To avoid printk line limit split backtrace by lines ··· 103 98 kunit_err(current_test, "Maximum number of frames exceeded\n"); 104 99 ret = -EINVAL; 105 100 } 106 - if (ret) 101 + if (ret || force_bt) 107 102 print_backtrace(bt); 108 103 kfree(bt); 109 104 return ret; ··· 129 124 #define UWM_CALLER 0x8 /* Unwind starting from caller. */ 130 125 #define UWM_SWITCH_STACK 0x10 /* Use call_on_stack. */ 131 126 #define UWM_IRQ 0x20 /* Unwind from irq context. */ 132 - #define UWM_PGM 0x40 /* Unwind from program check handler. */ 127 + #define UWM_PGM 0x40 /* Unwind from program check handler */ 128 + #define UWM_KPROBE_ON_FTRACE 0x80 /* Unwind from kprobe handler called via ftrace. */ 129 + #define UWM_FTRACE 0x100 /* Unwind from ftrace handler. */ 130 + #define UWM_KRETPROBE 0x200 /* Unwind kretprobe handlers. */ 133 131 134 132 static __always_inline unsigned long get_psw_addr(void) 135 133 { ··· 144 136 return psw_addr; 145 137 } 146 138 147 - #ifdef CONFIG_KPROBES 148 - static int pgm_pre_handler(struct kprobe *p, struct pt_regs *regs) 139 + static int kretprobe_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs) 140 + { 141 + struct unwindme *u = unwindme; 142 + 143 + u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? regs : NULL, 144 + (u->flags & UWM_SP) ? 
u->sp : 0); 145 + 146 + return 0; 147 + } 148 + 149 + static noinline notrace void test_unwind_kretprobed_func(void) 150 + { 151 + asm volatile(" nop\n"); 152 + } 153 + 154 + static noinline void test_unwind_kretprobed_func_caller(void) 155 + { 156 + test_unwind_kretprobed_func(); 157 + } 158 + 159 + static int test_unwind_kretprobe(struct unwindme *u) 160 + { 161 + int ret; 162 + struct kretprobe my_kretprobe; 163 + 164 + if (!IS_ENABLED(CONFIG_KPROBES)) 165 + kunit_skip(current_test, "requires CONFIG_KPROBES"); 166 + 167 + u->ret = -1; /* make sure kprobe is called */ 168 + unwindme = u; 169 + 170 + memset(&my_kretprobe, 0, sizeof(my_kretprobe)); 171 + my_kretprobe.handler = kretprobe_ret_handler; 172 + my_kretprobe.maxactive = 1; 173 + my_kretprobe.kp.addr = (kprobe_opcode_t *)test_unwind_kretprobed_func; 174 + 175 + ret = register_kretprobe(&my_kretprobe); 176 + 177 + if (ret < 0) { 178 + kunit_err(current_test, "register_kretprobe failed %d\n", ret); 179 + return -EINVAL; 180 + } 181 + 182 + test_unwind_kretprobed_func_caller(); 183 + unregister_kretprobe(&my_kretprobe); 184 + unwindme = NULL; 185 + return u->ret; 186 + } 187 + 188 + static int kprobe_pre_handler(struct kprobe *p, struct pt_regs *regs) 149 189 { 150 190 struct unwindme *u = unwindme; 151 191 ··· 201 145 (u->flags & UWM_SP) ? 
u->sp : 0); 202 146 return 0; 203 147 } 148 + 149 + extern const char test_unwind_kprobed_insn[]; 150 + 151 + static noinline void test_unwind_kprobed_func(void) 152 + { 153 + asm volatile( 154 + " nopr %%r7\n" 155 + "test_unwind_kprobed_insn:\n" 156 + " nopr %%r7\n" 157 + :); 158 + } 159 + 160 + static int test_unwind_kprobe(struct unwindme *u) 161 + { 162 + struct kprobe kp; 163 + int ret; 164 + 165 + if (!IS_ENABLED(CONFIG_KPROBES)) 166 + kunit_skip(current_test, "requires CONFIG_KPROBES"); 167 + if (!IS_ENABLED(CONFIG_KPROBES_ON_FTRACE) && u->flags & UWM_KPROBE_ON_FTRACE) 168 + kunit_skip(current_test, "requires CONFIG_KPROBES_ON_FTRACE"); 169 + 170 + u->ret = -1; /* make sure kprobe is called */ 171 + unwindme = u; 172 + memset(&kp, 0, sizeof(kp)); 173 + kp.pre_handler = kprobe_pre_handler; 174 + kp.addr = u->flags & UWM_KPROBE_ON_FTRACE ? 175 + (kprobe_opcode_t *)test_unwind_kprobed_func : 176 + (kprobe_opcode_t *)test_unwind_kprobed_insn; 177 + ret = register_kprobe(&kp); 178 + if (ret < 0) { 179 + kunit_err(current_test, "register_kprobe failed %d\n", ret); 180 + return -EINVAL; 181 + } 182 + 183 + test_unwind_kprobed_func(); 184 + unregister_kprobe(&kp); 185 + unwindme = NULL; 186 + return u->ret; 187 + } 188 + 189 + static void notrace __used test_unwind_ftrace_handler(unsigned long ip, 190 + unsigned long parent_ip, 191 + struct ftrace_ops *fops, 192 + struct ftrace_regs *fregs) 193 + { 194 + struct unwindme *u = (struct unwindme *)fregs->regs.gprs[2]; 195 + 196 + u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? &fregs->regs : NULL, 197 + (u->flags & UWM_SP) ? 
u->sp : 0); 198 + } 199 + 200 + static noinline int test_unwind_ftraced_func(struct unwindme *u) 201 + { 202 + return READ_ONCE(u)->ret; 203 + } 204 + 205 + static int test_unwind_ftrace(struct unwindme *u) 206 + { 207 + int ret; 208 + #ifdef CONFIG_DYNAMIC_FTRACE 209 + struct ftrace_ops *fops; 210 + 211 + fops = kunit_kzalloc(current_test, sizeof(*fops), GFP_KERNEL); 212 + fops->func = test_unwind_ftrace_handler; 213 + fops->flags = FTRACE_OPS_FL_DYNAMIC | 214 + FTRACE_OPS_FL_RECURSION | 215 + FTRACE_OPS_FL_SAVE_REGS | 216 + FTRACE_OPS_FL_PERMANENT; 217 + #else 218 + kunit_skip(current_test, "requires CONFIG_DYNAMIC_FTRACE"); 204 219 #endif 220 + 221 + ret = ftrace_set_filter_ip(fops, (unsigned long)test_unwind_ftraced_func, 0, 0); 222 + if (ret) { 223 + kunit_err(current_test, "failed to set ftrace filter (%d)\n", ret); 224 + return -1; 225 + } 226 + 227 + ret = register_ftrace_function(fops); 228 + if (!ret) { 229 + ret = test_unwind_ftraced_func(u); 230 + unregister_ftrace_function(fops); 231 + } else { 232 + kunit_err(current_test, "failed to register ftrace handler (%d)\n", ret); 233 + } 234 + 235 + ftrace_set_filter_ip(fops, (unsigned long)test_unwind_ftraced_func, 1, 0); 236 + return ret; 237 + } 205 238 206 239 /* This function may or may not appear in the backtrace. */ 207 240 static noinline int unwindme_func4(struct unwindme *u) ··· 302 157 wait_event(u->task_wq, kthread_should_park()); 303 158 kthread_parkme(); 304 159 return 0; 305 - #ifdef CONFIG_KPROBES 306 - } else if (u->flags & UWM_PGM) { 307 - struct kprobe kp; 308 - int ret; 309 - 310 - unwindme = u; 311 - memset(&kp, 0, sizeof(kp)); 312 - kp.symbol_name = "do_report_trap"; 313 - kp.pre_handler = pgm_pre_handler; 314 - ret = register_kprobe(&kp); 315 - if (ret < 0) { 316 - kunit_err(current_test, "register_kprobe failed %d\n", ret); 317 - return -EINVAL; 318 - } 319 - 320 - /* 321 - * Trigger operation exception; use insn notation to bypass 322 - * llvm's integrated assembler sanity checks. 
323 - */ 324 - asm volatile( 325 - " .insn e,0x0000\n" /* illegal opcode */ 326 - "0: nopr %%r7\n" 327 - EX_TABLE(0b, 0b) 328 - :); 329 - 330 - unregister_kprobe(&kp); 331 - unwindme = NULL; 332 - return u->ret; 333 - #endif 160 + } else if (u->flags & (UWM_PGM | UWM_KPROBE_ON_FTRACE)) { 161 + return test_unwind_kprobe(u); 162 + } else if (u->flags & (UWM_KRETPROBE)) { 163 + return test_unwind_kretprobe(u); 164 + } else if (u->flags & UWM_FTRACE) { 165 + return test_unwind_ftrace(u); 334 166 } else { 335 167 struct pt_regs regs; 336 168 ··· 377 255 } 378 256 379 257 /* Spawns a task and passes it to test_unwind(). */ 380 - static int test_unwind_task(struct kunit *test, struct unwindme *u) 258 + static int test_unwind_task(struct unwindme *u) 381 259 { 382 260 struct task_struct *task; 383 261 int ret; ··· 392 270 */ 393 271 task = kthread_run(unwindme_func1, u, "%s", __func__); 394 272 if (IS_ERR(task)) { 395 - kunit_err(test, "kthread_run() failed\n"); 273 + kunit_err(current_test, "kthread_run() failed\n"); 396 274 return PTR_ERR(task); 397 275 } 398 276 /* ··· 415 293 /* 416 294 * Create required parameter list for tests 417 295 */ 296 + #define TEST_WITH_FLAGS(f) { .flags = f, .name = #f } 418 297 static const struct test_params param_list[] = { 419 - {.flags = UWM_DEFAULT, .name = "UWM_DEFAULT"}, 420 - {.flags = UWM_SP, .name = "UWM_SP"}, 421 - {.flags = UWM_REGS, .name = "UWM_REGS"}, 422 - {.flags = UWM_SWITCH_STACK, 423 - .name = "UWM_SWITCH_STACK"}, 424 - {.flags = UWM_SP | UWM_REGS, 425 - .name = "UWM_SP | UWM_REGS"}, 426 - {.flags = UWM_CALLER | UWM_SP, 427 - .name = "WM_CALLER | UWM_SP"}, 428 - {.flags = UWM_CALLER | UWM_SP | UWM_REGS, 429 - .name = "UWM_CALLER | UWM_SP | UWM_REGS"}, 430 - {.flags = UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK, 431 - .name = "UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK"}, 432 - {.flags = UWM_THREAD, .name = "UWM_THREAD"}, 433 - {.flags = UWM_THREAD | UWM_SP, 434 - .name = "UWM_THREAD | UWM_SP"}, 435 - {.flags 
= UWM_THREAD | UWM_CALLER | UWM_SP, 436 - .name = "UWM_THREAD | UWM_CALLER | UWM_SP"}, 437 - {.flags = UWM_IRQ, .name = "UWM_IRQ"}, 438 - {.flags = UWM_IRQ | UWM_SWITCH_STACK, 439 - .name = "UWM_IRQ | UWM_SWITCH_STACK"}, 440 - {.flags = UWM_IRQ | UWM_SP, 441 - .name = "UWM_IRQ | UWM_SP"}, 442 - {.flags = UWM_IRQ | UWM_REGS, 443 - .name = "UWM_IRQ | UWM_REGS"}, 444 - {.flags = UWM_IRQ | UWM_SP | UWM_REGS, 445 - .name = "UWM_IRQ | UWM_SP | UWM_REGS"}, 446 - {.flags = UWM_IRQ | UWM_CALLER | UWM_SP, 447 - .name = "UWM_IRQ | UWM_CALLER | UWM_SP"}, 448 - {.flags = UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS, 449 - .name = "UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS"}, 450 - {.flags = UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK, 451 - .name = "UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK"}, 452 - #ifdef CONFIG_KPROBES 453 - {.flags = UWM_PGM, .name = "UWM_PGM"}, 454 - {.flags = UWM_PGM | UWM_SP, 455 - .name = "UWM_PGM | UWM_SP"}, 456 - {.flags = UWM_PGM | UWM_REGS, 457 - .name = "UWM_PGM | UWM_REGS"}, 458 - {.flags = UWM_PGM | UWM_SP | UWM_REGS, 459 - .name = "UWM_PGM | UWM_SP | UWM_REGS"}, 460 - #endif 298 + TEST_WITH_FLAGS(UWM_DEFAULT), 299 + TEST_WITH_FLAGS(UWM_SP), 300 + TEST_WITH_FLAGS(UWM_REGS), 301 + TEST_WITH_FLAGS(UWM_SWITCH_STACK), 302 + TEST_WITH_FLAGS(UWM_SP | UWM_REGS), 303 + TEST_WITH_FLAGS(UWM_CALLER | UWM_SP), 304 + TEST_WITH_FLAGS(UWM_CALLER | UWM_SP | UWM_REGS), 305 + TEST_WITH_FLAGS(UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK), 306 + TEST_WITH_FLAGS(UWM_THREAD), 307 + TEST_WITH_FLAGS(UWM_THREAD | UWM_SP), 308 + TEST_WITH_FLAGS(UWM_THREAD | UWM_CALLER | UWM_SP), 309 + TEST_WITH_FLAGS(UWM_IRQ), 310 + TEST_WITH_FLAGS(UWM_IRQ | UWM_SWITCH_STACK), 311 + TEST_WITH_FLAGS(UWM_IRQ | UWM_SP), 312 + TEST_WITH_FLAGS(UWM_IRQ | UWM_REGS), 313 + TEST_WITH_FLAGS(UWM_IRQ | UWM_SP | UWM_REGS), 314 + TEST_WITH_FLAGS(UWM_IRQ | UWM_CALLER | UWM_SP), 315 + TEST_WITH_FLAGS(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS), 316 + TEST_WITH_FLAGS(UWM_IRQ 
| UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK), 317 + TEST_WITH_FLAGS(UWM_PGM), 318 + TEST_WITH_FLAGS(UWM_PGM | UWM_SP), 319 + TEST_WITH_FLAGS(UWM_PGM | UWM_REGS), 320 + TEST_WITH_FLAGS(UWM_PGM | UWM_SP | UWM_REGS), 321 + TEST_WITH_FLAGS(UWM_KPROBE_ON_FTRACE), 322 + TEST_WITH_FLAGS(UWM_KPROBE_ON_FTRACE | UWM_SP), 323 + TEST_WITH_FLAGS(UWM_KPROBE_ON_FTRACE | UWM_REGS), 324 + TEST_WITH_FLAGS(UWM_KPROBE_ON_FTRACE | UWM_SP | UWM_REGS), 325 + TEST_WITH_FLAGS(UWM_FTRACE), 326 + TEST_WITH_FLAGS(UWM_FTRACE | UWM_SP), 327 + TEST_WITH_FLAGS(UWM_FTRACE | UWM_REGS), 328 + TEST_WITH_FLAGS(UWM_FTRACE | UWM_SP | UWM_REGS), 329 + TEST_WITH_FLAGS(UWM_KRETPROBE), 330 + TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP), 331 + TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_REGS), 332 + TEST_WITH_FLAGS(UWM_KRETPROBE | UWM_SP | UWM_REGS), 461 333 }; 462 334 463 335 /* ··· 476 360 params = (const struct test_params *)test->param_value; 477 361 u.flags = params->flags; 478 362 if (u.flags & UWM_THREAD) 479 - KUNIT_EXPECT_EQ(test, 0, test_unwind_task(test, &u)); 363 + KUNIT_EXPECT_EQ(test, 0, test_unwind_task(&u)); 480 364 else if (u.flags & UWM_IRQ) 481 365 KUNIT_EXPECT_EQ(test, 0, test_unwind_irq(&u)); 482 366 else
+14 -169
arch/s390/lib/uaccess.c
··· 8 8 * Gerald Schaefer (gerald.schaefer@de.ibm.com) 9 9 */ 10 10 11 - #include <linux/jump_label.h> 12 11 #include <linux/uaccess.h> 13 12 #include <linux/export.h> 14 - #include <linux/errno.h> 15 13 #include <linux/mm.h> 16 - #include <asm/mmu_context.h> 17 - #include <asm/facility.h> 14 + #include <asm/asm-extable.h> 18 15 19 16 #ifdef CONFIG_DEBUG_ENTRY 20 17 void debug_user_asce(int exit) ··· 31 34 } 32 35 #endif /*CONFIG_DEBUG_ENTRY */ 33 36 34 - #ifndef CONFIG_HAVE_MARCH_Z10_FEATURES 35 - static DEFINE_STATIC_KEY_FALSE(have_mvcos); 36 - 37 - static int __init uaccess_init(void) 38 - { 39 - if (test_facility(27)) 40 - static_branch_enable(&have_mvcos); 41 - return 0; 42 - } 43 - early_initcall(uaccess_init); 44 - 45 - static inline int copy_with_mvcos(void) 46 - { 47 - if (static_branch_likely(&have_mvcos)) 48 - return 1; 49 - return 0; 50 - } 51 - #else 52 - static inline int copy_with_mvcos(void) 53 - { 54 - return 1; 55 - } 56 - #endif 57 - 58 - static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr, 59 - unsigned long size, unsigned long key) 37 + static unsigned long raw_copy_from_user_key(void *to, const void __user *from, 38 + unsigned long size, unsigned long key) 60 39 { 61 40 unsigned long tmp1, tmp2; 62 41 union oac spec = { ··· 45 72 tmp1 = -4096UL; 46 73 asm volatile( 47 74 " lr 0,%[spec]\n" 48 - "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n" 75 + "0: mvcos 0(%2),0(%1),%0\n" 49 76 "6: jz 4f\n" 50 77 "1: algr %0,%3\n" 51 78 " slgr %1,%3\n" ··· 56 83 " slgr %4,%1\n" 57 84 " clgr %0,%4\n" /* copy crosses next page boundary? 
*/ 58 85 " jnh 5f\n" 59 - "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n" 86 + "3: mvcos 0(%2),0(%1),%4\n" 60 87 "7: slgr %0,%4\n" 61 88 " j 5f\n" 62 89 "4: slgr %0,%0\n" 63 90 "5:\n" 64 91 EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) 65 - : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) 92 + : "+a" (size), "+a" (from), "+a" (to), "+a" (tmp1), "=a" (tmp2) 66 93 : [spec] "d" (spec.val) 67 94 : "cc", "memory", "0"); 68 95 return size; 69 - } 70 - 71 - static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr, 72 - unsigned long size, unsigned long key) 73 - { 74 - unsigned long tmp1, tmp2; 75 - 76 - tmp1 = -256UL; 77 - asm volatile( 78 - " sacf 0\n" 79 - "0: mvcp 0(%0,%2),0(%1),%[key]\n" 80 - "7: jz 5f\n" 81 - "1: algr %0,%3\n" 82 - " la %1,256(%1)\n" 83 - " la %2,256(%2)\n" 84 - "2: mvcp 0(%0,%2),0(%1),%[key]\n" 85 - "8: jnz 1b\n" 86 - " j 5f\n" 87 - "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ 88 - " lghi %3,-4096\n" 89 - " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ 90 - " slgr %4,%1\n" 91 - " clgr %0,%4\n" /* copy crosses next page boundary? 
*/ 92 - " jnh 6f\n" 93 - "4: mvcp 0(%4,%2),0(%1),%[key]\n" 94 - "9: slgr %0,%4\n" 95 - " j 6f\n" 96 - "5: slgr %0,%0\n" 97 - "6: sacf 768\n" 98 - EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) 99 - EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b) 100 - : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) 101 - : [key] "d" (key << 4) 102 - : "cc", "memory"); 103 - return size; 104 - } 105 - 106 - static unsigned long raw_copy_from_user_key(void *to, const void __user *from, 107 - unsigned long n, unsigned long key) 108 - { 109 - if (copy_with_mvcos()) 110 - return copy_from_user_mvcos(to, from, n, key); 111 - return copy_from_user_mvcp(to, from, n, key); 112 96 } 113 97 114 98 unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) ··· 90 160 } 91 161 EXPORT_SYMBOL(_copy_from_user_key); 92 162 93 - static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x, 94 - unsigned long size, unsigned long key) 163 + static unsigned long raw_copy_to_user_key(void __user *to, const void *from, 164 + unsigned long size, unsigned long key) 95 165 { 96 166 unsigned long tmp1, tmp2; 97 167 union oac spec = { ··· 104 174 tmp1 = -4096UL; 105 175 asm volatile( 106 176 " lr 0,%[spec]\n" 107 - "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" 177 + "0: mvcos 0(%1),0(%2),%0\n" 108 178 "6: jz 4f\n" 109 179 "1: algr %0,%3\n" 110 180 " slgr %1,%3\n" ··· 115 185 " slgr %4,%1\n" 116 186 " clgr %0,%4\n" /* copy crosses next page boundary? 
*/ 117 187 " jnh 5f\n" 118 - "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n" 188 + "3: mvcos 0(%1),0(%2),%4\n" 119 189 "7: slgr %0,%4\n" 120 190 " j 5f\n" 121 191 "4: slgr %0,%0\n" 122 192 "5:\n" 123 193 EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) 124 - : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) 194 + : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2) 125 195 : [spec] "d" (spec.val) 126 196 : "cc", "memory", "0"); 127 197 return size; 128 - } 129 - 130 - static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x, 131 - unsigned long size, unsigned long key) 132 - { 133 - unsigned long tmp1, tmp2; 134 - 135 - tmp1 = -256UL; 136 - asm volatile( 137 - " sacf 0\n" 138 - "0: mvcs 0(%0,%1),0(%2),%[key]\n" 139 - "7: jz 5f\n" 140 - "1: algr %0,%3\n" 141 - " la %1,256(%1)\n" 142 - " la %2,256(%2)\n" 143 - "2: mvcs 0(%0,%1),0(%2),%[key]\n" 144 - "8: jnz 1b\n" 145 - " j 5f\n" 146 - "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ 147 - " lghi %3,-4096\n" 148 - " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ 149 - " slgr %4,%1\n" 150 - " clgr %0,%4\n" /* copy crosses next page boundary? 
*/ 151 - " jnh 6f\n" 152 - "4: mvcs 0(%4,%1),0(%2),%[key]\n" 153 - "9: slgr %0,%4\n" 154 - " j 6f\n" 155 - "5: slgr %0,%0\n" 156 - "6: sacf 768\n" 157 - EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) 158 - EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b) 159 - : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) 160 - : [key] "d" (key << 4) 161 - : "cc", "memory"); 162 - return size; 163 - } 164 - 165 - static unsigned long raw_copy_to_user_key(void __user *to, const void *from, 166 - unsigned long n, unsigned long key) 167 - { 168 - if (copy_with_mvcos()) 169 - return copy_to_user_mvcos(to, from, n, key); 170 - return copy_to_user_mvcs(to, from, n, key); 171 198 } 172 199 173 200 unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) ··· 144 257 } 145 258 EXPORT_SYMBOL(_copy_to_user_key); 146 259 147 - static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size) 260 + unsigned long __clear_user(void __user *to, unsigned long size) 148 261 { 149 262 unsigned long tmp1, tmp2; 150 263 union oac spec = { ··· 155 268 tmp1 = -4096UL; 156 269 asm volatile( 157 270 " lr 0,%[spec]\n" 158 - "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n" 271 + "0: mvcos 0(%1),0(%4),%0\n" 159 272 " jz 4f\n" 160 273 "1: algr %0,%2\n" 161 274 " slgr %1,%2\n" ··· 165 278 " slgr %3,%1\n" 166 279 " clgr %0,%3\n" /* copy crosses next page boundary? 
*/ 167 280 " jnh 5f\n" 168 - "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n" 281 + "3: mvcos 0(%1),0(%4),%3\n" 169 282 " slgr %0,%3\n" 170 283 " j 5f\n" 171 284 "4: slgr %0,%0\n" ··· 175 288 : "a" (empty_zero_page), [spec] "d" (spec.val) 176 289 : "cc", "memory", "0"); 177 290 return size; 178 - } 179 - 180 - static inline unsigned long clear_user_xc(void __user *to, unsigned long size) 181 - { 182 - unsigned long tmp1, tmp2; 183 - 184 - asm volatile( 185 - " sacf 256\n" 186 - " aghi %0,-1\n" 187 - " jo 5f\n" 188 - " bras %3,3f\n" 189 - " xc 0(1,%1),0(%1)\n" 190 - "0: aghi %0,257\n" 191 - " la %2,255(%1)\n" /* %2 = ptr + 255 */ 192 - " srl %2,12\n" 193 - " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */ 194 - " slgr %2,%1\n" 195 - " clgr %0,%2\n" /* clear crosses next page boundary? */ 196 - " jnh 5f\n" 197 - " aghi %2,-1\n" 198 - "1: ex %2,0(%3)\n" 199 - " aghi %2,1\n" 200 - " slgr %0,%2\n" 201 - " j 5f\n" 202 - "2: xc 0(256,%1),0(%1)\n" 203 - " la %1,256(%1)\n" 204 - "3: aghi %0,-256\n" 205 - " jnm 2b\n" 206 - "4: ex %0,0(%3)\n" 207 - "5: slgr %0,%0\n" 208 - "6: sacf 768\n" 209 - EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) 210 - : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2) 211 - : : "cc", "memory"); 212 - return size; 213 - } 214 - 215 - unsigned long __clear_user(void __user *to, unsigned long size) 216 - { 217 - if (copy_with_mvcos()) 218 - return clear_user_mvcos(to, size); 219 - return clear_user_xc(to, size); 220 291 } 221 292 EXPORT_SYMBOL(__clear_user);
+1 -1
arch/s390/mm/Makefile
··· 4 4 # 5 5 6 6 obj-y := init.o fault.o extmem.o mmap.o vmem.o maccess.o 7 - obj-y += page-states.o pageattr.o pgtable.o pgalloc.o 7 + obj-y += page-states.o pageattr.o pgtable.o pgalloc.o extable.o 8 8 9 9 obj-$(CONFIG_CMM) += cmm.o 10 10 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+50
arch/s390/mm/extable.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/extable.h> 4 + #include <linux/errno.h> 5 + #include <linux/panic.h> 6 + #include <asm/asm-extable.h> 7 + #include <asm/extable.h> 8 + 9 + const struct exception_table_entry *s390_search_extables(unsigned long addr) 10 + { 11 + const struct exception_table_entry *fixup; 12 + size_t num; 13 + 14 + fixup = search_exception_tables(addr); 15 + if (fixup) 16 + return fixup; 17 + num = __stop_amode31_ex_table - __start_amode31_ex_table; 18 + return search_extable(__start_amode31_ex_table, num, addr); 19 + } 20 + 21 + static bool ex_handler_fixup(const struct exception_table_entry *ex, struct pt_regs *regs) 22 + { 23 + regs->psw.addr = extable_fixup(ex); 24 + return true; 25 + } 26 + 27 + static bool ex_handler_uaccess(const struct exception_table_entry *ex, struct pt_regs *regs) 28 + { 29 + regs->gprs[ex->data] = -EFAULT; 30 + regs->psw.addr = extable_fixup(ex); 31 + return true; 32 + } 33 + 34 + bool fixup_exception(struct pt_regs *regs) 35 + { 36 + const struct exception_table_entry *ex; 37 + 38 + ex = s390_search_extables(instruction_pointer(regs)); 39 + if (!ex) 40 + return false; 41 + switch (ex->type) { 42 + case EX_TYPE_FIXUP: 43 + return ex_handler_fixup(ex, regs); 44 + case EX_TYPE_BPF: 45 + return ex_handler_bpf(ex, regs); 46 + case EX_TYPE_UACCESS: 47 + return ex_handler_uaccess(ex, regs); 48 + } 49 + panic("invalid exception table entry"); 50 + }
+2 -18
arch/s390/mm/fault.c
··· 32 32 #include <linux/uaccess.h> 33 33 #include <linux/hugetlb.h> 34 34 #include <linux/kfence.h> 35 + #include <asm/asm-extable.h> 35 36 #include <asm/asm-offsets.h> 36 37 #include <asm/diag.h> 37 38 #include <asm/gmap.h> ··· 228 227 (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK)); 229 228 } 230 229 231 - const struct exception_table_entry *s390_search_extables(unsigned long addr) 232 - { 233 - const struct exception_table_entry *fixup; 234 - 235 - fixup = search_extable(__start_amode31_ex_table, 236 - __stop_amode31_ex_table - __start_amode31_ex_table, 237 - addr); 238 - if (!fixup) 239 - fixup = search_exception_tables(addr); 240 - return fixup; 241 - } 242 - 243 230 static noinline void do_no_context(struct pt_regs *regs) 244 231 { 245 - const struct exception_table_entry *fixup; 246 - 247 - /* Are we prepared to handle this kernel fault? */ 248 - fixup = s390_search_extables(regs->psw.addr); 249 - if (fixup && ex_handle(fixup, regs)) 232 + if (fixup_exception(regs)) 250 233 return; 251 - 252 234 /* 253 235 * Oops. The kernel tried to access some bad page. We'll have to 254 236 * terminate things with extreme prejudice.
+11 -11
arch/s390/mm/gmap.c
··· 974 974 return -EAGAIN; 975 975 976 976 if (prot == PROT_NONE && !pmd_i) { 977 - pmd_val(new) |= _SEGMENT_ENTRY_INVALID; 977 + new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID)); 978 978 gmap_pmdp_xchg(gmap, pmdp, new, gaddr); 979 979 } 980 980 981 981 if (prot == PROT_READ && !pmd_p) { 982 - pmd_val(new) &= ~_SEGMENT_ENTRY_INVALID; 983 - pmd_val(new) |= _SEGMENT_ENTRY_PROTECT; 982 + new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID)); 983 + new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_PROTECT)); 984 984 gmap_pmdp_xchg(gmap, pmdp, new, gaddr); 985 985 } 986 986 987 987 if (bits & GMAP_NOTIFY_MPROT) 988 - pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN; 988 + set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN))); 989 989 990 990 /* Shadow GMAP protection needs split PMDs */ 991 991 if (bits & GMAP_NOTIFY_SHADOW) ··· 1151 1151 address = pte_val(pte) & PAGE_MASK; 1152 1152 address += gaddr & ~PAGE_MASK; 1153 1153 *val = *(unsigned long *) address; 1154 - pte_val(*ptep) |= _PAGE_YOUNG; 1154 + set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_YOUNG))); 1155 1155 /* Do *NOT* clear the _PAGE_INVALID bit! 
*/ 1156 1156 rc = 0; 1157 1157 } ··· 1278 1278 static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr) 1279 1279 { 1280 1280 asm volatile( 1281 - " .insn rrf,0xb98e0000,%0,%1,0,0" 1281 + " idte %0,0,%1" 1282 1282 : : "a" (asce), "a" (vaddr) : "cc", "memory"); 1283 1283 } 1284 1284 ··· 2275 2275 static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp, 2276 2276 unsigned long gaddr) 2277 2277 { 2278 - pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN; 2278 + set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN))); 2279 2279 gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1); 2280 2280 } 2281 2281 ··· 2294 2294 { 2295 2295 gaddr &= HPAGE_MASK; 2296 2296 pmdp_notify_gmap(gmap, pmdp, gaddr); 2297 - pmd_val(new) &= ~_SEGMENT_ENTRY_GMAP_IN; 2297 + new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_GMAP_IN)); 2298 2298 if (MACHINE_HAS_TLB_GUEST) 2299 2299 __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce, 2300 2300 IDTE_GLOBAL); ··· 2302 2302 __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL); 2303 2303 else 2304 2304 __pmdp_csp(pmdp); 2305 - *pmdp = new; 2305 + set_pmd(pmdp, new); 2306 2306 } 2307 2307 2308 2308 static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr, ··· 2324 2324 _SEGMENT_ENTRY_GMAP_UC)); 2325 2325 if (purge) 2326 2326 __pmdp_csp(pmdp); 2327 - pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; 2327 + set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); 2328 2328 } 2329 2329 spin_unlock(&gmap->guest_table_lock); 2330 2330 } ··· 2447 2447 return false; 2448 2448 2449 2449 /* Clear UC indication and reset protection */ 2450 - pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC; 2450 + set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_UC))); 2451 2451 gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0); 2452 2452 return true; 2453 2453 }
+14 -22
arch/s390/mm/hugetlbpage.c
··· 73 73 74 74 static inline pte_t __rste_to_pte(unsigned long rste) 75 75 { 76 + unsigned long pteval; 76 77 int present; 77 - pte_t pte; 78 78 79 79 if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 80 80 present = pud_present(__pud(rste)); ··· 102 102 * u unused, l large 103 103 */ 104 104 if (present) { 105 - pte_val(pte) = rste & _SEGMENT_ENTRY_ORIGIN_LARGE; 106 - pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT; 107 - pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_READ, 108 - _PAGE_READ); 109 - pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE, 110 - _PAGE_WRITE); 111 - pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID, 112 - _PAGE_INVALID); 113 - pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT, 114 - _PAGE_PROTECT); 115 - pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY, 116 - _PAGE_DIRTY); 117 - pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG, 118 - _PAGE_YOUNG); 105 + pteval = rste & _SEGMENT_ENTRY_ORIGIN_LARGE; 106 + pteval |= _PAGE_LARGE | _PAGE_PRESENT; 107 + pteval |= move_set_bit(rste, _SEGMENT_ENTRY_READ, _PAGE_READ); 108 + pteval |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE, _PAGE_WRITE); 109 + pteval |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID, _PAGE_INVALID); 110 + pteval |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT, _PAGE_PROTECT); 111 + pteval |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY, _PAGE_DIRTY); 112 + pteval |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG, _PAGE_YOUNG); 119 113 #ifdef CONFIG_MEM_SOFT_DIRTY 120 - pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY, 121 - _PAGE_SOFT_DIRTY); 114 + pteval |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY, _PAGE_SOFT_DIRTY); 122 115 #endif 123 - pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC, 124 - _PAGE_NOEXEC); 116 + pteval |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC, _PAGE_NOEXEC); 125 117 } else 126 - pte_val(pte) = _PAGE_INVALID; 127 - return pte; 118 + pteval = _PAGE_INVALID; 119 + return __pte(pteval); 128 120 } 129 121 130 122 
static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste) ··· 160 168 rste |= _SEGMENT_ENTRY_LARGE; 161 169 162 170 clear_huge_pte_skeys(mm, rste); 163 - pte_val(*ptep) = rste; 171 + set_pte(ptep, __pte(rste)); 164 172 } 165 173 166 174 pte_t huge_ptep_get(pte_t *ptep)
+4 -4
arch/s390/mm/kasan_init.c
··· 175 175 page = kasan_early_alloc_segment(); 176 176 memset(page, 0, _SEGMENT_SIZE); 177 177 } 178 - pmd_val(*pm_dir) = __pa(page) | sgt_prot; 178 + set_pmd(pm_dir, __pmd(__pa(page) | sgt_prot)); 179 179 address = (address + PMD_SIZE) & PMD_MASK; 180 180 continue; 181 181 } ··· 194 194 switch (mode) { 195 195 case POPULATE_ONE2ONE: 196 196 page = (void *)address; 197 - pte_val(*pt_dir) = __pa(page) | pgt_prot; 197 + set_pte(pt_dir, __pte(__pa(page) | pgt_prot)); 198 198 break; 199 199 case POPULATE_MAP: 200 200 page = kasan_early_alloc_pages(0); 201 201 memset(page, 0, PAGE_SIZE); 202 - pte_val(*pt_dir) = __pa(page) | pgt_prot; 202 + set_pte(pt_dir, __pte(__pa(page) | pgt_prot)); 203 203 break; 204 204 case POPULATE_ZERO_SHADOW: 205 205 page = kasan_early_shadow_page; 206 - pte_val(*pt_dir) = __pa(page) | pgt_prot_zero; 206 + set_pte(pt_dir, __pte(__pa(page) | pgt_prot_zero)); 207 207 break; 208 208 case POPULATE_SHALLOW: 209 209 /* should never happen */
+12 -12
arch/s390/mm/maccess.c
··· 4 4 * 5 5 * Copyright IBM Corp. 2009, 2015 6 6 * 7 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 8 - * 9 7 */ 10 8 11 9 #include <linux/uaccess.h> ··· 12 14 #include <linux/errno.h> 13 15 #include <linux/gfp.h> 14 16 #include <linux/cpu.h> 17 + #include <asm/asm-extable.h> 15 18 #include <asm/ctl_reg.h> 16 19 #include <asm/io.h> 17 20 #include <asm/stacktrace.h> ··· 122 123 /* 123 124 * Copy memory in real mode (kernel to kernel) 124 125 */ 125 - int memcpy_real(void *dest, void *src, size_t count) 126 + int memcpy_real(void *dest, unsigned long src, size_t count) 126 127 { 127 128 unsigned long _dest = (unsigned long)dest; 128 129 unsigned long _src = (unsigned long)src; ··· 174 175 /* 175 176 * Copy memory from kernel (real) to user (virtual) 176 177 */ 177 - int copy_to_user_real(void __user *dest, void *src, unsigned long count) 178 + int copy_to_user_real(void __user *dest, unsigned long src, unsigned long count) 178 179 { 179 180 int offs = 0, size, rc; 180 181 char *buf; ··· 200 201 /* 201 202 * Check if physical address is within prefix or zero page 202 203 */ 203 - static int is_swapped(unsigned long addr) 204 + static int is_swapped(phys_addr_t addr) 204 205 { 205 - unsigned long lc; 206 + phys_addr_t lc; 206 207 int cpu; 207 208 208 209 if (addr < sizeof(struct lowcore)) 209 210 return 1; 210 211 for_each_online_cpu(cpu) { 211 - lc = (unsigned long) lowcore_ptr[cpu]; 212 + lc = virt_to_phys(lowcore_ptr[cpu]); 212 213 if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc) 213 214 continue; 214 215 return 1; ··· 224 225 */ 225 226 void *xlate_dev_mem_ptr(phys_addr_t addr) 226 227 { 227 - void *bounce = (void *) addr; 228 + void *ptr = phys_to_virt(addr); 229 + void *bounce = ptr; 228 230 unsigned long size; 229 231 230 232 cpus_read_lock(); ··· 234 234 size = PAGE_SIZE - (addr & ~PAGE_MASK); 235 235 bounce = (void *) __get_free_page(GFP_ATOMIC); 236 236 if (bounce) 237 - memcpy_absolute(bounce, (void *) addr, size); 237 + 
memcpy_absolute(bounce, ptr, size); 238 238 } 239 239 preempt_enable(); 240 240 cpus_read_unlock(); ··· 244 244 /* 245 245 * Free converted buffer for /dev/mem access (if necessary) 246 246 */ 247 - void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf) 247 + void unxlate_dev_mem_ptr(phys_addr_t addr, void *ptr) 248 248 { 249 - if ((void *) addr != buf) 250 - free_page((unsigned long) buf); 249 + if (addr != virt_to_phys(ptr)) 250 + free_page((unsigned long)ptr); 251 251 }
+1
arch/s390/mm/page-states.c
··· 14 14 #include <linux/memblock.h> 15 15 #include <linux/gfp.h> 16 16 #include <linux/init.h> 17 + #include <asm/asm-extable.h> 17 18 #include <asm/facility.h> 18 19 #include <asm/page-states.h> 19 20
+17 -16
arch/s390/mm/pageattr.c
··· 98 98 else if (flags & SET_MEMORY_RW) 99 99 new = pte_mkwrite(pte_mkdirty(new)); 100 100 if (flags & SET_MEMORY_NX) 101 - pte_val(new) |= _PAGE_NOEXEC; 101 + new = set_pte_bit(new, __pgprot(_PAGE_NOEXEC)); 102 102 else if (flags & SET_MEMORY_X) 103 - pte_val(new) &= ~_PAGE_NOEXEC; 103 + new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC)); 104 104 pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE); 105 105 ptep++; 106 106 addr += PAGE_SIZE; ··· 127 127 prot &= ~_PAGE_NOEXEC; 128 128 ptep = pt_dir; 129 129 for (i = 0; i < PTRS_PER_PTE; i++) { 130 - pte_val(*ptep) = pte_addr | prot; 130 + set_pte(ptep, __pte(pte_addr | prot)); 131 131 pte_addr += PAGE_SIZE; 132 132 ptep++; 133 133 } 134 - pmd_val(new) = __pa(pt_dir) | _SEGMENT_ENTRY; 134 + new = __pmd(__pa(pt_dir) | _SEGMENT_ENTRY); 135 135 pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT); 136 136 update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE); 137 137 update_page_count(PG_DIRECT_MAP_1M, -1); ··· 148 148 else if (flags & SET_MEMORY_RW) 149 149 new = pmd_mkwrite(pmd_mkdirty(new)); 150 150 if (flags & SET_MEMORY_NX) 151 - pmd_val(new) |= _SEGMENT_ENTRY_NOEXEC; 151 + new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC)); 152 152 else if (flags & SET_MEMORY_X) 153 - pmd_val(new) &= ~_SEGMENT_ENTRY_NOEXEC; 153 + new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC)); 154 154 pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT); 155 155 } 156 156 ··· 208 208 prot &= ~_SEGMENT_ENTRY_NOEXEC; 209 209 pmdp = pm_dir; 210 210 for (i = 0; i < PTRS_PER_PMD; i++) { 211 - pmd_val(*pmdp) = pmd_addr | prot; 211 + set_pmd(pmdp, __pmd(pmd_addr | prot)); 212 212 pmd_addr += PMD_SIZE; 213 213 pmdp++; 214 214 } 215 - pud_val(new) = __pa(pm_dir) | _REGION3_ENTRY; 215 + new = __pud(__pa(pm_dir) | _REGION3_ENTRY); 216 216 pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3); 217 217 update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD); 218 218 
update_page_count(PG_DIRECT_MAP_2G, -1); ··· 229 229 else if (flags & SET_MEMORY_RW) 230 230 new = pud_mkwrite(pud_mkdirty(new)); 231 231 if (flags & SET_MEMORY_NX) 232 - pud_val(new) |= _REGION_ENTRY_NOEXEC; 232 + new = set_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC)); 233 233 else if (flags & SET_MEMORY_X) 234 - pud_val(new) &= ~_REGION_ENTRY_NOEXEC; 234 + new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC)); 235 235 pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3); 236 236 } 237 237 ··· 347 347 void __kernel_map_pages(struct page *page, int numpages, int enable) 348 348 { 349 349 unsigned long address; 350 + pte_t *ptep, pte; 350 351 int nr, i, j; 351 - pte_t *pte; 352 352 353 353 for (i = 0; i < numpages;) { 354 354 address = (unsigned long)page_to_virt(page + i); 355 - pte = virt_to_kpte(address); 356 - nr = (unsigned long)pte >> ilog2(sizeof(long)); 355 + ptep = virt_to_kpte(address); 356 + nr = (unsigned long)ptep >> ilog2(sizeof(long)); 357 357 nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1)); 358 358 nr = min(numpages - i, nr); 359 359 if (enable) { 360 360 for (j = 0; j < nr; j++) { 361 - pte_val(*pte) &= ~_PAGE_INVALID; 361 + pte = clear_pte_bit(*ptep, __pgprot(_PAGE_INVALID)); 362 + set_pte(ptep, pte); 362 363 address += PAGE_SIZE; 363 - pte++; 364 + ptep++; 364 365 } 365 366 } else { 366 - ipte_range(pte, address, nr); 367 + ipte_range(ptep, address, nr); 367 368 } 368 369 i += nr; 369 370 }
+4 -4
arch/s390/mm/pgalloc.c
··· 53 53 54 54 unsigned long *crst_table_alloc(struct mm_struct *mm) 55 55 { 56 - struct page *page = alloc_pages(GFP_KERNEL, 2); 56 + struct page *page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER); 57 57 58 58 if (!page) 59 59 return NULL; 60 - arch_set_page_dat(page, 2); 60 + arch_set_page_dat(page, CRST_ALLOC_ORDER); 61 61 return (unsigned long *) page_to_virt(page); 62 62 } 63 63 64 64 void crst_table_free(struct mm_struct *mm, unsigned long *table) 65 65 { 66 - free_pages((unsigned long) table, 2); 66 + free_pages((unsigned long)table, CRST_ALLOC_ORDER); 67 67 } 68 68 69 69 static void __crst_table_upgrade(void *arg) ··· 403 403 404 404 switch (half) { 405 405 case 0x00U: /* pmd, pud, or p4d */ 406 - free_pages((unsigned long) table, 2); 406 + free_pages((unsigned long)table, CRST_ALLOC_ORDER); 407 407 return; 408 408 case 0x01U: /* lower 2K of a 4K page table */ 409 409 case 0x02U: /* higher 2K of a 4K page table */
+22 -22
arch/s390/mm/pgtable.c
··· 115 115 atomic_inc(&mm->context.flush_count); 116 116 if (cpumask_equal(&mm->context.cpu_attach_mask, 117 117 cpumask_of(smp_processor_id()))) { 118 - pte_val(*ptep) |= _PAGE_INVALID; 118 + set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_INVALID))); 119 119 mm->context.flush_mm = 1; 120 120 } else 121 121 ptep_ipte_global(mm, addr, ptep, nodat); ··· 224 224 * Without enhanced suppression-on-protection force 225 225 * the dirty bit on for all writable ptes. 226 226 */ 227 - pte_val(entry) |= _PAGE_DIRTY; 228 - pte_val(entry) &= ~_PAGE_PROTECT; 227 + entry = set_pte_bit(entry, __pgprot(_PAGE_DIRTY)); 228 + entry = clear_pte_bit(entry, __pgprot(_PAGE_PROTECT)); 229 229 } 230 230 if (!(pte_val(entry) & _PAGE_PROTECT)) 231 231 /* This pte allows write access, set user-dirty */ 232 232 pgste_val(pgste) |= PGSTE_UC_BIT; 233 233 } 234 234 #endif 235 - *ptep = entry; 235 + set_pte(ptep, entry); 236 236 return pgste; 237 237 } 238 238 ··· 275 275 pgste = pgste_update_all(old, pgste, mm); 276 276 if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) == 277 277 _PGSTE_GPS_USAGE_UNUSED) 278 - pte_val(old) |= _PAGE_UNUSED; 278 + old = set_pte_bit(old, __pgprot(_PAGE_UNUSED)); 279 279 } 280 280 pgste = pgste_set_pte(ptep, pgste, new); 281 281 pgste_set_unlock(ptep, pgste); 282 282 } else { 283 - *ptep = new; 283 + set_pte(ptep, new); 284 284 } 285 285 return old; 286 286 } ··· 345 345 struct mm_struct *mm = vma->vm_mm; 346 346 347 347 if (!MACHINE_HAS_NX) 348 - pte_val(pte) &= ~_PAGE_NOEXEC; 348 + pte = clear_pte_bit(pte, __pgprot(_PAGE_NOEXEC)); 349 349 if (mm_has_pgste(mm)) { 350 350 pgste = pgste_get(ptep); 351 351 pgste_set_key(ptep, pgste, pte, mm); 352 352 pgste = pgste_set_pte(ptep, pgste, pte); 353 353 pgste_set_unlock(ptep, pgste); 354 354 } else { 355 - *ptep = pte; 355 + set_pte(ptep, pte); 356 356 } 357 357 preempt_enable(); 358 358 } ··· 417 417 atomic_inc(&mm->context.flush_count); 418 418 if (cpumask_equal(&mm->context.cpu_attach_mask, 419 419 
cpumask_of(smp_processor_id()))) { 420 - pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID; 420 + set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_INVALID))); 421 421 mm->context.flush_mm = 1; 422 422 if (mm_has_pgste(mm)) 423 423 gmap_pmdp_invalidate(mm, addr); ··· 469 469 470 470 preempt_disable(); 471 471 old = pmdp_flush_direct(mm, addr, pmdp); 472 - *pmdp = new; 472 + set_pmd(pmdp, new); 473 473 preempt_enable(); 474 474 return old; 475 475 } ··· 482 482 483 483 preempt_disable(); 484 484 old = pmdp_flush_lazy(mm, addr, pmdp); 485 - *pmdp = new; 485 + set_pmd(pmdp, new); 486 486 preempt_enable(); 487 487 return old; 488 488 } ··· 539 539 540 540 preempt_disable(); 541 541 old = pudp_flush_direct(mm, addr, pudp); 542 - *pudp = new; 542 + set_pud(pudp, new); 543 543 preempt_enable(); 544 544 return old; 545 545 } ··· 579 579 list_del(lh); 580 580 } 581 581 ptep = (pte_t *) pgtable; 582 - pte_val(*ptep) = _PAGE_INVALID; 582 + set_pte(ptep, __pte(_PAGE_INVALID)); 583 583 ptep++; 584 - pte_val(*ptep) = _PAGE_INVALID; 584 + set_pte(ptep, __pte(_PAGE_INVALID)); 585 585 return pgtable; 586 586 } 587 587 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ ··· 646 646 if (prot == PROT_NONE && !pte_i) { 647 647 ptep_flush_direct(mm, addr, ptep, nodat); 648 648 pgste = pgste_update_all(entry, pgste, mm); 649 - pte_val(entry) |= _PAGE_INVALID; 649 + entry = set_pte_bit(entry, __pgprot(_PAGE_INVALID)); 650 650 } 651 651 if (prot == PROT_READ && !pte_p) { 652 652 ptep_flush_direct(mm, addr, ptep, nodat); 653 - pte_val(entry) &= ~_PAGE_INVALID; 654 - pte_val(entry) |= _PAGE_PROTECT; 653 + entry = clear_pte_bit(entry, __pgprot(_PAGE_INVALID)); 654 + entry = set_pte_bit(entry, __pgprot(_PAGE_PROTECT)); 655 655 } 656 656 pgste_val(pgste) |= bit; 657 657 pgste = pgste_set_pte(ptep, pgste, entry); ··· 675 675 !(pte_val(pte) & _PAGE_PROTECT))) { 676 676 pgste_val(spgste) |= PGSTE_VSIE_BIT; 677 677 tpgste = pgste_get_lock(tptep); 678 - pte_val(tpte) = (pte_val(spte) & PAGE_MASK) | 679 - 
(pte_val(pte) & _PAGE_PROTECT); 678 + tpte = __pte((pte_val(spte) & PAGE_MASK) | 679 + (pte_val(pte) & _PAGE_PROTECT)); 680 680 /* don't touch the storage key - it belongs to parent pgste */ 681 681 tpgste = pgste_set_pte(tptep, tpgste, tpte); 682 682 pgste_set_unlock(tptep, tpgste); ··· 773 773 nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT); 774 774 ptep_ipte_global(mm, addr, ptep, nodat); 775 775 if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE)) 776 - pte_val(pte) |= _PAGE_PROTECT; 776 + pte = set_pte_bit(pte, __pgprot(_PAGE_PROTECT)); 777 777 else 778 - pte_val(pte) |= _PAGE_INVALID; 779 - *ptep = pte; 778 + pte = set_pte_bit(pte, __pgprot(_PAGE_INVALID)); 779 + set_pte(ptep, pte); 780 780 } 781 781 pgste_set_unlock(ptep, pgste); 782 782 return dirty;
+7 -12
arch/s390/mm/vmem.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 3 * Copyright IBM Corp. 2006 4 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> 5 4 */ 6 5 7 6 #include <linux/memory_hotplug.h> ··· 174 175 175 176 if (!new_page) 176 177 goto out; 177 - pte_val(*pte) = __pa(new_page) | prot; 178 + set_pte(pte, __pte(__pa(new_page) | prot)); 178 179 } else { 179 - pte_val(*pte) = __pa(addr) | prot; 180 + set_pte(pte, __pte(__pa(addr) | prot)); 180 181 } 181 182 } else { 182 183 continue; ··· 242 243 IS_ALIGNED(next, PMD_SIZE) && 243 244 MACHINE_HAS_EDAT1 && addr && direct && 244 245 !debug_pagealloc_enabled()) { 245 - pmd_val(*pmd) = __pa(addr) | prot; 246 + set_pmd(pmd, __pmd(__pa(addr) | prot)); 246 247 pages++; 247 248 continue; 248 249 } else if (!direct && MACHINE_HAS_EDAT1) { ··· 257 258 */ 258 259 new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE); 259 260 if (new_page) { 260 - pmd_val(*pmd) = __pa(new_page) | prot; 261 + set_pmd(pmd, __pmd(__pa(new_page) | prot)); 261 262 if (!IS_ALIGNED(addr, PMD_SIZE) || 262 263 !IS_ALIGNED(next, PMD_SIZE)) { 263 264 vmemmap_use_new_sub_pmd(addr, next); ··· 338 339 IS_ALIGNED(next, PUD_SIZE) && 339 340 MACHINE_HAS_EDAT2 && addr && direct && 340 341 !debug_pagealloc_enabled()) { 341 - pud_val(*pud) = __pa(addr) | prot; 342 + set_pud(pud, __pud(__pa(addr) | prot)); 342 343 pages++; 343 344 continue; 344 345 } ··· 584 585 __set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT, 585 586 SET_MEMORY_RO | SET_MEMORY_X); 586 587 587 - if (nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)) { 588 - /* 589 - * Lowcore must be executable for LPSWE 590 - * and expoline trampoline branch instructions. 591 - */ 588 + /* lowcore must be executable for LPSWE */ 589 + if (!static_key_enabled(&cpu_has_bear)) 592 590 set_memory_x(0, 1); 593 - } 594 591 595 592 pr_info("Write protected kernel read-only data: %luk\n", 596 593 (unsigned long)(__end_rodata - _stext) >> 10);
+16 -38
arch/s390/net/bpf_jit_comp.c
··· 7 7 * - HAVE_MARCH_Z196_FEATURES: laal, laalg 8 8 * - HAVE_MARCH_Z10_FEATURES: msfi, cgrj, clgrj 9 9 * - HAVE_MARCH_Z9_109_FEATURES: alfi, llilf, clfi, oilf, nilf 10 - * - PACK_STACK 11 10 * - 64BIT 12 11 * 13 12 * Copyright IBM Corp. 2012,2015 ··· 25 26 #include <linux/mm.h> 26 27 #include <linux/kernel.h> 27 28 #include <asm/cacheflush.h> 29 + #include <asm/extable.h> 28 30 #include <asm/dis.h> 29 31 #include <asm/facility.h> 30 32 #include <asm/nospec-branch.h> ··· 570 570 if (nospec_uses_trampoline()) { 571 571 jit->r14_thunk_ip = jit->prg; 572 572 /* Generate __s390_indirect_jump_r14 thunk */ 573 - if (test_facility(35)) { 574 - /* exrl %r0,.+10 */ 575 - EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); 576 - } else { 577 - /* larl %r1,.+14 */ 578 - EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14); 579 - /* ex 0,0(%r1) */ 580 - EMIT4_DISP(0x44000000, REG_0, REG_1, 0); 581 - } 573 + /* exrl %r0,.+10 */ 574 + EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); 582 575 /* j . */ 583 576 EMIT4_PCREL(0xa7f40000, 0); 584 577 } ··· 582 589 (is_first_pass(jit) || (jit->seen & SEEN_FUNC))) { 583 590 jit->r1_thunk_ip = jit->prg; 584 591 /* Generate __s390_indirect_jump_r1 thunk */ 585 - if (test_facility(35)) { 586 - /* exrl %r0,.+10 */ 587 - EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); 588 - /* j . */ 589 - EMIT4_PCREL(0xa7f40000, 0); 590 - /* br %r1 */ 591 - _EMIT2(0x07f1); 592 - } else { 593 - /* ex 0,S390_lowcore.br_r1_tampoline */ 594 - EMIT4_DISP(0x44000000, REG_0, REG_0, 595 - offsetof(struct lowcore, br_r1_trampoline)); 596 - /* j . */ 597 - EMIT4_PCREL(0xa7f40000, 0); 598 - } 592 + /* exrl %r0,.+10 */ 593 + EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); 594 + /* j . 
*/ 595 + EMIT4_PCREL(0xa7f40000, 0); 596 + /* br %r1 */ 597 + _EMIT2(0x07f1); 599 598 } 600 599 } 601 600 ··· 607 622 return insn[1] >> 4; 608 623 } 609 624 610 - static bool ex_handler_bpf(const struct exception_table_entry *x, 611 - struct pt_regs *regs) 625 + bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) 612 626 { 613 - int regno; 614 - u8 *insn; 615 - 616 627 regs->psw.addr = extable_fixup(x); 617 - insn = (u8 *)__rewind_psw(regs->psw, regs->int_code >> 16); 618 - regno = get_probe_mem_regno(insn); 619 - if (WARN_ON_ONCE(regno < 0)) 620 - /* JIT bug - unexpected instruction. */ 621 - return false; 622 - regs->gprs[regno] = 0; 628 + regs->gprs[x->data] = 0; 623 629 return true; 624 630 } 625 631 ··· 618 642 int probe_prg, int nop_prg) 619 643 { 620 644 struct exception_table_entry *ex; 645 + int reg, prg; 621 646 s64 delta; 622 647 u8 *insn; 623 - int prg; 624 648 int i; 625 649 626 650 if (!fp->aux->extable) 627 651 /* Do nothing during early JIT passes. */ 628 652 return 0; 629 653 insn = jit->prg_buf + probe_prg; 630 - if (WARN_ON_ONCE(get_probe_mem_regno(insn) < 0)) 654 + reg = get_probe_mem_regno(insn); 655 + if (WARN_ON_ONCE(reg < 0)) 631 656 /* JIT bug - unexpected probe instruction. */ 632 657 return -1; 633 658 if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg)) ··· 655 678 /* JIT bug - landing pad and extable must be close. */ 656 679 return -1; 657 680 ex->fixup = delta; 658 - ex->handler = (u8 *)ex_handler_bpf - (u8 *)&ex->handler; 681 + ex->type = EX_TYPE_BPF; 682 + ex->data = reg; 659 683 jit->excnt++; 660 684 } 661 685 return 0;
+1
arch/s390/pci/pci_clp.c
··· 17 17 #include <linux/delay.h> 18 18 #include <linux/pci.h> 19 19 #include <linux/uaccess.h> 20 + #include <asm/asm-extable.h> 20 21 #include <asm/pci_debug.h> 21 22 #include <asm/pci_clp.h> 22 23 #include <asm/clp.h>
+1
arch/s390/pci/pci_insn.c
··· 9 9 #include <linux/errno.h> 10 10 #include <linux/delay.h> 11 11 #include <linux/jump_label.h> 12 + #include <asm/asm-extable.h> 12 13 #include <asm/facility.h> 13 14 #include <asm/pci_insn.h> 14 15 #include <asm/pci_debug.h>
+2 -2
arch/s390/pci/pci_irq.c
··· 99 99 } 100 100 101 101 /* Register adapter interruptions */ 102 - int zpci_set_irq(struct zpci_dev *zdev) 102 + static int zpci_set_irq(struct zpci_dev *zdev) 103 103 { 104 104 int rc; 105 105 ··· 115 115 } 116 116 117 117 /* Clear adapter interruptions */ 118 - int zpci_clear_irq(struct zpci_dev *zdev) 118 + static int zpci_clear_irq(struct zpci_dev *zdev) 119 119 { 120 120 int rc; 121 121
+1
arch/s390/pci/pci_mmio.c
··· 11 11 #include <linux/mm.h> 12 12 #include <linux/errno.h> 13 13 #include <linux/pci.h> 14 + #include <asm/asm-extable.h> 14 15 #include <asm/pci_io.h> 15 16 #include <asm/pci_debug.h> 16 17
+24
arch/s390/tools/gcc-thunk-extern.sh
··· 1 + #!/bin/sh 2 + # SPDX-License-Identifier: GPL-2.0 3 + # Borrowed from gcc: gcc/testsuite/gcc.target/s390/nobp-section-type-conflict.c 4 + # Checks that we don't get error: section type conflict with ‘put_page’. 5 + 6 + cat << "END" | $@ -x c - -fno-PIE -march=z10 -mindirect-branch=thunk-extern -mfunction-return=thunk-extern -mindirect-branch-table -O2 -c -o /dev/null 7 + int a; 8 + int b (void); 9 + void c (int); 10 + 11 + static void 12 + put_page (void) 13 + { 14 + if (b ()) 15 + c (a); 16 + } 17 + 18 + __attribute__ ((__section__ (".init.text"), __cold__)) void 19 + d (void) 20 + { 21 + put_page (); 22 + put_page (); 23 + } 24 + END
-8
arch/s390/tools/gen_facilities.c
··· 27 27 */ 28 28 .name = "FACILITIES_ALS", 29 29 .bits = (int[]){ 30 - #ifdef CONFIG_HAVE_MARCH_Z900_FEATURES 31 30 0, /* N3 instructions */ 32 31 1, /* z/Arch mode installed */ 33 - #endif 34 - #ifdef CONFIG_HAVE_MARCH_Z990_FEATURES 35 32 18, /* long displacement facility */ 36 - #endif 37 - #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES 38 33 21, /* extended-immediate facility */ 39 34 25, /* store clock fast */ 40 - #endif 41 - #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES 42 35 27, /* mvcos */ 43 36 32, /* compare and swap and store */ 44 37 33, /* compare and swap and store 2 */ 45 38 34, /* general instructions extension */ 46 39 35, /* execute extensions */ 47 - #endif 48 40 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 49 41 45, /* fast-BCR, etc. */ 50 42 #endif
+1 -1
drivers/s390/block/dasd_diag.c
··· 19 19 #include <linux/module.h> 20 20 #include <linux/init.h> 21 21 #include <linux/jiffies.h> 22 - 22 + #include <asm/asm-extable.h> 23 23 #include <asm/dasd.h> 24 24 #include <asm/debug.h> 25 25 #include <asm/diag.h>
+1
drivers/s390/char/diag_ftp.c
··· 15 15 #include <linux/irq.h> 16 16 #include <linux/wait.h> 17 17 #include <linux/string.h> 18 + #include <asm/asm-extable.h> 18 19 #include <asm/ctl_reg.h> 19 20 #include <asm/diag.h> 20 21
+1
drivers/s390/char/sclp.h
··· 11 11 12 12 #include <linux/types.h> 13 13 #include <linux/list.h> 14 + #include <asm/asm-extable.h> 14 15 #include <asm/sclp.h> 15 16 #include <asm/ebcdic.h> 16 17
+1 -2
drivers/s390/char/sclp_cmd.c
··· 2 2 /* 3 3 * Copyright IBM Corp. 2007,2012 4 4 * 5 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 6 - * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 5 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 7 6 */ 8 7 9 8 #define KMSG_COMPONENT "sclp_cmd"
-1
drivers/s390/char/sclp_config.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 3 * Copyright IBM Corp. 2007 4 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> 5 4 */ 6 5 7 6 #define KMSG_COMPONENT "sclp_config"
+1 -1
drivers/s390/char/sclp_sdias.c
··· 184 184 sccb->evbuf.asa_size = SDIAS_ASA_SIZE_64; 185 185 sccb->evbuf.event_status = 0; 186 186 sccb->evbuf.blk_cnt = nr_blks; 187 - sccb->evbuf.asa = (unsigned long)dest; 187 + sccb->evbuf.asa = __pa(dest); 188 188 sccb->evbuf.fbn = start_blk; 189 189 sccb->evbuf.lbn = 0; 190 190 sccb->evbuf.dbs = 1;
+1 -2
drivers/s390/char/zcore.c
··· 229 229 rc = memcpy_hsa_kernel(zcore_ipl_block, ipib_info.ipib, 230 230 PAGE_SIZE); 231 231 else 232 - rc = memcpy_real(zcore_ipl_block, (void *) ipib_info.ipib, 233 - PAGE_SIZE); 232 + rc = memcpy_real(zcore_ipl_block, ipib_info.ipib, PAGE_SIZE); 234 233 if (rc || (__force u32)csum_partial(zcore_ipl_block, zcore_ipl_block->hdr.len, 0) != 235 234 ipib_info.checksum) { 236 235 TRACE("Checksum does not match\n");
+2 -2
drivers/s390/cio/airq.c
··· 44 44 if (!airq->handler || airq->isc > MAX_ISC) 45 45 return -EINVAL; 46 46 if (!airq->lsi_ptr) { 47 - airq->lsi_ptr = kzalloc(1, GFP_KERNEL); 47 + airq->lsi_ptr = cio_dma_zalloc(1); 48 48 if (!airq->lsi_ptr) 49 49 return -ENOMEM; 50 50 airq->flags |= AIRQ_PTR_ALLOCATED; ··· 79 79 synchronize_rcu(); 80 80 isc_unregister(airq->isc); 81 81 if (airq->flags & AIRQ_PTR_ALLOCATED) { 82 - kfree(airq->lsi_ptr); 82 + cio_dma_free(airq->lsi_ptr, 1); 83 83 airq->lsi_ptr = NULL; 84 84 airq->flags &= ~AIRQ_PTR_ALLOCATED; 85 85 }
-1
drivers/s390/cio/crw.c
··· 6 6 * Author(s): Ingo Adlung <adlung@de.ibm.com>, 7 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>, 8 8 * Cornelia Huck <cornelia.huck@de.ibm.com>, 9 - * Heiko Carstens <heiko.carstens@de.ibm.com>, 10 9 */ 11 10 12 11 #include <linux/mutex.h>
+1
drivers/s390/cio/ioasm.c
··· 5 5 6 6 #include <linux/export.h> 7 7 8 + #include <asm/asm-extable.h> 8 9 #include <asm/chpid.h> 9 10 #include <asm/schid.h> 10 11 #include <asm/crw.h>
+294 -34
drivers/s390/crypto/ap_bus.c
··· 36 36 #include <linux/mod_devicetable.h> 37 37 #include <linux/debugfs.h> 38 38 #include <linux/ctype.h> 39 + #include <linux/module.h> 39 40 40 41 #include "ap_bus.h" 41 42 #include "ap_debug.h" ··· 92 91 static DECLARE_COMPLETION(ap_init_apqn_bindings_complete); 93 92 94 93 static struct ap_config_info *ap_qci_info; 94 + static struct ap_config_info *ap_qci_info_old; 95 95 96 96 /* 97 97 * AP bus related debug feature things. ··· 230 228 ap_qci_info = kzalloc(sizeof(*ap_qci_info), GFP_KERNEL); 231 229 if (!ap_qci_info) 232 230 return; 231 + ap_qci_info_old = kzalloc(sizeof(*ap_qci_info_old), GFP_KERNEL); 232 + if (!ap_qci_info_old) 233 + return; 233 234 if (ap_fetch_qci_info(ap_qci_info) != 0) { 234 235 kfree(ap_qci_info); 236 + kfree(ap_qci_info_old); 235 237 ap_qci_info = NULL; 238 + ap_qci_info_old = NULL; 236 239 return; 237 240 } 238 241 AP_DBF_INFO("%s successful fetched initial qci info\n", __func__); ··· 254 247 __func__, ap_max_domain_id); 255 248 } 256 249 } 250 + 251 + memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info)); 257 252 } 258 253 259 254 /* ··· 323 314 * false otherwise. 324 315 */ 325 316 static bool ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac, 326 - int *q_depth, int *q_ml, bool *q_decfg) 317 + int *q_depth, int *q_ml, bool *q_decfg, bool *q_cstop) 327 318 { 328 319 struct ap_queue_status status; 329 320 union { ··· 366 357 *q_depth = tapq_info.tapq_gr2.qd; 367 358 *q_ml = tapq_info.tapq_gr2.ml; 368 359 *q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED; 360 + *q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED; 369 361 switch (*q_type) { 370 362 /* For CEX2 and CEX3 the available functions 371 363 * are not reflected by the facilities bits. 
··· 1077 1067 return 0; 1078 1068 } 1079 1069 1070 + static int ap_parse_bitmap_str(const char *str, unsigned long *bitmap, int bits, 1071 + unsigned long *newmap) 1072 + { 1073 + unsigned long size; 1074 + int rc; 1075 + 1076 + size = BITS_TO_LONGS(bits) * sizeof(unsigned long); 1077 + if (*str == '+' || *str == '-') { 1078 + memcpy(newmap, bitmap, size); 1079 + rc = modify_bitmap(str, newmap, bits); 1080 + } else { 1081 + memset(newmap, 0, size); 1082 + rc = hex2bitmap(str, newmap, bits); 1083 + } 1084 + return rc; 1085 + } 1086 + 1080 1087 int ap_parse_mask_str(const char *str, 1081 1088 unsigned long *bitmap, int bits, 1082 1089 struct mutex *lock) ··· 1113 1086 kfree(newmap); 1114 1087 return -ERESTARTSYS; 1115 1088 } 1116 - 1117 - if (*str == '+' || *str == '-') { 1118 - memcpy(newmap, bitmap, size); 1119 - rc = modify_bitmap(str, newmap, bits); 1120 - } else { 1121 - memset(newmap, 0, size); 1122 - rc = hex2bitmap(str, newmap, bits); 1123 - } 1089 + rc = ap_parse_bitmap_str(str, bitmap, bits, newmap); 1124 1090 if (rc == 0) 1125 1091 memcpy(bitmap, newmap, size); 1126 1092 mutex_unlock(lock); ··· 1306 1286 return rc; 1307 1287 } 1308 1288 1289 + static int __verify_card_reservations(struct device_driver *drv, void *data) 1290 + { 1291 + int rc = 0; 1292 + struct ap_driver *ap_drv = to_ap_drv(drv); 1293 + unsigned long *newapm = (unsigned long *)data; 1294 + 1295 + /* 1296 + * increase the driver's module refcounter to be sure it is not 1297 + * going away when we invoke the callback function. 
1298 + */ 1299 + if (!try_module_get(drv->owner)) 1300 + return 0; 1301 + 1302 + if (ap_drv->in_use) { 1303 + rc = ap_drv->in_use(newapm, ap_perms.aqm); 1304 + if (rc) 1305 + rc = -EBUSY; 1306 + } 1307 + 1308 + /* release the driver's module */ 1309 + module_put(drv->owner); 1310 + 1311 + return rc; 1312 + } 1313 + 1314 + static int apmask_commit(unsigned long *newapm) 1315 + { 1316 + int rc; 1317 + unsigned long reserved[BITS_TO_LONGS(AP_DEVICES)]; 1318 + 1319 + /* 1320 + * Check if any bits in the apmask have been set which will 1321 + * result in queues being removed from non-default drivers 1322 + */ 1323 + if (bitmap_andnot(reserved, newapm, ap_perms.apm, AP_DEVICES)) { 1324 + rc = bus_for_each_drv(&ap_bus_type, NULL, reserved, 1325 + __verify_card_reservations); 1326 + if (rc) 1327 + return rc; 1328 + } 1329 + 1330 + memcpy(ap_perms.apm, newapm, APMASKSIZE); 1331 + 1332 + return 0; 1333 + } 1334 + 1309 1335 static ssize_t apmask_store(struct bus_type *bus, const char *buf, 1310 1336 size_t count) 1311 1337 { 1312 1338 int rc; 1339 + DECLARE_BITMAP(newapm, AP_DEVICES); 1313 1340 1314 - rc = ap_parse_mask_str(buf, ap_perms.apm, AP_DEVICES, &ap_perms_mutex); 1341 + if (mutex_lock_interruptible(&ap_perms_mutex)) 1342 + return -ERESTARTSYS; 1343 + 1344 + rc = ap_parse_bitmap_str(buf, ap_perms.apm, AP_DEVICES, newapm); 1345 + if (rc) 1346 + goto done; 1347 + 1348 + rc = apmask_commit(newapm); 1349 + 1350 + done: 1351 + mutex_unlock(&ap_perms_mutex); 1315 1352 if (rc) 1316 1353 return rc; 1317 1354 ··· 1394 1317 return rc; 1395 1318 } 1396 1319 1320 + static int __verify_queue_reservations(struct device_driver *drv, void *data) 1321 + { 1322 + int rc = 0; 1323 + struct ap_driver *ap_drv = to_ap_drv(drv); 1324 + unsigned long *newaqm = (unsigned long *)data; 1325 + 1326 + /* 1327 + * increase the driver's module refcounter to be sure it is not 1328 + * going away when we invoke the callback function. 
1329 + */ 1330 + if (!try_module_get(drv->owner)) 1331 + return 0; 1332 + 1333 + if (ap_drv->in_use) { 1334 + rc = ap_drv->in_use(ap_perms.apm, newaqm); 1335 + if (rc) 1336 + return -EBUSY; 1337 + } 1338 + 1339 + /* release the driver's module */ 1340 + module_put(drv->owner); 1341 + 1342 + return rc; 1343 + } 1344 + 1345 + static int aqmask_commit(unsigned long *newaqm) 1346 + { 1347 + int rc; 1348 + unsigned long reserved[BITS_TO_LONGS(AP_DOMAINS)]; 1349 + 1350 + /* 1351 + * Check if any bits in the aqmask have been set which will 1352 + * result in queues being removed from non-default drivers 1353 + */ 1354 + if (bitmap_andnot(reserved, newaqm, ap_perms.aqm, AP_DOMAINS)) { 1355 + rc = bus_for_each_drv(&ap_bus_type, NULL, reserved, 1356 + __verify_queue_reservations); 1357 + if (rc) 1358 + return rc; 1359 + } 1360 + 1361 + memcpy(ap_perms.aqm, newaqm, AQMASKSIZE); 1362 + 1363 + return 0; 1364 + } 1365 + 1397 1366 static ssize_t aqmask_store(struct bus_type *bus, const char *buf, 1398 1367 size_t count) 1399 1368 { 1400 1369 int rc; 1370 + DECLARE_BITMAP(newaqm, AP_DOMAINS); 1401 1371 1402 - rc = ap_parse_mask_str(buf, ap_perms.aqm, AP_DOMAINS, &ap_perms_mutex); 1372 + if (mutex_lock_interruptible(&ap_perms_mutex)) 1373 + return -ERESTARTSYS; 1374 + 1375 + rc = ap_parse_bitmap_str(buf, ap_perms.aqm, AP_DOMAINS, newaqm); 1376 + if (rc) 1377 + goto done; 1378 + 1379 + rc = aqmask_commit(newaqm); 1380 + 1381 + done: 1382 + mutex_unlock(&ap_perms_mutex); 1403 1383 if (rc) 1404 1384 return rc; 1405 1385 ··· 1473 1339 atomic64_read(&ap_scan_bus_count)); 1474 1340 } 1475 1341 1476 - static BUS_ATTR_RO(scans); 1342 + static ssize_t scans_store(struct bus_type *bus, const char *buf, 1343 + size_t count) 1344 + { 1345 + AP_DBF_INFO("%s force AP bus rescan\n", __func__); 1346 + 1347 + ap_bus_force_rescan(); 1348 + 1349 + return count; 1350 + } 1351 + 1352 + static BUS_ATTR_RW(scans); 1477 1353 1478 1354 static ssize_t bindings_show(struct bus_type *bus, char *buf) 1479 1355 
{ ··· 1590 1446 AP_QID_QUEUE(qid), rawtype); 1591 1447 return 0; 1592 1448 } 1593 - /* up to CEX7 known and fully supported */ 1594 - if (rawtype <= AP_DEVICE_TYPE_CEX7) 1449 + /* up to CEX8 known and fully supported */ 1450 + if (rawtype <= AP_DEVICE_TYPE_CEX8) 1595 1451 return rawtype; 1596 1452 /* 1597 - * unknown new type > CEX7, check for compatibility 1453 + * unknown new type > CEX8, check for compatibility 1598 1454 * to the highest known and supported type which is 1599 - * currently CEX7 with the help of the QACT function. 1455 + * currently CEX8 with the help of the QACT function. 1600 1456 */ 1601 1457 if (ap_qact_available()) { 1602 1458 struct ap_queue_status status; 1603 1459 union ap_qact_ap_info apinfo = {0}; 1604 1460 1605 1461 apinfo.mode = (func >> 26) & 0x07; 1606 - apinfo.cat = AP_DEVICE_TYPE_CEX7; 1462 + apinfo.cat = AP_DEVICE_TYPE_CEX8; 1607 1463 status = ap_qact(qid, 0, &apinfo); 1608 1464 if (status.response_code == AP_RESPONSE_NORMAL 1609 1465 && apinfo.cat >= AP_DEVICE_TYPE_CEX2A 1610 - && apinfo.cat <= AP_DEVICE_TYPE_CEX7) 1466 + && apinfo.cat <= AP_DEVICE_TYPE_CEX8) 1611 1467 comp_type = apinfo.cat; 1612 1468 } 1613 1469 if (!comp_type) ··· 1649 1505 && AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long) data; 1650 1506 } 1651 1507 1508 + /* Helper function for notify_config_changed */ 1509 + static int __drv_notify_config_changed(struct device_driver *drv, void *data) 1510 + { 1511 + struct ap_driver *ap_drv = to_ap_drv(drv); 1512 + 1513 + if (try_module_get(drv->owner)) { 1514 + if (ap_drv->on_config_changed) 1515 + ap_drv->on_config_changed(ap_qci_info, ap_qci_info_old); 1516 + module_put(drv->owner); 1517 + } 1518 + 1519 + return 0; 1520 + } 1521 + 1522 + /* Notify all drivers about an qci config change */ 1523 + static inline void notify_config_changed(void) 1524 + { 1525 + bus_for_each_drv(&ap_bus_type, NULL, NULL, 1526 + __drv_notify_config_changed); 1527 + } 1528 + 1529 + /* Helper function for notify_scan_complete */ 1530 + 
static int __drv_notify_scan_complete(struct device_driver *drv, void *data) 1531 + { 1532 + struct ap_driver *ap_drv = to_ap_drv(drv); 1533 + 1534 + if (try_module_get(drv->owner)) { 1535 + if (ap_drv->on_scan_complete) 1536 + ap_drv->on_scan_complete(ap_qci_info, 1537 + ap_qci_info_old); 1538 + module_put(drv->owner); 1539 + } 1540 + 1541 + return 0; 1542 + } 1543 + 1544 + /* Notify all drivers about bus scan complete */ 1545 + static inline void notify_scan_complete(void) 1546 + { 1547 + bus_for_each_drv(&ap_bus_type, NULL, NULL, 1548 + __drv_notify_scan_complete); 1549 + } 1550 + 1652 1551 /* 1653 1552 * Helper function for ap_scan_bus(). 1654 1553 * Remove card device and associated queue devices. ··· 1711 1524 */ 1712 1525 static inline void ap_scan_domains(struct ap_card *ac) 1713 1526 { 1714 - bool decfg; 1527 + bool decfg, chkstop; 1715 1528 ap_qid_t qid; 1716 1529 unsigned int func; 1717 1530 struct device *dev; ··· 1740 1553 continue; 1741 1554 } 1742 1555 /* domain is valid, get info from this APQN */ 1743 - if (!ap_queue_info(qid, &type, &func, &depth, &ml, &decfg)) { 1556 + if (!ap_queue_info(qid, &type, &func, &depth, 1557 + &ml, &decfg, &chkstop)) { 1744 1558 if (aq) { 1745 1559 AP_DBF_INFO("%s(%d,%d) queue_info() failed, rm queue dev\n", 1746 1560 __func__, ac->id, dom); ··· 1760 1572 } 1761 1573 aq->card = ac; 1762 1574 aq->config = !decfg; 1575 + aq->chkstop = chkstop; 1763 1576 dev = &aq->ap_dev.device; 1764 1577 dev->bus = &ap_bus_type; 1765 1578 dev->parent = &ac->ap_dev.device; ··· 1777 1588 if (decfg) 1778 1589 AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n", 1779 1590 __func__, ac->id, dom); 1591 + else if (chkstop) 1592 + AP_DBF_INFO("%s(%d,%d) new (chkstop) queue dev created\n", 1593 + __func__, ac->id, dom); 1780 1594 else 1781 1595 AP_DBF_INFO("%s(%d,%d) new queue dev created\n", 1782 1596 __func__, ac->id, dom); 1783 1597 goto put_dev_and_continue; 1784 1598 } 1785 - /* Check config state on the already existing queue device */ 
1599 + /* handle state changes on already existing queue device */ 1786 1600 spin_lock_bh(&aq->lock); 1601 + /* checkstop state */ 1602 + if (chkstop && !aq->chkstop) { 1603 + /* checkstop on */ 1604 + aq->chkstop = true; 1605 + if (aq->dev_state > AP_DEV_STATE_UNINITIATED) { 1606 + aq->dev_state = AP_DEV_STATE_ERROR; 1607 + aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED; 1608 + } 1609 + spin_unlock_bh(&aq->lock); 1610 + AP_DBF_DBG("%s(%d,%d) queue dev checkstop on\n", 1611 + __func__, ac->id, dom); 1612 + /* 'receive' pending messages with -EAGAIN */ 1613 + ap_flush_queue(aq); 1614 + goto put_dev_and_continue; 1615 + } else if (!chkstop && aq->chkstop) { 1616 + /* checkstop off */ 1617 + aq->chkstop = false; 1618 + if (aq->dev_state > AP_DEV_STATE_UNINITIATED) { 1619 + aq->dev_state = AP_DEV_STATE_OPERATING; 1620 + aq->sm_state = AP_SM_STATE_RESET_START; 1621 + } 1622 + spin_unlock_bh(&aq->lock); 1623 + AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n", 1624 + __func__, ac->id, dom); 1625 + goto put_dev_and_continue; 1626 + } 1627 + /* config state change */ 1787 1628 if (decfg && aq->config) { 1788 1629 /* config off this queue device */ 1789 1630 aq->config = false; ··· 1822 1603 aq->last_err_rc = AP_RESPONSE_DECONFIGURED; 1823 1604 } 1824 1605 spin_unlock_bh(&aq->lock); 1825 - AP_DBF_INFO("%s(%d,%d) queue dev config off\n", 1826 - __func__, ac->id, dom); 1606 + AP_DBF_DBG("%s(%d,%d) queue dev config off\n", 1607 + __func__, ac->id, dom); 1827 1608 ap_send_config_uevent(&aq->ap_dev, aq->config); 1828 1609 /* 'receive' pending messages with -EAGAIN */ 1829 1610 ap_flush_queue(aq); 1830 1611 goto put_dev_and_continue; 1831 - } 1832 - if (!decfg && !aq->config) { 1612 + } else if (!decfg && !aq->config) { 1833 1613 /* config on this queue device */ 1834 1614 aq->config = true; 1835 1615 if (aq->dev_state > AP_DEV_STATE_UNINITIATED) { ··· 1836 1618 aq->sm_state = AP_SM_STATE_RESET_START; 1837 1619 } 1838 1620 spin_unlock_bh(&aq->lock); 1839 - AP_DBF_INFO("%s(%d,%d) queue 
dev config on\n", 1840 - __func__, ac->id, dom); 1621 + AP_DBF_DBG("%s(%d,%d) queue dev config on\n", 1622 + __func__, ac->id, dom); 1841 1623 ap_send_config_uevent(&aq->ap_dev, aq->config); 1842 1624 goto put_dev_and_continue; 1843 1625 } ··· 1864 1646 */ 1865 1647 static inline void ap_scan_adapter(int ap) 1866 1648 { 1867 - bool decfg; 1649 + bool decfg, chkstop; 1868 1650 ap_qid_t qid; 1869 1651 unsigned int func; 1870 1652 struct device *dev; ··· 1898 1680 for (dom = 0; dom <= ap_max_domain_id; dom++) 1899 1681 if (ap_test_config_usage_domain(dom)) { 1900 1682 qid = AP_MKQID(ap, dom); 1901 - if (ap_queue_info(qid, &type, &func, 1902 - &depth, &ml, &decfg)) 1683 + if (ap_queue_info(qid, &type, &func, &depth, 1684 + &ml, &decfg, &chkstop)) 1903 1685 break; 1904 1686 } 1905 1687 if (dom > ap_max_domain_id) { ··· 1944 1726 put_device(dev); 1945 1727 ac = NULL; 1946 1728 } else { 1729 + /* handle checkstop state change */ 1730 + if (chkstop && !ac->chkstop) { 1731 + /* checkstop on */ 1732 + ac->chkstop = true; 1733 + AP_DBF_INFO("%s(%d) card dev checkstop on\n", 1734 + __func__, ap); 1735 + } else if (!chkstop && ac->chkstop) { 1736 + /* checkstop off */ 1737 + ac->chkstop = false; 1738 + AP_DBF_INFO("%s(%d) card dev checkstop off\n", 1739 + __func__, ap); 1740 + } 1741 + /* handle config state change */ 1947 1742 if (decfg && ac->config) { 1948 1743 ac->config = false; 1949 1744 AP_DBF_INFO("%s(%d) card dev config off\n", 1950 1745 __func__, ap); 1951 1746 ap_send_config_uevent(&ac->ap_dev, ac->config); 1952 - } 1953 - if (!decfg && !ac->config) { 1747 + } else if (!decfg && !ac->config) { 1954 1748 ac->config = true; 1955 1749 AP_DBF_INFO("%s(%d) card dev config on\n", 1956 1750 __func__, ap); ··· 1986 1756 return; 1987 1757 } 1988 1758 ac->config = !decfg; 1759 + ac->chkstop = chkstop; 1989 1760 dev = &ac->ap_dev.device; 1990 1761 dev->bus = &ap_bus_type; 1991 1762 dev->parent = ap_root_device; ··· 2011 1780 if (decfg) 2012 1781 AP_DBF_INFO("%s(%d) new (decfg) 
card dev type=%d func=0x%08x created\n", 2013 1782 __func__, ap, type, func); 1783 + else if (chkstop) 1784 + AP_DBF_INFO("%s(%d) new (chkstop) card dev type=%d func=0x%08x created\n", 1785 + __func__, ap, type, func); 2014 1786 else 2015 1787 AP_DBF_INFO("%s(%d) new card dev type=%d func=0x%08x created\n", 2016 1788 __func__, ap, type, func); ··· 2027 1793 } 2028 1794 2029 1795 /** 1796 + * ap_get_configuration - get the host AP configuration 1797 + * 1798 + * Stores the host AP configuration information returned from the previous call 1799 + * to Query Configuration Information (QCI), then retrieves and stores the 1800 + * current AP configuration returned from QCI. 1801 + * 1802 + * Return: true if the host AP configuration changed between calls to QCI; 1803 + * otherwise, return false. 1804 + */ 1805 + static bool ap_get_configuration(void) 1806 + { 1807 + memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info)); 1808 + ap_fetch_qci_info(ap_qci_info); 1809 + 1810 + return memcmp(ap_qci_info, ap_qci_info_old, 1811 + sizeof(struct ap_config_info)) != 0; 1812 + } 1813 + 1814 + /** 2030 1815 * ap_scan_bus(): Scan the AP bus for new devices 2031 1816 * Runs periodically, workqueue timer (ap_config_time) 2032 1817 * @unused: Unused pointer. 
2033 1818 */ 2034 1819 static void ap_scan_bus(struct work_struct *unused) 2035 1820 { 2036 - int ap; 1821 + int ap, config_changed = 0; 2037 1822 2038 - ap_fetch_qci_info(ap_qci_info); 1823 + /* config change notify */ 1824 + config_changed = ap_get_configuration(); 1825 + if (config_changed) 1826 + notify_config_changed(); 2039 1827 ap_select_domain(); 2040 1828 2041 1829 AP_DBF_DBG("%s running\n", __func__); ··· 2065 1809 /* loop over all possible adapters */ 2066 1810 for (ap = 0; ap <= ap_max_adapter_id; ap++) 2067 1811 ap_scan_adapter(ap); 1812 + 1813 + /* scan complete notify */ 1814 + if (config_changed) 1815 + notify_scan_complete(); 2068 1816 2069 1817 /* check if there is at least one queue available with default domain */ 2070 1818 if (ap_domain_index >= 0) {
+23 -1
drivers/s390/crypto/ap_bus.h
··· 47 47 #define AP_RESPONSE_BUSY 0x05 48 48 #define AP_RESPONSE_INVALID_ADDRESS 0x06 49 49 #define AP_RESPONSE_OTHERWISE_CHANGED 0x07 50 + #define AP_RESPONSE_INVALID_GISA 0x08 50 51 #define AP_RESPONSE_Q_FULL 0x10 51 52 #define AP_RESPONSE_NO_PENDING_REPLY 0x10 52 53 #define AP_RESPONSE_INDEX_TOO_BIG 0x11 ··· 70 69 #define AP_DEVICE_TYPE_CEX5 11 71 70 #define AP_DEVICE_TYPE_CEX6 12 72 71 #define AP_DEVICE_TYPE_CEX7 13 72 + #define AP_DEVICE_TYPE_CEX8 14 73 73 74 74 /* 75 75 * Known function facilities ··· 144 142 145 143 int (*probe)(struct ap_device *); 146 144 void (*remove)(struct ap_device *); 145 + int (*in_use)(unsigned long *apm, unsigned long *aqm); 146 + /* 147 + * Called at the start of the ap bus scan function when 148 + * the crypto config information (qci) has changed. 149 + */ 150 + void (*on_config_changed)(struct ap_config_info *new_config_info, 151 + struct ap_config_info *old_config_info); 152 + /* 153 + * Called at the end of the ap bus scan function when 154 + * the crypto config information (qci) has changed. 155 + */ 156 + void (*on_scan_complete)(struct ap_config_info *new_config_info, 157 + struct ap_config_info *old_config_info); 147 158 }; 148 159 149 160 #define to_ap_drv(x) container_of((x), struct ap_driver, driver) ··· 179 164 int id; /* AP card number. */ 180 165 unsigned int maxmsgsize; /* AP msg limit for this card */ 181 166 bool config; /* configured state */ 167 + bool chkstop; /* checkstop state */ 182 168 atomic64_t total_request_count; /* # requests ever for this AP device.*/ 183 169 }; 184 170 ··· 192 176 spinlock_t lock; /* Per device lock. */ 193 177 enum ap_dev_state dev_state; /* queue device state */ 194 178 bool config; /* configured state */ 179 + bool chkstop; /* checkstop state */ 195 180 ap_qid_t qid; /* AP queue id. */ 196 181 bool interrupt; /* indicate if interrupts are enabled */ 197 182 int queue_count; /* # messages currently on AP queue. 
*/ ··· 251 234 struct ap_message *); 252 235 }; 253 236 254 - #define AP_MSG_FLAG_SPECIAL 1 /* flag msg as 'special' with NQAP */ 237 + #define AP_MSG_FLAG_SPECIAL 0x0001 /* flag msg as 'special' with NQAP */ 238 + #define AP_MSG_FLAG_USAGE 0x0002 /* CCA, EP11: usage (no admin) msg */ 239 + #define AP_MSG_FLAG_ADMIN 0x0004 /* CCA, EP11: admin (=control) msg */ 255 240 256 241 /** 257 242 * ap_init_message() - Initialize ap_message. ··· 307 288 308 289 struct ap_card *ap_card_create(int id, int queue_depth, int raw_type, 309 290 int comp_type, unsigned int functions, int ml); 291 + 292 + #define APMASKSIZE (BITS_TO_LONGS(AP_DEVICES) * sizeof(unsigned long)) 293 + #define AQMASKSIZE (BITS_TO_LONGS(AP_DOMAINS) * sizeof(unsigned long)) 310 294 311 295 struct ap_perms { 312 296 unsigned long ioctlm[BITS_TO_LONGS(AP_IOCTLS)];
+11
drivers/s390/crypto/ap_card.c
··· 174 174 175 175 static DEVICE_ATTR_RW(config); 176 176 177 + static ssize_t chkstop_show(struct device *dev, 178 + struct device_attribute *attr, char *buf) 179 + { 180 + struct ap_card *ac = to_ap_card(dev); 181 + 182 + return scnprintf(buf, PAGE_SIZE, "%d\n", ac->chkstop ? 1 : 0); 183 + } 184 + 185 + static DEVICE_ATTR_RO(chkstop); 186 + 177 187 static ssize_t max_msg_size_show(struct device *dev, 178 188 struct device_attribute *attr, char *buf) 179 189 { ··· 204 194 &dev_attr_pendingq_count.attr, 205 195 &dev_attr_modalias.attr, 206 196 &dev_attr_config.attr, 197 + &dev_attr_chkstop.attr, 207 198 &dev_attr_max_msg_size.attr, 208 199 NULL 209 200 };
+18 -1
drivers/s390/crypto/ap_queue.c
··· 455 455 456 456 enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event) 457 457 { 458 - if (aq->dev_state > AP_DEV_STATE_UNINITIATED) 458 + if (aq->config && !aq->chkstop && 459 + aq->dev_state > AP_DEV_STATE_UNINITIATED) 459 460 return ap_jumptable[aq->sm_state][event](aq); 460 461 else 461 462 return AP_SM_WAIT_NONE; ··· 616 615 617 616 static DEVICE_ATTR_RO(config); 618 617 618 + static ssize_t chkstop_show(struct device *dev, 619 + struct device_attribute *attr, char *buf) 620 + { 621 + struct ap_queue *aq = to_ap_queue(dev); 622 + int rc; 623 + 624 + spin_lock_bh(&aq->lock); 625 + rc = scnprintf(buf, PAGE_SIZE, "%d\n", aq->chkstop ? 1 : 0); 626 + spin_unlock_bh(&aq->lock); 627 + return rc; 628 + } 629 + 630 + static DEVICE_ATTR_RO(chkstop); 631 + 619 632 #ifdef CONFIG_ZCRYPT_DEBUG 620 633 static ssize_t states_show(struct device *dev, 621 634 struct device_attribute *attr, char *buf) ··· 744 729 &dev_attr_reset.attr, 745 730 &dev_attr_interrupt.attr, 746 731 &dev_attr_config.attr, 732 + &dev_attr_chkstop.attr, 747 733 #ifdef CONFIG_ZCRYPT_DEBUG 748 734 &dev_attr_states.attr, 749 735 &dev_attr_last_err_rc.attr, ··· 931 915 spin_lock_bh(&aq->lock); 932 916 aq->dev_state = AP_DEV_STATE_OPERATING; 933 917 aq->sm_state = AP_SM_STATE_RESET_START; 918 + aq->last_err_rc = 0; 934 919 ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL)); 935 920 spin_unlock_bh(&aq->lock); 936 921 }
+32
drivers/s390/crypto/vfio_ap_debug.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright IBM Corp. 2022 4 + * 5 + * Author(s): Tony Krowiak <akrowiak@linux.ibm.com> 6 + */ 7 + #ifndef VFIO_AP_DEBUG_H 8 + #define VFIO_AP_DEBUG_H 9 + 10 + #include <asm/debug.h> 11 + 12 + #define DBF_ERR 3 /* error conditions */ 13 + #define DBF_WARN 4 /* warning conditions */ 14 + #define DBF_INFO 5 /* informational */ 15 + #define DBF_DEBUG 6 /* for debugging only */ 16 + 17 + #define DBF_MAX_SPRINTF_ARGS 10 18 + 19 + #define VFIO_AP_DBF(...) \ 20 + debug_sprintf_event(vfio_ap_dbf_info, ##__VA_ARGS__) 21 + #define VFIO_AP_DBF_ERR(...) \ 22 + debug_sprintf_event(vfio_ap_dbf_info, DBF_ERR, ##__VA_ARGS__) 23 + #define VFIO_AP_DBF_WARN(...) \ 24 + debug_sprintf_event(vfio_ap_dbf_info, DBF_WARN, ##__VA_ARGS__) 25 + #define VFIO_AP_DBF_INFO(...) \ 26 + debug_sprintf_event(vfio_ap_dbf_info, DBF_INFO, ##__VA_ARGS__) 27 + #define VFIO_AP_DBF_DBG(...) \ 28 + debug_sprintf_event(vfio_ap_dbf_info, DBF_DEBUG, ##__VA_ARGS__) 29 + 30 + extern debug_info_t *vfio_ap_dbf_info; 31 + 32 + #endif /* VFIO_AP_DEBUG_H */
+23
drivers/s390/crypto/vfio_ap_drv.c
··· 14 14 #include <linux/string.h> 15 15 #include <asm/facility.h> 16 16 #include "vfio_ap_private.h" 17 + #include "vfio_ap_debug.h" 17 18 18 19 #define VFIO_AP_ROOT_NAME "vfio_ap" 19 20 #define VFIO_AP_DEV_NAME "matrix" ··· 27 26 MODULE_LICENSE("GPL v2"); 28 27 29 28 struct ap_matrix_dev *matrix_dev; 29 + debug_info_t *vfio_ap_dbf_info; 30 30 31 31 /* Only type 10 adapters (CEX4 and later) are supported 32 32 * by the AP matrix device driver ··· 40 38 { .dev_type = AP_DEVICE_TYPE_CEX6, 41 39 .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 42 40 { .dev_type = AP_DEVICE_TYPE_CEX7, 41 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 42 + { .dev_type = AP_DEVICE_TYPE_CEX8, 43 43 .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 44 44 { /* end of sibling */ }, 45 45 }; ··· 254 250 root_device_unregister(root_device); 255 251 } 256 252 253 + static int __init vfio_ap_dbf_info_init(void) 254 + { 255 + vfio_ap_dbf_info = debug_register("vfio_ap", 1, 1, 256 + DBF_MAX_SPRINTF_ARGS * sizeof(long)); 257 + 258 + if (!vfio_ap_dbf_info) 259 + return -ENOENT; 260 + 261 + debug_register_view(vfio_ap_dbf_info, &debug_sprintf_view); 262 + debug_set_level(vfio_ap_dbf_info, DBF_WARN); 263 + 264 + return 0; 265 + } 266 + 257 267 static int __init vfio_ap_init(void) 258 268 { 259 269 int ret; 270 + 271 + ret = vfio_ap_dbf_info_init(); 272 + if (ret) 273 + return ret; 260 274 261 275 /* If there are no AP instructions, there is nothing to pass through. */ 262 276 if (!ap_instructions_available()) ··· 306 284 vfio_ap_mdev_unregister(); 307 285 ap_driver_unregister(&vfio_ap_drv); 308 286 vfio_ap_matrix_dev_destroy(); 287 + debug_unregister(vfio_ap_dbf_info); 309 288 } 310 289 311 290 module_init(vfio_ap_init);
+141 -14
drivers/s390/crypto/vfio_ap_ops.c
··· 16 16 #include <linux/bitops.h> 17 17 #include <linux/kvm_host.h> 18 18 #include <linux/module.h> 19 + #include <linux/uuid.h> 19 20 #include <asm/kvm.h> 20 21 #include <asm/zcrypt.h> 21 22 22 23 #include "vfio_ap_private.h" 24 + #include "vfio_ap_debug.h" 23 25 24 26 #define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough" 25 27 #define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device" ··· 186 184 } 187 185 188 186 /** 187 + * vfio_ap_validate_nib - validate a notification indicator byte (nib) address. 188 + * 189 + * @vcpu: the object representing the vcpu executing the PQAP(AQIC) instruction. 190 + * @nib: the location for storing the nib address. 191 + * @g_pfn: the location for storing the page frame number of the page containing 192 + * the nib. 193 + * 194 + * When the PQAP(AQIC) instruction is executed, general register 2 contains the 195 + * address of the notification indicator byte (nib) used for IRQ notification. 196 + * This function parses the nib from gr2 and calculates the page frame 197 + * number for the guest of the page containing the nib. The values are 198 + * stored in @nib and @g_pfn respectively. 199 + * 200 + * The g_pfn of the nib is then validated to ensure the nib address is valid. 201 + * 202 + * Return: returns zero if the nib address is a valid; otherwise, returns 203 + * -EINVAL. 204 + */ 205 + static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, unsigned long *nib, 206 + unsigned long *g_pfn) 207 + { 208 + *nib = vcpu->run->s.regs.gprs[2]; 209 + *g_pfn = *nib >> PAGE_SHIFT; 210 + 211 + if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *g_pfn))) 212 + return -EINVAL; 213 + 214 + return 0; 215 + } 216 + 217 + /** 189 218 * vfio_ap_irq_enable - Enable Interruption for a APQN 190 219 * 191 220 * @q: the vfio_ap_queue holding AQIC parameters 192 221 * @isc: the guest ISC to register with the GIB interface 193 - * @nib: the notification indicator byte to pin. 
222 + * @vcpu: the vcpu object containing the registers specifying the parameters 223 + * passed to the PQAP(AQIC) instruction. 194 224 * 195 225 * Pin the NIB saved in *q 196 226 * Register the guest ISC to GIB interface and retrieve the ··· 238 204 */ 239 205 static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q, 240 206 int isc, 241 - unsigned long nib) 207 + struct kvm_vcpu *vcpu) 242 208 { 209 + unsigned long nib; 243 210 struct ap_qirq_ctrl aqic_gisa = {}; 244 211 struct ap_queue_status status = {}; 245 212 struct kvm_s390_gisa *gisa; 213 + int nisc; 246 214 struct kvm *kvm; 247 215 unsigned long h_nib, g_pfn, h_pfn; 248 216 int ret; 249 217 250 - g_pfn = nib >> PAGE_SHIFT; 218 + /* Verify that the notification indicator byte address is valid */ 219 + if (vfio_ap_validate_nib(vcpu, &nib, &g_pfn)) { 220 + VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%#lx, g_pfn=%#lx, apqn=%#04x\n", 221 + __func__, nib, g_pfn, q->apqn); 222 + 223 + status.response_code = AP_RESPONSE_INVALID_ADDRESS; 224 + return status; 225 + } 226 + 251 227 ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1, 252 228 IOMMU_READ | IOMMU_WRITE, &h_pfn); 253 229 switch (ret) { 254 230 case 1: 255 231 break; 256 232 default: 233 + VFIO_AP_DBF_WARN("%s: vfio_pin_pages failed: rc=%d," 234 + "nib=%#lx, g_pfn=%#lx, apqn=%#04x\n", 235 + __func__, ret, nib, g_pfn, q->apqn); 236 + 257 237 status.response_code = AP_RESPONSE_INVALID_ADDRESS; 258 238 return status; 259 239 } ··· 277 229 278 230 h_nib = (h_pfn << PAGE_SHIFT) | (nib & ~PAGE_MASK); 279 231 aqic_gisa.gisc = isc; 280 - aqic_gisa.isc = kvm_s390_gisc_register(kvm, isc); 232 + 233 + nisc = kvm_s390_gisc_register(kvm, isc); 234 + if (nisc < 0) { 235 + VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n", 236 + __func__, nisc, isc, q->apqn); 237 + 238 + status.response_code = AP_RESPONSE_INVALID_GISA; 239 + return status; 240 + } 241 + 242 + aqic_gisa.isc = nisc; 281 243 aqic_gisa.ir = 1; 282 
244 aqic_gisa.gisa = (uint64_t)gisa >> 4; 283 245 ··· 311 253 break; 312 254 } 313 255 256 + if (status.response_code != AP_RESPONSE_NORMAL) { 257 + VFIO_AP_DBF_WARN("%s: PQAP(AQIC) failed with status=%#02x: " 258 + "zone=%#x, ir=%#x, gisc=%#x, f=%#x," 259 + "gisa=%#x, isc=%#x, apqn=%#04x\n", 260 + __func__, status.response_code, 261 + aqic_gisa.zone, aqic_gisa.ir, aqic_gisa.gisc, 262 + aqic_gisa.gf, aqic_gisa.gisa, aqic_gisa.isc, 263 + q->apqn); 264 + } 265 + 314 266 return status; 267 + } 268 + 269 + /** 270 + * vfio_ap_le_guid_to_be_uuid - convert a little endian guid array into an array 271 + * of big endian elements that can be passed by 272 + * value to an s390dbf sprintf event function to 273 + * format a UUID string. 274 + * 275 + * @guid: the object containing the little endian guid 276 + * @uuid: a six-element array of long values that can be passed by value as 277 + * arguments for a formatting string specifying a UUID. 278 + * 279 + * The S390 Debug Feature (s390dbf) allows the use of "%s" in the sprintf 280 + * event functions if the memory for the passed string is available as long as 281 + * the debug feature exists. Since a mediated device can be removed at any 282 + * time, it's name can not be used because %s passes the reference to the string 283 + * in memory and the reference will go stale once the device is removed . 284 + * 285 + * The s390dbf string formatting function allows a maximum of 9 arguments for a 286 + * message to be displayed in the 'sprintf' view. In order to use the bytes 287 + * comprising the mediated device's UUID to display the mediated device name, 288 + * they will have to be converted into an array whose elements can be passed by 289 + * value to sprintf. 
For example: 290 + * 291 + * guid array: { 83, 78, 17, 62, bb, f1, f0, 47, 91, 4d, 32, a2, 2e, 3a, 88, 04 } 292 + * mdev name: 62177883-f1bb-47f0-914d-32a22e3a8804 293 + * array returned: { 62177883, f1bb, 47f0, 914d, 32a2, 2e3a8804 } 294 + * formatting string: "%08lx-%04lx-%04lx-%04lx-%02lx%04lx" 295 + */ 296 + static void vfio_ap_le_guid_to_be_uuid(guid_t *guid, unsigned long *uuid) 297 + { 298 + /* 299 + * The input guid is ordered in little endian, so it needs to be 300 + * reordered for displaying a UUID as a string. This specifies the 301 + * guid indices in proper order. 302 + */ 303 + uuid[0] = le32_to_cpup((__le32 *)guid); 304 + uuid[1] = le16_to_cpup((__le16 *)&guid->b[4]); 305 + uuid[2] = le16_to_cpup((__le16 *)&guid->b[6]); 306 + uuid[3] = *((__u16 *)&guid->b[8]); 307 + uuid[4] = *((__u16 *)&guid->b[10]); 308 + uuid[5] = *((__u32 *)&guid->b[12]); 315 309 } 316 310 317 311 /** ··· 391 281 { 392 282 uint64_t status; 393 283 uint16_t apqn; 284 + unsigned long uuid[6]; 394 285 struct vfio_ap_queue *q; 395 286 struct ap_queue_status qstatus = { 396 287 .response_code = AP_RESPONSE_Q_NOT_AVAIL, }; 397 288 struct ap_matrix_mdev *matrix_mdev; 398 289 399 - /* If we do not use the AIV facility just go to userland */ 400 - if (!(vcpu->arch.sie_block->eca & ECA_AIV)) 401 - return -EOPNOTSUPP; 402 - 403 290 apqn = vcpu->run->s.regs.gprs[0] & 0xffff; 404 - mutex_lock(&matrix_dev->lock); 405 291 406 - if (!vcpu->kvm->arch.crypto.pqap_hook) 292 + /* If we do not use the AIV facility just go to userland */ 293 + if (!(vcpu->arch.sie_block->eca & ECA_AIV)) { 294 + VFIO_AP_DBF_WARN("%s: AIV facility not installed: apqn=0x%04x, eca=0x%04x\n", 295 + __func__, apqn, vcpu->arch.sie_block->eca); 296 + 297 + return -EOPNOTSUPP; 298 + } 299 + 300 + mutex_lock(&matrix_dev->lock); 301 + if (!vcpu->kvm->arch.crypto.pqap_hook) { 302 + VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n", 303 + __func__, apqn); 407 304 goto out_unlock; 305 + } 
306 + 408 307 matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook, 409 308 struct ap_matrix_mdev, pqap_hook); 410 309 411 310 /* If the there is no guest using the mdev, there is nothing to do */ 412 - if (!matrix_mdev->kvm) 311 + if (!matrix_mdev->kvm) { 312 + vfio_ap_le_guid_to_be_uuid(&matrix_mdev->mdev->uuid, uuid); 313 + VFIO_AP_DBF_WARN("%s: mdev %08lx-%04lx-%04lx-%04lx-%04lx%08lx not in use: apqn=0x%04x\n", 314 + __func__, uuid[0], uuid[1], uuid[2], 315 + uuid[3], uuid[4], uuid[5], apqn); 413 316 goto out_unlock; 317 + } 414 318 415 319 q = vfio_ap_get_queue(matrix_mdev, apqn); 416 - if (!q) 320 + if (!q) { 321 + VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n", 322 + __func__, AP_QID_CARD(apqn), 323 + AP_QID_QUEUE(apqn)); 417 324 goto out_unlock; 325 + } 418 326 419 327 status = vcpu->run->s.regs.gprs[1]; 420 328 421 329 /* If IR bit(16) is set we enable the interrupt */ 422 330 if ((status >> (63 - 16)) & 0x01) 423 - qstatus = vfio_ap_irq_enable(q, status & 0x07, 424 - vcpu->run->s.regs.gprs[2]); 331 + qstatus = vfio_ap_irq_enable(q, status & 0x07, vcpu); 425 332 else 426 333 qstatus = vfio_ap_irq_disable(q); 427 334
+35 -18
drivers/s390/crypto/zcrypt_api.c
··· 671 671 spin_lock(&zcrypt_list_lock); 672 672 for_each_zcrypt_card(zc) { 673 673 /* Check for useable accelarator or CCA card */ 674 - if (!zc->online || !zc->card->config || 674 + if (!zc->online || !zc->card->config || zc->card->chkstop || 675 675 !(zc->card->functions & 0x18000000)) 676 676 continue; 677 677 /* Check for size limits */ ··· 692 692 for_each_zcrypt_queue(zq, zc) { 693 693 /* check if device is useable and eligible */ 694 694 if (!zq->online || !zq->ops->rsa_modexpo || 695 - !zq->queue->config) 695 + !zq->queue->config || zq->queue->chkstop) 696 696 continue; 697 697 /* check if device node has admission for this queue */ 698 698 if (!zcrypt_check_queue(perms, ··· 714 714 spin_unlock(&zcrypt_list_lock); 715 715 716 716 if (!pref_zq) { 717 + ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n", 718 + __func__); 717 719 rc = -ENODEV; 718 720 goto out; 719 721 } ··· 781 779 spin_lock(&zcrypt_list_lock); 782 780 for_each_zcrypt_card(zc) { 783 781 /* Check for useable accelarator or CCA card */ 784 - if (!zc->online || !zc->card->config || 782 + if (!zc->online || !zc->card->config || zc->card->chkstop || 785 783 !(zc->card->functions & 0x18000000)) 786 784 continue; 787 785 /* Check for size limits */ ··· 802 800 for_each_zcrypt_queue(zq, zc) { 803 801 /* check if device is useable and eligible */ 804 802 if (!zq->online || !zq->ops->rsa_modexpo_crt || 805 - !zq->queue->config) 803 + !zq->queue->config || zq->queue->chkstop) 806 804 continue; 807 805 /* check if device node has admission for this queue */ 808 806 if (!zcrypt_check_queue(perms, ··· 824 822 spin_unlock(&zcrypt_list_lock); 825 823 826 824 if (!pref_zq) { 825 + ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n", 826 + __func__); 827 827 rc = -ENODEV; 828 828 goto out; 829 829 } ··· 876 872 } 877 873 #endif 878 874 879 - rc = get_cprb_fc(userspace, xcRB, &ap_msg, &func_code, &domain); 875 + rc = prep_cca_ap_msg(userspace, xcRB, &ap_msg, &func_code, &domain); 880 876 if (rc) 881 
877 goto out; 882 878 ··· 895 891 spin_lock(&zcrypt_list_lock); 896 892 for_each_zcrypt_card(zc) { 897 893 /* Check for useable CCA card */ 898 - if (!zc->online || !zc->card->config || 894 + if (!zc->online || !zc->card->config || zc->card->chkstop || 899 895 !(zc->card->functions & 0x10000000)) 900 896 continue; 901 897 /* Check for user selected CCA card */ ··· 918 914 continue; 919 915 for_each_zcrypt_queue(zq, zc) { 920 916 /* check for device useable and eligible */ 921 - if (!zq->online || 922 - !zq->ops->send_cprb || 923 - !zq->queue->config || 917 + if (!zq->online || !zq->ops->send_cprb || 918 + !zq->queue->config || zq->queue->chkstop || 924 919 (tdom != AUTOSEL_DOM && 925 920 tdom != AP_QID_QUEUE(zq->queue->qid))) 926 921 continue; ··· 943 940 spin_unlock(&zcrypt_list_lock); 944 941 945 942 if (!pref_zq) { 943 + ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n", 944 + __func__, xcRB->user_defined, *domain); 946 945 rc = -ENODEV; 947 946 goto out; 948 947 } ··· 1021 1016 struct ep11_target_dev *targets; 1022 1017 unsigned short target_num; 1023 1018 unsigned int wgt = 0, pref_wgt = 0; 1024 - unsigned int func_code; 1019 + unsigned int func_code, domain; 1025 1020 struct ap_message ap_msg; 1026 1021 int cpen, qpen, qid = 0, rc = -ENODEV; 1027 1022 struct module *mod; ··· 1058 1053 } 1059 1054 } 1060 1055 1061 - rc = get_ep11cprb_fc(userspace, xcrb, &ap_msg, &func_code); 1056 + rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); 1062 1057 if (rc) 1063 1058 goto out_free; 1064 1059 ··· 1067 1062 spin_lock(&zcrypt_list_lock); 1068 1063 for_each_zcrypt_card(zc) { 1069 1064 /* Check for useable EP11 card */ 1070 - if (!zc->online || !zc->card->config || 1065 + if (!zc->online || !zc->card->config || zc->card->chkstop || 1071 1066 !(zc->card->functions & 0x04000000)) 1072 1067 continue; 1073 1068 /* Check for user selected EP11 card */ ··· 1090 1085 continue; 1091 1086 for_each_zcrypt_queue(zq, zc) { 1092 1087 /* check if device 
is useable and eligible */ 1093 - if (!zq->online || 1094 - !zq->ops->send_ep11_cprb || 1095 - !zq->queue->config || 1088 + if (!zq->online || !zq->ops->send_ep11_cprb || 1089 + !zq->queue->config || zq->queue->chkstop || 1096 1090 (targets && 1097 1091 !is_desired_ep11_queue(zq->queue->qid, 1098 1092 target_num, targets))) ··· 1116 1112 spin_unlock(&zcrypt_list_lock); 1117 1113 1118 1114 if (!pref_zq) { 1115 + if (targets && target_num == 1) { 1116 + ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n", 1117 + __func__, (int) targets->ap_id, 1118 + (int) targets->dom_id); 1119 + } else if (targets) { 1120 + ZCRYPT_DBF_DBG("%s no match for %d target addrs => ENODEV\n", 1121 + __func__, (int) target_num); 1122 + } else { 1123 + ZCRYPT_DBF_DBG("%s no match for address ff.ffff => ENODEV\n", 1124 + __func__); 1125 + } 1119 1126 rc = -ENODEV; 1120 1127 goto out_free; 1121 1128 } ··· 1171 1156 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); 1172 1157 1173 1158 ap_init_message(&ap_msg); 1174 - rc = get_rng_fc(&ap_msg, &func_code, &domain); 1159 + rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain); 1175 1160 if (rc) 1176 1161 goto out; 1177 1162 ··· 1180 1165 spin_lock(&zcrypt_list_lock); 1181 1166 for_each_zcrypt_card(zc) { 1182 1167 /* Check for useable CCA card */ 1183 - if (!zc->online || !zc->card->config || 1168 + if (!zc->online || !zc->card->config || zc->card->chkstop || 1184 1169 !(zc->card->functions & 0x10000000)) 1185 1170 continue; 1186 1171 /* get weight index of the card device */ ··· 1190 1175 for_each_zcrypt_queue(zq, zc) { 1191 1176 /* check if device is useable and eligible */ 1192 1177 if (!zq->online || !zq->ops->rng || 1193 - !zq->queue->config) 1178 + !zq->queue->config || zq->queue->chkstop) 1194 1179 continue; 1195 1180 if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt)) 1196 1181 continue; ··· 1203 1188 spin_unlock(&zcrypt_list_lock); 1204 1189 1205 1190 if (!pref_zq) { 1191 + ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n", 
1192 + __func__); 1206 1193 rc = -ENODEV; 1207 1194 goto out; 1208 1195 }
+53 -22
drivers/s390/crypto/zcrypt_cex4.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * Copyright IBM Corp. 2012, 2019 3 + * Copyright IBM Corp. 2012, 2022 4 4 * Author(s): Holger Dengler <hd@linux.vnet.ibm.com> 5 5 */ 6 6 ··· 36 36 #define CEX4_CLEANUP_TIME (900*HZ) 37 37 38 38 MODULE_AUTHOR("IBM Corporation"); 39 - MODULE_DESCRIPTION("CEX4/CEX5/CEX6/CEX7 Cryptographic Card device driver, " \ 40 - "Copyright IBM Corp. 2019"); 39 + MODULE_DESCRIPTION("CEX[45678] Cryptographic Card device driver, " \ 40 + "Copyright IBM Corp. 2022"); 41 41 MODULE_LICENSE("GPL"); 42 42 43 43 static struct ap_device_id zcrypt_cex4_card_ids[] = { ··· 48 48 { .dev_type = AP_DEVICE_TYPE_CEX6, 49 49 .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 50 50 { .dev_type = AP_DEVICE_TYPE_CEX7, 51 + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 52 + { .dev_type = AP_DEVICE_TYPE_CEX8, 51 53 .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 52 54 { /* end of list */ }, 53 55 }; ··· 64 62 { .dev_type = AP_DEVICE_TYPE_CEX6, 65 63 .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 66 64 { .dev_type = AP_DEVICE_TYPE_CEX7, 65 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 66 + { .dev_type = AP_DEVICE_TYPE_CEX8, 67 67 .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 68 68 { /* end of list */ }, 69 69 }; ··· 399 395 }; 400 396 401 397 /* 402 - * Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always 398 + * Probe function for CEX[45678] card device. It always 403 399 * accepts the AP device since the bus_match already checked 404 400 * the hardware type. 405 401 * @ap_dev: pointer to the AP device. 
··· 418 414 6, 9, 20, 17, 65, 438, 0, 0}; 419 415 static const int CEX7A_SPEED_IDX[NUM_OPS] = { 420 416 6, 8, 17, 15, 54, 362, 0, 0}; 417 + static const int CEX8A_SPEED_IDX[NUM_OPS] = { 418 + 6, 8, 17, 15, 54, 362, 0, 0}; 421 419 422 420 static const int CEX4C_SPEED_IDX[NUM_OPS] = { 423 421 59, 69, 308, 83, 278, 2204, 209, 40}; ··· 429 423 16, 20, 32, 27, 77, 455, 24, 9}; 430 424 static const int CEX7C_SPEED_IDX[NUM_OPS] = { 431 425 14, 16, 26, 23, 64, 376, 23, 8}; 426 + static const int CEX8C_SPEED_IDX[NUM_OPS] = { 427 + 14, 16, 26, 23, 64, 376, 23, 8}; 432 428 433 429 static const int CEX4P_SPEED_IDX[NUM_OPS] = { 434 430 0, 0, 0, 0, 0, 0, 0, 50}; ··· 439 431 static const int CEX6P_SPEED_IDX[NUM_OPS] = { 440 432 0, 0, 0, 0, 0, 0, 0, 9}; 441 433 static const int CEX7P_SPEED_IDX[NUM_OPS] = { 434 + 0, 0, 0, 0, 0, 0, 0, 8}; 435 + static const int CEX8P_SPEED_IDX[NUM_OPS] = { 442 436 0, 0, 0, 0, 0, 0, 0, 8}; 443 437 444 438 struct ap_card *ac = to_ap_card(&ap_dev->device); ··· 465 455 zc->type_string = "CEX6A"; 466 456 zc->user_space_type = ZCRYPT_CEX6; 467 457 zc->speed_rating = CEX6A_SPEED_IDX; 468 - } else { 458 + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX7) { 469 459 zc->type_string = "CEX7A"; 460 + zc->speed_rating = CEX7A_SPEED_IDX; 470 461 /* wrong user space type, just for compatibility 471 462 * with the ZCRYPT_STATUS_MASK ioctl. 472 463 */ 473 464 zc->user_space_type = ZCRYPT_CEX6; 474 - zc->speed_rating = CEX7A_SPEED_IDX; 465 + } else { 466 + zc->type_string = "CEX8A"; 467 + zc->speed_rating = CEX8A_SPEED_IDX; 468 + /* wrong user space type, just for compatibility 469 + * with the ZCRYPT_STATUS_MASK ioctl. 
470 + */ 471 + zc->user_space_type = ZCRYPT_CEX6; 475 472 } 476 473 zc->min_mod_size = CEX4A_MIN_MOD_SIZE; 477 474 if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) && ··· 494 477 } else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) { 495 478 if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { 496 479 zc->type_string = "CEX4C"; 497 - /* wrong user space type, must be CEX4 480 + zc->speed_rating = CEX4C_SPEED_IDX; 481 + /* wrong user space type, must be CEX3C 498 482 * just keep it for cca compatibility 499 483 */ 500 484 zc->user_space_type = ZCRYPT_CEX3C; 501 - zc->speed_rating = CEX4C_SPEED_IDX; 502 485 } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) { 503 486 zc->type_string = "CEX5C"; 504 - /* wrong user space type, must be CEX5 487 + zc->speed_rating = CEX5C_SPEED_IDX; 488 + /* wrong user space type, must be CEX3C 505 489 * just keep it for cca compatibility 506 490 */ 507 491 zc->user_space_type = ZCRYPT_CEX3C; 508 - zc->speed_rating = CEX5C_SPEED_IDX; 509 492 } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) { 510 493 zc->type_string = "CEX6C"; 511 - /* wrong user space type, must be CEX6 512 - * just keep it for cca compatibility 513 - */ 514 - zc->user_space_type = ZCRYPT_CEX3C; 515 494 zc->speed_rating = CEX6C_SPEED_IDX; 516 - } else { 517 - zc->type_string = "CEX7C"; 518 - /* wrong user space type, must be CEX7 495 + /* wrong user space type, must be CEX3C 519 496 * just keep it for cca compatibility 520 497 */ 521 498 zc->user_space_type = ZCRYPT_CEX3C; 499 + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX7) { 500 + zc->type_string = "CEX7C"; 522 501 zc->speed_rating = CEX7C_SPEED_IDX; 502 + /* wrong user space type, must be CEX3C 503 + * just keep it for cca compatibility 504 + */ 505 + zc->user_space_type = ZCRYPT_CEX3C; 506 + } else { 507 + zc->type_string = "CEX8C"; 508 + zc->speed_rating = CEX8C_SPEED_IDX; 509 + /* wrong user space type, must be CEX3C 510 + * just keep it for cca compatibility 511 + */ 512 + 
zc->user_space_type = ZCRYPT_CEX3C; 523 513 } 524 514 zc->min_mod_size = CEX4C_MIN_MOD_SIZE; 525 515 zc->max_mod_size = CEX4C_MAX_MOD_SIZE; ··· 544 520 zc->type_string = "CEX6P"; 545 521 zc->user_space_type = ZCRYPT_CEX6; 546 522 zc->speed_rating = CEX6P_SPEED_IDX; 547 - } else { 523 + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX7) { 548 524 zc->type_string = "CEX7P"; 525 + zc->speed_rating = CEX7P_SPEED_IDX; 549 526 /* wrong user space type, just for compatibility 550 527 * with the ZCRYPT_STATUS_MASK ioctl. 551 528 */ 552 529 zc->user_space_type = ZCRYPT_CEX6; 553 - zc->speed_rating = CEX7P_SPEED_IDX; 530 + } else { 531 + zc->type_string = "CEX8P"; 532 + zc->speed_rating = CEX8P_SPEED_IDX; 533 + /* wrong user space type, just for compatibility 534 + * with the ZCRYPT_STATUS_MASK ioctl. 535 + */ 536 + zc->user_space_type = ZCRYPT_CEX6; 554 537 } 555 538 zc->min_mod_size = CEX4C_MIN_MOD_SIZE; 556 539 zc->max_mod_size = CEX4C_MAX_MOD_SIZE; ··· 594 563 } 595 564 596 565 /* 597 - * This is called to remove the CEX4/CEX5/CEX6/CEX7 card driver 566 + * This is called to remove the CEX[45678] card driver 598 567 * information if an AP card device is removed. 599 568 */ 600 569 static void zcrypt_cex4_card_remove(struct ap_device *ap_dev) ··· 618 587 }; 619 588 620 589 /* 621 - * Probe function for CEX4/CEX5/CEX6/CEX7 queue device. It always 590 + * Probe function for CEX[45678] queue device. It always 622 591 * accepts the AP device since the bus_match already checked 623 592 * the hardware type. 624 593 * @ap_dev: pointer to the AP device. ··· 684 653 } 685 654 686 655 /* 687 - * This is called to remove the CEX4/CEX5/CEX6/CEX7 queue driver 656 + * This is called to remove the CEX[45678] queue driver 688 657 * information if an AP queue device is removed. 689 658 */ 690 659 static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
+10 -2
drivers/s390/crypto/zcrypt_msgtype50.c
··· 156 156 unsigned char reserved3[8]; 157 157 } __packed; 158 158 159 - unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode) 159 + int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode) 160 160 { 161 161 162 162 if (!mex->inputdatalength) ··· 172 172 return 0; 173 173 } 174 174 175 - unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode) 175 + int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode) 176 176 { 177 177 178 178 if (!crt->inputdatalength) ··· 497 497 ap_cancel_message(zq->queue, ap_msg); 498 498 out: 499 499 ap_msg->private = NULL; 500 + if (rc) 501 + ZCRYPT_DBF_DBG("%s send me cprb at dev=%02x.%04x rc=%d\n", 502 + __func__, AP_QID_CARD(zq->queue->qid), 503 + AP_QID_QUEUE(zq->queue->qid), rc); 500 504 return rc; 501 505 } 502 506 ··· 546 542 ap_cancel_message(zq->queue, ap_msg); 547 543 out: 548 544 ap_msg->private = NULL; 545 + if (rc) 546 + ZCRYPT_DBF_DBG("%s send crt cprb at dev=%02x.%04x rc=%d\n", 547 + __func__, AP_QID_CARD(zq->queue->qid), 548 + AP_QID_QUEUE(zq->queue->qid), rc); 549 549 return rc; 550 550 } 551 551
+2 -2
drivers/s390/crypto/zcrypt_msgtype50.h
··· 20 20 21 21 #define MSGTYPE_ADJUSTMENT 0x08 /* type04 extension (not needed in type50) */ 22 22 23 - unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *, int *); 24 - unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *, int *); 23 + int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fc); 24 + int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fc); 25 25 26 26 void zcrypt_msgtype50_init(void); 27 27 void zcrypt_msgtype50_exit(void);
+99 -20
drivers/s390/crypto/zcrypt_msgtype6.c
··· 472 472 *fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1]; 473 473 *dom = (unsigned short *)&msg->cprbx.domain; 474 474 475 + /* check subfunction, US and AU need special flag with NQAP */ 475 476 if (memcmp(function_code, "US", 2) == 0 476 477 || memcmp(function_code, "AU", 2) == 0) 477 478 ap_msg->flags |= AP_MSG_FLAG_SPECIAL; ··· 481 480 if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL) 482 481 ap_msg->flags ^= AP_MSG_FLAG_SPECIAL; 483 482 #endif 483 + 484 + /* check CPRB minor version, set info bits in ap_message flag field */ 485 + switch (*(unsigned short *)(&msg->cprbx.func_id[0])) { 486 + case 0x5432: /* "T2" */ 487 + ap_msg->flags |= AP_MSG_FLAG_USAGE; 488 + break; 489 + case 0x5433: /* "T3" */ 490 + case 0x5435: /* "T5" */ 491 + case 0x5436: /* "T6" */ 492 + case 0x5437: /* "T7" */ 493 + ap_msg->flags |= AP_MSG_FLAG_ADMIN; 494 + break; 495 + default: 496 + ZCRYPT_DBF_DBG("%s unknown CPRB minor version '%c%c'\n", 497 + __func__, msg->cprbx.func_id[0], 498 + msg->cprbx.func_id[1]); 499 + } 484 500 485 501 /* copy data block */ 486 502 if (xcRB->request_data_length && ··· 510 492 511 493 static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap_msg, 512 494 struct ep11_urb *xcRB, 513 - unsigned int *fcode) 495 + unsigned int *fcode, 496 + unsigned int *domain) 514 497 { 515 498 unsigned int lfmt; 516 499 static struct type6_hdr static_type6_ep11_hdr = { ··· 586 567 if (ap_msg->fi.flags & AP_FI_FLAG_TOGGLE_SPECIAL) 587 568 ap_msg->flags ^= AP_MSG_FLAG_SPECIAL; 588 569 #endif 570 + 571 + /* set info bits in ap_message flag field */ 572 + if (msg->cprbx.flags & 0x80) 573 + ap_msg->flags |= AP_MSG_FLAG_ADMIN; 574 + else 575 + ap_msg->flags |= AP_MSG_FLAG_USAGE; 576 + 577 + *domain = msg->cprbx.target_id; 589 578 590 579 return 0; 591 580 } ··· 741 714 char *data = reply->msg; 742 715 743 716 /* Copy CPRB to user */ 717 + if (xcRB->reply_control_blk_length < msg->fmt2.count1) { 718 + ZCRYPT_DBF_DBG("%s 
reply_control_blk_length %u < required %u => EMSGSIZE\n", 719 + __func__, xcRB->reply_control_blk_length, 720 + msg->fmt2.count1); 721 + return -EMSGSIZE; 722 + } 744 723 if (z_copy_to_user(userspace, xcRB->reply_control_blk_addr, 745 724 data + msg->fmt2.offset1, msg->fmt2.count1)) 746 725 return -EFAULT; 747 726 xcRB->reply_control_blk_length = msg->fmt2.count1; 748 727 749 728 /* Copy data buffer to user */ 750 - if (msg->fmt2.count2) 729 + if (msg->fmt2.count2) { 730 + if (xcRB->reply_data_length < msg->fmt2.count2) { 731 + ZCRYPT_DBF_DBG("%s reply_data_length %u < required %u => EMSGSIZE\n", 732 + __func__, xcRB->reply_data_length, 733 + msg->fmt2.count2); 734 + return -EMSGSIZE; 735 + } 751 736 if (z_copy_to_user(userspace, xcRB->reply_data_addr, 752 737 data + msg->fmt2.offset2, msg->fmt2.count2)) 753 738 return -EFAULT; 739 + } 754 740 xcRB->reply_data_length = msg->fmt2.count2; 741 + 755 742 return 0; 756 743 } 757 744 ··· 785 744 struct type86_fmt2_msg *msg = reply->msg; 786 745 char *data = reply->msg; 787 746 788 - if (xcRB->resp_len < msg->fmt2.count1) 789 - return -EINVAL; 747 + if (xcRB->resp_len < msg->fmt2.count1) { 748 + ZCRYPT_DBF_DBG("%s resp_len %u < required %u => EMSGSIZE\n", 749 + __func__, (unsigned int)xcRB->resp_len, 750 + msg->fmt2.count1); 751 + return -EMSGSIZE; 752 + } 790 753 791 754 /* Copy response CPRB to user */ 792 755 if (z_copy_to_user(userspace, (char __force __user *)xcRB->resp, ··· 1158 1113 } 1159 1114 1160 1115 /* 1161 - * Fetch function code from cprb. 1162 - * Extracting the fc requires to copy the cprb from userspace. 1163 - * So this function allocates memory and needs an ap_msg prepared 1116 + * Prepare a CCA AP msg request. 1117 + * Prepare a CCA AP msg: fetch the required data from userspace, 1118 + * prepare the AP msg, fill some info into the ap_message struct, 1119 + * extract some data from the CPRB and give back to the caller. 
1120 + * This function allocates memory and needs an ap_msg prepared 1164 1121 * by the caller with ap_init_message(). Also the caller has to 1165 1122 * make sure ap_release_message() is always called even on failure. 1166 1123 */ 1167 - unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *xcRB, 1168 - struct ap_message *ap_msg, 1169 - unsigned int *func_code, unsigned short **dom) 1124 + int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcRB, 1125 + struct ap_message *ap_msg, 1126 + unsigned int *func_code, unsigned short **dom) 1170 1127 { 1171 1128 struct response_type resp_type = { 1172 1129 .type = CEXXC_RESPONSE_TYPE_XCRB, ··· 1200 1153 { 1201 1154 int rc; 1202 1155 struct response_type *rtype = (struct response_type *)(ap_msg->private); 1156 + struct { 1157 + struct type6_hdr hdr; 1158 + struct CPRBX cprbx; 1159 + /* ... more data blocks ... */ 1160 + } __packed * msg = ap_msg->msg; 1161 + 1162 + /* 1163 + * Set the queue's reply buffer length minus 128 byte padding 1164 + * as reply limit for the card firmware. 1165 + */ 1166 + msg->hdr.FromCardLen1 = min_t(unsigned int, msg->hdr.FromCardLen1, 1167 + zq->reply.bufsize - 128); 1168 + if (msg->hdr.FromCardLen2) 1169 + msg->hdr.FromCardLen2 = 1170 + zq->reply.bufsize - msg->hdr.FromCardLen1 - 128; 1203 1171 1204 1172 init_completion(&rtype->work); 1205 1173 rc = ap_queue_message(zq->queue, ap_msg); ··· 1229 1167 /* Signal pending. */ 1230 1168 ap_cancel_message(zq->queue, ap_msg); 1231 1169 out: 1170 + if (rc) 1171 + ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n", 1172 + __func__, AP_QID_CARD(zq->queue->qid), 1173 + AP_QID_QUEUE(zq->queue->qid), rc); 1232 1174 return rc; 1233 1175 } 1234 1176 1235 1177 /* 1236 - * Fetch function code from ep11 cprb. 1237 - * Extracting the fc requires to copy the ep11 cprb from userspace. 1238 - * So this function allocates memory and needs an ap_msg prepared 1178 + * Prepare an EP11 AP msg request. 
1179 + * Prepare an EP11 AP msg: fetch the required data from userspace, 1180 + * prepare the AP msg, fill some info into the ap_message struct, 1181 + * extract some data from the CPRB and give back to the caller. 1182 + * This function allocates memory and needs an ap_msg prepared 1239 1183 * by the caller with ap_init_message(). Also the caller has to 1240 1184 * make sure ap_release_message() is always called even on failure. 1241 1185 */ 1242 - unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *xcrb, 1243 - struct ap_message *ap_msg, 1244 - unsigned int *func_code) 1186 + int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb, 1187 + struct ap_message *ap_msg, 1188 + unsigned int *func_code, unsigned int *domain) 1245 1189 { 1246 1190 struct response_type resp_type = { 1247 1191 .type = CEXXC_RESPONSE_TYPE_EP11, ··· 1263 1195 ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); 1264 1196 if (!ap_msg->private) 1265 1197 return -ENOMEM; 1266 - return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb, func_code); 1198 + return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb, 1199 + func_code, domain); 1267 1200 } 1268 1201 1269 1202 /* ··· 1295 1226 unsigned char dom_len; /* fixed value 0x4 */ 1296 1227 unsigned int dom_val; /* domain id */ 1297 1228 } __packed * payload_hdr = NULL; 1298 - 1299 1229 1300 1230 /* 1301 1231 * The target domain field within the cprb body/payload block will be ··· 1327 1259 AP_QID_QUEUE(zq->queue->qid); 1328 1260 } 1329 1261 1262 + /* 1263 + * Set the queue's reply buffer length minus the two prepend headers 1264 + * as reply limit for the card firmware. 1265 + */ 1266 + msg->hdr.FromCardLen1 = zq->reply.bufsize - 1267 + sizeof(struct type86_hdr) - sizeof(struct type86_fmt2_ext); 1268 + 1330 1269 init_completion(&rtype->work); 1331 1270 rc = ap_queue_message(zq->queue, ap_msg); 1332 1271 if (rc) ··· 1347 1272 /* Signal pending. 
*/ 1348 1273 ap_cancel_message(zq->queue, ap_msg); 1349 1274 out: 1275 + if (rc) 1276 + ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n", 1277 + __func__, AP_QID_CARD(zq->queue->qid), 1278 + AP_QID_QUEUE(zq->queue->qid), rc); 1350 1279 return rc; 1351 1280 } 1352 1281 1353 - unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code, 1354 - unsigned int *domain) 1282 + int prep_rng_ap_msg(struct ap_message *ap_msg, int *func_code, 1283 + unsigned int *domain) 1355 1284 { 1356 1285 struct response_type resp_type = { 1357 1286 .type = CEXXC_RESPONSE_TYPE_XCRB,
+8 -5
drivers/s390/crypto/zcrypt_msgtype6.h
··· 94 94 unsigned int offset4; /* 0x00000000 */ 95 95 } __packed; 96 96 97 - unsigned int get_cprb_fc(bool userspace, struct ica_xcRB *, struct ap_message *, 98 - unsigned int *, unsigned short **); 99 - unsigned int get_ep11cprb_fc(bool userspace, struct ep11_urb *, struct ap_message *, 100 - unsigned int *); 101 - unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *); 97 + int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcrb, 98 + struct ap_message *ap_msg, 99 + unsigned int *fc, unsigned short **dom); 100 + int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb, 101 + struct ap_message *ap_msg, 102 + unsigned int *fc, unsigned int *dom); 103 + int prep_rng_ap_msg(struct ap_message *ap_msg, 104 + int *fc, unsigned int *dom); 102 105 103 106 #define LOW 10 104 107 #define MEDIUM 100
+5
scripts/mod/modpost.c
··· 658 658 strstarts(symname, "_savevr_") || 659 659 strcmp(symname, ".TOC.") == 0) 660 660 return 1; 661 + 662 + if (info->hdr->e_machine == EM_S390) 663 + /* Expoline thunks are linked on all kernel modules during final link of .ko */ 664 + if (strstarts(symname, "__s390_indirect_jump_r")) 665 + return 1; 661 666 /* Do not ignore this symbol */ 662 667 return 0; 663 668 }
+1 -42
scripts/sorttable.c
··· 261 261 } 262 262 } 263 263 264 - static void s390_sort_relative_table(char *extab_image, int image_size) 265 - { 266 - int i; 267 - 268 - for (i = 0; i < image_size; i += 16) { 269 - char *loc = extab_image + i; 270 - uint64_t handler; 271 - 272 - w(r((uint32_t *)loc) + i, (uint32_t *)loc); 273 - w(r((uint32_t *)(loc + 4)) + (i + 4), (uint32_t *)(loc + 4)); 274 - /* 275 - * 0 is a special self-relative handler value, which means that 276 - * handler should be ignored. It is safe, because it means that 277 - * handler field points to itself, which should never happen. 278 - * When creating extable-relative values, keep it as 0, since 279 - * this should never occur either: it would mean that handler 280 - * field points to the first extable entry. 281 - */ 282 - handler = r8((uint64_t *)(loc + 8)); 283 - if (handler) 284 - handler += i + 8; 285 - w8(handler, (uint64_t *)(loc + 8)); 286 - } 287 - 288 - qsort(extab_image, image_size / 16, 16, compare_relative_table); 289 - 290 - for (i = 0; i < image_size; i += 16) { 291 - char *loc = extab_image + i; 292 - uint64_t handler; 293 - 294 - w(r((uint32_t *)loc) - i, (uint32_t *)loc); 295 - w(r((uint32_t *)(loc + 4)) - (i + 4), (uint32_t *)(loc + 4)); 296 - handler = r8((uint64_t *)(loc + 8)); 297 - if (handler) 298 - handler -= i + 8; 299 - w8(handler, (uint64_t *)(loc + 8)); 300 - } 301 - } 302 - 303 264 static int do_file(char const *const fname, void *addr) 304 265 { 305 266 int rc = -1; ··· 301 340 case EM_386: 302 341 case EM_AARCH64: 303 342 case EM_RISCV: 343 + case EM_S390: 304 344 case EM_X86_64: 305 345 custom_sort = sort_relative_table_with_data; 306 - break; 307 - case EM_S390: 308 - custom_sort = s390_sort_relative_table; 309 346 break; 310 347 case EM_PARISC: 311 348 case EM_PPC:
+1 -2
tools/perf/arch/s390/util/dwarf-regs.c
··· 3 3 * Mapping of DWARF debug register numbers into register names. 4 4 * 5 5 * Copyright IBM Corp. 2010, 2017 6 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 7 - * Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 6 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 8 7 * 9 8 */ 10 9