Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Martin Schwidefsky:

- a fix for the vfio ccw translation code

- update an incorrect email address in the MAINTAINERS file

- fix a division by zero oops in the cpum_sf code found by trinity

- two fixes for the error handling of the qdio code

- several Spectre-related patches to convert all left-over indirect
  branches in the kernel to expoline branches

- update defconfigs to avoid warnings due to the netfilter Kconfig
changes

- avoid several compiler warnings in the kexec_file code for s390

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/qdio: don't release memory in qdio_setup_irq()
s390/qdio: fix access to uninitialized qdio_q fields
s390/cpum_sf: ensure sample frequency of perf event attributes is non-zero
s390: use expoline thunks in the BPF JIT
s390: extend expoline to BC instructions
s390: remove indirect branch from do_softirq_own_stack
s390: move spectre sysfs attribute code
s390/kernel: use expoline for indirect branches
s390/ftrace: use expoline for indirect branches
s390/lib: use expoline for indirect branches
s390/crc32-vx: use expoline for indirect branches
s390: move expoline assembler macros to a header
vfio: ccw: fix cleanup if cp_prefetch fails
s390/kexec_file: add declaration of purgatory related globals
s390: update defconfigs
MAINTAINERS: update s390 zcrypt maintainers email address

+422 -167
+1 -1
MAINTAINERS
··· 12220 F: include/uapi/linux/vfio_ccw.h 12221 12222 S390 ZCRYPT DRIVER 12223 - M: Harald Freudenberger <freude@de.ibm.com> 12224 L: linux-s390@vger.kernel.org 12225 W: http://www.ibm.com/developerworks/linux/linux390/ 12226 S: Supported
··· 12220 F: include/uapi/linux/vfio_ccw.h 12221 12222 S390 ZCRYPT DRIVER 12223 + M: Harald Freudenberger <freude@linux.ibm.com> 12224 L: linux-s390@vger.kernel.org 12225 W: http://www.ibm.com/developerworks/linux/linux390/ 12226 S: Supported
+4 -5
arch/s390/configs/debug_defconfig
··· 261 CONFIG_IP_VS_FTP=m 262 CONFIG_IP_VS_PE_SIP=m 263 CONFIG_NF_CONNTRACK_IPV4=m 264 - CONFIG_NF_TABLES_IPV4=m 265 CONFIG_NFT_CHAIN_ROUTE_IPV4=m 266 - CONFIG_NF_TABLES_ARP=m 267 CONFIG_NFT_CHAIN_NAT_IPV4=m 268 CONFIG_IP_NF_IPTABLES=m 269 CONFIG_IP_NF_MATCH_AH=m ··· 284 CONFIG_IP_NF_ARPFILTER=m 285 CONFIG_IP_NF_ARP_MANGLE=m 286 CONFIG_NF_CONNTRACK_IPV6=m 287 - CONFIG_NF_TABLES_IPV6=m 288 CONFIG_NFT_CHAIN_ROUTE_IPV6=m 289 CONFIG_NFT_CHAIN_NAT_IPV6=m 290 CONFIG_IP6_NF_IPTABLES=m ··· 305 CONFIG_IP6_NF_SECURITY=m 306 CONFIG_IP6_NF_NAT=m 307 CONFIG_IP6_NF_TARGET_MASQUERADE=m 308 - CONFIG_NF_TABLES_BRIDGE=m 309 CONFIG_RDS=m 310 CONFIG_RDS_RDMA=m 311 CONFIG_RDS_TCP=m ··· 604 CONFIG_WQ_WATCHDOG=y 605 CONFIG_PANIC_ON_OOPS=y 606 CONFIG_DEBUG_TIMEKEEPING=y 607 - CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y 608 CONFIG_PROVE_LOCKING=y 609 CONFIG_LOCK_STAT=y 610 CONFIG_DEBUG_LOCKDEP=y
··· 261 CONFIG_IP_VS_FTP=m 262 CONFIG_IP_VS_PE_SIP=m 263 CONFIG_NF_CONNTRACK_IPV4=m 264 + CONFIG_NF_TABLES_IPV4=y 265 CONFIG_NFT_CHAIN_ROUTE_IPV4=m 266 + CONFIG_NF_TABLES_ARP=y 267 CONFIG_NFT_CHAIN_NAT_IPV4=m 268 CONFIG_IP_NF_IPTABLES=m 269 CONFIG_IP_NF_MATCH_AH=m ··· 284 CONFIG_IP_NF_ARPFILTER=m 285 CONFIG_IP_NF_ARP_MANGLE=m 286 CONFIG_NF_CONNTRACK_IPV6=m 287 + CONFIG_NF_TABLES_IPV6=y 288 CONFIG_NFT_CHAIN_ROUTE_IPV6=m 289 CONFIG_NFT_CHAIN_NAT_IPV6=m 290 CONFIG_IP6_NF_IPTABLES=m ··· 305 CONFIG_IP6_NF_SECURITY=m 306 CONFIG_IP6_NF_NAT=m 307 CONFIG_IP6_NF_TARGET_MASQUERADE=m 308 + CONFIG_NF_TABLES_BRIDGE=y 309 CONFIG_RDS=m 310 CONFIG_RDS_RDMA=m 311 CONFIG_RDS_TCP=m ··· 604 CONFIG_WQ_WATCHDOG=y 605 CONFIG_PANIC_ON_OOPS=y 606 CONFIG_DEBUG_TIMEKEEPING=y 607 CONFIG_PROVE_LOCKING=y 608 CONFIG_LOCK_STAT=y 609 CONFIG_DEBUG_LOCKDEP=y
+4 -4
arch/s390/configs/performance_defconfig
··· 259 CONFIG_IP_VS_FTP=m 260 CONFIG_IP_VS_PE_SIP=m 261 CONFIG_NF_CONNTRACK_IPV4=m 262 - CONFIG_NF_TABLES_IPV4=m 263 CONFIG_NFT_CHAIN_ROUTE_IPV4=m 264 - CONFIG_NF_TABLES_ARP=m 265 CONFIG_NFT_CHAIN_NAT_IPV4=m 266 CONFIG_IP_NF_IPTABLES=m 267 CONFIG_IP_NF_MATCH_AH=m ··· 282 CONFIG_IP_NF_ARPFILTER=m 283 CONFIG_IP_NF_ARP_MANGLE=m 284 CONFIG_NF_CONNTRACK_IPV6=m 285 - CONFIG_NF_TABLES_IPV6=m 286 CONFIG_NFT_CHAIN_ROUTE_IPV6=m 287 CONFIG_NFT_CHAIN_NAT_IPV6=m 288 CONFIG_IP6_NF_IPTABLES=m ··· 303 CONFIG_IP6_NF_SECURITY=m 304 CONFIG_IP6_NF_NAT=m 305 CONFIG_IP6_NF_TARGET_MASQUERADE=m 306 - CONFIG_NF_TABLES_BRIDGE=m 307 CONFIG_RDS=m 308 CONFIG_RDS_RDMA=m 309 CONFIG_RDS_TCP=m
··· 259 CONFIG_IP_VS_FTP=m 260 CONFIG_IP_VS_PE_SIP=m 261 CONFIG_NF_CONNTRACK_IPV4=m 262 + CONFIG_NF_TABLES_IPV4=y 263 CONFIG_NFT_CHAIN_ROUTE_IPV4=m 264 + CONFIG_NF_TABLES_ARP=y 265 CONFIG_NFT_CHAIN_NAT_IPV4=m 266 CONFIG_IP_NF_IPTABLES=m 267 CONFIG_IP_NF_MATCH_AH=m ··· 282 CONFIG_IP_NF_ARPFILTER=m 283 CONFIG_IP_NF_ARP_MANGLE=m 284 CONFIG_NF_CONNTRACK_IPV6=m 285 + CONFIG_NF_TABLES_IPV6=y 286 CONFIG_NFT_CHAIN_ROUTE_IPV6=m 287 CONFIG_NFT_CHAIN_NAT_IPV6=m 288 CONFIG_IP6_NF_IPTABLES=m ··· 303 CONFIG_IP6_NF_SECURITY=m 304 CONFIG_IP6_NF_NAT=m 305 CONFIG_IP6_NF_TARGET_MASQUERADE=m 306 + CONFIG_NF_TABLES_BRIDGE=y 307 CONFIG_RDS=m 308 CONFIG_RDS_RDMA=m 309 CONFIG_RDS_TCP=m
+4 -1
arch/s390/crypto/crc32be-vx.S
··· 13 */ 14 15 #include <linux/linkage.h> 16 #include <asm/vx-insn.h> 17 18 /* Vector register range containing CRC-32 constants */ ··· 67 .quad 0x104C11DB7, 0 # P(x) 68 69 .previous 70 71 .text 72 /* ··· 206 207 .Ldone: 208 VLGVF %r2,%v2,3 209 - br %r14 210 211 .previous
··· 13 */ 14 15 #include <linux/linkage.h> 16 + #include <asm/nospec-insn.h> 17 #include <asm/vx-insn.h> 18 19 /* Vector register range containing CRC-32 constants */ ··· 66 .quad 0x104C11DB7, 0 # P(x) 67 68 .previous 69 + 70 + GEN_BR_THUNK %r14 71 72 .text 73 /* ··· 203 204 .Ldone: 205 VLGVF %r2,%v2,3 206 + BR_EX %r14 207 208 .previous
+3 -1
arch/s390/crypto/crc32le-vx.S
··· 14 */ 15 16 #include <linux/linkage.h> 17 #include <asm/vx-insn.h> 18 19 /* Vector register range containing CRC-32 constants */ ··· 77 78 .previous 79 80 81 .text 82 ··· 266 267 .Ldone: 268 VLGVF %r2,%v2,2 269 - br %r14 270 271 .previous
··· 14 */ 15 16 #include <linux/linkage.h> 17 + #include <asm/nospec-insn.h> 18 #include <asm/vx-insn.h> 19 20 /* Vector register range containing CRC-32 constants */ ··· 76 77 .previous 78 79 + GEN_BR_THUNK %r14 80 81 .text 82 ··· 264 265 .Ldone: 266 VLGVF %r2,%v2,2 267 + BR_EX %r14 268 269 .previous
+196
arch/s390/include/asm/nospec-insn.h
···
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _ASM_S390_NOSPEC_ASM_H 3 + #define _ASM_S390_NOSPEC_ASM_H 4 + 5 + #include <asm/alternative-asm.h> 6 + #include <asm/asm-offsets.h> 7 + #include <asm/dwarf.h> 8 + 9 + #ifdef __ASSEMBLY__ 10 + 11 + #ifdef CONFIG_EXPOLINE 12 + 13 + _LC_BR_R1 = __LC_BR_R1 14 + 15 + /* 16 + * The expoline macros are used to create thunks in the same format 17 + * as gcc generates them. The 'comdat' section flag makes sure that 18 + * the various thunks are merged into a single copy. 19 + */ 20 + .macro __THUNK_PROLOG_NAME name 21 + .pushsection .text.\name,"axG",@progbits,\name,comdat 22 + .globl \name 23 + .hidden \name 24 + .type \name,@function 25 + \name: 26 + CFI_STARTPROC 27 + .endm 28 + 29 + .macro __THUNK_EPILOG 30 + CFI_ENDPROC 31 + .popsection 32 + .endm 33 + 34 + .macro __THUNK_PROLOG_BR r1,r2 35 + __THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1 36 + .endm 37 + 38 + .macro __THUNK_PROLOG_BC d0,r1,r2 39 + __THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1 40 + .endm 41 + 42 + .macro __THUNK_BR r1,r2 43 + jg __s390x_indirect_jump_r\r2\()use_r\r1 44 + .endm 45 + 46 + .macro __THUNK_BC d0,r1,r2 47 + jg __s390x_indirect_branch_\d0\()_\r2\()use_\r1 48 + .endm 49 + 50 + .macro __THUNK_BRASL r1,r2,r3 51 + brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2 52 + .endm 53 + 54 + .macro __DECODE_RR expand,reg,ruse 55 + .set __decode_fail,1 56 + .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 57 + .ifc \reg,%r\r1 58 + .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 59 + .ifc \ruse,%r\r2 60 + \expand \r1,\r2 61 + .set __decode_fail,0 62 + .endif 63 + .endr 64 + .endif 65 + .endr 66 + .if __decode_fail == 1 67 + .error "__DECODE_RR failed" 68 + .endif 69 + .endm 70 + 71 + .macro __DECODE_RRR expand,rsave,rtarget,ruse 72 + .set __decode_fail,1 73 + .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 74 + .ifc \rsave,%r\r1 75 + .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 76 + .ifc \rtarget,%r\r2 77 + .irp 
r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 78 + .ifc \ruse,%r\r3 79 + \expand \r1,\r2,\r3 80 + .set __decode_fail,0 81 + .endif 82 + .endr 83 + .endif 84 + .endr 85 + .endif 86 + .endr 87 + .if __decode_fail == 1 88 + .error "__DECODE_RRR failed" 89 + .endif 90 + .endm 91 + 92 + .macro __DECODE_DRR expand,disp,reg,ruse 93 + .set __decode_fail,1 94 + .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 95 + .ifc \reg,%r\r1 96 + .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 97 + .ifc \ruse,%r\r2 98 + \expand \disp,\r1,\r2 99 + .set __decode_fail,0 100 + .endif 101 + .endr 102 + .endif 103 + .endr 104 + .if __decode_fail == 1 105 + .error "__DECODE_DRR failed" 106 + .endif 107 + .endm 108 + 109 + .macro __THUNK_EX_BR reg,ruse 110 + # Be very careful when adding instructions to this macro! 111 + # The ALTERNATIVE replacement code has a .+10 which targets 112 + # the "br \reg" after the code has been patched. 113 + #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES 114 + exrl 0,555f 115 + j . 116 + #else 117 + .ifc \reg,%r1 118 + ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35 119 + j . 120 + .else 121 + larl \ruse,555f 122 + ex 0,0(\ruse) 123 + j . 124 + .endif 125 + #endif 126 + 555: br \reg 127 + .endm 128 + 129 + .macro __THUNK_EX_BC disp,reg,ruse 130 + #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES 131 + exrl 0,556f 132 + j . 133 + #else 134 + larl \ruse,556f 135 + ex 0,0(\ruse) 136 + j . 137 + #endif 138 + 556: b \disp(\reg) 139 + .endm 140 + 141 + .macro GEN_BR_THUNK reg,ruse=%r1 142 + __DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse 143 + __THUNK_EX_BR \reg,\ruse 144 + __THUNK_EPILOG 145 + .endm 146 + 147 + .macro GEN_B_THUNK disp,reg,ruse=%r1 148 + __DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse 149 + __THUNK_EX_BC \disp,\reg,\ruse 150 + __THUNK_EPILOG 151 + .endm 152 + 153 + .macro BR_EX reg,ruse=%r1 154 + 557: __DECODE_RR __THUNK_BR,\reg,\ruse 155 + .pushsection .s390_indirect_branches,"a",@progbits 156 + .long 557b-. 
157 + .popsection 158 + .endm 159 + 160 + .macro B_EX disp,reg,ruse=%r1 161 + 558: __DECODE_DRR __THUNK_BC,\disp,\reg,\ruse 162 + .pushsection .s390_indirect_branches,"a",@progbits 163 + .long 558b-. 164 + .popsection 165 + .endm 166 + 167 + .macro BASR_EX rsave,rtarget,ruse=%r1 168 + 559: __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse 169 + .pushsection .s390_indirect_branches,"a",@progbits 170 + .long 559b-. 171 + .popsection 172 + .endm 173 + 174 + #else 175 + .macro GEN_BR_THUNK reg,ruse=%r1 176 + .endm 177 + 178 + .macro GEN_B_THUNK disp,reg,ruse=%r1 179 + .endm 180 + 181 + .macro BR_EX reg,ruse=%r1 182 + br \reg 183 + .endm 184 + 185 + .macro B_EX disp,reg,ruse=%r1 186 + b \disp(\reg) 187 + .endm 188 + 189 + .macro BASR_EX rsave,rtarget,ruse=%r1 190 + basr \rsave,\rtarget 191 + .endm 192 + #endif 193 + 194 + #endif /* __ASSEMBLY__ */ 195 + 196 + #endif /* _ASM_S390_NOSPEC_ASM_H */
+6
arch/s390/include/asm/purgatory.h
··· 13 14 int verify_sha256_digest(void); 15 16 #endif /* __ASSEMBLY__ */ 17 #endif /* _S390_PURGATORY_H_ */
··· 13 14 int verify_sha256_digest(void); 15 16 + extern u64 kernel_entry; 17 + extern u64 kernel_type; 18 + 19 + extern u64 crash_start; 20 + extern u64 crash_size; 21 + 22 #endif /* __ASSEMBLY__ */ 23 #endif /* _S390_PURGATORY_H_ */
+1
arch/s390/kernel/Makefile
··· 65 66 extra-y += head.o head64.o vmlinux.lds 67 68 CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE) 69 70 obj-$(CONFIG_MODULES) += module.o
··· 65 66 extra-y += head.o head64.o vmlinux.lds 67 68 + obj-$(CONFIG_SYSFS) += nospec-sysfs.o 69 CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE) 70 71 obj-$(CONFIG_MODULES) += module.o
+1
arch/s390/kernel/asm-offsets.c
··· 181 OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags); 182 OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count); 183 OFFSET(__LC_GMAP, lowcore, gmap); 184 /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ 185 OFFSET(__LC_DUMP_REIPL, lowcore, ipib); 186 /* hardware defined lowcore locations 0x1000 - 0x18ff */
··· 181 OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags); 182 OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count); 183 OFFSET(__LC_GMAP, lowcore, gmap); 184 + OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline); 185 /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ 186 OFFSET(__LC_DUMP_REIPL, lowcore, ipib); 187 /* hardware defined lowcore locations 0x1000 - 0x18ff */
+14 -10
arch/s390/kernel/base.S
··· 9 10 #include <linux/linkage.h> 11 #include <asm/asm-offsets.h> 12 #include <asm/ptrace.h> 13 #include <asm/sigp.h> 14 15 ENTRY(s390_base_mcck_handler) 16 basr %r13,0 17 0: lg %r15,__LC_PANIC_STACK # load panic stack 18 aghi %r15,-STACK_FRAME_OVERHEAD 19 larl %r1,s390_base_mcck_handler_fn 20 - lg %r1,0(%r1) 21 - ltgr %r1,%r1 22 jz 1f 23 - basr %r14,%r1 24 1: la %r1,4095 25 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) 26 lpswe __LC_MCK_OLD_PSW ··· 41 basr %r13,0 42 0: aghi %r15,-STACK_FRAME_OVERHEAD 43 larl %r1,s390_base_ext_handler_fn 44 - lg %r1,0(%r1) 45 - ltgr %r1,%r1 46 jz 1f 47 - basr %r14,%r1 48 1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC 49 ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit 50 lpswe __LC_EXT_OLD_PSW ··· 61 basr %r13,0 62 0: aghi %r15,-STACK_FRAME_OVERHEAD 63 larl %r1,s390_base_pgm_handler_fn 64 - lg %r1,0(%r1) 65 - ltgr %r1,%r1 66 jz 1f 67 - basr %r14,%r1 68 lmg %r0,%r15,__LC_SAVE_AREA_SYNC 69 lpswe __LC_PGM_OLD_PSW 70 1: lpswe disabled_wait_psw-0b(%r13) ··· 121 larl %r4,.Lcontinue_psw # Restore PSW flags 122 lpswe 0(%r4) 123 .Lcontinue: 124 - br %r14 125 .align 16 126 .Lrestart_psw: 127 .long 0x00080000,0x80000000 + .Lrestart_part2
··· 9 10 #include <linux/linkage.h> 11 #include <asm/asm-offsets.h> 12 + #include <asm/nospec-insn.h> 13 #include <asm/ptrace.h> 14 #include <asm/sigp.h> 15 + 16 + GEN_BR_THUNK %r9 17 + GEN_BR_THUNK %r14 18 19 ENTRY(s390_base_mcck_handler) 20 basr %r13,0 21 0: lg %r15,__LC_PANIC_STACK # load panic stack 22 aghi %r15,-STACK_FRAME_OVERHEAD 23 larl %r1,s390_base_mcck_handler_fn 24 + lg %r9,0(%r1) 25 + ltgr %r9,%r9 26 jz 1f 27 + BASR_EX %r14,%r9 28 1: la %r1,4095 29 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) 30 lpswe __LC_MCK_OLD_PSW ··· 37 basr %r13,0 38 0: aghi %r15,-STACK_FRAME_OVERHEAD 39 larl %r1,s390_base_ext_handler_fn 40 + lg %r9,0(%r1) 41 + ltgr %r9,%r9 42 jz 1f 43 + BASR_EX %r14,%r9 44 1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC 45 ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit 46 lpswe __LC_EXT_OLD_PSW ··· 57 basr %r13,0 58 0: aghi %r15,-STACK_FRAME_OVERHEAD 59 larl %r1,s390_base_pgm_handler_fn 60 + lg %r9,0(%r1) 61 + ltgr %r9,%r9 62 jz 1f 63 + BASR_EX %r14,%r9 64 lmg %r0,%r15,__LC_SAVE_AREA_SYNC 65 lpswe __LC_PGM_OLD_PSW 66 1: lpswe disabled_wait_psw-0b(%r13) ··· 117 larl %r4,.Lcontinue_psw # Restore PSW flags 118 lpswe 0(%r4) 119 .Lcontinue: 120 + BR_EX %r14 121 .align 16 122 .Lrestart_psw: 123 .long 0x00080000,0x80000000 + .Lrestart_part2
+24 -81
arch/s390/kernel/entry.S
··· 28 #include <asm/setup.h> 29 #include <asm/nmi.h> 30 #include <asm/export.h> 31 32 __PT_R0 = __PT_GPRS 33 __PT_R1 = __PT_GPRS + 8 ··· 184 "jnz .+8; .long 0xb2e8d000", 82 185 .endm 186 187 - #ifdef CONFIG_EXPOLINE 188 - 189 - .macro GEN_BR_THUNK name,reg,tmp 190 - .section .text.\name,"axG",@progbits,\name,comdat 191 - .globl \name 192 - .hidden \name 193 - .type \name,@function 194 - \name: 195 - CFI_STARTPROC 196 - #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES 197 - exrl 0,0f 198 - #else 199 - larl \tmp,0f 200 - ex 0,0(\tmp) 201 - #endif 202 - j . 203 - 0: br \reg 204 - CFI_ENDPROC 205 - .endm 206 - 207 - GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1 208 - GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1 209 - GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11 210 - 211 - .macro BASR_R14_R9 212 - 0: brasl %r14,__s390x_indirect_jump_r1use_r9 213 - .pushsection .s390_indirect_branches,"a",@progbits 214 - .long 0b-. 215 - .popsection 216 - .endm 217 - 218 - .macro BR_R1USE_R14 219 - 0: jg __s390x_indirect_jump_r1use_r14 220 - .pushsection .s390_indirect_branches,"a",@progbits 221 - .long 0b-. 222 - .popsection 223 - .endm 224 - 225 - .macro BR_R11USE_R14 226 - 0: jg __s390x_indirect_jump_r11use_r14 227 - .pushsection .s390_indirect_branches,"a",@progbits 228 - .long 0b-. 
229 - .popsection 230 - .endm 231 - 232 - #else /* CONFIG_EXPOLINE */ 233 - 234 - .macro BASR_R14_R9 235 - basr %r14,%r9 236 - .endm 237 - 238 - .macro BR_R1USE_R14 239 - br %r14 240 - .endm 241 - 242 - .macro BR_R11USE_R14 243 - br %r14 244 - .endm 245 - 246 - #endif /* CONFIG_EXPOLINE */ 247 - 248 249 .section .kprobes.text, "ax" 250 .Ldummy: ··· 203 ENTRY(__bpon) 204 .globl __bpon 205 BPON 206 - BR_R1USE_R14 207 208 /* 209 * Scheduler resume function, called by switch_to ··· 227 mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next 228 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 229 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 230 - BR_R1USE_R14 231 232 .L__critical_start: 233 ··· 294 xgr %r5,%r5 295 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers 296 lg %r2,__SF_SIE_REASON(%r15) # return exit reason code 297 - BR_R1USE_R14 298 .Lsie_fault: 299 lghi %r14,-EFAULT 300 stg %r14,__SF_SIE_REASON(%r15) # set exit reason code ··· 353 lgf %r9,0(%r8,%r10) # get system call add. 354 TSTMSK __TI_flags(%r12),_TIF_TRACE 355 jnz .Lsysc_tracesys 356 - BASR_R14_R9 # call sys_xxxx 357 stg %r2,__PT_R2(%r11) # store return value 358 359 .Lsysc_return: ··· 538 lmg %r3,%r7,__PT_R3(%r11) 539 stg %r7,STACK_FRAME_OVERHEAD(%r15) 540 lg %r2,__PT_ORIG_GPR2(%r11) 541 - BASR_R14_R9 # call sys_xxx 542 stg %r2,__PT_R2(%r11) # store return value 543 .Lsysc_tracenogo: 544 TSTMSK __TI_flags(%r12),_TIF_TRACE ··· 562 lmg %r9,%r10,__PT_R9(%r11) # load gprs 563 ENTRY(kernel_thread_starter) 564 la %r2,0(%r10) 565 - BASR_R14_R9 566 j .Lsysc_tracenogo 567 568 /* ··· 644 je .Lpgm_return 645 lgf %r9,0(%r10,%r1) # load address of handler routine 646 lgr %r2,%r11 # pass pointer to pt_regs 647 - BASR_R14_R9 # branch to interrupt-handler 648 .Lpgm_return: 649 LOCKDEP_SYS_EXIT 650 tm __PT_PSW+1(%r11),0x01 # returning to user ? 
··· 962 stpt __TIMER_IDLE_ENTER(%r2) 963 .Lpsw_idle_lpsw: 964 lpswe __SF_EMPTY(%r15) 965 - BR_R1USE_R14 966 .Lpsw_idle_end: 967 968 /* ··· 1004 .Lsave_fpu_regs_done: 1005 oi __LC_CPU_FLAGS+7,_CIF_FPU 1006 .Lsave_fpu_regs_exit: 1007 - BR_R1USE_R14 1008 .Lsave_fpu_regs_end: 1009 EXPORT_SYMBOL(save_fpu_regs) 1010 ··· 1050 .Lload_fpu_regs_done: 1051 ni __LC_CPU_FLAGS+7,255-_CIF_FPU 1052 .Lload_fpu_regs_exit: 1053 - BR_R1USE_R14 1054 .Lload_fpu_regs_end: 1055 1056 .L__critical_end: ··· 1265 jl 0f 1266 clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end 1267 jl .Lcleanup_load_fpu_regs 1268 - 0: BR_R11USE_R14 1269 1270 .align 8 1271 .Lcleanup_table: ··· 1301 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE 1302 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 1303 larl %r9,sie_exit # skip forward to sie_exit 1304 - BR_R11USE_R14 1305 #endif 1306 1307 .Lcleanup_system_call: ··· 1355 stg %r15,56(%r11) # r15 stack pointer 1356 # set new psw address and exit 1357 larl %r9,.Lsysc_do_svc 1358 - BR_R11USE_R14 1359 .Lcleanup_system_call_insn: 1360 .quad system_call 1361 .quad .Lsysc_stmg ··· 1367 1368 .Lcleanup_sysc_tif: 1369 larl %r9,.Lsysc_tif 1370 - BR_R11USE_R14 1371 1372 .Lcleanup_sysc_restore: 1373 # check if stpt has been executed ··· 1384 mvc 0(64,%r11),__PT_R8(%r9) 1385 lmg %r0,%r7,__PT_R0(%r9) 1386 1: lmg %r8,%r9,__LC_RETURN_PSW 1387 - BR_R11USE_R14 1388 .Lcleanup_sysc_restore_insn: 1389 .quad .Lsysc_exit_timer 1390 .quad .Lsysc_done - 4 1391 1392 .Lcleanup_io_tif: 1393 larl %r9,.Lio_tif 1394 - BR_R11USE_R14 1395 1396 .Lcleanup_io_restore: 1397 # check if stpt has been executed ··· 1405 mvc 0(64,%r11),__PT_R8(%r9) 1406 lmg %r0,%r7,__PT_R0(%r9) 1407 1: lmg %r8,%r9,__LC_RETURN_PSW 1408 - BR_R11USE_R14 1409 .Lcleanup_io_restore_insn: 1410 .quad .Lio_exit_timer 1411 .quad .Lio_done - 4 ··· 1458 # prepare return psw 1459 nihh %r8,0xfcfd # clear irq & wait state bits 1460 lg %r9,48(%r11) # return from psw_idle 1461 - BR_R11USE_R14 1462 .Lcleanup_idle_insn: 1463 .quad 
.Lpsw_idle_lpsw 1464 1465 .Lcleanup_save_fpu_regs: 1466 larl %r9,save_fpu_regs 1467 - BR_R11USE_R14 1468 1469 .Lcleanup_load_fpu_regs: 1470 larl %r9,load_fpu_regs 1471 - BR_R11USE_R14 1472 1473 /* 1474 * Integer constants
··· 28 #include <asm/setup.h> 29 #include <asm/nmi.h> 30 #include <asm/export.h> 31 + #include <asm/nospec-insn.h> 32 33 __PT_R0 = __PT_GPRS 34 __PT_R1 = __PT_GPRS + 8 ··· 183 "jnz .+8; .long 0xb2e8d000", 82 184 .endm 185 186 + GEN_BR_THUNK %r9 187 + GEN_BR_THUNK %r14 188 + GEN_BR_THUNK %r14,%r11 189 190 .section .kprobes.text, "ax" 191 .Ldummy: ··· 260 ENTRY(__bpon) 261 .globl __bpon 262 BPON 263 + BR_EX %r14 264 265 /* 266 * Scheduler resume function, called by switch_to ··· 284 mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next 285 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 286 ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40 287 + BR_EX %r14 288 289 .L__critical_start: 290 ··· 351 xgr %r5,%r5 352 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers 353 lg %r2,__SF_SIE_REASON(%r15) # return exit reason code 354 + BR_EX %r14 355 .Lsie_fault: 356 lghi %r14,-EFAULT 357 stg %r14,__SF_SIE_REASON(%r15) # set exit reason code ··· 410 lgf %r9,0(%r8,%r10) # get system call add. 411 TSTMSK __TI_flags(%r12),_TIF_TRACE 412 jnz .Lsysc_tracesys 413 + BASR_EX %r14,%r9 # call sys_xxxx 414 stg %r2,__PT_R2(%r11) # store return value 415 416 .Lsysc_return: ··· 595 lmg %r3,%r7,__PT_R3(%r11) 596 stg %r7,STACK_FRAME_OVERHEAD(%r15) 597 lg %r2,__PT_ORIG_GPR2(%r11) 598 + BASR_EX %r14,%r9 # call sys_xxx 599 stg %r2,__PT_R2(%r11) # store return value 600 .Lsysc_tracenogo: 601 TSTMSK __TI_flags(%r12),_TIF_TRACE ··· 619 lmg %r9,%r10,__PT_R9(%r11) # load gprs 620 ENTRY(kernel_thread_starter) 621 la %r2,0(%r10) 622 + BASR_EX %r14,%r9 623 j .Lsysc_tracenogo 624 625 /* ··· 701 je .Lpgm_return 702 lgf %r9,0(%r10,%r1) # load address of handler routine 703 lgr %r2,%r11 # pass pointer to pt_regs 704 + BASR_EX %r14,%r9 # branch to interrupt-handler 705 .Lpgm_return: 706 LOCKDEP_SYS_EXIT 707 tm __PT_PSW+1(%r11),0x01 # returning to user ? 
··· 1019 stpt __TIMER_IDLE_ENTER(%r2) 1020 .Lpsw_idle_lpsw: 1021 lpswe __SF_EMPTY(%r15) 1022 + BR_EX %r14 1023 .Lpsw_idle_end: 1024 1025 /* ··· 1061 .Lsave_fpu_regs_done: 1062 oi __LC_CPU_FLAGS+7,_CIF_FPU 1063 .Lsave_fpu_regs_exit: 1064 + BR_EX %r14 1065 .Lsave_fpu_regs_end: 1066 EXPORT_SYMBOL(save_fpu_regs) 1067 ··· 1107 .Lload_fpu_regs_done: 1108 ni __LC_CPU_FLAGS+7,255-_CIF_FPU 1109 .Lload_fpu_regs_exit: 1110 + BR_EX %r14 1111 .Lload_fpu_regs_end: 1112 1113 .L__critical_end: ··· 1322 jl 0f 1323 clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end 1324 jl .Lcleanup_load_fpu_regs 1325 + 0: BR_EX %r14 1326 1327 .align 8 1328 .Lcleanup_table: ··· 1358 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE 1359 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 1360 larl %r9,sie_exit # skip forward to sie_exit 1361 + BR_EX %r14 1362 #endif 1363 1364 .Lcleanup_system_call: ··· 1412 stg %r15,56(%r11) # r15 stack pointer 1413 # set new psw address and exit 1414 larl %r9,.Lsysc_do_svc 1415 + BR_EX %r14,%r11 1416 .Lcleanup_system_call_insn: 1417 .quad system_call 1418 .quad .Lsysc_stmg ··· 1424 1425 .Lcleanup_sysc_tif: 1426 larl %r9,.Lsysc_tif 1427 + BR_EX %r14,%r11 1428 1429 .Lcleanup_sysc_restore: 1430 # check if stpt has been executed ··· 1441 mvc 0(64,%r11),__PT_R8(%r9) 1442 lmg %r0,%r7,__PT_R0(%r9) 1443 1: lmg %r8,%r9,__LC_RETURN_PSW 1444 + BR_EX %r14,%r11 1445 .Lcleanup_sysc_restore_insn: 1446 .quad .Lsysc_exit_timer 1447 .quad .Lsysc_done - 4 1448 1449 .Lcleanup_io_tif: 1450 larl %r9,.Lio_tif 1451 + BR_EX %r14,%r11 1452 1453 .Lcleanup_io_restore: 1454 # check if stpt has been executed ··· 1462 mvc 0(64,%r11),__PT_R8(%r9) 1463 lmg %r0,%r7,__PT_R0(%r9) 1464 1: lmg %r8,%r9,__LC_RETURN_PSW 1465 + BR_EX %r14,%r11 1466 .Lcleanup_io_restore_insn: 1467 .quad .Lio_exit_timer 1468 .quad .Lio_done - 4 ··· 1515 # prepare return psw 1516 nihh %r8,0xfcfd # clear irq & wait state bits 1517 lg %r9,48(%r11) # return from psw_idle 1518 + BR_EX %r14,%r11 1519 .Lcleanup_idle_insn: 1520 
.quad .Lpsw_idle_lpsw 1521 1522 .Lcleanup_save_fpu_regs: 1523 larl %r9,save_fpu_regs 1524 + BR_EX %r14,%r11 1525 1526 .Lcleanup_load_fpu_regs: 1527 larl %r9,load_fpu_regs 1528 + BR_EX %r14,%r11 1529 1530 /* 1531 * Integer constants
+2 -3
arch/s390/kernel/irq.c
··· 176 new -= STACK_FRAME_OVERHEAD; 177 ((struct stack_frame *) new)->back_chain = old; 178 asm volatile(" la 15,0(%0)\n" 179 - " basr 14,%2\n" 180 " la 15,0(%1)\n" 181 - : : "a" (new), "a" (old), 182 - "a" (__do_softirq) 183 : "0", "1", "2", "3", "4", "5", "14", 184 "cc", "memory" ); 185 } else {
··· 176 new -= STACK_FRAME_OVERHEAD; 177 ((struct stack_frame *) new)->back_chain = old; 178 asm volatile(" la 15,0(%0)\n" 179 + " brasl 14,__do_softirq\n" 180 " la 15,0(%1)\n" 181 + : : "a" (new), "a" (old) 182 : "0", "1", "2", "3", "4", "5", "14", 183 "cc", "memory" ); 184 } else {
+9 -5
arch/s390/kernel/mcount.S
··· 9 #include <linux/linkage.h> 10 #include <asm/asm-offsets.h> 11 #include <asm/ftrace.h> 12 #include <asm/ptrace.h> 13 #include <asm/export.h> 14 15 .section .kprobes.text, "ax" 16 17 ENTRY(ftrace_stub) 18 - br %r14 19 20 #define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE) 21 #define STACK_PTREGS (STACK_FRAME_OVERHEAD) ··· 27 #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW) 28 29 ENTRY(_mcount) 30 - br %r14 31 32 EXPORT_SYMBOL(_mcount) 33 ··· 57 #endif 58 lgr %r3,%r14 59 la %r5,STACK_PTREGS(%r15) 60 - basr %r14,%r1 61 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 62 # The j instruction gets runtime patched to a nop instruction. 63 # See ftrace_enable_ftrace_graph_caller. ··· 72 #endif 73 lg %r1,(STACK_PTREGS_PSW+8)(%r15) 74 lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15) 75 - br %r1 76 77 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 78 ··· 85 aghi %r15,STACK_FRAME_OVERHEAD 86 lgr %r14,%r2 87 lmg %r2,%r5,32(%r15) 88 - br %r14 89 90 #endif
··· 9 #include <linux/linkage.h> 10 #include <asm/asm-offsets.h> 11 #include <asm/ftrace.h> 12 + #include <asm/nospec-insn.h> 13 #include <asm/ptrace.h> 14 #include <asm/export.h> 15 + 16 + GEN_BR_THUNK %r1 17 + GEN_BR_THUNK %r14 18 19 .section .kprobes.text, "ax" 20 21 ENTRY(ftrace_stub) 22 + BR_EX %r14 23 24 #define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE) 25 #define STACK_PTREGS (STACK_FRAME_OVERHEAD) ··· 23 #define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW) 24 25 ENTRY(_mcount) 26 + BR_EX %r14 27 28 EXPORT_SYMBOL(_mcount) 29 ··· 53 #endif 54 lgr %r3,%r14 55 la %r5,STACK_PTREGS(%r15) 56 + BASR_EX %r14,%r1 57 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 58 # The j instruction gets runtime patched to a nop instruction. 59 # See ftrace_enable_ftrace_graph_caller. ··· 68 #endif 69 lg %r1,(STACK_PTREGS_PSW+8)(%r15) 70 lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15) 71 + BR_EX %r1 72 73 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 74 ··· 81 aghi %r15,STACK_FRAME_OVERHEAD 82 lgr %r14,%r2 83 lmg %r2,%r5,32(%r15) 84 + BR_EX %r14 85 86 #endif
+20 -24
arch/s390/kernel/nospec-branch.c
··· 1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/module.h> 3 #include <linux/device.h> 4 - #include <linux/cpu.h> 5 #include <asm/nospec-branch.h> 6 7 static int __init nobp_setup_early(char *str) ··· 42 return 0; 43 } 44 arch_initcall(nospec_report); 45 - 46 - #ifdef CONFIG_SYSFS 47 - ssize_t cpu_show_spectre_v1(struct device *dev, 48 - struct device_attribute *attr, char *buf) 49 - { 50 - return sprintf(buf, "Mitigation: __user pointer sanitization\n"); 51 - } 52 - 53 - ssize_t cpu_show_spectre_v2(struct device *dev, 54 - struct device_attribute *attr, char *buf) 55 - { 56 - if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) 57 - return sprintf(buf, "Mitigation: execute trampolines\n"); 58 - if (__test_facility(82, S390_lowcore.alt_stfle_fac_list)) 59 - return sprintf(buf, "Mitigation: limited branch prediction.\n"); 60 - return sprintf(buf, "Vulnerable\n"); 61 - } 62 - #endif 63 64 #ifdef CONFIG_EXPOLINE 65 ··· 93 s32 *epo; 94 95 /* Second part of the instruction replace is always a nop */ 96 - memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4); 97 for (epo = start; epo < end; epo++) { 98 instr = (u8 *) epo + *epo; 99 if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04) ··· 113 br = thunk + (*(int *)(thunk + 2)) * 2; 114 else 115 continue; 116 - if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0) 117 continue; 118 switch (type) { 119 case BRCL_EXPOLINE: 120 - /* brcl to thunk, replace with br + nop */ 121 insnbuf[0] = br[0]; 122 insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); 123 break; 124 case BRASL_EXPOLINE: 125 - /* brasl to thunk, replace with basr + nop */ 126 - insnbuf[0] = 0x0d; 127 insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); 128 break; 129 } 130
··· 1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/module.h> 3 #include <linux/device.h> 4 #include <asm/nospec-branch.h> 5 6 static int __init nobp_setup_early(char *str) ··· 43 return 0; 44 } 45 arch_initcall(nospec_report); 46 47 #ifdef CONFIG_EXPOLINE 48 ··· 112 s32 *epo; 113 114 /* Second part of the instruction replace is always a nop */ 115 for (epo = start; epo < end; epo++) { 116 instr = (u8 *) epo + *epo; 117 if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04) ··· 133 br = thunk + (*(int *)(thunk + 2)) * 2; 134 else 135 continue; 136 + /* Check for unconditional branch 0x07f? or 0x47f???? */ 137 + if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0) 138 continue; 139 + 140 + memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4); 141 switch (type) { 142 case BRCL_EXPOLINE: 143 insnbuf[0] = br[0]; 144 insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); 145 + if (br[0] == 0x47) { 146 + /* brcl to b, replace with bc + nopr */ 147 + insnbuf[2] = br[2]; 148 + insnbuf[3] = br[3]; 149 + } else { 150 + /* brcl to br, replace with bcr + nop */ 151 + } 152 break; 153 case BRASL_EXPOLINE: 154 insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f); 155 + if (br[0] == 0x47) { 156 + /* brasl to b, replace with bas + nopr */ 157 + insnbuf[0] = 0x4d; 158 + insnbuf[2] = br[2]; 159 + insnbuf[3] = br[3]; 160 + } else { 161 + /* brasl to br, replace with basr + nop */ 162 + insnbuf[0] = 0x0d; 163 + } 164 break; 165 } 166
+21
arch/s390/kernel/nospec-sysfs.c
···
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/device.h> 3 + #include <linux/cpu.h> 4 + #include <asm/facility.h> 5 + #include <asm/nospec-branch.h> 6 + 7 + ssize_t cpu_show_spectre_v1(struct device *dev, 8 + struct device_attribute *attr, char *buf) 9 + { 10 + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); 11 + } 12 + 13 + ssize_t cpu_show_spectre_v2(struct device *dev, 14 + struct device_attribute *attr, char *buf) 15 + { 16 + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) 17 + return sprintf(buf, "Mitigation: execute trampolines\n"); 18 + if (__test_facility(82, S390_lowcore.alt_stfle_fac_list)) 19 + return sprintf(buf, "Mitigation: limited branch prediction\n"); 20 + return sprintf(buf, "Vulnerable\n"); 21 + }
+4
arch/s390/kernel/perf_cpum_sf.c
··· 753 */ 754 rate = 0; 755 if (attr->freq) { 756 rate = freq_to_sample_rate(&si, attr->sample_freq); 757 rate = hw_limit_rate(&si, rate); 758 attr->freq = 0;
··· 753 */ 754 rate = 0; 755 if (attr->freq) { 756 + if (!attr->sample_freq) { 757 + err = -EINVAL; 758 + goto out; 759 + } 760 rate = freq_to_sample_rate(&si, attr->sample_freq); 761 rate = hw_limit_rate(&si, rate); 762 attr->freq = 0;
+5 -2
arch/s390/kernel/reipl.S
··· 7 8 #include <linux/linkage.h> 9 #include <asm/asm-offsets.h> 10 #include <asm/sigp.h> 11 12 # 13 # Issue "store status" for the current CPU to its prefix page ··· 70 st %r4,0(%r1) 71 st %r5,4(%r1) 72 stg %r2,8(%r1) 73 - lgr %r1,%r2 74 lgr %r2,%r3 75 - br %r1 76 77 .section .bss 78 .align 8
··· 7 8 #include <linux/linkage.h> 9 #include <asm/asm-offsets.h> 10 + #include <asm/nospec-insn.h> 11 #include <asm/sigp.h> 12 + 13 + GEN_BR_THUNK %r9 14 15 # 16 # Issue "store status" for the current CPU to its prefix page ··· 67 st %r4,0(%r1) 68 st %r5,4(%r1) 69 stg %r2,8(%r1) 70 + lgr %r9,%r2 71 lgr %r2,%r3 72 + BR_EX %r9 73 74 .section .bss 75 .align 8
+6 -4
arch/s390/kernel/swsusp.S
··· 13 #include <asm/ptrace.h> 14 #include <asm/thread_info.h> 15 #include <asm/asm-offsets.h> 16 #include <asm/sigp.h> 17 18 /* ··· 25 * (see below) in the resume process. 26 * This function runs with disabled interrupts. 27 */ 28 .section .text 29 ENTRY(swsusp_arch_suspend) 30 stmg %r6,%r15,__SF_GPRS(%r15) ··· 106 spx 0x318(%r1) 107 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) 108 lghi %r2,0 109 - br %r14 110 111 /* 112 * Restore saved memory image to correct place and restore register context. ··· 200 larl %r15,init_thread_union 201 ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) 202 larl %r2,.Lpanic_string 203 - larl %r3,sclp_early_printk 204 lghi %r1,0 205 sam31 206 sigp %r1,%r0,SIGP_SET_ARCHITECTURE 207 - basr %r14,%r3 208 larl %r3,.Ldisabled_wait_31 209 lpsw 0(%r3) 210 4: ··· 269 /* Return 0 */ 270 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) 271 lghi %r2,0 272 - br %r14 273 274 .section .data..nosave,"aw",@progbits 275 .align 8
··· 13 #include <asm/ptrace.h> 14 #include <asm/thread_info.h> 15 #include <asm/asm-offsets.h> 16 + #include <asm/nospec-insn.h> 17 #include <asm/sigp.h> 18 19 /* ··· 24 * (see below) in the resume process. 25 * This function runs with disabled interrupts. 26 */ 27 + GEN_BR_THUNK %r14 28 + 29 .section .text 30 ENTRY(swsusp_arch_suspend) 31 stmg %r6,%r15,__SF_GPRS(%r15) ··· 103 spx 0x318(%r1) 104 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) 105 lghi %r2,0 106 + BR_EX %r14 107 108 /* 109 * Restore saved memory image to correct place and restore register context. ··· 197 larl %r15,init_thread_union 198 ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) 199 larl %r2,.Lpanic_string 200 lghi %r1,0 201 sam31 202 sigp %r1,%r0,SIGP_SET_ARCHITECTURE 203 + brasl %r14,sclp_early_printk 204 larl %r3,.Ldisabled_wait_31 205 lpsw 0(%r3) 206 4: ··· 267 /* Return 0 */ 268 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) 269 lghi %r2,0 270 + BR_EX %r14 271 272 .section .data..nosave,"aw",@progbits 273 .align 8
+11 -8
arch/s390/lib/mem.S
··· 7 8 #include <linux/linkage.h> 9 #include <asm/export.h> 10 11 /* 12 * void *memmove(void *dest, const void *src, size_t n) ··· 36 .Lmemmove_forward_remainder: 37 larl %r5,.Lmemmove_mvc 38 ex %r4,0(%r5) 39 - br %r14 40 .Lmemmove_reverse: 41 ic %r0,0(%r4,%r3) 42 stc %r0,0(%r4,%r1) 43 brctg %r4,.Lmemmove_reverse 44 ic %r0,0(%r4,%r3) 45 stc %r0,0(%r4,%r1) 46 - br %r14 47 .Lmemmove_mvc: 48 mvc 0(1,%r1),0(%r3) 49 EXPORT_SYMBOL(memmove) ··· 80 .Lmemset_clear_remainder: 81 larl %r3,.Lmemset_xc 82 ex %r4,0(%r3) 83 - br %r14 84 .Lmemset_fill: 85 cghi %r4,1 86 lgr %r1,%r2 ··· 98 stc %r3,0(%r1) 99 larl %r5,.Lmemset_mvc 100 ex %r4,0(%r5) 101 - br %r14 102 .Lmemset_fill_exit: 103 stc %r3,0(%r1) 104 - br %r14 105 .Lmemset_xc: 106 xc 0(1,%r1),0(%r1) 107 .Lmemset_mvc: ··· 124 .Lmemcpy_remainder: 125 larl %r5,.Lmemcpy_mvc 126 ex %r4,0(%r5) 127 - br %r14 128 .Lmemcpy_loop: 129 mvc 0(256,%r1),0(%r3) 130 la %r1,256(%r1) ··· 162 \insn %r3,0(%r1) 163 larl %r5,.L__memset_mvc\bits 164 ex %r4,0(%r5) 165 - br %r14 166 .L__memset_exit\bits: 167 \insn %r3,0(%r2) 168 - br %r14 169 .L__memset_mvc\bits: 170 mvc \bytes(1,%r1),0(%r1) 171 .endm
··· 7 8 #include <linux/linkage.h> 9 #include <asm/export.h> 10 + #include <asm/nospec-insn.h> 11 + 12 + GEN_BR_THUNK %r14 13 14 /* 15 * void *memmove(void *dest, const void *src, size_t n) ··· 33 .Lmemmove_forward_remainder: 34 larl %r5,.Lmemmove_mvc 35 ex %r4,0(%r5) 36 + BR_EX %r14 37 .Lmemmove_reverse: 38 ic %r0,0(%r4,%r3) 39 stc %r0,0(%r4,%r1) 40 brctg %r4,.Lmemmove_reverse 41 ic %r0,0(%r4,%r3) 42 stc %r0,0(%r4,%r1) 43 + BR_EX %r14 44 .Lmemmove_mvc: 45 mvc 0(1,%r1),0(%r3) 46 EXPORT_SYMBOL(memmove) ··· 77 .Lmemset_clear_remainder: 78 larl %r3,.Lmemset_xc 79 ex %r4,0(%r3) 80 + BR_EX %r14 81 .Lmemset_fill: 82 cghi %r4,1 83 lgr %r1,%r2 ··· 95 stc %r3,0(%r1) 96 larl %r5,.Lmemset_mvc 97 ex %r4,0(%r5) 98 + BR_EX %r14 99 .Lmemset_fill_exit: 100 stc %r3,0(%r1) 101 + BR_EX %r14 102 .Lmemset_xc: 103 xc 0(1,%r1),0(%r1) 104 .Lmemset_mvc: ··· 121 .Lmemcpy_remainder: 122 larl %r5,.Lmemcpy_mvc 123 ex %r4,0(%r5) 124 + BR_EX %r14 125 .Lmemcpy_loop: 126 mvc 0(256,%r1),0(%r3) 127 la %r1,256(%r1) ··· 159 \insn %r3,0(%r1) 160 larl %r5,.L__memset_mvc\bits 161 ex %r4,0(%r5) 162 + BR_EX %r14 163 .L__memset_exit\bits: 164 \insn %r3,0(%r2) 165 + BR_EX %r14 166 .L__memset_mvc\bits: 167 mvc \bytes(1,%r1),0(%r1) 168 .endm
+10 -6
arch/s390/net/bpf_jit.S
··· 9 */ 10 11 #include <linux/linkage.h> 12 #include "bpf_jit.h" 13 14 /* ··· 55 clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \ 56 jh sk_load_##NAME##_slow; \ 57 LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \ 58 - b OFF_OK(%r6); /* Return */ \ 59 \ 60 sk_load_##NAME##_slow:; \ 61 lgr %r2,%r7; /* Arg1 = skb pointer */ \ ··· 65 brasl %r14,skb_copy_bits; /* Get data from skb */ \ 66 LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp bufffer */ \ 67 ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \ 68 - br %r6; /* Return */ 69 70 sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */ 71 sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */ 72 73 /* 74 * Load 1 byte from SKB (optimized version) ··· 84 clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen? 85 jnl sk_load_byte_slow 86 llgc %r14,0(%r3,%r12) # Get byte from skb 87 - b OFF_OK(%r6) # Return OK 88 89 sk_load_byte_slow: 90 lgr %r2,%r7 # Arg1 = skb pointer ··· 94 brasl %r14,skb_copy_bits # Get data from skb 95 llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer 96 ltgr %r2,%r2 # Set cc to (%r2 != 0) 97 - br %r6 # Return cc 98 99 #define sk_negative_common(NAME, SIZE, LOAD) \ 100 sk_load_##NAME##_slow_neg:; \ ··· 108 jz bpf_error; \ 109 LOAD %r14,0(%r2); /* Get data from pointer */ \ 110 xr %r3,%r3; /* Set cc to zero */ \ 111 - br %r6; /* Return cc */ 112 113 sk_negative_common(word, 4, llgf) 114 sk_negative_common(half, 2, llgh) ··· 117 bpf_error: 118 # force a return 0 from jit handler 119 ltgr %r15,%r15 # Set condition code 120 - br %r6
··· 9 */ 10 11 #include <linux/linkage.h> 12 + #include <asm/nospec-insn.h> 13 #include "bpf_jit.h" 14 15 /* ··· 54 clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \ 55 jh sk_load_##NAME##_slow; \ 56 LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \ 57 + B_EX OFF_OK,%r6; /* Return */ \ 58 \ 59 sk_load_##NAME##_slow:; \ 60 lgr %r2,%r7; /* Arg1 = skb pointer */ \ ··· 64 brasl %r14,skb_copy_bits; /* Get data from skb */ \ 65 LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp bufffer */ \ 66 ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \ 67 + BR_EX %r6; /* Return */ 68 69 sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */ 70 sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */ 71 + 72 + GEN_BR_THUNK %r6 73 + GEN_B_THUNK OFF_OK,%r6 74 75 /* 76 * Load 1 byte from SKB (optimized version) ··· 80 clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen? 81 jnl sk_load_byte_slow 82 llgc %r14,0(%r3,%r12) # Get byte from skb 83 + B_EX OFF_OK,%r6 # Return OK 84 85 sk_load_byte_slow: 86 lgr %r2,%r7 # Arg1 = skb pointer ··· 90 brasl %r14,skb_copy_bits # Get data from skb 91 llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer 92 ltgr %r2,%r2 # Set cc to (%r2 != 0) 93 + BR_EX %r6 # Return cc 94 95 #define sk_negative_common(NAME, SIZE, LOAD) \ 96 sk_load_##NAME##_slow_neg:; \ ··· 104 jz bpf_error; \ 105 LOAD %r14,0(%r2); /* Get data from pointer */ \ 106 xr %r3,%r3; /* Set cc to zero */ \ 107 + BR_EX %r6; /* Return cc */ 108 109 sk_negative_common(word, 4, llgf) 110 sk_negative_common(half, 2, llgh) ··· 113 bpf_error: 114 # force a return 0 from jit handler 115 ltgr %r15,%r15 # Set condition code 116 + BR_EX %r6
+61 -2
arch/s390/net/bpf_jit_comp.c
··· 25 #include <linux/bpf.h> 26 #include <asm/cacheflush.h> 27 #include <asm/dis.h> 28 #include <asm/set_memory.h> 29 #include "bpf_jit.h" 30 ··· 43 int base_ip; /* Base address for literal pool */ 44 int ret0_ip; /* Address of return 0 */ 45 int exit_ip; /* Address of exit */ 46 int tail_call_start; /* Tail call start offset */ 47 int labels[1]; /* Labels for local jumps */ 48 }; ··· 252 _EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), op2 | mask); \ 253 REG_SET_SEEN(b1); \ 254 REG_SET_SEEN(b2); \ 255 }) 256 257 #define _EMIT6_IMM(op, imm) \ ··· 486 EMIT4(0xb9040000, REG_2, BPF_REG_0); 487 /* Restore registers */ 488 save_restore_regs(jit, REGS_RESTORE, stack_depth); 489 /* br %r14 */ 490 _EMIT2(0x07fe); 491 } 492 493 /* ··· 1020 /* lg %w1,<d(imm)>(%l) */ 1021 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L, 1022 EMIT_CONST_U64(func)); 1023 - /* basr %r14,%w1 */ 1024 - EMIT2(0x0d00, REG_14, REG_W1); 1025 /* lgr %b0,%r2: load return value into %b0 */ 1026 EMIT4(0xb9040000, BPF_REG_0, REG_2); 1027 if ((jit->seen & SEEN_SKB) &&
··· 25 #include <linux/bpf.h> 26 #include <asm/cacheflush.h> 27 #include <asm/dis.h> 28 + #include <asm/facility.h> 29 + #include <asm/nospec-branch.h> 30 #include <asm/set_memory.h> 31 #include "bpf_jit.h" 32 ··· 41 int base_ip; /* Base address for literal pool */ 42 int ret0_ip; /* Address of return 0 */ 43 int exit_ip; /* Address of exit */ 44 + int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */ 45 + int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */ 46 int tail_call_start; /* Tail call start offset */ 47 int labels[1]; /* Labels for local jumps */ 48 }; ··· 248 _EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), op2 | mask); \ 249 REG_SET_SEEN(b1); \ 250 REG_SET_SEEN(b2); \ 251 + }) 252 + 253 + #define EMIT6_PCREL_RILB(op, b, target) \ 254 + ({ \ 255 + int rel = (target - jit->prg) / 2; \ 256 + _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \ 257 + REG_SET_SEEN(b); \ 258 + }) 259 + 260 + #define EMIT6_PCREL_RIL(op, target) \ 261 + ({ \ 262 + int rel = (target - jit->prg) / 2; \ 263 + _EMIT6(op | rel >> 16, rel & 0xffff); \ 264 }) 265 266 #define _EMIT6_IMM(op, imm) \ ··· 469 EMIT4(0xb9040000, REG_2, BPF_REG_0); 470 /* Restore registers */ 471 save_restore_regs(jit, REGS_RESTORE, stack_depth); 472 + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) { 473 + jit->r14_thunk_ip = jit->prg; 474 + /* Generate __s390_indirect_jump_r14 thunk */ 475 + if (test_facility(35)) { 476 + /* exrl %r0,.+10 */ 477 + EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); 478 + } else { 479 + /* larl %r1,.+14 */ 480 + EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14); 481 + /* ex 0,0(%r1) */ 482 + EMIT4_DISP(0x44000000, REG_0, REG_1, 0); 483 + } 484 + /* j . */ 485 + EMIT4_PCREL(0xa7f40000, 0); 486 + } 487 /* br %r14 */ 488 _EMIT2(0x07fe); 489 + 490 + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable && 491 + (jit->seen & SEEN_FUNC)) { 492 + jit->r1_thunk_ip = jit->prg; 493 + /* Generate __s390_indirect_jump_r1 thunk */ 494 + if (test_facility(35)) { 495 + /* exrl %r0,.+10 */ 496 + EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); 497 + /* j . */ 498 + EMIT4_PCREL(0xa7f40000, 0); 499 + /* br %r1 */ 500 + _EMIT2(0x07f1); 501 + } else { 502 + /* larl %r1,.+14 */ 503 + EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14); 504 + /* ex 0,S390_lowcore.br_r1_trampoline */ 505 + EMIT4_DISP(0x44000000, REG_0, REG_0, 506 + offsetof(struct lowcore, br_r1_trampoline)); 507 + /* j . */ 508 + EMIT4_PCREL(0xa7f40000, 0); 509 + } 510 + } 511 } 512 513 /* ··· 966 /* lg %w1,<d(imm)>(%l) */ 967 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L, 968 EMIT_CONST_U64(func)); 969 + if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) { 970 + /* brasl %r14,__s390_indirect_jump_r1 */ 971 + EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip); 972 + } else { 973 + /* basr %r14,%w1 */ 974 + EMIT2(0x0d00, REG_14, REG_W1); 975 + } 976 /* lgr %b0,%r2: load return value into %b0 */ 977 EMIT4(0xb9040000, BPF_REG_0, REG_2); 978 if ((jit->seen & SEEN_SKB) &&
+3 -9
drivers/s390/cio/qdio_setup.c
··· 141 int i; 142 143 for (i = 0; i < nr_queues; i++) { 144 - q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); 145 if (!q) 146 return -ENOMEM; 147 ··· 456 { 457 struct ciw *ciw; 458 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; 459 - int rc; 460 461 memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); 462 memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag)); ··· 492 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); 493 if (!ciw) { 494 DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); 495 - rc = -EINVAL; 496 - goto out_err; 497 } 498 irq_ptr->equeue = *ciw; 499 500 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); 501 if (!ciw) { 502 DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); 503 - rc = -EINVAL; 504 - goto out_err; 505 } 506 irq_ptr->aqueue = *ciw; 507 ··· 509 init_data->cdev->handler = qdio_int_handler; 510 spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev)); 511 return 0; 512 - out_err: 513 - qdio_release_memory(irq_ptr); 514 - return rc; 515 } 516 517 void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
··· 141 int i; 142 143 for (i = 0; i < nr_queues; i++) { 144 + q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL); 145 if (!q) 146 return -ENOMEM; 147 ··· 456 { 457 struct ciw *ciw; 458 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; 459 460 memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); 461 memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag)); ··· 493 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); 494 if (!ciw) { 495 DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); 496 + return -EINVAL; 497 } 498 irq_ptr->equeue = *ciw; 499 500 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); 501 if (!ciw) { 502 DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); 503 + return -EINVAL; 504 } 505 irq_ptr->aqueue = *ciw; 506 ··· 512 init_data->cdev->handler = qdio_int_handler; 513 spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev)); 514 return 0; 515 } 516 517 void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
+12 -1
drivers/s390/cio/vfio_ccw_cp.c
··· 715 * and stores the result to ccwchain list. @cp must have been 716 * initialized by a previous call with cp_init(). Otherwise, undefined 717 * behavior occurs. 718 * 719 * The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced 720 * as helpers to do ccw chain translation inside the kernel. Basically ··· 753 for (idx = 0; idx < len; idx++) { 754 ret = ccwchain_fetch_one(chain, idx, cp); 755 if (ret) 756 - return ret; 757 } 758 } 759 760 return 0; 761 } 762 763 /**
··· 715 * and stores the result to ccwchain list. @cp must have been 716 * initialized by a previous call with cp_init(). Otherwise, undefined 717 * behavior occurs. 718 + * For each chain composing the channel program: 719 + * - On entry ch_len holds the count of CCWs to be translated. 720 + * - On exit ch_len is adjusted to the count of successfully translated CCWs. 721 + * This allows cp_free to find in ch_len the count of CCWs to free in a chain. 722 * 723 * The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced 724 * as helpers to do ccw chain translation inside the kernel. Basically ··· 749 for (idx = 0; idx < len; idx++) { 750 ret = ccwchain_fetch_one(chain, idx, cp); 751 if (ret) 752 + goto out_err; 753 } 754 } 755 756 return 0; 757 + out_err: 758 + /* Only cleanup the chain elements that were actually translated. */ 759 + chain->ch_len = idx; 760 + list_for_each_entry_continue(chain, &cp->ccwchain_list, next) { 761 + chain->ch_len = 0; 762 + } 763 + return ret; 764 } 765 766 /**