Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-5.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Heiko Carstens:
"Just a couple of small improvements, bug fixes and cleanups:

- Add Eric Farman as maintainer for s390 virtio drivers.

- Improve machine check handling, and avoid incorrectly injecting a
machine check into a kvm guest.

- Add cond_resched() call to gmap page table walker in order to avoid
possible huge latencies. Also use non-quiescing sske instruction to
speed up storage key handling.

- Add __GFP_NORETRY to KEXEC_CONTROL_MEMORY_GFP so s390 behaves
similarly to common code.

- Get sie control block address from correct stack slot in perf event
code. This fixes potential random memory accesses.

- Change uaccess code so that the exception handler sets the result
of get_user() and __get_kernel_nofault() to zero in case of a
fault. Until now this was done via input parameters for inline
assemblies. Doing it via fault handling is what most or even all
other architectures are doing.

- Couple of other small cleanups and fixes"

* tag 's390-5.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/stack: add union to reflect kvm stack slot usages
s390/stack: merge empty stack frame slots
s390/uaccess: whitespace cleanup
s390/uaccess: use __noreturn instead of __attribute__((noreturn))
s390/uaccess: use exception handler to zero result on get_user() failure
s390/uaccess: use symbolic names for inline assembler operands
s390/mcck: isolate SIE instruction when setting CIF_MCCK_GUEST flag
s390/mm: use non-quiescing sske for KVM switch to keyed guest
s390/gmap: voluntarily schedule during key setting
MAINTAINERS: Update s390 virtio-ccw
s390/kexec: add __GFP_NORETRY to KEXEC_CONTROL_MEMORY_GFP
s390/Kconfig.debug: fix indentation
s390/Kconfig: fix indentation
s390/perf: obtain sie_block from the right address
s390: generate register offsets into pt_regs automatically
s390: simplify early program check handler
s390/crypto: fix scatterwalk_unmap() callers in AES-GCM

+289 -211
+1
MAINTAINERS
··· 21057 21057 VIRTIO DRIVERS FOR S390 21058 21058 M: Cornelia Huck <cohuck@redhat.com> 21059 21059 M: Halil Pasic <pasic@linux.ibm.com> 21060 + M: Eric Farman <farman@linux.ibm.com> 21060 21061 L: linux-s390@vger.kernel.org 21061 21062 L: virtualization@lists.linux-foundation.org 21062 21063 L: kvm@vger.kernel.org
+4 -4
arch/s390/Kconfig
··· 732 732 depends on S390_AP_IOMMU && VFIO_MDEV && KVM 733 733 depends on ZCRYPT 734 734 help 735 - This driver grants access to Adjunct Processor (AP) devices 736 - via the VFIO mediated device interface. 735 + This driver grants access to Adjunct Processor (AP) devices 736 + via the VFIO mediated device interface. 737 737 738 - To compile this driver as a module, choose M here: the module 739 - will be called vfio_ap. 738 + To compile this driver as a module, choose M here: the module 739 + will be called vfio_ap. 740 740 741 741 endmenu 742 742
+6 -6
arch/s390/Kconfig.debug
··· 14 14 If unsure, say N. 15 15 16 16 config CIO_INJECT 17 - bool "CIO Inject interfaces" 18 - depends on DEBUG_KERNEL && DEBUG_FS 19 - help 20 - This option provides a debugging facility to inject certain artificial events 21 - and instruction responses to the CIO layer of Linux kernel. The newly created 22 - debugfs user-interfaces will be at /sys/kernel/debug/s390/cio/* 17 + bool "CIO Inject interfaces" 18 + depends on DEBUG_KERNEL && DEBUG_FS 19 + help 20 + This option provides a debugging facility to inject certain artificial events 21 + and instruction responses to the CIO layer of Linux kernel. The newly created 22 + debugfs user-interfaces will be at /sys/kernel/debug/s390/cio/*
+2 -2
arch/s390/crypto/aes_s390.c
··· 701 701 unsigned int nbytes) 702 702 { 703 703 gw->walk_bytes_remain -= nbytes; 704 - scatterwalk_unmap(&gw->walk); 704 + scatterwalk_unmap(gw->walk_ptr); 705 705 scatterwalk_advance(&gw->walk, nbytes); 706 706 scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain); 707 707 gw->walk_ptr = NULL; ··· 776 776 goto out; 777 777 } 778 778 779 - scatterwalk_unmap(&gw->walk); 779 + scatterwalk_unmap(gw->walk_ptr); 780 780 gw->walk_ptr = NULL; 781 781 782 782 gw->ptr = gw->buf;
+63 -28
arch/s390/include/asm/asm-extable.h
··· 3 3 #define __ASM_EXTABLE_H 4 4 5 5 #include <linux/stringify.h> 6 + #include <linux/bits.h> 6 7 #include <asm/asm-const.h> 7 8 8 - #define EX_TYPE_NONE 0 9 - #define EX_TYPE_FIXUP 1 10 - #define EX_TYPE_BPF 2 11 - #define EX_TYPE_UACCESS 3 9 + #define EX_TYPE_NONE 0 10 + #define EX_TYPE_FIXUP 1 11 + #define EX_TYPE_BPF 2 12 + #define EX_TYPE_UA_STORE 3 13 + #define EX_TYPE_UA_LOAD_MEM 4 14 + #define EX_TYPE_UA_LOAD_REG 5 15 + 16 + #define EX_DATA_REG_ERR_SHIFT 0 17 + #define EX_DATA_REG_ERR GENMASK(3, 0) 18 + 19 + #define EX_DATA_REG_ADDR_SHIFT 4 20 + #define EX_DATA_REG_ADDR GENMASK(7, 4) 21 + 22 + #define EX_DATA_LEN_SHIFT 8 23 + #define EX_DATA_LEN GENMASK(11, 8) 12 24 13 25 #define __EX_TABLE(_section, _fault, _target, _type) \ 14 26 stringify_in_c(.section _section,"a";) \ ··· 31 19 stringify_in_c(.short 0;) \ 32 20 stringify_in_c(.previous) 33 21 34 - #define __EX_TABLE_UA(_section, _fault, _target, _type, _reg) \ 35 - stringify_in_c(.section _section,"a";) \ 36 - stringify_in_c(.align 4;) \ 37 - stringify_in_c(.long (_fault) - .;) \ 38 - stringify_in_c(.long (_target) - .;) \ 39 - stringify_in_c(.short (_type);) \ 40 - stringify_in_c(.macro extable_reg reg;) \ 41 - stringify_in_c(.set .Lfound, 0;) \ 42 - stringify_in_c(.set .Lregnr, 0;) \ 43 - stringify_in_c(.irp rs,r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15;) \ 44 - stringify_in_c(.ifc "\reg", "%%\rs";) \ 45 - stringify_in_c(.set .Lfound, 1;) \ 46 - stringify_in_c(.short .Lregnr;) \ 47 - stringify_in_c(.endif;) \ 48 - stringify_in_c(.set .Lregnr, .Lregnr+1;) \ 49 - stringify_in_c(.endr;) \ 50 - stringify_in_c(.ifne (.Lfound != 1);) \ 51 - stringify_in_c(.error "extable_reg: bad register argument";) \ 52 - stringify_in_c(.endif;) \ 53 - stringify_in_c(.endm;) \ 54 - stringify_in_c(extable_reg _reg;) \ 55 - stringify_in_c(.purgem extable_reg;) \ 22 + #define __EX_TABLE_UA(_section, _fault, _target, _type, _regerr, _regaddr, _len)\ 23 + stringify_in_c(.section _section,"a";) \ 24 + 
stringify_in_c(.align 4;) \ 25 + stringify_in_c(.long (_fault) - .;) \ 26 + stringify_in_c(.long (_target) - .;) \ 27 + stringify_in_c(.short (_type);) \ 28 + stringify_in_c(.macro extable_reg regerr, regaddr;) \ 29 + stringify_in_c(.set .Lfound, 0;) \ 30 + stringify_in_c(.set .Lcurr, 0;) \ 31 + stringify_in_c(.irp rs,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15;) \ 32 + stringify_in_c( .ifc "\regerr", "%%r\rs";) \ 33 + stringify_in_c( .set .Lfound, 1;) \ 34 + stringify_in_c( .set .Lregerr, .Lcurr;) \ 35 + stringify_in_c( .endif;) \ 36 + stringify_in_c( .set .Lcurr, .Lcurr+1;) \ 37 + stringify_in_c(.endr;) \ 38 + stringify_in_c(.ifne (.Lfound != 1);) \ 39 + stringify_in_c( .error "extable_reg: bad register argument1";) \ 40 + stringify_in_c(.endif;) \ 41 + stringify_in_c(.set .Lfound, 0;) \ 42 + stringify_in_c(.set .Lcurr, 0;) \ 43 + stringify_in_c(.irp rs,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15;) \ 44 + stringify_in_c( .ifc "\regaddr", "%%r\rs";) \ 45 + stringify_in_c( .set .Lfound, 1;) \ 46 + stringify_in_c( .set .Lregaddr, .Lcurr;) \ 47 + stringify_in_c( .endif;) \ 48 + stringify_in_c( .set .Lcurr, .Lcurr+1;) \ 49 + stringify_in_c(.endr;) \ 50 + stringify_in_c(.ifne (.Lfound != 1);) \ 51 + stringify_in_c( .error "extable_reg: bad register argument2";) \ 52 + stringify_in_c(.endif;) \ 53 + stringify_in_c(.short .Lregerr << EX_DATA_REG_ERR_SHIFT | \ 54 + .Lregaddr << EX_DATA_REG_ADDR_SHIFT | \ 55 + _len << EX_DATA_LEN_SHIFT;) \ 56 + stringify_in_c(.endm;) \ 57 + stringify_in_c(extable_reg _regerr,_regaddr;) \ 58 + stringify_in_c(.purgem extable_reg;) \ 56 59 stringify_in_c(.previous) 57 60 58 61 #define EX_TABLE(_fault, _target) \ 59 62 __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FIXUP) 63 + 60 64 #define EX_TABLE_AMODE31(_fault, _target) \ 61 65 __EX_TABLE(.amode31.ex_table, _fault, _target, EX_TYPE_FIXUP) 62 - #define EX_TABLE_UA(_fault, _target, _reg) \ 63 - __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UACCESS, _reg) 66 + 67 + #define EX_TABLE_UA_STORE(_fault, 
_target, _regerr) \ 68 + __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_STORE, _regerr, _regerr, 0) 69 + 70 + #define EX_TABLE_UA_LOAD_MEM(_fault, _target, _regerr, _regmem, _len) \ 71 + __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_MEM, _regerr, _regmem, _len) 72 + 73 + #define EX_TABLE_UA_LOAD_REG(_fault, _target, _regerr, _regzero) \ 74 + __EX_TABLE_UA(__ex_table, _fault, _target, EX_TYPE_UA_LOAD_REG, _regerr, _regzero, 0) 64 75 65 76 #endif /* __ASM_EXTABLE_H */
+1 -1
arch/s390/include/asm/kexec.h
··· 31 31 #define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31) 32 32 33 33 /* Allocate control page with GFP_DMA */ 34 - #define KEXEC_CONTROL_MEMORY_GFP GFP_DMA 34 + #define KEXEC_CONTROL_MEMORY_GFP (GFP_DMA | __GFP_NORETRY) 35 35 36 36 /* Maximum address we can use for the crash control pages */ 37 37 #define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL)
-6
arch/s390/include/asm/processor.h
··· 304 304 while (1); 305 305 } 306 306 307 - /* 308 - * Basic Program Check Handler. 309 - */ 310 - extern void s390_base_pgm_handler(void); 311 - extern void (*s390_base_pgm_handler_fn)(struct pt_regs *regs); 312 - 313 307 #define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL 314 308 315 309 extern int memcpy_real(void *, unsigned long, size_t);
+9 -2
arch/s390/include/asm/stacktrace.h
··· 39 39 * Kernel uses the packed stack layout (-mpacked-stack). 40 40 */ 41 41 struct stack_frame { 42 - unsigned long empty1[5]; 43 - unsigned int empty2[8]; 42 + union { 43 + unsigned long empty[9]; 44 + struct { 45 + unsigned long sie_control_block; 46 + unsigned long sie_savearea; 47 + unsigned long sie_reason; 48 + unsigned long sie_flags; 49 + }; 50 + }; 44 51 unsigned long gprs[10]; 45 52 unsigned long back_chain; 46 53 };
+118 -99
arch/s390/include/asm/uaccess.h
··· 3 3 * S390 version 4 4 * Copyright IBM Corp. 1999, 2000 5 5 * Author(s): Hartmut Penner (hp@de.ibm.com), 6 - * Martin Schwidefsky (schwidefsky@de.ibm.com) 6 + * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 7 * 8 8 * Derived from "include/asm-i386/uaccess.h" 9 9 */ ··· 55 55 return n; 56 56 } 57 57 58 - int __put_user_bad(void) __attribute__((noreturn)); 59 - int __get_user_bad(void) __attribute__((noreturn)); 60 - 61 58 union oac { 62 59 unsigned int val; 63 60 struct { ··· 77 80 }; 78 81 }; 79 82 80 - #define __put_get_user_asm(to, from, size, oac_spec) \ 83 + int __noreturn __put_user_bad(void); 84 + 85 + #define __put_user_asm(to, from, size) \ 81 86 ({ \ 87 + union oac __oac_spec = { \ 88 + .oac1.as = PSW_BITS_AS_SECONDARY, \ 89 + .oac1.a = 1, \ 90 + }; \ 82 91 int __rc; \ 83 92 \ 84 93 asm volatile( \ ··· 92 89 "0: mvcos %[_to],%[_from],%[_size]\n" \ 93 90 "1: xr %[rc],%[rc]\n" \ 94 91 "2:\n" \ 95 - EX_TABLE_UA(0b,2b,%[rc]) EX_TABLE_UA(1b,2b,%[rc]) \ 92 + EX_TABLE_UA_STORE(0b, 2b, %[rc]) \ 93 + EX_TABLE_UA_STORE(1b, 2b, %[rc]) \ 96 94 : [rc] "=&d" (__rc), [_to] "+Q" (*(to)) \ 97 95 : [_size] "d" (size), [_from] "Q" (*(from)), \ 98 - [spec] "d" (oac_spec.val) \ 96 + [spec] "d" (__oac_spec.val) \ 99 97 : "cc", "0"); \ 100 98 __rc; \ 101 99 }) 102 - 103 - #define __put_user_asm(to, from, size) \ 104 - __put_get_user_asm(to, from, size, ((union oac) { \ 105 - .oac1.as = PSW_BITS_AS_SECONDARY, \ 106 - .oac1.a = 1 \ 107 - })) 108 - 109 - #define __get_user_asm(to, from, size) \ 110 - __put_get_user_asm(to, from, size, ((union oac) { \ 111 - .oac2.as = PSW_BITS_AS_SECONDARY, \ 112 - .oac2.a = 1 \ 113 - })) \ 114 100 115 101 static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) 116 102 { ··· 132 140 } 133 141 return rc; 134 142 } 143 + 144 + int __noreturn __get_user_bad(void); 145 + 146 + #define __get_user_asm(to, from, size) \ 147 + ({ \ 148 + union oac __oac_spec = { \ 149 + .oac2.as = PSW_BITS_AS_SECONDARY, \ 150 + .oac2.a = 
1, \ 151 + }; \ 152 + int __rc; \ 153 + \ 154 + asm volatile( \ 155 + " lr 0,%[spec]\n" \ 156 + "0: mvcos 0(%[_to]),%[_from],%[_size]\n" \ 157 + "1: xr %[rc],%[rc]\n" \ 158 + "2:\n" \ 159 + EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize]) \ 160 + EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize]) \ 161 + : [rc] "=&d" (__rc), "=Q" (*(to)) \ 162 + : [_size] "d" (size), [_from] "Q" (*(from)), \ 163 + [spec] "d" (__oac_spec.val), [_to] "a" (to), \ 164 + [_ksize] "K" (size) \ 165 + : "cc", "0"); \ 166 + __rc; \ 167 + }) 135 168 136 169 static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) 137 170 { ··· 194 177 * These are the main single-value transfer routines. They automatically 195 178 * use the right size if we just have the right pointer type. 196 179 */ 197 - #define __put_user(x, ptr) \ 198 - ({ \ 199 - __typeof__(*(ptr)) __x = (x); \ 200 - int __pu_err = -EFAULT; \ 201 - __chk_user_ptr(ptr); \ 202 - switch (sizeof (*(ptr))) { \ 203 - case 1: \ 204 - case 2: \ 205 - case 4: \ 206 - case 8: \ 207 - __pu_err = __put_user_fn(&__x, ptr, \ 208 - sizeof(*(ptr))); \ 209 - break; \ 210 - default: \ 211 - __put_user_bad(); \ 212 - break; \ 213 - } \ 214 - __builtin_expect(__pu_err, 0); \ 180 + #define __put_user(x, ptr) \ 181 + ({ \ 182 + __typeof__(*(ptr)) __x = (x); \ 183 + int __pu_err = -EFAULT; \ 184 + \ 185 + __chk_user_ptr(ptr); \ 186 + switch (sizeof(*(ptr))) { \ 187 + case 1: \ 188 + case 2: \ 189 + case 4: \ 190 + case 8: \ 191 + __pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr))); \ 192 + break; \ 193 + default: \ 194 + __put_user_bad(); \ 195 + break; \ 196 + } \ 197 + __builtin_expect(__pu_err, 0); \ 215 198 }) 216 199 217 - #define put_user(x, ptr) \ 218 - ({ \ 219 - might_fault(); \ 220 - __put_user(x, ptr); \ 200 + #define put_user(x, ptr) \ 201 + ({ \ 202 + might_fault(); \ 203 + __put_user(x, ptr); \ 221 204 }) 222 205 223 - 224 - #define __get_user(x, ptr) \ 225 - ({ \ 226 - int __gu_err = -EFAULT; \ 227 
- __chk_user_ptr(ptr); \ 228 - switch (sizeof(*(ptr))) { \ 229 - case 1: { \ 230 - unsigned char __x = 0; \ 231 - __gu_err = __get_user_fn(&__x, ptr, \ 232 - sizeof(*(ptr))); \ 233 - (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 234 - break; \ 235 - }; \ 236 - case 2: { \ 237 - unsigned short __x = 0; \ 238 - __gu_err = __get_user_fn(&__x, ptr, \ 239 - sizeof(*(ptr))); \ 240 - (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 241 - break; \ 242 - }; \ 243 - case 4: { \ 244 - unsigned int __x = 0; \ 245 - __gu_err = __get_user_fn(&__x, ptr, \ 246 - sizeof(*(ptr))); \ 247 - (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 248 - break; \ 249 - }; \ 250 - case 8: { \ 251 - unsigned long long __x = 0; \ 252 - __gu_err = __get_user_fn(&__x, ptr, \ 253 - sizeof(*(ptr))); \ 254 - (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 255 - break; \ 256 - }; \ 257 - default: \ 258 - __get_user_bad(); \ 259 - break; \ 260 - } \ 261 - __builtin_expect(__gu_err, 0); \ 206 + #define __get_user(x, ptr) \ 207 + ({ \ 208 + int __gu_err = -EFAULT; \ 209 + \ 210 + __chk_user_ptr(ptr); \ 211 + switch (sizeof(*(ptr))) { \ 212 + case 1: { \ 213 + unsigned char __x; \ 214 + \ 215 + __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ 216 + (x) = *(__force __typeof__(*(ptr)) *)&__x; \ 217 + break; \ 218 + }; \ 219 + case 2: { \ 220 + unsigned short __x; \ 221 + \ 222 + __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ 223 + (x) = *(__force __typeof__(*(ptr)) *)&__x; \ 224 + break; \ 225 + }; \ 226 + case 4: { \ 227 + unsigned int __x; \ 228 + \ 229 + __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ 230 + (x) = *(__force __typeof__(*(ptr)) *)&__x; \ 231 + break; \ 232 + }; \ 233 + case 8: { \ 234 + unsigned long __x; \ 235 + \ 236 + __gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr))); \ 237 + (x) = *(__force __typeof__(*(ptr)) *)&__x; \ 238 + break; \ 239 + }; \ 240 + default: \ 241 + __get_user_bad(); \ 242 + break; \ 243 + } \ 244 + __builtin_expect(__gu_err, 0); \ 262 245 }) 263 246 264 - 
#define get_user(x, ptr) \ 265 - ({ \ 266 - might_fault(); \ 267 - __get_user(x, ptr); \ 247 + #define get_user(x, ptr) \ 248 + ({ \ 249 + might_fault(); \ 250 + __get_user(x, ptr); \ 268 251 }) 269 252 270 253 /* ··· 295 278 int __rc; \ 296 279 \ 297 280 asm volatile( \ 298 - "0: " insn " %2,%1\n" \ 299 - "1: xr %0,%0\n" \ 281 + "0: " insn " %[_val],%[_to]\n" \ 282 + "1: xr %[rc],%[rc]\n" \ 300 283 "2:\n" \ 301 - EX_TABLE_UA(0b,2b,%0) EX_TABLE_UA(1b,2b,%0) \ 302 - : "=d" (__rc), "+Q" (*(to)) \ 303 - : "d" (val) \ 284 + EX_TABLE_UA_STORE(0b, 2b, %[rc]) \ 285 + EX_TABLE_UA_STORE(1b, 2b, %[rc]) \ 286 + : [rc] "=d" (__rc), [_to] "+Q" (*(to)) \ 287 + : [_val] "d" (val) \ 304 288 : "cc"); \ 305 289 __rc; \ 306 290 }) 307 291 308 292 #define __put_kernel_nofault(dst, src, type, err_label) \ 309 293 do { \ 310 - u64 __x = (u64)(*((type *)(src))); \ 294 + unsigned long __x = (unsigned long)(*((type *)(src))); \ 311 295 int __pk_err; \ 312 296 \ 313 297 switch (sizeof(type)) { \ ··· 339 321 int __rc; \ 340 322 \ 341 323 asm volatile( \ 342 - "0: " insn " %1,%2\n" \ 343 - "1: xr %0,%0\n" \ 324 + "0: " insn " %[_val],%[_from]\n" \ 325 + "1: xr %[rc],%[rc]\n" \ 344 326 "2:\n" \ 345 - EX_TABLE_UA(0b,2b,%0) EX_TABLE_UA(1b,2b,%0) \ 346 - : "=d" (__rc), "+d" (val) \ 347 - : "Q" (*(from)) \ 327 + EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val]) \ 328 + EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val]) \ 329 + : [rc] "=d" (__rc), [_val] "=d" (val) \ 330 + : [_from] "Q" (*(from)) \ 348 331 : "cc"); \ 349 332 __rc; \ 350 333 }) ··· 356 337 \ 357 338 switch (sizeof(type)) { \ 358 339 case 1: { \ 359 - u8 __x = 0; \ 340 + unsigned char __x; \ 360 341 \ 361 342 __gk_err = __get_kernel_asm(__x, (type *)(src), "ic"); \ 362 343 *((type *)(dst)) = (type)__x; \ 363 344 break; \ 364 345 }; \ 365 346 case 2: { \ 366 - u16 __x = 0; \ 347 + unsigned short __x; \ 367 348 \ 368 349 __gk_err = __get_kernel_asm(__x, (type *)(src), "lh"); \ 369 350 *((type *)(dst)) = (type)__x; \ 370 351 break; \ 371 352 }; \ 
372 353 case 4: { \ 373 - u32 __x = 0; \ 354 + unsigned int __x; \ 374 355 \ 375 356 __gk_err = __get_kernel_asm(__x, (type *)(src), "l"); \ 376 357 *((type *)(dst)) = (type)__x; \ 377 358 break; \ 378 359 }; \ 379 360 case 8: { \ 380 - u64 __x = 0; \ 361 + unsigned long __x; \ 381 362 \ 382 363 __gk_err = __get_kernel_asm(__x, (type *)(src), "lg"); \ 383 364 *((type *)(dst)) = (type)__x; \
+1 -1
arch/s390/kernel/Makefile
··· 33 33 CFLAGS_dumpstack.o += -fno-optimize-sibling-calls 34 34 CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls 35 35 36 - obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o 36 + obj-y := traps.o time.o process.o earlypgm.o early.o setup.o idle.o vtime.o 37 37 obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o 38 38 obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o 39 39 obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o
+21 -5
arch/s390/kernel/asm-offsets.c
··· 32 32 /* pt_regs offsets */ 33 33 OFFSET(__PT_PSW, pt_regs, psw); 34 34 OFFSET(__PT_GPRS, pt_regs, gprs); 35 + OFFSET(__PT_R0, pt_regs, gprs[0]); 36 + OFFSET(__PT_R1, pt_regs, gprs[1]); 37 + OFFSET(__PT_R2, pt_regs, gprs[2]); 38 + OFFSET(__PT_R3, pt_regs, gprs[3]); 39 + OFFSET(__PT_R4, pt_regs, gprs[4]); 40 + OFFSET(__PT_R5, pt_regs, gprs[5]); 41 + OFFSET(__PT_R6, pt_regs, gprs[6]); 42 + OFFSET(__PT_R7, pt_regs, gprs[7]); 43 + OFFSET(__PT_R8, pt_regs, gprs[8]); 44 + OFFSET(__PT_R9, pt_regs, gprs[9]); 45 + OFFSET(__PT_R10, pt_regs, gprs[10]); 46 + OFFSET(__PT_R11, pt_regs, gprs[11]); 47 + OFFSET(__PT_R12, pt_regs, gprs[12]); 48 + OFFSET(__PT_R13, pt_regs, gprs[13]); 49 + OFFSET(__PT_R14, pt_regs, gprs[14]); 50 + OFFSET(__PT_R15, pt_regs, gprs[15]); 35 51 OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2); 36 52 OFFSET(__PT_FLAGS, pt_regs, flags); 37 53 OFFSET(__PT_CR1, pt_regs, cr1); ··· 57 41 /* stack_frame offsets */ 58 42 OFFSET(__SF_BACKCHAIN, stack_frame, back_chain); 59 43 OFFSET(__SF_GPRS, stack_frame, gprs); 60 - OFFSET(__SF_EMPTY, stack_frame, empty1[0]); 61 - OFFSET(__SF_SIE_CONTROL, stack_frame, empty1[1]); 62 - OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[2]); 63 - OFFSET(__SF_SIE_REASON, stack_frame, empty1[3]); 64 - OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[4]); 44 + OFFSET(__SF_EMPTY, stack_frame, empty[0]); 45 + OFFSET(__SF_SIE_CONTROL, stack_frame, sie_control_block); 46 + OFFSET(__SF_SIE_SAVEAREA, stack_frame, sie_savearea); 47 + OFFSET(__SF_SIE_REASON, stack_frame, sie_reason); 48 + OFFSET(__SF_SIE_FLAGS, stack_frame, sie_flags); 65 49 DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame)); 66 50 BLANK(); 67 51 /* idle data offsets */
+3 -30
arch/s390/kernel/base.S arch/s390/kernel/earlypgm.S
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* 3 - * arch/s390/kernel/base.S 4 - * 5 3 * Copyright IBM Corp. 2006, 2007 6 4 * Author(s): Michael Holzheu <holzheu@de.ibm.com> 7 5 */ 8 6 9 7 #include <linux/linkage.h> 10 8 #include <asm/asm-offsets.h> 11 - #include <asm/nospec-insn.h> 12 - #include <asm/ptrace.h> 13 9 14 - GEN_BR_THUNK %r9 15 - GEN_BR_THUNK %r14 16 - 17 - __PT_R0 = __PT_GPRS 18 - __PT_R8 = __PT_GPRS + 64 19 - 20 - ENTRY(s390_base_pgm_handler) 10 + ENTRY(early_pgm_check_handler) 21 11 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 22 12 aghi %r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE) 23 13 la %r11,STACK_FRAME_OVERHEAD(%r15) ··· 16 26 mvc __PT_PSW(16,%r11),__LC_PGM_OLD_PSW 17 27 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 18 28 lgr %r2,%r11 19 - larl %r1,s390_base_pgm_handler_fn 20 - lg %r9,0(%r1) 21 - ltgr %r9,%r9 22 - jz 1f 23 - BASR_EX %r14,%r9 29 + brasl %r14,__do_early_pgm_check 24 30 mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) 25 31 lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 26 32 lpswe __LC_RETURN_PSW 27 - 1: larl %r13,disabled_wait_psw 28 - lpswe 0(%r13) 29 - ENDPROC(s390_base_pgm_handler) 30 - 31 - .align 8 32 - disabled_wait_psw: 33 - .quad 0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler 34 - 35 - .section .bss 36 - .align 8 37 - .globl s390_base_pgm_handler_fn 38 - s390_base_pgm_handler_fn: 39 - .quad 0 40 - .previous 33 + ENDPROC(early_pgm_check_handler)
+2 -3
arch/s390/kernel/early.c
··· 149 149 topology_max_mnest = max_mnest; 150 150 } 151 151 152 - static void early_pgm_check_handler(struct pt_regs *regs) 152 + void __do_early_pgm_check(struct pt_regs *regs) 153 153 { 154 154 if (!fixup_exception(regs)) 155 155 disabled_wait(); ··· 159 159 { 160 160 psw_t psw; 161 161 162 - psw.addr = (unsigned long)s390_base_pgm_handler; 162 + psw.addr = (unsigned long)early_pgm_check_handler; 163 163 psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA; 164 164 if (IS_ENABLED(CONFIG_KASAN)) 165 165 psw.mask |= PSW_MASK_DAT; 166 166 S390_lowcore.program_new_psw = psw; 167 - s390_base_pgm_handler_fn = early_pgm_check_handler; 168 167 S390_lowcore.preempt_count = INIT_PREEMPT_COUNT; 169 168 } 170 169
+5 -18
arch/s390/kernel/entry.S
··· 29 29 #include <asm/export.h> 30 30 #include <asm/nospec-insn.h> 31 31 32 - __PT_R0 = __PT_GPRS 33 - __PT_R1 = __PT_GPRS + 8 34 - __PT_R2 = __PT_GPRS + 16 35 - __PT_R3 = __PT_GPRS + 24 36 - __PT_R4 = __PT_GPRS + 32 37 - __PT_R5 = __PT_GPRS + 40 38 - __PT_R6 = __PT_GPRS + 48 39 - __PT_R7 = __PT_GPRS + 56 40 - __PT_R8 = __PT_GPRS + 64 41 - __PT_R9 = __PT_GPRS + 72 42 - __PT_R10 = __PT_GPRS + 80 43 - __PT_R11 = __PT_GPRS + 88 44 - __PT_R12 = __PT_GPRS + 96 45 - __PT_R13 = __PT_GPRS + 104 46 - __PT_R14 = __PT_GPRS + 112 47 - __PT_R15 = __PT_GPRS + 120 48 - 49 32 STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER 50 33 STACK_SIZE = 1 << STACK_SHIFT 51 34 STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE ··· 251 268 BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 252 269 .Lsie_entry: 253 270 sie 0(%r14) 271 + # Let the next instruction be NOP to avoid triggering a machine check 272 + # and handling it in a guest as result of the instruction execution. 273 + nopr 7 274 + .Lsie_leave: 254 275 BPOFF 255 276 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 256 277 .Lsie_skip: ··· 551 564 jno .Lmcck_panic 552 565 #if IS_ENABLED(CONFIG_KVM) 553 566 OUTSIDE %r9,.Lsie_gmap,.Lsie_done,6f 554 - OUTSIDE %r9,.Lsie_entry,.Lsie_skip,4f 567 + OUTSIDE %r9,.Lsie_entry,.Lsie_leave,4f 555 568 oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST 556 569 j 5f 557 570 4: CHKSTG .Lmcck_panic
+2
arch/s390/kernel/entry.h
··· 17 17 void io_int_handler(void); 18 18 void mcck_int_handler(void); 19 19 void restart_int_handler(void); 20 + void early_pgm_check_handler(void); 20 21 21 22 void __ret_from_fork(struct task_struct *prev, struct pt_regs *regs); 22 23 void __do_pgm_check(struct pt_regs *regs); 23 24 void __do_syscall(struct pt_regs *regs, int per_trap); 25 + void __do_early_pgm_check(struct pt_regs *regs); 24 26 25 27 void do_protection_exception(struct pt_regs *regs); 26 28 void do_dat_exception(struct pt_regs *regs);
+1 -1
arch/s390/kernel/perf_event.c
··· 30 30 if (!stack) 31 31 return NULL; 32 32 33 - return (struct kvm_s390_sie_block *) stack->empty1[0]; 33 + return (struct kvm_s390_sie_block *)stack->sie_control_block; 34 34 } 35 35 36 36 static bool is_in_guest(struct pt_regs *regs)
+35 -4
arch/s390/mm/extable.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 3 + #include <linux/bitfield.h> 3 4 #include <linux/extable.h> 5 + #include <linux/string.h> 4 6 #include <linux/errno.h> 5 7 #include <linux/panic.h> 6 8 #include <asm/asm-extable.h> ··· 26 24 return true; 27 25 } 28 26 29 - static bool ex_handler_uaccess(const struct exception_table_entry *ex, struct pt_regs *regs) 27 + static bool ex_handler_ua_store(const struct exception_table_entry *ex, struct pt_regs *regs) 30 28 { 31 - regs->gprs[ex->data] = -EFAULT; 29 + unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data); 30 + 31 + regs->gprs[reg_err] = -EFAULT; 32 + regs->psw.addr = extable_fixup(ex); 33 + return true; 34 + } 35 + 36 + static bool ex_handler_ua_load_mem(const struct exception_table_entry *ex, struct pt_regs *regs) 37 + { 38 + unsigned int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data); 39 + unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data); 40 + size_t len = FIELD_GET(EX_DATA_LEN, ex->data); 41 + 42 + regs->gprs[reg_err] = -EFAULT; 43 + memset((void *)regs->gprs[reg_addr], 0, len); 44 + regs->psw.addr = extable_fixup(ex); 45 + return true; 46 + } 47 + 48 + static bool ex_handler_ua_load_reg(const struct exception_table_entry *ex, struct pt_regs *regs) 49 + { 50 + unsigned int reg_zero = FIELD_GET(EX_DATA_REG_ADDR, ex->data); 51 + unsigned int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data); 52 + 53 + regs->gprs[reg_err] = -EFAULT; 54 + regs->gprs[reg_zero] = 0; 32 55 regs->psw.addr = extable_fixup(ex); 33 56 return true; 34 57 } ··· 70 43 return ex_handler_fixup(ex, regs); 71 44 case EX_TYPE_BPF: 72 45 return ex_handler_bpf(ex, regs); 73 - case EX_TYPE_UACCESS: 74 - return ex_handler_uaccess(ex, regs); 46 + case EX_TYPE_UA_STORE: 47 + return ex_handler_ua_store(ex, regs); 48 + case EX_TYPE_UA_LOAD_MEM: 49 + return ex_handler_ua_load_mem(ex, regs); 50 + case EX_TYPE_UA_LOAD_REG: 51 + return ex_handler_ua_load_reg(ex, regs); 75 52 } 76 53 panic("invalid exception table entry"); 77 54 }
+14
arch/s390/mm/gmap.c
··· 2608 2608 return 0; 2609 2609 } 2610 2610 2611 + /* 2612 + * Give a chance to schedule after setting a key to 256 pages. 2613 + * We only hold the mm lock, which is a rwsem and the kvm srcu. 2614 + * Both can sleep. 2615 + */ 2616 + static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr, 2617 + unsigned long next, struct mm_walk *walk) 2618 + { 2619 + cond_resched(); 2620 + return 0; 2621 + } 2622 + 2611 2623 static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr, 2612 2624 unsigned long hmask, unsigned long next, 2613 2625 struct mm_walk *walk) ··· 2642 2630 end = start + HPAGE_SIZE - 1; 2643 2631 __storage_key_init_range(start, end); 2644 2632 set_bit(PG_arch_1, &page->flags); 2633 + cond_resched(); 2645 2634 return 0; 2646 2635 } 2647 2636 2648 2637 static const struct mm_walk_ops enable_skey_walk_ops = { 2649 2638 .hugetlb_entry = __s390_enable_skey_hugetlb, 2650 2639 .pte_entry = __s390_enable_skey_pte, 2640 + .pmd_entry = __s390_enable_skey_pmd, 2651 2641 }; 2652 2642 2653 2643 int s390_enable_skey(void)
+1 -1
arch/s390/mm/pgtable.c
··· 748 748 pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT; 749 749 ptev = pte_val(*ptep); 750 750 if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE)) 751 - page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1); 751 + page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0); 752 752 pgste_set_unlock(ptep, pgste); 753 753 preempt_enable(); 754 754 }