Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-6.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Heiko Carstens:

- Add empty command line parameter handling stubs to kernel for all
command line parameters which are handled in the decompressor. This
avoids invalid "Unknown kernel command line parameters" messages from
the kernel, and also avoids these parameters being incorrectly passed
to user space. This has already caused confusion, therefore add the
empty stubs

- Add missing phys_to_virt() handling to machine check handler

- Introduce and use a union to be used for zcrypt inline assemblies.
This makes sure that only a register wide member of the union is
passed as input and output parameter to inline assemblies, while
usual C code uses other members of the union to access bit fields of
it

- Add and use a READ_ONCE_ALIGNED_128() macro, which can be used to
atomically read a 128-bit value from memory. This replaces the
(mis-)use of the 128-bit cmpxchg operation to do the same in cpum_sf
code. Currently gcc does not generate the used lpq instruction if
__READ_ONCE() is used for aligned 128-bit accesses, therefore use
this s390 specific helper

- Simplify machine check handler code if a task needs to be killed
because of e.g. register corruption due to a machine malfunction

- Perform CPU reset to clear pending interrupts and TLB entries on an
already stopped target CPU before delegating work to it

- Generate arch/s390/boot/vmlinux.map link map for the decompressor,
when CONFIG_VMLINUX_MAP is enabled for debugging purposes

- Fix segment type handling for dcssblk devices. It incorrectly always
returned type "READ/WRITE" even for read-only segments, which can
result in a kernel panic if somebody tries to write to a read-only
device

- Sort config S390 select list again

- Fix two kprobe reenter bugs revealed by a recently added kprobe kunit
test

* tag 's390-6.3-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/kprobes: fix current_kprobe never cleared after kprobes reenter
s390/kprobes: fix irq mask clobbering on kprobe reenter from post_handler
s390/Kconfig: sort config S390 select list again
s390/extmem: return correct segment type in __segment_load()
s390/decompressor: add link map saving
s390/smp: perform cpu reset before delegating work to target cpu
s390/mcck: cleanup user process termination path
s390/cpum_sf: use READ_ONCE_ALIGNED_128() instead of 128-bit cmpxchg
s390/rwonce: add READ_ONCE_ALIGNED_128() macro
s390/ap,zcrypt,vfio: introduce and use ap_queue_status_reg union
s390/nmi: fix virtual-physical address confusion
s390/setup: do not complain about parameters handled in decompressor

+131 -102
+3 -3
arch/s390/Kconfig
··· 125 125 select ARCH_WANTS_DYNAMIC_TASK_STRUCT 126 126 select ARCH_WANTS_NO_INSTR 127 127 select ARCH_WANT_DEFAULT_BPF_JIT 128 - select ARCH_WANT_IPC_PARSE_VERSION 129 128 select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP 129 + select ARCH_WANT_IPC_PARSE_VERSION 130 130 select BUILDTIME_TABLE_SORT 131 131 select CLONE_BACKWARDS2 132 132 select DMA_OPS if PCI ··· 187 187 select HAVE_KPROBES 188 188 select HAVE_KPROBES_ON_FTRACE 189 189 select HAVE_KRETPROBES 190 - select HAVE_RETHOOK 191 190 select HAVE_KVM 192 191 select HAVE_LIVEPATCH 193 192 select HAVE_MEMBLOCK_PHYS_MAP ··· 199 200 select HAVE_PERF_USER_STACK_DUMP 200 201 select HAVE_REGS_AND_STACK_ACCESS_API 201 202 select HAVE_RELIABLE_STACKTRACE 203 + select HAVE_RETHOOK 202 204 select HAVE_RSEQ 203 205 select HAVE_SAMPLE_FTRACE_DIRECT 204 206 select HAVE_SAMPLE_FTRACE_DIRECT_MULTI ··· 210 210 select HAVE_VIRT_CPU_ACCOUNTING_IDLE 211 211 select IOMMU_HELPER if PCI 212 212 select IOMMU_SUPPORT if PCI 213 + select MMU_GATHER_MERGE_VMAS 213 214 select MMU_GATHER_NO_GATHER 214 215 select MMU_GATHER_RCU_TABLE_FREE 215 - select MMU_GATHER_MERGE_VMAS 216 216 select MODULES_USE_ELF_RELA 217 217 select NEED_DMA_MAP_STATE if PCI 218 218 select NEED_PER_CPU_EMBED_FIRST_CHUNK
+3 -1
arch/s390/boot/Makefile
··· 52 52 OBJECTS := $(addprefix $(obj)/,$(obj-y)) 53 53 OBJECTS_ALL := $(addprefix $(obj)/,$(obj-all)) 54 54 55 + clean-files += vmlinux.map 56 + 55 57 quiet_cmd_section_cmp = SECTCMP $* 56 58 define cmd_section_cmp 57 59 s1=`$(OBJDUMP) -t -j "$*" "$<" | sort | \ ··· 73 71 $(obj)/section_cmp%: vmlinux $(obj)/vmlinux FORCE 74 72 $(call if_changed,section_cmp) 75 73 76 - LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup --build-id=sha1 -T 74 + LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup $(if $(CONFIG_VMLINUX_MAP),-Map=$(obj)/vmlinux.map) --build-id=sha1 -T 77 75 $(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS_ALL) FORCE 78 76 $(call if_changed,ld) 79 77
+52 -48
arch/s390/include/asm/ap.h
··· 49 49 unsigned int _pad2 : 16; 50 50 }; 51 51 52 + /* 53 + * AP queue status reg union to access the reg1 54 + * register with the lower 32 bits comprising the 55 + * ap queue status. 56 + */ 57 + union ap_queue_status_reg { 58 + unsigned long value; 59 + struct { 60 + u32 _pad; 61 + struct ap_queue_status status; 62 + }; 63 + }; 64 + 52 65 /** 53 66 * ap_intructions_available() - Test if AP instructions are available. 54 67 * ··· 95 82 */ 96 83 static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info) 97 84 { 98 - struct ap_queue_status reg1; 85 + union ap_queue_status_reg reg1; 99 86 unsigned long reg2; 100 87 101 88 asm volatile( ··· 104 91 " .insn rre,0xb2af0000,0,0\n" /* PQAP(TAPQ) */ 105 92 " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ 106 93 " lgr %[reg2],2\n" /* gr2 into reg2 */ 107 - : [reg1] "=&d" (reg1), [reg2] "=&d" (reg2) 94 + : [reg1] "=&d" (reg1.value), [reg2] "=&d" (reg2) 108 95 : [qid] "d" (qid) 109 96 : "cc", "0", "1", "2"); 110 97 if (info) 111 98 *info = reg2; 112 - return reg1; 99 + return reg1.status; 113 100 } 114 101 115 102 /** ··· 138 125 static inline struct ap_queue_status ap_rapq(ap_qid_t qid) 139 126 { 140 127 unsigned long reg0 = qid | (1UL << 24); /* fc 1UL is RAPQ */ 141 - struct ap_queue_status reg1; 128 + union ap_queue_status_reg reg1; 142 129 143 130 asm volatile( 144 131 " lgr 0,%[reg0]\n" /* qid arg into gr0 */ 145 132 " .insn rre,0xb2af0000,0,0\n" /* PQAP(RAPQ) */ 146 133 " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ 147 - : [reg1] "=&d" (reg1) 134 + : [reg1] "=&d" (reg1.value) 148 135 : [reg0] "d" (reg0) 149 136 : "cc", "0", "1"); 150 - return reg1; 137 + return reg1.status; 151 138 } 152 139 153 140 /** ··· 159 146 static inline struct ap_queue_status ap_zapq(ap_qid_t qid) 160 147 { 161 148 unsigned long reg0 = qid | (2UL << 24); /* fc 2UL is ZAPQ */ 162 - struct ap_queue_status reg1; 149 + union ap_queue_status_reg reg1; 163 150 164 151 asm volatile( 165 152 " lgr 0,%[reg0]\n" /* qid arg into 
gr0 */ 166 153 " .insn rre,0xb2af0000,0,0\n" /* PQAP(ZAPQ) */ 167 154 " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ 168 - : [reg1] "=&d" (reg1) 155 + : [reg1] "=&d" (reg1.value) 169 156 : [reg0] "d" (reg0) 170 157 : "cc", "0", "1"); 171 - return reg1; 158 + return reg1.status; 172 159 } 173 160 174 161 /** ··· 222 209 * parameter to the PQAP(AQIC) instruction. For details please 223 210 * see the AR documentation. 224 211 */ 225 - struct ap_qirq_ctrl { 226 - unsigned int _res1 : 8; 227 - unsigned int zone : 8; /* zone info */ 228 - unsigned int ir : 1; /* ir flag: enable (1) or disable (0) irq */ 229 - unsigned int _res2 : 4; 230 - unsigned int gisc : 3; /* guest isc field */ 231 - unsigned int _res3 : 6; 232 - unsigned int gf : 2; /* gisa format */ 233 - unsigned int _res4 : 1; 234 - unsigned int gisa : 27; /* gisa origin */ 235 - unsigned int _res5 : 1; 236 - unsigned int isc : 3; /* irq sub class */ 212 + union ap_qirq_ctrl { 213 + unsigned long value; 214 + struct { 215 + unsigned int : 8; 216 + unsigned int zone : 8; /* zone info */ 217 + unsigned int ir : 1; /* ir flag: enable (1) or disable (0) irq */ 218 + unsigned int : 4; 219 + unsigned int gisc : 3; /* guest isc field */ 220 + unsigned int : 6; 221 + unsigned int gf : 2; /* gisa format */ 222 + unsigned int : 1; 223 + unsigned int gisa : 27; /* gisa origin */ 224 + unsigned int : 1; 225 + unsigned int isc : 3; /* irq sub class */ 226 + }; 237 227 }; 238 228 239 229 /** ··· 248 232 * Returns AP queue status. 
249 233 */ 250 234 static inline struct ap_queue_status ap_aqic(ap_qid_t qid, 251 - struct ap_qirq_ctrl qirqctrl, 235 + union ap_qirq_ctrl qirqctrl, 252 236 phys_addr_t pa_ind) 253 237 { 254 238 unsigned long reg0 = qid | (3UL << 24); /* fc 3UL is AQIC */ 255 - union { 256 - unsigned long value; 257 - struct ap_qirq_ctrl qirqctrl; 258 - struct { 259 - u32 _pad; 260 - struct ap_queue_status status; 261 - }; 262 - } reg1; 239 + union ap_queue_status_reg reg1; 263 240 unsigned long reg2 = pa_ind; 264 241 265 - reg1.qirqctrl = qirqctrl; 242 + reg1.value = qirqctrl.value; 266 243 267 244 asm volatile( 268 245 " lgr 0,%[reg0]\n" /* qid param into gr0 */ ··· 263 254 " lgr 2,%[reg2]\n" /* ni addr into gr2 */ 264 255 " .insn rre,0xb2af0000,0,0\n" /* PQAP(AQIC) */ 265 256 " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ 266 - : [reg1] "+&d" (reg1) 257 + : [reg1] "+&d" (reg1.value) 267 258 : [reg0] "d" (reg0), [reg2] "d" (reg2) 268 259 : "cc", "memory", "0", "1", "2"); 269 260 ··· 300 291 union ap_qact_ap_info *apinfo) 301 292 { 302 293 unsigned long reg0 = qid | (5UL << 24) | ((ifbit & 0x01) << 22); 303 - union { 304 - unsigned long value; 305 - struct { 306 - u32 _pad; 307 - struct ap_queue_status status; 308 - }; 309 - } reg1; 294 + union ap_queue_status_reg reg1; 310 295 unsigned long reg2; 311 296 312 297 reg1.value = apinfo->val; ··· 311 308 " .insn rre,0xb2af0000,0,0\n" /* PQAP(QACT) */ 312 309 " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ 313 310 " lgr %[reg2],2\n" /* qact out info into reg2 */ 314 - : [reg1] "+&d" (reg1), [reg2] "=&d" (reg2) 311 + : [reg1] "+&d" (reg1.value), [reg2] "=&d" (reg2) 315 312 : [reg0] "d" (reg0) 316 313 : "cc", "0", "1", "2"); 317 314 apinfo->val = reg2; ··· 336 333 { 337 334 unsigned long reg0 = qid | 0x40000000UL; /* 0x4... 
is last msg part */ 338 335 union register_pair nqap_r1, nqap_r2; 339 - struct ap_queue_status reg1; 336 + union ap_queue_status_reg reg1; 340 337 341 338 nqap_r1.even = (unsigned int)(psmid >> 32); 342 339 nqap_r1.odd = psmid & 0xffffffff; ··· 348 345 "0: .insn rre,0xb2ad0000,%[nqap_r1],%[nqap_r2]\n" 349 346 " brc 2,0b\n" /* handle partial completion */ 350 347 " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ 351 - : [reg0] "+&d" (reg0), [reg1] "=&d" (reg1), 348 + : [reg0] "+&d" (reg0), [reg1] "=&d" (reg1.value), 352 349 [nqap_r2] "+&d" (nqap_r2.pair) 353 350 : [nqap_r1] "d" (nqap_r1.pair) 354 351 : "cc", "memory", "0", "1"); 355 - return reg1; 352 + return reg1.status; 356 353 } 357 354 358 355 /** ··· 392 389 unsigned long *resgr0) 393 390 { 394 391 unsigned long reg0 = resgr0 && *resgr0 ? *resgr0 : qid | 0x80000000UL; 395 - struct ap_queue_status reg1; 392 + union ap_queue_status_reg reg1; 396 393 unsigned long reg2; 397 394 union register_pair rp1, rp2; 398 395 ··· 411 408 "2: lgr %[reg0],0\n" /* gr0 (qid + info) into reg0 */ 412 409 " lgr %[reg1],1\n" /* gr1 (status) into reg1 */ 413 410 " lgr %[reg2],2\n" /* gr2 (res length) into reg2 */ 414 - : [reg0] "+&d" (reg0), [reg1] "=&d" (reg1), [reg2] "=&d" (reg2), 415 - [rp1] "+&d" (rp1.pair), [rp2] "+&d" (rp2.pair) 411 + : [reg0] "+&d" (reg0), [reg1] "=&d" (reg1.value), 412 + [reg2] "=&d" (reg2), [rp1] "+&d" (rp1.pair), 413 + [rp2] "+&d" (rp2.pair) 416 414 : 417 415 : "cc", "memory", "0", "1", "2"); 418 416 ··· 425 421 * Signal the caller that this dqap is only partially received 426 422 * with a special status response code 0xFF and *resgr0 updated 427 423 */ 428 - reg1.response_code = 0xFF; 424 + reg1.status.response_code = 0xFF; 429 425 if (resgr0) 430 426 *resgr0 = reg0; 431 427 } else { ··· 434 430 *resgr0 = 0; 435 431 } 436 432 437 - return reg1; 433 + return reg1.status; 438 434 } 439 435 440 436 /*
+2 -3
arch/s390/include/asm/nmi.h
··· 101 101 int nmi_alloc_mcesa(u64 *mcesad); 102 102 void nmi_free_mcesa(u64 *mcesad); 103 103 104 - void s390_handle_mcck(struct pt_regs *regs); 105 - void __s390_handle_mcck(void); 106 - int s390_do_machine_check(struct pt_regs *regs); 104 + void s390_handle_mcck(void); 105 + void s390_do_machine_check(struct pt_regs *regs); 107 106 108 107 #endif /* __ASSEMBLY__ */ 109 108 #endif /* _ASM_S390_NMI_H */
+31
arch/s390/include/asm/rwonce.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef __ASM_S390_RWONCE_H 4 + #define __ASM_S390_RWONCE_H 5 + 6 + #include <linux/compiler_types.h> 7 + 8 + /* 9 + * Use READ_ONCE_ALIGNED_128() for 128-bit block concurrent (atomic) read 10 + * accesses. Note that x must be 128-bit aligned, otherwise a specification 11 + * exception is generated. 12 + */ 13 + #define READ_ONCE_ALIGNED_128(x) \ 14 + ({ \ 15 + union { \ 16 + typeof(x) __x; \ 17 + __uint128_t val; \ 18 + } __u; \ 19 + \ 20 + BUILD_BUG_ON(sizeof(x) != 16); \ 21 + asm volatile( \ 22 + " lpq %[val],%[_x]\n" \ 23 + : [val] "=d" (__u.val) \ 24 + : [_x] "QS" (x) \ 25 + : "memory"); \ 26 + __u.__x; \ 27 + }) 28 + 29 + #include <asm-generic/rwonce.h> 30 + 31 + #endif /* __ASM_S390_RWONCE_H */
+17
arch/s390/kernel/early.c
··· 36 36 37 37 int __bootdata(is_full_image); 38 38 39 + #define decompressor_handled_param(param) \ 40 + static int __init ignore_decompressor_param_##param(char *s) \ 41 + { \ 42 + return 0; \ 43 + } \ 44 + early_param(#param, ignore_decompressor_param_##param) 45 + 46 + decompressor_handled_param(mem); 47 + decompressor_handled_param(vmalloc); 48 + decompressor_handled_param(dfltcc); 49 + decompressor_handled_param(noexec); 50 + decompressor_handled_param(facilities); 51 + decompressor_handled_param(nokaslr); 52 + #if IS_ENABLED(CONFIG_KVM) 53 + decompressor_handled_param(prot_virt); 54 + #endif 55 + 39 56 static void __init reset_tod_clock(void) 40 57 { 41 58 union tod_clock clk;
-10
arch/s390/kernel/entry.S
··· 562 562 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 563 563 lgr %r2,%r11 # pass pointer to pt_regs 564 564 brasl %r14,s390_do_machine_check 565 - cghi %r2,0 566 - je .Lmcck_return 567 - lg %r1,__LC_KERNEL_STACK # switch to kernel stack 568 - mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 569 - xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 570 - la %r11,STACK_FRAME_OVERHEAD(%r1) 571 - lgr %r2,%r11 572 - lgr %r15,%r1 573 - brasl %r14,s390_handle_mcck 574 - .Lmcck_return: 575 565 lctlg %c1,%c1,__PT_CR1(%r11) 576 566 lmg %r0,%r10,__PT_R0(%r11) 577 567 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
+2 -2
arch/s390/kernel/kprobes.c
··· 278 278 { 279 279 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); 280 280 kcb->kprobe_status = kcb->prev_kprobe.status; 281 + kcb->prev_kprobe.kp = NULL; 281 282 } 282 283 NOKPROBE_SYMBOL(pop_kprobe); 283 284 ··· 403 402 if (!p) 404 403 return 0; 405 404 405 + resume_execution(p, regs); 406 406 if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) { 407 407 kcb->kprobe_status = KPROBE_HIT_SSDONE; 408 408 p->post_handler(p, regs, 0); 409 409 } 410 - 411 - resume_execution(p, regs); 412 410 pop_kprobe(kcb); 413 411 preempt_enable_no_resched(); 414 412
+6 -20
arch/s390/kernel/nmi.c
··· 156 156 * Main machine check handler function. Will be called with interrupts disabled 157 157 * and machine checks enabled. 158 158 */ 159 - void __s390_handle_mcck(void) 159 + void s390_handle_mcck(void) 160 160 { 161 161 struct mcck_struct mcck; 162 162 ··· 192 192 if (mcck.stp_queue) 193 193 stp_queue_work(); 194 194 if (mcck.kill_task) { 195 - local_irq_enable(); 196 195 printk(KERN_EMERG "mcck: Terminating task because of machine " 197 196 "malfunction (code 0x%016lx).\n", mcck.mcck_code); 198 197 printk(KERN_EMERG "mcck: task: %s, pid: %d.\n", 199 198 current->comm, current->pid); 200 - make_task_dead(SIGSEGV); 199 + if (is_global_init(current)) 200 + panic("mcck: Attempting to kill init!\n"); 201 + do_send_sig_info(SIGKILL, SEND_SIG_PRIV, current, PIDTYPE_PID); 201 202 } 202 203 } 203 204 204 - void noinstr s390_handle_mcck(struct pt_regs *regs) 205 - { 206 - trace_hardirqs_off(); 207 - pai_kernel_enter(regs); 208 - __s390_handle_mcck(); 209 - pai_kernel_exit(regs); 210 - trace_hardirqs_on(); 211 - } 212 205 /* 213 206 * returns 0 if register contents could be validated 214 207 * returns 1 otherwise ··· 339 346 struct sie_page *sie_page; 340 347 341 348 /* r14 contains the sie block, which was set in sie64a */ 342 - struct kvm_s390_sie_block *sie_block = 343 - (struct kvm_s390_sie_block *) regs->gprs[14]; 349 + struct kvm_s390_sie_block *sie_block = phys_to_virt(regs->gprs[14]); 344 350 345 351 if (sie_block == NULL) 346 352 /* Something's seriously wrong, stop system. */ ··· 366 374 /* 367 375 * machine check handler. 
368 376 */ 369 - int notrace s390_do_machine_check(struct pt_regs *regs) 377 + void notrace s390_do_machine_check(struct pt_regs *regs) 370 378 { 371 379 static int ipd_count; 372 380 static DEFINE_SPINLOCK(ipd_lock); ··· 496 504 } 497 505 clear_cpu_flag(CIF_MCCK_GUEST); 498 506 499 - if (user_mode(regs) && mcck_pending) { 500 - irqentry_nmi_exit(regs, irq_state); 501 - return 1; 502 - } 503 - 504 507 if (mcck_pending) 505 508 schedule_mcck_handler(); 506 509 507 510 irqentry_nmi_exit(regs, irq_state); 508 - return 0; 509 511 } 510 512 NOKPROBE_SYMBOL(s390_do_machine_check); 511 513
+3 -6
arch/s390/kernel/perf_cpum_sf.c
··· 1355 1355 num_sdb++; 1356 1356 1357 1357 /* Reset trailer (using compare-double-and-swap) */ 1358 - /* READ_ONCE() 16 byte header */ 1359 - prev.val = __cdsg(&te->header.val, 0, 0); 1358 + prev.val = READ_ONCE_ALIGNED_128(te->header.val); 1360 1359 do { 1361 1360 old.val = prev.val; 1362 1361 new.val = prev.val; ··· 1557 1558 struct hws_trailer_entry *te; 1558 1559 1559 1560 te = aux_sdb_trailer(aux, alert_index); 1560 - /* READ_ONCE() 16 byte header */ 1561 - prev.val = __cdsg(&te->header.val, 0, 0); 1561 + prev.val = READ_ONCE_ALIGNED_128(te->header.val); 1562 1562 do { 1563 1563 old.val = prev.val; 1564 1564 new.val = prev.val; ··· 1635 1637 idx_old = idx = aux->empty_mark + 1; 1636 1638 for (i = 0; i < range_scan; i++, idx++) { 1637 1639 te = aux_sdb_trailer(aux, idx); 1638 - /* READ_ONCE() 16 byte header */ 1639 - prev.val = __cdsg(&te->header.val, 0, 0); 1640 + prev.val = READ_ONCE_ALIGNED_128(te->header.val); 1640 1641 do { 1641 1642 old.val = prev.val; 1642 1643 new.val = prev.val;
+2 -1
arch/s390/kernel/smp.c
··· 333 333 } 334 334 /* Stop target cpu (if func returns this stops the current cpu). */ 335 335 pcpu_sigp_retry(pcpu, SIGP_STOP, 0); 336 + pcpu_sigp_retry(pcpu, SIGP_CPU_RESET, 0); 336 337 /* Restart func on the target cpu and stop the current cpu. */ 337 338 if (lc) { 338 339 lc->restart_stack = stack; ··· 523 522 if (test_bit(ec_call_function_single, &bits)) 524 523 generic_smp_call_function_single_interrupt(); 525 524 if (test_bit(ec_mcck_pending, &bits)) 526 - __s390_handle_mcck(); 525 + s390_handle_mcck(); 527 526 if (test_bit(ec_irq_work, &bits)) 528 527 irq_work_run(); 529 528 }
+7 -5
arch/s390/mm/extmem.c
··· 289 289 290 290 /* 291 291 * real segment loading function, called from segment_load 292 + * Must return either an error code < 0, or the segment type code >= 0 292 293 */ 293 294 static int 294 295 __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end) 295 296 { 296 297 unsigned long start_addr, end_addr, dummy; 297 298 struct dcss_segment *seg; 298 - int rc, diag_cc; 299 + int rc, diag_cc, segtype; 299 300 300 301 start_addr = end_addr = 0; 302 + segtype = -1; 301 303 seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA); 302 304 if (seg == NULL) { 303 305 rc = -ENOMEM; ··· 328 326 seg->res_name[8] = '\0'; 329 327 strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name)); 330 328 seg->res->name = seg->res_name; 331 - rc = seg->vm_segtype; 332 - if (rc == SEG_TYPE_SC || 333 - ((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared)) 329 + segtype = seg->vm_segtype; 330 + if (segtype == SEG_TYPE_SC || 331 + ((segtype == SEG_TYPE_SR || segtype == SEG_TYPE_ER) && !do_nonshared)) 334 332 seg->res->flags |= IORESOURCE_READONLY; 335 333 336 334 /* Check for overlapping resources before adding the mapping. */ ··· 388 386 out_free: 389 387 kfree(seg); 390 388 out: 391 - return rc; 389 + return rc < 0 ? rc : segtype; 392 390 } 393 391 394 392 /*
+1 -1
drivers/s390/crypto/ap_queue.c
··· 29 29 */ 30 30 static int ap_queue_enable_irq(struct ap_queue *aq, void *ind) 31 31 { 32 + union ap_qirq_ctrl qirqctrl = { .value = 0 }; 32 33 struct ap_queue_status status; 33 - struct ap_qirq_ctrl qirqctrl = { 0 }; 34 34 35 35 qirqctrl.ir = 1; 36 36 qirqctrl.isc = AP_ISC;
+2 -2
drivers/s390/crypto/vfio_ap_ops.c
··· 301 301 */ 302 302 static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q) 303 303 { 304 - struct ap_qirq_ctrl aqic_gisa = {}; 304 + union ap_qirq_ctrl aqic_gisa = { .value = 0 }; 305 305 struct ap_queue_status status; 306 306 int retries = 5; 307 307 ··· 384 384 int isc, 385 385 struct kvm_vcpu *vcpu) 386 386 { 387 - struct ap_qirq_ctrl aqic_gisa = {}; 387 + union ap_qirq_ctrl aqic_gisa = { .value = 0 }; 388 388 struct ap_queue_status status = {}; 389 389 struct kvm_s390_gisa *gisa; 390 390 struct page *h_page;