Merge tag 's390-6.15-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Vasily Gorbik:

- Fix machine check handler _CIF_MCCK_GUEST bit setting by adding the
missing base register for relocated lowcore address

- Fix build failure on older linkers by conditionally adding the
-no-pie linker option only when it is supported

- Fix inaccurate kernel messages in vfio-ap by providing descriptive
error notifications for AP queue sharing violations

- Fix PCI isolation logic by ensuring non-VF devices correctly return
false in zpci_bus_is_isolated_vf()

- Fix PCI DMA range map setup by using dma_direct_set_offset() to add a
proper sentinel element, preventing potential overruns and
translation errors

- Cleanup header dependency problems with asm-offsets.c

- Add fault info for unexpected low-address protection faults in user
mode

- Add support for HOTPLUG_SMT, replacing the arch-specific "nosmt"
handling with common code handling

- Use bitop functions to implement CPU flag helper functions to ensure
that bits cannot get lost if modified in different contexts on a CPU

- Remove unused machine_flags from the lowcore

* tag 's390-6.15-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/vfio-ap: Fix no AP queue sharing allowed message written to kernel log
s390/pci: Fix dev.dma_range_map missing sentinel element
s390/mm: Dump fault info in case of low address protection fault
s390/smp: Add support for HOTPLUG_SMT
s390: Fix linker error when -no-pie option is unavailable
s390/processor: Use bitop functions for cpu flag helper functions
s390/asm-offsets: Remove ASM_OFFSETS_C
s390/asm-offsets: Include ftrace_regs.h instead of ftrace.h
s390/kvm: Split kvm_host header file
s390/pci: Fix zpci_bus_is_isolated_vf() for non-VFs
s390/lowcore: Remove unused machine_flags
s390/entry: Fix setting _CIF_MCCK_GUEST with lowcore relocation

+437 -412
+2 -2
Documentation/admin-guide/kernel-parameters.txt
··· 4265 nosmp [SMP,EARLY] Tells an SMP kernel to act as a UP kernel, 4266 and disable the IO APIC. legacy for "maxcpus=0". 4267 4268 - nosmt [KNL,MIPS,PPC,S390,EARLY] Disable symmetric multithreading (SMT). 4269 Equivalent to smt=1. 4270 4271 - [KNL,X86,PPC] Disable symmetric multithreading (SMT). 4272 nosmt=force: Force disable SMT, cannot be undone 4273 via the sysfs control file. 4274
··· 4265 nosmp [SMP,EARLY] Tells an SMP kernel to act as a UP kernel, 4266 and disable the IO APIC. legacy for "maxcpus=0". 4267 4268 + nosmt [KNL,MIPS,PPC,EARLY] Disable symmetric multithreading (SMT). 4269 Equivalent to smt=1. 4270 4271 + [KNL,X86,PPC,S390] Disable symmetric multithreading (SMT). 4272 nosmt=force: Force disable SMT, cannot be undone 4273 via the sysfs control file. 4274
+1
arch/s390/Kconfig
··· 240 select HAVE_SYSCALL_TRACEPOINTS 241 select HAVE_VIRT_CPU_ACCOUNTING 242 select HAVE_VIRT_CPU_ACCOUNTING_IDLE 243 select IOMMU_HELPER if PCI 244 select IOMMU_SUPPORT if PCI 245 select KASAN_VMALLOC if KASAN
··· 240 select HAVE_SYSCALL_TRACEPOINTS 241 select HAVE_VIRT_CPU_ACCOUNTING 242 select HAVE_VIRT_CPU_ACCOUNTING_IDLE 243 + select HOTPLUG_SMT 244 select IOMMU_HELPER if PCI 245 select IOMMU_SUPPORT if PCI 246 select KASAN_VMALLOC if KASAN
+1 -1
arch/s390/Makefile
··· 15 KBUILD_AFLAGS += -m64 16 KBUILD_CFLAGS += -m64 17 KBUILD_CFLAGS += -fPIC 18 - LDFLAGS_vmlinux := -no-pie --emit-relocs --discard-none 19 extra_tools := relocs 20 aflags_dwarf := -Wa,-gdwarf-2 21 KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
··· 15 KBUILD_AFLAGS += -m64 16 KBUILD_CFLAGS += -m64 17 KBUILD_CFLAGS += -fPIC 18 + LDFLAGS_vmlinux := $(call ld-option,-no-pie) --emit-relocs --discard-none 19 extra_tools := relocs 20 aflags_dwarf := -Wa,-gdwarf-2 21 KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
+1 -338
arch/s390/include/asm/kvm_host.h
··· 20 #include <linux/module.h> 21 #include <linux/pci.h> 22 #include <linux/mmu_notifier.h> 23 #include <asm/debug.h> 24 #include <asm/cpu.h> 25 #include <asm/fpu.h> 26 #include <asm/isc.h> 27 #include <asm/guarded_storage.h> 28 29 - #define KVM_S390_BSCA_CPU_SLOTS 64 30 - #define KVM_S390_ESCA_CPU_SLOTS 248 31 #define KVM_MAX_VCPUS 255 32 33 #define KVM_INTERNAL_MEM_SLOTS 1 ··· 49 #define KVM_REQ_VSIE_RESTART KVM_ARCH_REQ(5) 50 #define KVM_REQ_REFRESH_GUEST_PREFIX \ 51 KVM_ARCH_REQ_FLAGS(6, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) 52 - 53 - #define SIGP_CTRL_C 0x80 54 - #define SIGP_CTRL_SCN_MASK 0x3f 55 - 56 - union bsca_sigp_ctrl { 57 - __u8 value; 58 - struct { 59 - __u8 c : 1; 60 - __u8 r : 1; 61 - __u8 scn : 6; 62 - }; 63 - }; 64 - 65 - union esca_sigp_ctrl { 66 - __u16 value; 67 - struct { 68 - __u8 c : 1; 69 - __u8 reserved: 7; 70 - __u8 scn; 71 - }; 72 - }; 73 - 74 - struct esca_entry { 75 - union esca_sigp_ctrl sigp_ctrl; 76 - __u16 reserved1[3]; 77 - __u64 sda; 78 - __u64 reserved2[6]; 79 - }; 80 - 81 - struct bsca_entry { 82 - __u8 reserved0; 83 - union bsca_sigp_ctrl sigp_ctrl; 84 - __u16 reserved[3]; 85 - __u64 sda; 86 - __u64 reserved2[2]; 87 - }; 88 - 89 - union ipte_control { 90 - unsigned long val; 91 - struct { 92 - unsigned long k : 1; 93 - unsigned long kh : 31; 94 - unsigned long kg : 32; 95 - }; 96 - }; 97 - 98 - /* 99 - * Utility is defined as two bytes but having it four bytes wide 100 - * generates more efficient code. Since the following bytes are 101 - * reserved this makes no functional difference. 
102 - */ 103 - union sca_utility { 104 - __u32 val; 105 - struct { 106 - __u32 mtcr : 1; 107 - __u32 : 31; 108 - }; 109 - }; 110 - 111 - struct bsca_block { 112 - union ipte_control ipte_control; 113 - __u64 reserved[5]; 114 - __u64 mcn; 115 - union sca_utility utility; 116 - __u8 reserved2[4]; 117 - struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS]; 118 - }; 119 - 120 - struct esca_block { 121 - union ipte_control ipte_control; 122 - __u64 reserved1[6]; 123 - union sca_utility utility; 124 - __u8 reserved2[4]; 125 - __u64 mcn[4]; 126 - __u64 reserved3[20]; 127 - struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS]; 128 - }; 129 - 130 - /* 131 - * This struct is used to store some machine check info from lowcore 132 - * for machine checks that happen while the guest is running. 133 - * This info in host's lowcore might be overwritten by a second machine 134 - * check from host when host is in the machine check's high-level handling. 135 - * The size is 24 bytes. 136 - */ 137 - struct mcck_volatile_info { 138 - __u64 mcic; 139 - __u64 failing_storage_address; 140 - __u32 ext_damage_code; 141 - __u32 reserved; 142 - }; 143 - 144 - #define CR0_INITIAL_MASK (CR0_UNUSED_56 | CR0_INTERRUPT_KEY_SUBMASK | \ 145 - CR0_MEASUREMENT_ALERT_SUBMASK) 146 - #define CR14_INITIAL_MASK (CR14_UNUSED_32 | CR14_UNUSED_33 | \ 147 - CR14_EXTERNAL_DAMAGE_SUBMASK) 148 - 149 - #define SIDAD_SIZE_MASK 0xff 150 - #define sida_addr(sie_block) phys_to_virt((sie_block)->sidad & PAGE_MASK) 151 - #define sida_size(sie_block) \ 152 - ((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE) 153 - 154 - #define CPUSTAT_STOPPED 0x80000000 155 - #define CPUSTAT_WAIT 0x10000000 156 - #define CPUSTAT_ECALL_PEND 0x08000000 157 - #define CPUSTAT_STOP_INT 0x04000000 158 - #define CPUSTAT_IO_INT 0x02000000 159 - #define CPUSTAT_EXT_INT 0x01000000 160 - #define CPUSTAT_RUNNING 0x00800000 161 - #define CPUSTAT_RETAINED 0x00400000 162 - #define CPUSTAT_TIMING_SUB 0x00020000 163 - #define CPUSTAT_SIE_SUB 0x00010000 164 
- #define CPUSTAT_RRF 0x00008000 165 - #define CPUSTAT_SLSV 0x00004000 166 - #define CPUSTAT_SLSR 0x00002000 167 - #define CPUSTAT_ZARCH 0x00000800 168 - #define CPUSTAT_MCDS 0x00000100 169 - #define CPUSTAT_KSS 0x00000200 170 - #define CPUSTAT_SM 0x00000080 171 - #define CPUSTAT_IBS 0x00000040 172 - #define CPUSTAT_GED2 0x00000010 173 - #define CPUSTAT_G 0x00000008 174 - #define CPUSTAT_GED 0x00000004 175 - #define CPUSTAT_J 0x00000002 176 - #define CPUSTAT_P 0x00000001 177 - 178 - struct kvm_s390_sie_block { 179 - atomic_t cpuflags; /* 0x0000 */ 180 - __u32 : 1; /* 0x0004 */ 181 - __u32 prefix : 18; 182 - __u32 : 1; 183 - __u32 ibc : 12; 184 - __u8 reserved08[4]; /* 0x0008 */ 185 - #define PROG_IN_SIE (1<<0) 186 - __u32 prog0c; /* 0x000c */ 187 - union { 188 - __u8 reserved10[16]; /* 0x0010 */ 189 - struct { 190 - __u64 pv_handle_cpu; 191 - __u64 pv_handle_config; 192 - }; 193 - }; 194 - #define PROG_BLOCK_SIE (1<<0) 195 - #define PROG_REQUEST (1<<1) 196 - atomic_t prog20; /* 0x0020 */ 197 - __u8 reserved24[4]; /* 0x0024 */ 198 - __u64 cputm; /* 0x0028 */ 199 - __u64 ckc; /* 0x0030 */ 200 - __u64 epoch; /* 0x0038 */ 201 - __u32 svcc; /* 0x0040 */ 202 - #define LCTL_CR0 0x8000 203 - #define LCTL_CR6 0x0200 204 - #define LCTL_CR9 0x0040 205 - #define LCTL_CR10 0x0020 206 - #define LCTL_CR11 0x0010 207 - #define LCTL_CR14 0x0002 208 - __u16 lctl; /* 0x0044 */ 209 - __s16 icpua; /* 0x0046 */ 210 - #define ICTL_OPEREXC 0x80000000 211 - #define ICTL_PINT 0x20000000 212 - #define ICTL_LPSW 0x00400000 213 - #define ICTL_STCTL 0x00040000 214 - #define ICTL_ISKE 0x00004000 215 - #define ICTL_SSKE 0x00002000 216 - #define ICTL_RRBE 0x00001000 217 - #define ICTL_TPROT 0x00000200 218 - __u32 ictl; /* 0x0048 */ 219 - #define ECA_CEI 0x80000000 220 - #define ECA_IB 0x40000000 221 - #define ECA_SIGPI 0x10000000 222 - #define ECA_MVPGI 0x01000000 223 - #define ECA_AIV 0x00200000 224 - #define ECA_VX 0x00020000 225 - #define ECA_PROTEXCI 0x00002000 226 - #define ECA_APIE 
0x00000008 227 - #define ECA_SII 0x00000001 228 - __u32 eca; /* 0x004c */ 229 - #define ICPT_INST 0x04 230 - #define ICPT_PROGI 0x08 231 - #define ICPT_INSTPROGI 0x0C 232 - #define ICPT_EXTREQ 0x10 233 - #define ICPT_EXTINT 0x14 234 - #define ICPT_IOREQ 0x18 235 - #define ICPT_WAIT 0x1c 236 - #define ICPT_VALIDITY 0x20 237 - #define ICPT_STOP 0x28 238 - #define ICPT_OPEREXC 0x2C 239 - #define ICPT_PARTEXEC 0x38 240 - #define ICPT_IOINST 0x40 241 - #define ICPT_KSS 0x5c 242 - #define ICPT_MCHKREQ 0x60 243 - #define ICPT_INT_ENABLE 0x64 244 - #define ICPT_PV_INSTR 0x68 245 - #define ICPT_PV_NOTIFY 0x6c 246 - #define ICPT_PV_PREF 0x70 247 - __u8 icptcode; /* 0x0050 */ 248 - __u8 icptstatus; /* 0x0051 */ 249 - __u16 ihcpu; /* 0x0052 */ 250 - __u8 reserved54; /* 0x0054 */ 251 - #define IICTL_CODE_NONE 0x00 252 - #define IICTL_CODE_MCHK 0x01 253 - #define IICTL_CODE_EXT 0x02 254 - #define IICTL_CODE_IO 0x03 255 - #define IICTL_CODE_RESTART 0x04 256 - #define IICTL_CODE_SPECIFICATION 0x10 257 - #define IICTL_CODE_OPERAND 0x11 258 - __u8 iictl; /* 0x0055 */ 259 - __u16 ipa; /* 0x0056 */ 260 - __u32 ipb; /* 0x0058 */ 261 - __u32 scaoh; /* 0x005c */ 262 - #define FPF_BPBC 0x20 263 - __u8 fpf; /* 0x0060 */ 264 - #define ECB_GS 0x40 265 - #define ECB_TE 0x10 266 - #define ECB_SPECI 0x08 267 - #define ECB_SRSI 0x04 268 - #define ECB_HOSTPROTINT 0x02 269 - #define ECB_PTF 0x01 270 - __u8 ecb; /* 0x0061 */ 271 - #define ECB2_CMMA 0x80 272 - #define ECB2_IEP 0x20 273 - #define ECB2_PFMFI 0x08 274 - #define ECB2_ESCA 0x04 275 - #define ECB2_ZPCI_LSI 0x02 276 - __u8 ecb2; /* 0x0062 */ 277 - #define ECB3_AISI 0x20 278 - #define ECB3_AISII 0x10 279 - #define ECB3_DEA 0x08 280 - #define ECB3_AES 0x04 281 - #define ECB3_RI 0x01 282 - __u8 ecb3; /* 0x0063 */ 283 - #define ESCA_SCAOL_MASK ~0x3fU 284 - __u32 scaol; /* 0x0064 */ 285 - __u8 sdf; /* 0x0068 */ 286 - __u8 epdx; /* 0x0069 */ 287 - __u8 cpnc; /* 0x006a */ 288 - __u8 reserved6b; /* 0x006b */ 289 - __u32 todpr; /* 0x006c */ 290 - 
#define GISA_FORMAT1 0x00000001 291 - __u32 gd; /* 0x0070 */ 292 - __u8 reserved74[12]; /* 0x0074 */ 293 - __u64 mso; /* 0x0080 */ 294 - __u64 msl; /* 0x0088 */ 295 - psw_t gpsw; /* 0x0090 */ 296 - __u64 gg14; /* 0x00a0 */ 297 - __u64 gg15; /* 0x00a8 */ 298 - __u8 reservedb0[8]; /* 0x00b0 */ 299 - #define HPID_KVM 0x4 300 - #define HPID_VSIE 0x5 301 - __u8 hpid; /* 0x00b8 */ 302 - __u8 reservedb9[7]; /* 0x00b9 */ 303 - union { 304 - struct { 305 - __u32 eiparams; /* 0x00c0 */ 306 - __u16 extcpuaddr; /* 0x00c4 */ 307 - __u16 eic; /* 0x00c6 */ 308 - }; 309 - __u64 mcic; /* 0x00c0 */ 310 - } __packed; 311 - __u32 reservedc8; /* 0x00c8 */ 312 - union { 313 - struct { 314 - __u16 pgmilc; /* 0x00cc */ 315 - __u16 iprcc; /* 0x00ce */ 316 - }; 317 - __u32 edc; /* 0x00cc */ 318 - } __packed; 319 - union { 320 - struct { 321 - __u32 dxc; /* 0x00d0 */ 322 - __u16 mcn; /* 0x00d4 */ 323 - __u8 perc; /* 0x00d6 */ 324 - __u8 peratmid; /* 0x00d7 */ 325 - }; 326 - __u64 faddr; /* 0x00d0 */ 327 - } __packed; 328 - __u64 peraddr; /* 0x00d8 */ 329 - __u8 eai; /* 0x00e0 */ 330 - __u8 peraid; /* 0x00e1 */ 331 - __u8 oai; /* 0x00e2 */ 332 - __u8 armid; /* 0x00e3 */ 333 - __u8 reservede4[4]; /* 0x00e4 */ 334 - union { 335 - __u64 tecmc; /* 0x00e8 */ 336 - struct { 337 - __u16 subchannel_id; /* 0x00e8 */ 338 - __u16 subchannel_nr; /* 0x00ea */ 339 - __u32 io_int_parm; /* 0x00ec */ 340 - __u32 io_int_word; /* 0x00f0 */ 341 - }; 342 - } __packed; 343 - __u8 reservedf4[8]; /* 0x00f4 */ 344 - #define CRYCB_FORMAT_MASK 0x00000003 345 - #define CRYCB_FORMAT0 0x00000000 346 - #define CRYCB_FORMAT1 0x00000001 347 - #define CRYCB_FORMAT2 0x00000003 348 - __u32 crycbd; /* 0x00fc */ 349 - __u64 gcr[16]; /* 0x0100 */ 350 - union { 351 - __u64 gbea; /* 0x0180 */ 352 - __u64 sidad; 353 - }; 354 - __u8 reserved188[8]; /* 0x0188 */ 355 - __u64 sdnxo; /* 0x0190 */ 356 - __u8 reserved198[8]; /* 0x0198 */ 357 - __u32 fac; /* 0x01a0 */ 358 - __u8 reserved1a4[20]; /* 0x01a4 */ 359 - __u64 cbrlo; /* 0x01b8 */ 
360 - __u8 reserved1c0[8]; /* 0x01c0 */ 361 - #define ECD_HOSTREGMGMT 0x20000000 362 - #define ECD_MEF 0x08000000 363 - #define ECD_ETOKENF 0x02000000 364 - #define ECD_ECC 0x00200000 365 - #define ECD_HMAC 0x00004000 366 - __u32 ecd; /* 0x01c8 */ 367 - __u8 reserved1cc[18]; /* 0x01cc */ 368 - __u64 pp; /* 0x01de */ 369 - __u8 reserved1e6[2]; /* 0x01e6 */ 370 - __u64 itdba; /* 0x01e8 */ 371 - __u64 riccbd; /* 0x01f0 */ 372 - __u64 gvrd; /* 0x01f8 */ 373 - } __packed __aligned(512); 374 - 375 - struct kvm_s390_itdb { 376 - __u8 data[256]; 377 - }; 378 - 379 - struct sie_page { 380 - struct kvm_s390_sie_block sie_block; 381 - struct mcck_volatile_info mcck_info; /* 0x0200 */ 382 - __u8 reserved218[360]; /* 0x0218 */ 383 - __u64 pv_grregs[16]; /* 0x0380 */ 384 - __u8 reserved400[512]; /* 0x0400 */ 385 - struct kvm_s390_itdb itdb; /* 0x0600 */ 386 - __u8 reserved700[2304]; /* 0x0700 */ 387 - }; 388 389 struct kvm_vcpu_stat { 390 struct kvm_vcpu_stat_generic generic;
··· 20 #include <linux/module.h> 21 #include <linux/pci.h> 22 #include <linux/mmu_notifier.h> 23 + #include <asm/kvm_host_types.h> 24 #include <asm/debug.h> 25 #include <asm/cpu.h> 26 #include <asm/fpu.h> 27 #include <asm/isc.h> 28 #include <asm/guarded_storage.h> 29 30 #define KVM_MAX_VCPUS 255 31 32 #define KVM_INTERNAL_MEM_SLOTS 1 ··· 50 #define KVM_REQ_VSIE_RESTART KVM_ARCH_REQ(5) 51 #define KVM_REQ_REFRESH_GUEST_PREFIX \ 52 KVM_ARCH_REQ_FLAGS(6, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) 53 54 struct kvm_vcpu_stat { 55 struct kvm_vcpu_stat_generic generic;
+348
arch/s390/include/asm/kvm_host_types.h
···
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef _ASM_KVM_HOST_TYPES_H 4 + #define _ASM_KVM_HOST_TYPES_H 5 + 6 + #include <linux/atomic.h> 7 + #include <linux/types.h> 8 + 9 + #define KVM_S390_BSCA_CPU_SLOTS 64 10 + #define KVM_S390_ESCA_CPU_SLOTS 248 11 + 12 + #define SIGP_CTRL_C 0x80 13 + #define SIGP_CTRL_SCN_MASK 0x3f 14 + 15 + union bsca_sigp_ctrl { 16 + __u8 value; 17 + struct { 18 + __u8 c : 1; 19 + __u8 r : 1; 20 + __u8 scn : 6; 21 + }; 22 + }; 23 + 24 + union esca_sigp_ctrl { 25 + __u16 value; 26 + struct { 27 + __u8 c : 1; 28 + __u8 reserved: 7; 29 + __u8 scn; 30 + }; 31 + }; 32 + 33 + struct esca_entry { 34 + union esca_sigp_ctrl sigp_ctrl; 35 + __u16 reserved1[3]; 36 + __u64 sda; 37 + __u64 reserved2[6]; 38 + }; 39 + 40 + struct bsca_entry { 41 + __u8 reserved0; 42 + union bsca_sigp_ctrl sigp_ctrl; 43 + __u16 reserved[3]; 44 + __u64 sda; 45 + __u64 reserved2[2]; 46 + }; 47 + 48 + union ipte_control { 49 + unsigned long val; 50 + struct { 51 + unsigned long k : 1; 52 + unsigned long kh : 31; 53 + unsigned long kg : 32; 54 + }; 55 + }; 56 + 57 + /* 58 + * Utility is defined as two bytes but having it four bytes wide 59 + * generates more efficient code. Since the following bytes are 60 + * reserved this makes no functional difference. 
61 + */ 62 + union sca_utility { 63 + __u32 val; 64 + struct { 65 + __u32 mtcr : 1; 66 + __u32 : 31; 67 + }; 68 + }; 69 + 70 + struct bsca_block { 71 + union ipte_control ipte_control; 72 + __u64 reserved[5]; 73 + __u64 mcn; 74 + union sca_utility utility; 75 + __u8 reserved2[4]; 76 + struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS]; 77 + }; 78 + 79 + struct esca_block { 80 + union ipte_control ipte_control; 81 + __u64 reserved1[6]; 82 + union sca_utility utility; 83 + __u8 reserved2[4]; 84 + __u64 mcn[4]; 85 + __u64 reserved3[20]; 86 + struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS]; 87 + }; 88 + 89 + /* 90 + * This struct is used to store some machine check info from lowcore 91 + * for machine checks that happen while the guest is running. 92 + * This info in host's lowcore might be overwritten by a second machine 93 + * check from host when host is in the machine check's high-level handling. 94 + * The size is 24 bytes. 95 + */ 96 + struct mcck_volatile_info { 97 + __u64 mcic; 98 + __u64 failing_storage_address; 99 + __u32 ext_damage_code; 100 + __u32 reserved; 101 + }; 102 + 103 + #define CR0_INITIAL_MASK (CR0_UNUSED_56 | CR0_INTERRUPT_KEY_SUBMASK | \ 104 + CR0_MEASUREMENT_ALERT_SUBMASK) 105 + #define CR14_INITIAL_MASK (CR14_UNUSED_32 | CR14_UNUSED_33 | \ 106 + CR14_EXTERNAL_DAMAGE_SUBMASK) 107 + 108 + #define SIDAD_SIZE_MASK 0xff 109 + #define sida_addr(sie_block) phys_to_virt((sie_block)->sidad & PAGE_MASK) 110 + #define sida_size(sie_block) \ 111 + ((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE) 112 + 113 + #define CPUSTAT_STOPPED 0x80000000 114 + #define CPUSTAT_WAIT 0x10000000 115 + #define CPUSTAT_ECALL_PEND 0x08000000 116 + #define CPUSTAT_STOP_INT 0x04000000 117 + #define CPUSTAT_IO_INT 0x02000000 118 + #define CPUSTAT_EXT_INT 0x01000000 119 + #define CPUSTAT_RUNNING 0x00800000 120 + #define CPUSTAT_RETAINED 0x00400000 121 + #define CPUSTAT_TIMING_SUB 0x00020000 122 + #define CPUSTAT_SIE_SUB 0x00010000 123 + #define CPUSTAT_RRF 0x00008000 124 + 
#define CPUSTAT_SLSV 0x00004000 125 + #define CPUSTAT_SLSR 0x00002000 126 + #define CPUSTAT_ZARCH 0x00000800 127 + #define CPUSTAT_MCDS 0x00000100 128 + #define CPUSTAT_KSS 0x00000200 129 + #define CPUSTAT_SM 0x00000080 130 + #define CPUSTAT_IBS 0x00000040 131 + #define CPUSTAT_GED2 0x00000010 132 + #define CPUSTAT_G 0x00000008 133 + #define CPUSTAT_GED 0x00000004 134 + #define CPUSTAT_J 0x00000002 135 + #define CPUSTAT_P 0x00000001 136 + 137 + struct kvm_s390_sie_block { 138 + atomic_t cpuflags; /* 0x0000 */ 139 + __u32 : 1; /* 0x0004 */ 140 + __u32 prefix : 18; 141 + __u32 : 1; 142 + __u32 ibc : 12; 143 + __u8 reserved08[4]; /* 0x0008 */ 144 + #define PROG_IN_SIE (1<<0) 145 + __u32 prog0c; /* 0x000c */ 146 + union { 147 + __u8 reserved10[16]; /* 0x0010 */ 148 + struct { 149 + __u64 pv_handle_cpu; 150 + __u64 pv_handle_config; 151 + }; 152 + }; 153 + #define PROG_BLOCK_SIE (1<<0) 154 + #define PROG_REQUEST (1<<1) 155 + atomic_t prog20; /* 0x0020 */ 156 + __u8 reserved24[4]; /* 0x0024 */ 157 + __u64 cputm; /* 0x0028 */ 158 + __u64 ckc; /* 0x0030 */ 159 + __u64 epoch; /* 0x0038 */ 160 + __u32 svcc; /* 0x0040 */ 161 + #define LCTL_CR0 0x8000 162 + #define LCTL_CR6 0x0200 163 + #define LCTL_CR9 0x0040 164 + #define LCTL_CR10 0x0020 165 + #define LCTL_CR11 0x0010 166 + #define LCTL_CR14 0x0002 167 + __u16 lctl; /* 0x0044 */ 168 + __s16 icpua; /* 0x0046 */ 169 + #define ICTL_OPEREXC 0x80000000 170 + #define ICTL_PINT 0x20000000 171 + #define ICTL_LPSW 0x00400000 172 + #define ICTL_STCTL 0x00040000 173 + #define ICTL_ISKE 0x00004000 174 + #define ICTL_SSKE 0x00002000 175 + #define ICTL_RRBE 0x00001000 176 + #define ICTL_TPROT 0x00000200 177 + __u32 ictl; /* 0x0048 */ 178 + #define ECA_CEI 0x80000000 179 + #define ECA_IB 0x40000000 180 + #define ECA_SIGPI 0x10000000 181 + #define ECA_MVPGI 0x01000000 182 + #define ECA_AIV 0x00200000 183 + #define ECA_VX 0x00020000 184 + #define ECA_PROTEXCI 0x00002000 185 + #define ECA_APIE 0x00000008 186 + #define ECA_SII 0x00000001 187 
+ __u32 eca; /* 0x004c */ 188 + #define ICPT_INST 0x04 189 + #define ICPT_PROGI 0x08 190 + #define ICPT_INSTPROGI 0x0C 191 + #define ICPT_EXTREQ 0x10 192 + #define ICPT_EXTINT 0x14 193 + #define ICPT_IOREQ 0x18 194 + #define ICPT_WAIT 0x1c 195 + #define ICPT_VALIDITY 0x20 196 + #define ICPT_STOP 0x28 197 + #define ICPT_OPEREXC 0x2C 198 + #define ICPT_PARTEXEC 0x38 199 + #define ICPT_IOINST 0x40 200 + #define ICPT_KSS 0x5c 201 + #define ICPT_MCHKREQ 0x60 202 + #define ICPT_INT_ENABLE 0x64 203 + #define ICPT_PV_INSTR 0x68 204 + #define ICPT_PV_NOTIFY 0x6c 205 + #define ICPT_PV_PREF 0x70 206 + __u8 icptcode; /* 0x0050 */ 207 + __u8 icptstatus; /* 0x0051 */ 208 + __u16 ihcpu; /* 0x0052 */ 209 + __u8 reserved54; /* 0x0054 */ 210 + #define IICTL_CODE_NONE 0x00 211 + #define IICTL_CODE_MCHK 0x01 212 + #define IICTL_CODE_EXT 0x02 213 + #define IICTL_CODE_IO 0x03 214 + #define IICTL_CODE_RESTART 0x04 215 + #define IICTL_CODE_SPECIFICATION 0x10 216 + #define IICTL_CODE_OPERAND 0x11 217 + __u8 iictl; /* 0x0055 */ 218 + __u16 ipa; /* 0x0056 */ 219 + __u32 ipb; /* 0x0058 */ 220 + __u32 scaoh; /* 0x005c */ 221 + #define FPF_BPBC 0x20 222 + __u8 fpf; /* 0x0060 */ 223 + #define ECB_GS 0x40 224 + #define ECB_TE 0x10 225 + #define ECB_SPECI 0x08 226 + #define ECB_SRSI 0x04 227 + #define ECB_HOSTPROTINT 0x02 228 + #define ECB_PTF 0x01 229 + __u8 ecb; /* 0x0061 */ 230 + #define ECB2_CMMA 0x80 231 + #define ECB2_IEP 0x20 232 + #define ECB2_PFMFI 0x08 233 + #define ECB2_ESCA 0x04 234 + #define ECB2_ZPCI_LSI 0x02 235 + __u8 ecb2; /* 0x0062 */ 236 + #define ECB3_AISI 0x20 237 + #define ECB3_AISII 0x10 238 + #define ECB3_DEA 0x08 239 + #define ECB3_AES 0x04 240 + #define ECB3_RI 0x01 241 + __u8 ecb3; /* 0x0063 */ 242 + #define ESCA_SCAOL_MASK ~0x3fU 243 + __u32 scaol; /* 0x0064 */ 244 + __u8 sdf; /* 0x0068 */ 245 + __u8 epdx; /* 0x0069 */ 246 + __u8 cpnc; /* 0x006a */ 247 + __u8 reserved6b; /* 0x006b */ 248 + __u32 todpr; /* 0x006c */ 249 + #define GISA_FORMAT1 0x00000001 250 + __u32 gd; 
/* 0x0070 */ 251 + __u8 reserved74[12]; /* 0x0074 */ 252 + __u64 mso; /* 0x0080 */ 253 + __u64 msl; /* 0x0088 */ 254 + psw_t gpsw; /* 0x0090 */ 255 + __u64 gg14; /* 0x00a0 */ 256 + __u64 gg15; /* 0x00a8 */ 257 + __u8 reservedb0[8]; /* 0x00b0 */ 258 + #define HPID_KVM 0x4 259 + #define HPID_VSIE 0x5 260 + __u8 hpid; /* 0x00b8 */ 261 + __u8 reservedb9[7]; /* 0x00b9 */ 262 + union { 263 + struct { 264 + __u32 eiparams; /* 0x00c0 */ 265 + __u16 extcpuaddr; /* 0x00c4 */ 266 + __u16 eic; /* 0x00c6 */ 267 + }; 268 + __u64 mcic; /* 0x00c0 */ 269 + } __packed; 270 + __u32 reservedc8; /* 0x00c8 */ 271 + union { 272 + struct { 273 + __u16 pgmilc; /* 0x00cc */ 274 + __u16 iprcc; /* 0x00ce */ 275 + }; 276 + __u32 edc; /* 0x00cc */ 277 + } __packed; 278 + union { 279 + struct { 280 + __u32 dxc; /* 0x00d0 */ 281 + __u16 mcn; /* 0x00d4 */ 282 + __u8 perc; /* 0x00d6 */ 283 + __u8 peratmid; /* 0x00d7 */ 284 + }; 285 + __u64 faddr; /* 0x00d0 */ 286 + } __packed; 287 + __u64 peraddr; /* 0x00d8 */ 288 + __u8 eai; /* 0x00e0 */ 289 + __u8 peraid; /* 0x00e1 */ 290 + __u8 oai; /* 0x00e2 */ 291 + __u8 armid; /* 0x00e3 */ 292 + __u8 reservede4[4]; /* 0x00e4 */ 293 + union { 294 + __u64 tecmc; /* 0x00e8 */ 295 + struct { 296 + __u16 subchannel_id; /* 0x00e8 */ 297 + __u16 subchannel_nr; /* 0x00ea */ 298 + __u32 io_int_parm; /* 0x00ec */ 299 + __u32 io_int_word; /* 0x00f0 */ 300 + }; 301 + } __packed; 302 + __u8 reservedf4[8]; /* 0x00f4 */ 303 + #define CRYCB_FORMAT_MASK 0x00000003 304 + #define CRYCB_FORMAT0 0x00000000 305 + #define CRYCB_FORMAT1 0x00000001 306 + #define CRYCB_FORMAT2 0x00000003 307 + __u32 crycbd; /* 0x00fc */ 308 + __u64 gcr[16]; /* 0x0100 */ 309 + union { 310 + __u64 gbea; /* 0x0180 */ 311 + __u64 sidad; 312 + }; 313 + __u8 reserved188[8]; /* 0x0188 */ 314 + __u64 sdnxo; /* 0x0190 */ 315 + __u8 reserved198[8]; /* 0x0198 */ 316 + __u32 fac; /* 0x01a0 */ 317 + __u8 reserved1a4[20]; /* 0x01a4 */ 318 + __u64 cbrlo; /* 0x01b8 */ 319 + __u8 reserved1c0[8]; /* 0x01c0 */ 320 + 
#define ECD_HOSTREGMGMT 0x20000000 321 + #define ECD_MEF 0x08000000 322 + #define ECD_ETOKENF 0x02000000 323 + #define ECD_ECC 0x00200000 324 + #define ECD_HMAC 0x00004000 325 + __u32 ecd; /* 0x01c8 */ 326 + __u8 reserved1cc[18]; /* 0x01cc */ 327 + __u64 pp; /* 0x01de */ 328 + __u8 reserved1e6[2]; /* 0x01e6 */ 329 + __u64 itdba; /* 0x01e8 */ 330 + __u64 riccbd; /* 0x01f0 */ 331 + __u64 gvrd; /* 0x01f8 */ 332 + } __packed __aligned(512); 333 + 334 + struct kvm_s390_itdb { 335 + __u8 data[256]; 336 + }; 337 + 338 + struct sie_page { 339 + struct kvm_s390_sie_block sie_block; 340 + struct mcck_volatile_info mcck_info; /* 0x0200 */ 341 + __u8 reserved218[360]; /* 0x0218 */ 342 + __u64 pv_grregs[16]; /* 0x0380 */ 343 + __u8 reserved400[512]; /* 0x0400 */ 344 + struct kvm_s390_itdb itdb; /* 0x0600 */ 345 + __u8 reserved700[2304]; /* 0x0700 */ 346 + }; 347 + 348 + #endif /* _ASM_KVM_HOST_TYPES_H */
+1 -3
arch/s390/include/asm/lowcore.h
··· 164 __u32 spinlock_index; /* 0x03b0 */ 165 __u8 pad_0x03b4[0x03b8-0x03b4]; /* 0x03b4 */ 166 __u64 percpu_offset; /* 0x03b8 */ 167 - __u8 pad_0x03c0[0x03c8-0x03c0]; /* 0x03c0 */ 168 - __u64 machine_flags; /* 0x03c8 */ 169 - __u8 pad_0x03d0[0x0400-0x03d0]; /* 0x03d0 */ 170 171 __u32 return_lpswe; /* 0x0400 */ 172 __u32 return_mcck_lpswe; /* 0x0404 */
··· 164 __u32 spinlock_index; /* 0x03b0 */ 165 __u8 pad_0x03b4[0x03b8-0x03b4]; /* 0x03b4 */ 166 __u64 percpu_offset; /* 0x03b8 */ 167 + __u8 pad_0x03c0[0x0400-0x03c0]; /* 0x03c0 */ 168 169 __u32 return_lpswe; /* 0x0400 */ 170 __u32 return_mcck_lpswe; /* 0x0404 */
+7 -12
arch/s390/include/asm/processor.h
··· 31 #include <linux/cpumask.h> 32 #include <linux/linkage.h> 33 #include <linux/irqflags.h> 34 #include <asm/fpu-types.h> 35 #include <asm/cpu.h> 36 #include <asm/page.h> ··· 63 64 static __always_inline void set_cpu_flag(int flag) 65 { 66 - this_pcpu()->flags |= (1UL << flag); 67 } 68 69 static __always_inline void clear_cpu_flag(int flag) 70 { 71 - this_pcpu()->flags &= ~(1UL << flag); 72 } 73 74 static __always_inline bool test_cpu_flag(int flag) 75 { 76 - return this_pcpu()->flags & (1UL << flag); 77 } 78 79 static __always_inline bool test_and_set_cpu_flag(int flag) 80 { 81 - if (test_cpu_flag(flag)) 82 - return true; 83 - set_cpu_flag(flag); 84 - return false; 85 } 86 87 static __always_inline bool test_and_clear_cpu_flag(int flag) 88 { 89 - if (!test_cpu_flag(flag)) 90 - return false; 91 - clear_cpu_flag(flag); 92 - return true; 93 } 94 95 /* ··· 92 */ 93 static __always_inline bool test_cpu_flag_of(int flag, int cpu) 94 { 95 - return per_cpu(pcpu_devices, cpu).flags & (1UL << flag); 96 } 97 98 #define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
··· 31 #include <linux/cpumask.h> 32 #include <linux/linkage.h> 33 #include <linux/irqflags.h> 34 + #include <linux/bitops.h> 35 #include <asm/fpu-types.h> 36 #include <asm/cpu.h> 37 #include <asm/page.h> ··· 62 63 static __always_inline void set_cpu_flag(int flag) 64 { 65 + set_bit(flag, &this_pcpu()->flags); 66 } 67 68 static __always_inline void clear_cpu_flag(int flag) 69 { 70 + clear_bit(flag, &this_pcpu()->flags); 71 } 72 73 static __always_inline bool test_cpu_flag(int flag) 74 { 75 + return test_bit(flag, &this_pcpu()->flags); 76 } 77 78 static __always_inline bool test_and_set_cpu_flag(int flag) 79 { 80 + return test_and_set_bit(flag, &this_pcpu()->flags); 81 } 82 83 static __always_inline bool test_and_clear_cpu_flag(int flag) 84 { 85 + return test_and_clear_bit(flag, &this_pcpu()->flags); 86 } 87 88 /* ··· 97 */ 98 static __always_inline bool test_cpu_flag_of(int flag, int cpu) 99 { 100 + return test_bit(flag, &per_cpu(pcpu_devices, cpu).flags); 101 } 102 103 #define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
-3
arch/s390/include/asm/thread_info.h
··· 9 #define _ASM_THREAD_INFO_H 10 11 #include <linux/bits.h> 12 - #ifndef ASM_OFFSETS_C 13 - #include <asm/asm-offsets.h> 14 - #endif 15 16 /* 17 * General size of kernel stacks
··· 9 #define _ASM_THREAD_INFO_H 10 11 #include <linux/bits.h> 12 13 /* 14 * General size of kernel stacks
+6
arch/s390/include/asm/topology.h
··· 61 62 #endif /* CONFIG_SCHED_TOPOLOGY */ 63 64 #define POLARIZATION_UNKNOWN (-1) 65 #define POLARIZATION_HRZ (0) 66 #define POLARIZATION_VL (1)
··· 61 62 #endif /* CONFIG_SCHED_TOPOLOGY */ 63 64 + static inline bool topology_is_primary_thread(unsigned int cpu) 65 + { 66 + return smp_get_base_cpu(cpu) == cpu; 67 + } 68 + #define topology_is_primary_thread topology_is_primary_thread 69 + 70 #define POLARIZATION_UNKNOWN (-1) 71 #define POLARIZATION_HRZ (0) 72 #define POLARIZATION_VL (1)
+3 -4
arch/s390/kernel/asm-offsets.c
··· 5 * and format the required data. 6 */ 7 8 - #define ASM_OFFSETS_C 9 - 10 #include <linux/kbuild.h> 11 - #include <linux/kvm_host.h> 12 #include <linux/sched.h> 13 #include <linux/purgatory.h> 14 #include <linux/pgtable.h> 15 - #include <linux/ftrace.h> 16 #include <asm/stacktrace.h> 17 18 int main(void) 19 {
··· 5 * and format the required data. 6 */ 7 8 #include <linux/kbuild.h> 9 #include <linux/sched.h> 10 #include <linux/purgatory.h> 11 #include <linux/pgtable.h> 12 + #include <linux/ftrace_regs.h> 13 + #include <asm/kvm_host_types.h> 14 #include <asm/stacktrace.h> 15 + #include <asm/ptrace.h> 16 17 int main(void) 18 {
+1
arch/s390/kernel/dumpstack.c
··· 17 #include <linux/sched.h> 18 #include <linux/sched/debug.h> 19 #include <linux/sched/task_stack.h> 20 #include <asm/processor.h> 21 #include <asm/debug.h> 22 #include <asm/dis.h>
··· 17 #include <linux/sched.h> 18 #include <linux/sched/debug.h> 19 #include <linux/sched/task_stack.h> 20 + #include <asm/asm-offsets.h> 21 #include <asm/processor.h> 22 #include <asm/debug.h> 23 #include <asm/dis.h>
+1
arch/s390/kernel/early.c
··· 22 #include <asm/asm-extable.h> 23 #include <linux/memblock.h> 24 #include <asm/access-regs.h> 25 #include <asm/machine.h> 26 #include <asm/diag.h> 27 #include <asm/ebcdic.h>
··· 22 #include <asm/asm-extable.h> 23 #include <linux/memblock.h> 24 #include <asm/access-regs.h> 25 + #include <asm/asm-offsets.h> 26 #include <asm/machine.h> 27 #include <asm/diag.h> 28 #include <asm/ebcdic.h>
+1 -1
arch/s390/kernel/entry.S
··· 468 clgrjl %r9,%r14, 4f 469 larl %r14,.Lsie_leave 470 clgrjhe %r9,%r14, 4f 471 - lg %r10,__LC_PCPU 472 oi __PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST 473 4: BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST 474 SIEEXIT __SF_SIE_CONTROL(%r15),%r13
··· 468 clgrjl %r9,%r14, 4f 469 larl %r14,.Lsie_leave 470 clgrjhe %r9,%r14, 4f 471 + lg %r10,__LC_PCPU(%r13) 472 oi __PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST 473 4: BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST 474 SIEEXIT __SF_SIE_CONTROL(%r15),%r13
-1
arch/s390/kernel/setup.c
··· 414 lc->clock_comparator = clock_comparator_max; 415 lc->current_task = (unsigned long)&init_task; 416 lc->lpp = LPP_MAGIC; 417 - lc->machine_flags = get_lowcore()->machine_flags; 418 lc->preempt_count = get_lowcore()->preempt_count; 419 nmi_alloc_mcesa_early(&lc->mcesad); 420 lc->sys_enter_timer = get_lowcore()->sys_enter_timer;
··· 414 lc->clock_comparator = clock_comparator_max; 415 lc->current_task = (unsigned long)&init_task; 416 lc->lpp = LPP_MAGIC; 417 lc->preempt_count = get_lowcore()->preempt_count; 418 nmi_alloc_mcesa_early(&lc->mcesad); 419 lc->sys_enter_timer = get_lowcore()->sys_enter_timer;
+1 -8
arch/s390/kernel/smp.c
··· 99 static unsigned int smp_max_threads __initdata = -1U; 100 cpumask_t cpu_setup_mask; 101 102 - static int __init early_nosmt(char *s) 103 - { 104 - smp_max_threads = 1; 105 - return 0; 106 - } 107 - early_param("nosmt", early_nosmt); 108 - 109 static int __init early_smt(char *s) 110 { 111 get_option(&s, &smp_max_threads); ··· 258 lc->percpu_offset = __per_cpu_offset[cpu]; 259 lc->kernel_asce = get_lowcore()->kernel_asce; 260 lc->user_asce = s390_invalid_asce; 261 - lc->machine_flags = get_lowcore()->machine_flags; 262 lc->user_timer = lc->system_timer = 263 lc->steal_timer = lc->avg_steal_timer = 0; 264 abs_lc = get_abs_lowcore(); ··· 801 mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp; 802 mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1; 803 pcpu_set_smt(mtid); 804 805 /* Print number of CPUs */ 806 c_cpus = s_cpus = 0;
··· 99 static unsigned int smp_max_threads __initdata = -1U; 100 cpumask_t cpu_setup_mask; 101 102 static int __init early_smt(char *s) 103 { 104 get_option(&s, &smp_max_threads); ··· 265 lc->percpu_offset = __per_cpu_offset[cpu]; 266 lc->kernel_asce = get_lowcore()->kernel_asce; 267 lc->user_asce = s390_invalid_asce; 268 lc->user_timer = lc->system_timer = 269 lc->steal_timer = lc->avg_steal_timer = 0; 270 abs_lc = get_abs_lowcore(); ··· 809 mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp; 810 mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1; 811 pcpu_set_smt(mtid); 812 + cpu_smt_set_num_threads(smp_cpu_mtid + 1, smp_cpu_mtid + 1); 813 814 /* Print number of CPUs */ 815 c_cpus = s_cpus = 0;
+1
arch/s390/kernel/stacktrace.c
··· 9 #include <linux/stacktrace.h> 10 #include <linux/uaccess.h> 11 #include <linux/compat.h> 12 #include <asm/stacktrace.h> 13 #include <asm/unwind.h> 14 #include <asm/kprobes.h>
··· 9 #include <linux/stacktrace.h> 10 #include <linux/uaccess.h> 11 #include <linux/compat.h> 12 + #include <asm/asm-offsets.h> 13 #include <asm/stacktrace.h> 14 #include <asm/unwind.h> 15 #include <asm/kprobes.h>
+1
arch/s390/mm/fault.c
··· 376 if (unlikely(!teid.b61)) { 377 if (user_mode(regs)) { 378 /* Low-address protection in user mode: cannot happen */ 379 die(regs, "Low-address protection"); 380 } 381 /*
··· 376 if (unlikely(!teid.b61)) { 377 if (user_mode(regs)) { 378 /* Low-address protection in user mode: cannot happen */ 379 + dump_fault_info(regs); 380 die(regs, "Low-address protection"); 381 } 382 /*
+1
arch/s390/mm/pfault.c
··· 9 #include <linux/init.h> 10 #include <linux/irq.h> 11 #include <asm/asm-extable.h> 12 #include <asm/pfault.h> 13 #include <asm/diag.h> 14
··· 9 #include <linux/init.h> 10 #include <linux/irq.h> 11 #include <asm/asm-extable.h> 12 + #include <asm/asm-offsets.h> 13 #include <asm/pfault.h> 14 #include <asm/diag.h> 15
+14 -13
arch/s390/pci/pci_bus.c
··· 287 static void pci_dma_range_setup(struct pci_dev *pdev) 288 { 289 struct zpci_dev *zdev = to_zpci(pdev); 290 - struct bus_dma_region *map; 291 - u64 aligned_end; 292 293 - map = kzalloc(sizeof(*map), GFP_KERNEL); 294 - if (!map) 295 - return; 296 - 297 - map->cpu_start = 0; 298 - map->dma_start = PAGE_ALIGN(zdev->start_dma); 299 aligned_end = PAGE_ALIGN_DOWN(zdev->end_dma + 1); 300 - if (aligned_end >= map->dma_start) 301 - map->size = aligned_end - map->dma_start; 302 else 303 - map->size = 0; 304 - WARN_ON_ONCE(map->size == 0); 305 306 - pdev->dev.dma_range_map = map; 307 } 308 309 void pcibios_bus_add_device(struct pci_dev *pdev) ··· 357 static bool zpci_bus_is_isolated_vf(struct zpci_bus *zbus, struct zpci_dev *zdev) 358 { 359 struct pci_dev *pdev; 360 361 pdev = zpci_iov_find_parent_pf(zbus, zdev); 362 if (!pdev)
··· 287 static void pci_dma_range_setup(struct pci_dev *pdev) 288 { 289 struct zpci_dev *zdev = to_zpci(pdev); 290 + u64 aligned_end, size; 291 + dma_addr_t dma_start; 292 + int ret; 293 294 + dma_start = PAGE_ALIGN(zdev->start_dma); 295 aligned_end = PAGE_ALIGN_DOWN(zdev->end_dma + 1); 296 + if (aligned_end >= dma_start) 297 + size = aligned_end - dma_start; 298 else 299 + size = 0; 300 + WARN_ON_ONCE(size == 0); 301 302 + ret = dma_direct_set_offset(&pdev->dev, 0, dma_start, size); 303 + if (ret) 304 + pr_err("Failed to allocate DMA range map for %s\n", pci_name(pdev)); 305 } 306 307 void pcibios_bus_add_device(struct pci_dev *pdev) ··· 359 static bool zpci_bus_is_isolated_vf(struct zpci_bus *zbus, struct zpci_dev *zdev) 360 { 361 struct pci_dev *pdev; 362 + 363 + if (!zdev->vfn) 364 + return false; 365 366 pdev = zpci_iov_find_parent_pf(zbus, zdev); 367 if (!pdev)
+46 -26
drivers/s390/crypto/vfio_ap_ops.c
··· 873 vfio_put_device(&matrix_mdev->vdev); 874 } 875 876 - #define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \ 877 - "already assigned to %s" 878 879 - static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev, 880 - unsigned long *apm, 881 - unsigned long *aqm) 882 { 883 unsigned long apid, apqi; 884 - const struct device *dev = mdev_dev(matrix_mdev->mdev); 885 - const char *mdev_name = dev_name(dev); 886 887 - for_each_set_bit_inv(apid, apm, AP_DEVICES) 888 for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) 889 - dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name); 890 } 891 892 /** 893 * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs 894 * 895 * @mdev_apm: mask indicating the APIDs of the APQNs to be verified 896 * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified 897 * 898 - * Verifies that each APQN derived from the Cartesian product of a bitmap of 899 - * AP adapter IDs and AP queue indexes is not configured for any matrix 900 - * mediated device. AP queue sharing is not allowed. 901 * 902 * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE. 903 */ 904 - static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm, 905 unsigned long *mdev_aqm) 906 { 907 - struct ap_matrix_mdev *matrix_mdev; 908 DECLARE_BITMAP(apm, AP_DEVICES); 909 DECLARE_BITMAP(aqm, AP_DOMAINS); 910 911 - list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { 912 /* 913 - * If the input apm and aqm are fields of the matrix_mdev 914 - * object, then move on to the next matrix_mdev. 915 */ 916 - if (mdev_apm == matrix_mdev->matrix.apm && 917 - mdev_aqm == matrix_mdev->matrix.aqm) 918 continue; 919 920 memset(apm, 0, sizeof(apm)); ··· 942 * We work on full longs, as we can only exclude the leftover 943 * bits in non-inverse order. The leftover is all zeros. 
944 */ 945 - if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm, 946 - AP_DEVICES)) 947 continue; 948 949 - if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm, 950 - AP_DOMAINS)) 951 continue; 952 953 - vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm); 954 955 return -EADDRINUSE; 956 } ··· 980 matrix_mdev->matrix.aqm)) 981 return -EADDRNOTAVAIL; 982 983 - return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm, 984 matrix_mdev->matrix.aqm); 985 } 986 ··· 2536 2537 mutex_lock(&matrix_dev->guests_lock); 2538 mutex_lock(&matrix_dev->mdevs_lock); 2539 - ret = vfio_ap_mdev_verify_no_sharing(apm, aqm); 2540 mutex_unlock(&matrix_dev->mdevs_lock); 2541 mutex_unlock(&matrix_dev->guests_lock); 2542
··· 873 vfio_put_device(&matrix_mdev->vdev); 874 } 875 876 + #define MDEV_SHARING_ERR "Userspace may not assign queue %02lx.%04lx to mdev: already assigned to %s" 877 878 + #define MDEV_IN_USE_ERR "Can not reserve queue %02lx.%04lx for host driver: in use by mdev" 879 + 880 + static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *assignee, 881 + struct ap_matrix_mdev *assigned_to, 882 + unsigned long *apm, unsigned long *aqm) 883 { 884 unsigned long apid, apqi; 885 886 + for_each_set_bit_inv(apid, apm, AP_DEVICES) { 887 + for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) { 888 + dev_warn(mdev_dev(assignee->mdev), MDEV_SHARING_ERR, 889 + apid, apqi, dev_name(mdev_dev(assigned_to->mdev))); 890 + } 891 + } 892 + } 893 + 894 + static void vfio_ap_mdev_log_in_use_err(struct ap_matrix_mdev *assignee, 895 + unsigned long *apm, unsigned long *aqm) 896 + { 897 + unsigned long apid, apqi; 898 + 899 + for_each_set_bit_inv(apid, apm, AP_DEVICES) { 900 for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) 901 + dev_warn(mdev_dev(assignee->mdev), MDEV_IN_USE_ERR, apid, apqi); 902 + } 903 } 904 905 /** 906 * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs 907 * 908 + * @assignee: the matrix mdev to which @mdev_apm and @mdev_aqm are being 909 + * assigned; or, NULL if this function was called by the AP bus 910 + * driver in_use callback to verify none of the APQNs being reserved 911 + * for the host device driver are in use by a vfio_ap mediated device 912 * @mdev_apm: mask indicating the APIDs of the APQNs to be verified 913 * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified 914 * 915 + * Verifies that each APQN derived from the Cartesian product of APIDs 916 + * represented by the bits set in @mdev_apm and the APQIs of the bits set in 917 + * @mdev_aqm is not assigned to a mediated device other than the mdev to which 918 + * the APQN is being assigned (@assignee). AP queue sharing is not allowed. 
919 * 920 * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE. 921 */ 922 + static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *assignee, 923 + unsigned long *mdev_apm, 924 unsigned long *mdev_aqm) 925 { 926 + struct ap_matrix_mdev *assigned_to; 927 DECLARE_BITMAP(apm, AP_DEVICES); 928 DECLARE_BITMAP(aqm, AP_DOMAINS); 929 930 + list_for_each_entry(assigned_to, &matrix_dev->mdev_list, node) { 931 /* 932 + * If the mdev to which the mdev_apm and mdev_aqm is being 933 + * assigned is the same as the mdev being verified 934 */ 935 + if (assignee == assigned_to) 936 continue; 937 938 memset(apm, 0, sizeof(apm)); ··· 924 * We work on full longs, as we can only exclude the leftover 925 * bits in non-inverse order. The leftover is all zeros. 926 */ 927 + if (!bitmap_and(apm, mdev_apm, assigned_to->matrix.apm, AP_DEVICES)) 928 continue; 929 930 + if (!bitmap_and(aqm, mdev_aqm, assigned_to->matrix.aqm, AP_DOMAINS)) 931 continue; 932 933 + if (assignee) 934 + vfio_ap_mdev_log_sharing_err(assignee, assigned_to, apm, aqm); 935 + else 936 + vfio_ap_mdev_log_in_use_err(assigned_to, apm, aqm); 937 938 return -EADDRINUSE; 939 } ··· 961 matrix_mdev->matrix.aqm)) 962 return -EADDRNOTAVAIL; 963 964 + return vfio_ap_mdev_verify_no_sharing(matrix_mdev, 965 + matrix_mdev->matrix.apm, 966 matrix_mdev->matrix.aqm); 967 } 968 ··· 2516 2517 mutex_lock(&matrix_dev->guests_lock); 2518 mutex_lock(&matrix_dev->mdevs_lock); 2519 + ret = vfio_ap_mdev_verify_no_sharing(NULL, apm, aqm); 2520 mutex_unlock(&matrix_dev->mdevs_lock); 2521 mutex_unlock(&matrix_dev->guests_lock); 2522