Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'kvm-s390-next-4.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux

From: Christian Borntraeger <borntraeger@de.ibm.com>

KVM: s390: features for 4.12

1. Guarded storage support for guests
This contains an s390 base Linux feature branch that is necessary
to implement the KVM part
2. Provide an interface to implement adapter interruption suppression
which is necessary for proper zPCI support
3. Use more defines instead of numbers
4. Provide logging for lazy enablement of runtime instrumentation

+1252 -326
+17
Documentation/virtual/kvm/api.txt
··· 3983 3983 This capability can be enabled dynamically even if VCPUs were already 3984 3984 created and are running. 3985 3985 3986 + 7.9 KVM_CAP_S390_GS 3987 + 3988 + Architectures: s390 3989 + Parameters: none 3990 + Returns: 0 on success; -EINVAL if the machine does not support 3991 + guarded storage; -EBUSY if a VCPU has already been created. 3992 + 3993 + Allows use of guarded storage for the KVM guest. 3994 + 3995 + 7.10 KVM_CAP_S390_AIS 3996 + 3997 + Architectures: s390 3998 + Parameters: none 3999 + 4000 + Allows use of adapter-interruption suppression. 4001 + Returns: 0 on success; -EBUSY if a VCPU has already been created. 4002 + 3986 4003 8. Other capabilities. 3987 4004 ---------------------- 3988 4005
+38 -3
Documentation/virtual/kvm/devices/s390_flic.txt
··· 14 14 - purge one pending floating I/O interrupt (KVM_DEV_FLIC_CLEAR_IO_IRQ) 15 15 - enable/disable for the guest transparent async page faults 16 16 - register and modify adapter interrupt sources (KVM_DEV_FLIC_ADAPTER_*) 17 + - modify AIS (adapter-interruption-suppression) mode state (KVM_DEV_FLIC_AISM) 18 + - inject adapter interrupts on a specified adapter (KVM_DEV_FLIC_AIRQ_INJECT) 17 19 18 20 Groups: 19 21 KVM_DEV_FLIC_ENQUEUE ··· 66 64 __u8 isc; 67 65 __u8 maskable; 68 66 __u8 swap; 69 - __u8 pad; 67 + __u8 flags; 70 68 }; 71 69 72 70 id contains the unique id for the adapter, isc the I/O interruption subclass 73 - to use, maskable whether this adapter may be masked (interrupts turned off) 74 - and swap whether the indicators need to be byte swapped. 71 + to use, maskable whether this adapter may be masked (interrupts turned off), 72 + swap whether the indicators need to be byte swapped, and flags contains 73 + further characteristics of the adapter. 74 + Currently defined values for 'flags' are: 75 + - KVM_S390_ADAPTER_SUPPRESSIBLE: adapter is subject to AIS 76 + (adapter-interrupt-suppression) facility. This flag only has an effect if 77 + the AIS capability is enabled. 78 + Unknown flag values are ignored. 75 79 76 80 77 81 KVM_DEV_FLIC_ADAPTER_MODIFY ··· 108 100 KVM_S390_IO_ADAPTER_UNMAP 109 101 release a userspace page for the translated address specified in addr 110 102 from the list of mappings 103 + 104 + KVM_DEV_FLIC_AISM 105 + modify the adapter-interruption-suppression mode for a given isc if the 106 + AIS capability is enabled. Takes a kvm_s390_ais_req describing: 107 + 108 + struct kvm_s390_ais_req { 109 + __u8 isc; 110 + __u16 mode; 111 + }; 112 + 113 + isc contains the target I/O interruption subclass, mode the target 114 + adapter-interruption-suppression mode. The following modes are 115 + currently supported: 116 + - KVM_S390_AIS_MODE_ALL: ALL-Interruptions Mode, i.e. 
airq injection 117 + is always allowed; 118 + - KVM_S390_AIS_MODE_SINGLE: SINGLE-Interruption Mode, i.e. airq 119 + injection is only allowed once and the following adapter interrupts 120 + will be suppressed until the mode is set again to ALL-Interruptions 121 + or SINGLE-Interruption mode. 122 + 123 + KVM_DEV_FLIC_AIRQ_INJECT 124 + Inject adapter interrupts on a specified adapter. 125 + attr->attr contains the unique id for the adapter, which allows for 126 + adapter-specific checks and actions. 127 + For adapters subject to AIS, handle the airq injection suppression for 128 + an isc according to the adapter-interruption-suppression mode on condition 129 + that the AIS capability is enabled. 111 130 112 131 Note: The KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR device ioctls executed on 113 132 FLIC with an unknown group or attribute gives the error code EINVAL (instead of
+1
arch/s390/include/asm/elf.h
··· 105 105 #define HWCAP_S390_VXRS 2048 106 106 #define HWCAP_S390_VXRS_BCD 4096 107 107 #define HWCAP_S390_VXRS_EXT 8192 108 + #define HWCAP_S390_GS 16384 108 109 109 110 /* Internal bits, not exposed via elf */ 110 111 #define HWCAP_INT_SIE 1UL
+38 -2
arch/s390/include/asm/kvm_host.h
··· 25 25 #include <asm/cpu.h> 26 26 #include <asm/fpu/api.h> 27 27 #include <asm/isc.h> 28 + #include <asm/guarded_storage.h> 28 29 29 30 #define KVM_S390_BSCA_CPU_SLOTS 64 30 31 #define KVM_S390_ESCA_CPU_SLOTS 248 ··· 165 164 #define ICTL_RRBE 0x00001000 166 165 #define ICTL_TPROT 0x00000200 167 166 __u32 ictl; /* 0x0048 */ 167 + #define ECA_CEI 0x80000000 168 + #define ECA_IB 0x40000000 169 + #define ECA_SIGPI 0x10000000 170 + #define ECA_MVPGI 0x01000000 171 + #define ECA_VX 0x00020000 172 + #define ECA_PROTEXCI 0x00002000 173 + #define ECA_SII 0x00000001 168 174 __u32 eca; /* 0x004c */ 169 175 #define ICPT_INST 0x04 170 176 #define ICPT_PROGI 0x08 171 177 #define ICPT_INSTPROGI 0x0C 178 + #define ICPT_EXTREQ 0x10 172 179 #define ICPT_EXTINT 0x14 180 + #define ICPT_IOREQ 0x18 181 + #define ICPT_WAIT 0x1c 173 182 #define ICPT_VALIDITY 0x20 174 183 #define ICPT_STOP 0x28 175 184 #define ICPT_OPEREXC 0x2C ··· 193 182 __u32 ipb; /* 0x0058 */ 194 183 __u32 scaoh; /* 0x005c */ 195 184 __u8 reserved60; /* 0x0060 */ 185 + #define ECB_GS 0x40 186 + #define ECB_TE 0x10 187 + #define ECB_SRSI 0x04 188 + #define ECB_HOSTPROTINT 0x02 196 189 __u8 ecb; /* 0x0061 */ 190 + #define ECB2_CMMA 0x80 191 + #define ECB2_IEP 0x20 192 + #define ECB2_PFMFI 0x08 193 + #define ECB2_ESCA 0x04 197 194 __u8 ecb2; /* 0x0062 */ 198 - #define ECB3_AES 0x04 199 195 #define ECB3_DEA 0x08 196 + #define ECB3_AES 0x04 197 + #define ECB3_RI 0x01 200 198 __u8 ecb3; /* 0x0063 */ 201 199 __u32 scaol; /* 0x0064 */ 202 200 __u8 reserved68[4]; /* 0x0068 */ ··· 239 219 __u32 crycbd; /* 0x00fc */ 240 220 __u64 gcr[16]; /* 0x0100 */ 241 221 __u64 gbea; /* 0x0180 */ 242 - __u8 reserved188[24]; /* 0x0188 */ 222 + __u8 reserved188[8]; /* 0x0188 */ 223 + __u64 sdnxo; /* 0x0190 */ 224 + __u8 reserved198[8]; /* 0x0198 */ 243 225 __u32 fac; /* 0x01a0 */ 244 226 __u8 reserved1a4[20]; /* 0x01a4 */ 245 227 __u64 cbrlo; /* 0x01b8 */ 246 228 __u8 reserved1c0[8]; /* 0x01c0 */ 229 + #define ECD_HOSTREGMGMT 0x20000000 247 
230 __u32 ecd; /* 0x01c8 */ 248 231 __u8 reserved1cc[18]; /* 0x01cc */ 249 232 __u64 pp; /* 0x01de */ ··· 521 498 #define FIRQ_CNTR_PFAULT 3 522 499 #define FIRQ_MAX_COUNT 4 523 500 501 + /* mask the AIS mode for a given ISC */ 502 + #define AIS_MODE_MASK(isc) (0x80 >> isc) 503 + 504 + #define KVM_S390_AIS_MODE_ALL 0 505 + #define KVM_S390_AIS_MODE_SINGLE 1 506 + 524 507 struct kvm_s390_float_interrupt { 525 508 unsigned long pending_irqs; 526 509 spinlock_t lock; ··· 536 507 struct kvm_s390_ext_info srv_signal; 537 508 int next_rr_cpu; 538 509 unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)]; 510 + struct mutex ais_lock; 511 + u8 simm; 512 + u8 nimm; 513 + int ais_enabled; 539 514 }; 540 515 541 516 struct kvm_hw_wp_info_arch { ··· 587 554 /* if vsie is active, currently executed shadow sie control block */ 588 555 struct kvm_s390_sie_block *vsie_block; 589 556 unsigned int host_acrs[NUM_ACRS]; 557 + struct gs_cb *host_gscb; 590 558 struct fpu host_fpregs; 591 559 struct kvm_s390_local_interrupt local_int; 592 560 struct hrtimer ckc_timer; ··· 608 574 */ 609 575 seqcount_t cputm_seqcount; 610 576 __u64 cputm_start; 577 + bool gs_enabled; 611 578 }; 612 579 613 580 struct kvm_vm_stat { ··· 631 596 bool maskable; 632 597 bool masked; 633 598 bool swap; 599 + bool suppressible; 634 600 struct rw_semaphore maps_lock; 635 601 struct list_head maps; 636 602 atomic_t nr_maps;
+3 -6
arch/s390/include/asm/lowcore.h
··· 157 157 __u64 stfle_fac_list[32]; /* 0x0f00 */ 158 158 __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */ 159 159 160 - /* Pointer to vector register save area */ 161 - __u64 vector_save_area_addr; /* 0x11b0 */ 160 + /* Pointer to the machine check extended save area */ 161 + __u64 mcesad; /* 0x11b0 */ 162 162 163 163 /* 64 bit extparam used for pfault/diag 250: defined by architecture */ 164 164 __u64 ext_params2; /* 0x11B8 */ ··· 182 182 183 183 /* Transaction abort diagnostic block */ 184 184 __u8 pgm_tdb[256]; /* 0x1800 */ 185 - __u8 pad_0x1900[0x1c00-0x1900]; /* 0x1900 */ 186 - 187 - /* Software defined save area for vector registers */ 188 - __u8 vector_save_area[1024]; /* 0x1c00 */ 185 + __u8 pad_0x1900[0x2000-0x1900]; /* 0x1900 */ 189 186 } __packed; 190 187 191 188 #define S390_lowcore (*((struct lowcore *) 0))
+11 -1
arch/s390/include/asm/nmi.h
··· 58 58 u64 ie : 1; /* 32 indirect storage error */ 59 59 u64 ar : 1; /* 33 access register validity */ 60 60 u64 da : 1; /* 34 delayed access exception */ 61 - u64 : 7; /* 35-41 */ 61 + u64 : 1; /* 35 */ 62 + u64 gs : 1; /* 36 guarded storage registers */ 63 + u64 : 5; /* 37-41 */ 62 64 u64 pr : 1; /* 42 tod programmable register validity */ 63 65 u64 fc : 1; /* 43 fp control register validity */ 64 66 u64 ap : 1; /* 44 ancillary report */ ··· 69 67 u64 cc : 1; /* 47 clock comparator validity */ 70 68 u64 : 16; /* 48-63 */ 71 69 }; 70 + }; 71 + 72 + #define MCESA_ORIGIN_MASK (~0x3ffUL) 73 + #define MCESA_LC_MASK (0xfUL) 74 + 75 + struct mcesa { 76 + u8 vector_save_area[1024]; 77 + u8 guarded_storage_save_area[32]; 72 78 }; 73 79 74 80 struct pt_regs;
+5
arch/s390/include/asm/processor.h
··· 135 135 struct list_head list; 136 136 /* cpu runtime instrumentation */ 137 137 struct runtime_instr_cb *ri_cb; 138 + struct gs_cb *gs_cb; /* Current guarded storage cb */ 139 + struct gs_cb *gs_bc_cb; /* Broadcast guarded storage cb */ 138 140 unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ 139 141 /* 140 142 * Warning: 'fpu' is dynamically-sized. It *MUST* be at ··· 216 214 217 215 /* Free all resources held by a thread. */ 218 216 extern void release_thread(struct task_struct *); 217 + 218 + /* Free guarded storage control block for current */ 219 + void exit_thread_gs(void); 219 220 220 221 /* 221 222 * Return saved PC of a blocked thread.
+2
arch/s390/include/asm/setup.h
··· 31 31 #define MACHINE_FLAG_VX _BITUL(13) 32 32 #define MACHINE_FLAG_CAD _BITUL(14) 33 33 #define MACHINE_FLAG_NX _BITUL(15) 34 + #define MACHINE_FLAG_GS _BITUL(16) 34 35 35 36 #define LPP_MAGIC _BITUL(31) 36 37 #define LPP_PFAULT_PID_MASK _AC(0xffffffff, UL) ··· 71 70 #define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX) 72 71 #define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD) 73 72 #define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX) 73 + #define MACHINE_HAS_GS (S390_lowcore.machine_flags & MACHINE_FLAG_GS) 74 74 75 75 /* 76 76 * Console mode. Override with conmode=
+3
arch/s390/include/asm/switch_to.h
··· 10 10 #include <linux/thread_info.h> 11 11 #include <asm/fpu/api.h> 12 12 #include <asm/ptrace.h> 13 + #include <asm/guarded_storage.h> 13 14 14 15 extern struct task_struct *__switch_to(void *, void *); 15 16 extern void update_cr_regs(struct task_struct *task); ··· 34 33 save_fpu_regs(); \ 35 34 save_access_regs(&prev->thread.acrs[0]); \ 36 35 save_ri_cb(prev->thread.ri_cb); \ 36 + save_gs_cb(prev->thread.gs_cb); \ 37 37 } \ 38 38 if (next->mm) { \ 39 39 update_cr_regs(next); \ 40 40 set_cpu_flag(CIF_FPU); \ 41 41 restore_access_regs(&next->thread.acrs[0]); \ 42 42 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ 43 + restore_gs_cb(next->thread.gs_cb); \ 43 44 } \ 44 45 prev = __switch_to(prev,next); \ 45 46 } while (0)
+7 -5
arch/s390/include/asm/thread_info.h
··· 54 54 #define TIF_NOTIFY_RESUME 0 /* callback before returning to user */ 55 55 #define TIF_SIGPENDING 1 /* signal pending */ 56 56 #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ 57 - #define TIF_SYSCALL_TRACE 3 /* syscall trace active */ 58 - #define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ 59 - #define TIF_SECCOMP 5 /* secure computing */ 60 - #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ 61 - #define TIF_UPROBE 7 /* breakpointed or single-stepping */ 57 + #define TIF_UPROBE 3 /* breakpointed or single-stepping */ 58 + #define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */ 59 + #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ 60 + #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ 61 + #define TIF_SECCOMP 10 /* secure computing */ 62 + #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ 62 63 #define TIF_31BIT 16 /* 32bit process */ 63 64 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ 64 65 #define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */ ··· 77 76 #define _TIF_UPROBE _BITUL(TIF_UPROBE) 78 77 #define _TIF_31BIT _BITUL(TIF_31BIT) 79 78 #define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP) 79 + #define _TIF_GUARDED_STORAGE _BITUL(TIF_GUARDED_STORAGE) 80 80 81 81 #endif /* _ASM_THREAD_INFO_H */
+1
arch/s390/include/uapi/asm/Kbuild
··· 12 12 header-y += debug.h 13 13 header-y += errno.h 14 14 header-y += fcntl.h 15 + header-y += guarded_storage.h 15 16 header-y += hypfs.h 16 17 header-y += ioctl.h 17 18 header-y += ioctls.h
+77
arch/s390/include/uapi/asm/guarded_storage.h
··· 1 + #ifndef _GUARDED_STORAGE_H 2 + #define _GUARDED_STORAGE_H 3 + 4 + #include <linux/types.h> 5 + 6 + struct gs_cb { 7 + __u64 reserved; 8 + __u64 gsd; 9 + __u64 gssm; 10 + __u64 gs_epl_a; 11 + }; 12 + 13 + struct gs_epl { 14 + __u8 pad1; 15 + union { 16 + __u8 gs_eam; 17 + struct { 18 + __u8 : 6; 19 + __u8 e : 1; 20 + __u8 b : 1; 21 + }; 22 + }; 23 + union { 24 + __u8 gs_eci; 25 + struct { 26 + __u8 tx : 1; 27 + __u8 cx : 1; 28 + __u8 : 5; 29 + __u8 in : 1; 30 + }; 31 + }; 32 + union { 33 + __u8 gs_eai; 34 + struct { 35 + __u8 : 1; 36 + __u8 t : 1; 37 + __u8 as : 2; 38 + __u8 ar : 4; 39 + }; 40 + }; 41 + __u32 pad2; 42 + __u64 gs_eha; 43 + __u64 gs_eia; 44 + __u64 gs_eoa; 45 + __u64 gs_eir; 46 + __u64 gs_era; 47 + }; 48 + 49 + #define GS_ENABLE 0 50 + #define GS_DISABLE 1 51 + #define GS_SET_BC_CB 2 52 + #define GS_CLEAR_BC_CB 3 53 + #define GS_BROADCAST 4 54 + 55 + static inline void load_gs_cb(struct gs_cb *gs_cb) 56 + { 57 + asm volatile(".insn rxy,0xe3000000004d,0,%0" : : "Q" (*gs_cb)); 58 + } 59 + 60 + static inline void store_gs_cb(struct gs_cb *gs_cb) 61 + { 62 + asm volatile(".insn rxy,0xe30000000049,0,%0" : : "Q" (*gs_cb)); 63 + } 64 + 65 + static inline void save_gs_cb(struct gs_cb *gs_cb) 66 + { 67 + if (gs_cb) 68 + store_gs_cb(gs_cb); 69 + } 70 + 71 + static inline void restore_gs_cb(struct gs_cb *gs_cb) 72 + { 73 + if (gs_cb) 74 + load_gs_cb(gs_cb); 75 + } 76 + 77 + #endif /* _GUARDED_STORAGE_H */
+23 -2
arch/s390/include/uapi/asm/kvm.h
··· 26 26 #define KVM_DEV_FLIC_ADAPTER_REGISTER 6 27 27 #define KVM_DEV_FLIC_ADAPTER_MODIFY 7 28 28 #define KVM_DEV_FLIC_CLEAR_IO_IRQ 8 29 + #define KVM_DEV_FLIC_AISM 9 30 + #define KVM_DEV_FLIC_AIRQ_INJECT 10 29 31 /* 30 32 * We can have up to 4*64k pending subchannels + 8 adapter interrupts, 31 33 * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts. ··· 43 41 __u8 isc; 44 42 __u8 maskable; 45 43 __u8 swap; 46 - __u8 pad; 44 + __u8 flags; 45 + }; 46 + 47 + #define KVM_S390_ADAPTER_SUPPRESSIBLE 0x01 48 + 49 + struct kvm_s390_ais_req { 50 + __u8 isc; 51 + __u16 mode; 47 52 }; 48 53 49 54 #define KVM_S390_IO_ADAPTER_MASK 1 ··· 206 197 #define KVM_SYNC_VRS (1UL << 6) 207 198 #define KVM_SYNC_RICCB (1UL << 7) 208 199 #define KVM_SYNC_FPRS (1UL << 8) 200 + #define KVM_SYNC_GSCB (1UL << 9) 201 + /* length and alignment of the sdnx as a power of two */ 202 + #define SDNXC 8 203 + #define SDNXL (1UL << SDNXC) 209 204 /* definition of registers in kvm_run */ 210 205 struct kvm_sync_regs { 211 206 __u64 prefix; /* prefix register */ ··· 230 217 }; 231 218 __u8 reserved[512]; /* for future vector expansion */ 232 219 __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */ 233 - __u8 padding[52]; /* riccb needs to be 64byte aligned */ 220 + __u8 padding1[52]; /* riccb needs to be 64byte aligned */ 234 221 __u8 riccb[64]; /* runtime instrumentation controls block */ 222 + __u8 padding2[192]; /* sdnx needs to be 256byte aligned */ 223 + union { 224 + __u8 sdnx[SDNXL]; /* state description annex */ 225 + struct { 226 + __u64 reserved1[2]; 227 + __u64 gscb[4]; 228 + }; 229 + }; 235 230 }; 236 231 237 232 #define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
+1 -1
arch/s390/include/uapi/asm/unistd.h
··· 313 313 #define __NR_copy_file_range 375 314 314 #define __NR_preadv2 376 315 315 #define __NR_pwritev2 377 316 - /* Number 378 is reserved for guarded storage */ 316 + #define __NR_s390_guarded_storage 378 317 317 #define __NR_statx 379 318 318 #define NR_syscalls 380 319 319
+1 -1
arch/s390/kernel/Makefile
··· 57 57 obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o 58 58 obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o 59 59 obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o 60 - obj-y += runtime_instr.o cache.o fpu.o dumpstack.o 60 + obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o 61 61 obj-y += entry.o reipl.o relocate_kernel.o 62 62 63 63 extra-y += head.o head64.o vmlinux.lds
+1 -1
arch/s390/kernel/asm-offsets.c
··· 175 175 /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ 176 176 OFFSET(__LC_DUMP_REIPL, lowcore, ipib); 177 177 /* hardware defined lowcore locations 0x1000 - 0x18ff */ 178 - OFFSET(__LC_VX_SAVE_AREA_ADDR, lowcore, vector_save_area_addr); 178 + OFFSET(__LC_MCESAD, lowcore, mcesad); 179 179 OFFSET(__LC_EXT_PARAMS2, lowcore, ext_params2); 180 180 OFFSET(__LC_FPREGS_SAVE_AREA, lowcore, floating_pt_save_area); 181 181 OFFSET(__LC_GPREGS_SAVE_AREA, lowcore, gpregs_save_area);
+1
arch/s390/kernel/compat_wrapper.c
··· 178 178 COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); 179 179 COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags); 180 180 COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags); 181 + COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb); 181 182 COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
+2
arch/s390/kernel/early.c
··· 358 358 S390_lowcore.machine_flags |= MACHINE_FLAG_NX; 359 359 __ctl_set_bit(0, 20); 360 360 } 361 + if (test_facility(133)) 362 + S390_lowcore.machine_flags |= MACHINE_FLAG_GS; 361 363 } 362 364 363 365 static inline void save_vector_registers(void)
+25 -1
arch/s390/kernel/entry.S
··· 47 47 STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE 48 48 49 49 _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 50 - _TIF_UPROBE) 50 + _TIF_UPROBE | _TIF_GUARDED_STORAGE) 51 51 _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ 52 52 _TIF_SYSCALL_TRACEPOINT) 53 53 _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \ ··· 332 332 TSTMSK __TI_flags(%r12),_TIF_UPROBE 333 333 jo .Lsysc_uprobe_notify 334 334 #endif 335 + TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE 336 + jo .Lsysc_guarded_storage 335 337 TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP 336 338 jo .Lsysc_singlestep 337 339 TSTMSK __TI_flags(%r12),_TIF_SIGPENDING ··· 409 407 larl %r14,.Lsysc_return 410 408 jg uprobe_notify_resume 411 409 #endif 410 + 411 + # 412 + # _TIF_GUARDED_STORAGE is set, call guarded_storage_load 413 + # 414 + .Lsysc_guarded_storage: 415 + lgr %r2,%r11 # pass pointer to pt_regs 416 + larl %r14,.Lsysc_return 417 + jg gs_load_bc_cb 412 418 413 419 # 414 420 # _PIF_PER_TRAP is set, call do_per_trap ··· 673 663 jo .Lio_sigpending 674 664 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME 675 665 jo .Lio_notify_resume 666 + TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE 667 + jo .Lio_guarded_storage 676 668 TSTMSK __LC_CPU_FLAGS,_CIF_FPU 677 669 jo .Lio_vxrs 678 670 TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY) ··· 707 695 .Lio_vxrs: 708 696 larl %r14,.Lio_return 709 697 jg load_fpu_regs 698 + 699 + # 700 + # _TIF_GUARDED_STORAGE is set, call guarded_storage_load 701 + # 702 + .Lio_guarded_storage: 703 + # TRACE_IRQS_ON already done at .Lio_return 704 + ssm __LC_SVC_NEW_PSW # reenable interrupts 705 + lgr %r2,%r11 # pass pointer to pt_regs 706 + brasl %r14,gs_load_bc_cb 707 + ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 708 + TRACE_IRQS_OFF 709 + j .Lio_return 710 710 711 711 # 712 712 # _TIF_NEED_RESCHED is set, call schedule
+2
arch/s390/kernel/entry.h
··· 74 74 75 75 long sys_s390_personality(unsigned int personality); 76 76 long sys_s390_runtime_instr(int command, int signum); 77 + long sys_s390_guarded_storage(int command, struct gs_cb __user *); 77 78 long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t); 78 79 long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t); 79 80 80 81 DECLARE_PER_CPU(u64, mt_cycles[8]); 81 82 82 83 void verify_facilities(void); 84 + void gs_load_bc_cb(struct pt_regs *regs); 83 85 void set_fs_fixup(void); 84 86 85 87 #endif /* _ENTRY_H */
+128
arch/s390/kernel/guarded_storage.c
··· 1 + /* 2 + * Copyright IBM Corp. 2016 3 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 4 + */ 5 + 6 + #include <linux/kernel.h> 7 + #include <linux/syscalls.h> 8 + #include <linux/signal.h> 9 + #include <linux/mm.h> 10 + #include <linux/slab.h> 11 + #include <asm/guarded_storage.h> 12 + #include "entry.h" 13 + 14 + void exit_thread_gs(void) 15 + { 16 + kfree(current->thread.gs_cb); 17 + kfree(current->thread.gs_bc_cb); 18 + current->thread.gs_cb = current->thread.gs_bc_cb = NULL; 19 + } 20 + 21 + static int gs_enable(void) 22 + { 23 + struct gs_cb *gs_cb; 24 + 25 + if (!current->thread.gs_cb) { 26 + gs_cb = kzalloc(sizeof(*gs_cb), GFP_KERNEL); 27 + if (!gs_cb) 28 + return -ENOMEM; 29 + gs_cb->gsd = 25; 30 + preempt_disable(); 31 + __ctl_set_bit(2, 4); 32 + load_gs_cb(gs_cb); 33 + current->thread.gs_cb = gs_cb; 34 + preempt_enable(); 35 + } 36 + return 0; 37 + } 38 + 39 + static int gs_disable(void) 40 + { 41 + if (current->thread.gs_cb) { 42 + preempt_disable(); 43 + kfree(current->thread.gs_cb); 44 + current->thread.gs_cb = NULL; 45 + __ctl_clear_bit(2, 4); 46 + preempt_enable(); 47 + } 48 + return 0; 49 + } 50 + 51 + static int gs_set_bc_cb(struct gs_cb __user *u_gs_cb) 52 + { 53 + struct gs_cb *gs_cb; 54 + 55 + gs_cb = current->thread.gs_bc_cb; 56 + if (!gs_cb) { 57 + gs_cb = kzalloc(sizeof(*gs_cb), GFP_KERNEL); 58 + if (!gs_cb) 59 + return -ENOMEM; 60 + current->thread.gs_bc_cb = gs_cb; 61 + } 62 + if (copy_from_user(gs_cb, u_gs_cb, sizeof(*gs_cb))) 63 + return -EFAULT; 64 + return 0; 65 + } 66 + 67 + static int gs_clear_bc_cb(void) 68 + { 69 + struct gs_cb *gs_cb; 70 + 71 + gs_cb = current->thread.gs_bc_cb; 72 + current->thread.gs_bc_cb = NULL; 73 + kfree(gs_cb); 74 + return 0; 75 + } 76 + 77 + void gs_load_bc_cb(struct pt_regs *regs) 78 + { 79 + struct gs_cb *gs_cb; 80 + 81 + preempt_disable(); 82 + clear_thread_flag(TIF_GUARDED_STORAGE); 83 + gs_cb = current->thread.gs_bc_cb; 84 + if (gs_cb) { 85 + kfree(current->thread.gs_cb); 86 + 
current->thread.gs_bc_cb = NULL; 87 + __ctl_set_bit(2, 4); 88 + load_gs_cb(gs_cb); 89 + current->thread.gs_cb = gs_cb; 90 + } 91 + preempt_enable(); 92 + } 93 + 94 + static int gs_broadcast(void) 95 + { 96 + struct task_struct *sibling; 97 + 98 + read_lock(&tasklist_lock); 99 + for_each_thread(current, sibling) { 100 + if (!sibling->thread.gs_bc_cb) 101 + continue; 102 + if (test_and_set_tsk_thread_flag(sibling, TIF_GUARDED_STORAGE)) 103 + kick_process(sibling); 104 + } 105 + read_unlock(&tasklist_lock); 106 + return 0; 107 + } 108 + 109 + SYSCALL_DEFINE2(s390_guarded_storage, int, command, 110 + struct gs_cb __user *, gs_cb) 111 + { 112 + if (!MACHINE_HAS_GS) 113 + return -EOPNOTSUPP; 114 + switch (command) { 115 + case GS_ENABLE: 116 + return gs_enable(); 117 + case GS_DISABLE: 118 + return gs_disable(); 119 + case GS_SET_BC_CB: 120 + return gs_set_bc_cb(gs_cb); 121 + case GS_CLEAR_BC_CB: 122 + return gs_clear_bc_cb(); 123 + case GS_BROADCAST: 124 + return gs_broadcast(); 125 + default: 126 + return -EINVAL; 127 + } 128 + }
+12 -1
arch/s390/kernel/machine_kexec.c
··· 27 27 #include <asm/cacheflush.h> 28 28 #include <asm/os_info.h> 29 29 #include <asm/switch_to.h> 30 + #include <asm/nmi.h> 30 31 31 32 typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); 32 33 ··· 103 102 */ 104 103 static noinline void __machine_kdump(void *image) 105 104 { 105 + struct mcesa *mcesa; 106 + unsigned long cr2_old, cr2_new; 106 107 int this_cpu, cpu; 107 108 108 109 lgr_info_log(); ··· 117 114 continue; 118 115 } 119 116 /* Store status of the boot CPU */ 117 + mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK); 120 118 if (MACHINE_HAS_VX) 121 - save_vx_regs((void *) &S390_lowcore.vector_save_area); 119 + save_vx_regs((__vector128 *) mcesa->vector_save_area); 120 + if (MACHINE_HAS_GS) { 121 + __ctl_store(cr2_old, 2, 2); 122 + cr2_new = cr2_old | (1UL << 4); 123 + __ctl_load(cr2_new, 2, 2); 124 + save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area); 125 + __ctl_load(cr2_old, 2, 2); 126 + } 122 127 /* 123 128 * To create a good backchain for this CPU in the dump store_status 124 129 * is passed the address of a function. The address is saved into
+17 -2
arch/s390/kernel/nmi.c
··· 106 106 int kill_task; 107 107 u64 zero; 108 108 void *fpt_save_area; 109 + struct mcesa *mcesa; 109 110 110 111 kill_task = 0; 111 112 zero = 0; ··· 166 165 : : "Q" (S390_lowcore.fpt_creg_save_area)); 167 166 } 168 167 168 + mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK); 169 169 if (!MACHINE_HAS_VX) { 170 170 /* Validate floating point registers */ 171 171 asm volatile( ··· 211 209 " la 1,%0\n" 212 210 " .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */ 213 211 " .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */ 214 - : : "Q" (*(struct vx_array *) 215 - &S390_lowcore.vector_save_area) : "1"); 212 + : : "Q" (*(struct vx_array *) mcesa->vector_save_area) 213 + : "1"); 216 214 __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0); 217 215 } 218 216 /* Validate access registers */ ··· 225 223 * Terminating task. 226 224 */ 227 225 kill_task = 1; 228 226 } 227 + /* Validate guarded storage registers */ 228 + if (MACHINE_HAS_GS && (S390_lowcore.cregs_save_area[2] & (1UL << 4))) { 229 + if (!mci.gs) 230 + /* 231 + * Guarded storage register can't be restored and 232 + * the current process uses guarded storage. 233 + * It has to be terminated. 234 + */ 235 + kill_task = 1; 236 + else 237 + load_gs_cb((struct gs_cb *) 238 + mcesa->guarded_storage_save_area); 228 239 } 229 240 /* 230 241 * We don't even try to validate the TOD register, since we simply
+6 -1
arch/s390/kernel/process.c
··· 73 73 */ 74 74 void exit_thread(struct task_struct *tsk) 75 75 { 76 - if (tsk == current) 76 + if (tsk == current) { 77 77 exit_thread_runtime_instr(); 78 + exit_thread_gs(); 79 + } 78 80 } 79 81 80 82 void flush_thread(void) ··· 161 159 /* Don't copy runtime instrumentation info */ 162 160 p->thread.ri_cb = NULL; 163 161 frame->childregs.psw.mask &= ~PSW_MASK_RI; 162 + /* Don't copy guarded storage control block */ 163 + p->thread.gs_cb = NULL; 164 + p->thread.gs_bc_cb = NULL; 164 165 165 166 /* Set a new TLS ? */ 166 167 if (clone_flags & CLONE_SETTLS) {
+1 -1
arch/s390/kernel/processor.c
··· 95 95 { 96 96 static const char *hwcap_str[] = { 97 97 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", 98 - "edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe" 98 + "edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe", "gs" 99 99 }; 100 100 static const char * const int_hwcap_str[] = { 101 101 "sie"
+71 -13
arch/s390/kernel/ptrace.c
··· 44 44 struct pt_regs *regs = task_pt_regs(task); 45 45 struct thread_struct *thread = &task->thread; 46 46 struct per_regs old, new; 47 + unsigned long cr0_old, cr0_new; 48 + unsigned long cr2_old, cr2_new; 49 + int cr0_changed, cr2_changed; 47 50 51 + __ctl_store(cr0_old, 0, 0); 52 + __ctl_store(cr2_old, 2, 2); 53 + cr0_new = cr0_old; 54 + cr2_new = cr2_old; 48 55 /* Take care of the enable/disable of transactional execution. */ 49 56 if (MACHINE_HAS_TE) { 50 - unsigned long cr, cr_new; 51 - 52 - __ctl_store(cr, 0, 0); 53 57 /* Set or clear transaction execution TXC bit 8. */ 54 - cr_new = cr | (1UL << 55); 58 + cr0_new |= (1UL << 55); 55 59 if (task->thread.per_flags & PER_FLAG_NO_TE) 56 - cr_new &= ~(1UL << 55); 57 - if (cr_new != cr) 58 - __ctl_load(cr_new, 0, 0); 60 + cr0_new &= ~(1UL << 55); 59 61 /* Set or clear transaction execution TDC bits 62 and 63. */ 60 - __ctl_store(cr, 2, 2); 61 - cr_new = cr & ~3UL; 62 + cr2_new &= ~3UL; 62 63 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { 63 64 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) 64 - cr_new |= 1UL; 65 + cr2_new |= 1UL; 65 66 else 66 - cr_new |= 2UL; 67 + cr2_new |= 2UL; 67 68 } 68 - if (cr_new != cr) 69 - __ctl_load(cr_new, 2, 2); 70 69 } 70 + /* Take care of enable/disable of guarded storage. 
*/ 71 + if (MACHINE_HAS_GS) { 72 + cr2_new &= ~(1UL << 4); 73 + if (task->thread.gs_cb) 74 + cr2_new |= (1UL << 4); 75 + } 76 + /* Load control register 0/2 iff changed */ 77 + cr0_changed = cr0_new != cr0_old; 78 + cr2_changed = cr2_new != cr2_old; 79 + if (cr0_changed) 80 + __ctl_load(cr0_new, 0, 0); 81 + if (cr2_changed) 82 + __ctl_load(cr2_new, 2, 2); 71 83 /* Copy user specified PER registers */ 72 84 new.control = thread->per_user.control; 73 85 new.start = thread->per_user.start; ··· 1149 1137 data, 0, sizeof(unsigned int)); 1150 1138 } 1151 1139 1140 + static int s390_gs_cb_get(struct task_struct *target, 1141 + const struct user_regset *regset, 1142 + unsigned int pos, unsigned int count, 1143 + void *kbuf, void __user *ubuf) 1144 + { 1145 + struct gs_cb *data = target->thread.gs_cb; 1146 + 1147 + if (!MACHINE_HAS_GS) 1148 + return -ENODEV; 1149 + if (!data) 1150 + return -ENODATA; 1151 + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 1152 + data, 0, sizeof(struct gs_cb)); 1153 + } 1154 + 1155 + static int s390_gs_cb_set(struct task_struct *target, 1156 + const struct user_regset *regset, 1157 + unsigned int pos, unsigned int count, 1158 + const void *kbuf, const void __user *ubuf) 1159 + { 1160 + struct gs_cb *data = target->thread.gs_cb; 1161 + 1162 + if (!MACHINE_HAS_GS) 1163 + return -ENODEV; 1164 + if (!data) 1165 + return -ENODATA; 1166 + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1167 + data, 0, sizeof(struct gs_cb)); 1168 + } 1169 + 1152 1170 static const struct user_regset s390_regsets[] = { 1153 1171 { 1154 1172 .core_note_type = NT_PRSTATUS, ··· 1235 1193 .align = sizeof(__vector128), 1236 1194 .get = s390_vxrs_high_get, 1237 1195 .set = s390_vxrs_high_set, 1196 + }, 1197 + { 1198 + .core_note_type = NT_S390_GS_CB, 1199 + .n = sizeof(struct gs_cb) / sizeof(__u64), 1200 + .size = sizeof(__u64), 1201 + .align = sizeof(__u64), 1202 + .get = s390_gs_cb_get, 1203 + .set = s390_gs_cb_set, 1238 1204 }, 1239 1205 }; 1240 1206 ··· 1471 
1421 .align = sizeof(compat_long_t), 1472 1422 .get = s390_compat_regs_high_get, 1473 1423 .set = s390_compat_regs_high_set, 1424 + }, 1425 + { 1426 + .core_note_type = NT_S390_GS_CB, 1427 + .n = sizeof(struct gs_cb) / sizeof(__u64), 1428 + .size = sizeof(__u64), 1429 + .align = sizeof(__u64), 1430 + .get = s390_gs_cb_get, 1431 + .set = s390_gs_cb_set, 1474 1432 }, 1475 1433 }; 1476 1434
+15 -3
arch/s390/kernel/setup.c
··· 339 339 lc->stfl_fac_list = S390_lowcore.stfl_fac_list; 340 340 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, 341 341 MAX_FACILITY_BIT/8); 342 - if (MACHINE_HAS_VX) 343 - lc->vector_save_area_addr = 344 - (unsigned long) &lc->vector_save_area; 342 + if (MACHINE_HAS_VX || MACHINE_HAS_GS) { 343 + unsigned long bits, size; 344 + 345 + bits = MACHINE_HAS_GS ? 11 : 10; 346 + size = 1UL << bits; 347 + lc->mcesad = (__u64) memblock_virt_alloc(size, size); 348 + if (MACHINE_HAS_GS) 349 + lc->mcesad |= bits; 350 + } 345 351 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; 346 352 lc->sync_enter_timer = S390_lowcore.sync_enter_timer; 347 353 lc->async_enter_timer = S390_lowcore.async_enter_timer; ··· 784 778 if (test_facility(135)) 785 779 elf_hwcap |= HWCAP_S390_VXRS_BCD; 786 780 } 781 + 782 + /* 783 + * Guarded storage support HWCAP_S390_GS is bit 14. 784 + */ 785 + if (MACHINE_HAS_GS) 786 + elf_hwcap |= HWCAP_S390_GS; 787 787 788 788 get_cpu_id(&cpu_id); 789 789 add_device_randomness(&cpu_id, sizeof(cpu_id));
+38 -5
arch/s390/kernel/smp.c
··· 51 51 #include <asm/os_info.h> 52 52 #include <asm/sigp.h> 53 53 #include <asm/idle.h> 54 + #include <asm/nmi.h> 54 55 #include "entry.h" 55 56 56 57 enum { ··· 78 77 79 78 static u8 boot_core_type; 80 79 static struct pcpu pcpu_devices[NR_CPUS]; 80 + 81 + static struct kmem_cache *pcpu_mcesa_cache; 81 82 82 83 unsigned int smp_cpu_mt_shift; 83 84 EXPORT_SYMBOL(smp_cpu_mt_shift); ··· 191 188 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) 192 189 { 193 190 unsigned long async_stack, panic_stack; 191 + unsigned long mcesa_origin, mcesa_bits; 194 192 struct lowcore *lc; 195 193 194 + mcesa_origin = mcesa_bits = 0; 196 195 if (pcpu != &pcpu_devices[0]) { 197 196 pcpu->lowcore = (struct lowcore *) 198 197 __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); ··· 202 197 panic_stack = __get_free_page(GFP_KERNEL); 203 198 if (!pcpu->lowcore || !panic_stack || !async_stack) 204 199 goto out; 200 + if (MACHINE_HAS_VX || MACHINE_HAS_GS) { 201 + mcesa_origin = (unsigned long) 202 + kmem_cache_alloc(pcpu_mcesa_cache, GFP_KERNEL); 203 + if (!mcesa_origin) 204 + goto out; 205 + mcesa_bits = MACHINE_HAS_GS ? 
11 : 0; 206 + } 205 207 } else { 206 208 async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET; 207 209 panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET; 210 + mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK; 211 + mcesa_bits = pcpu->lowcore->mcesad & MCESA_LC_MASK; 208 212 } 209 213 lc = pcpu->lowcore; 210 214 memcpy(lc, &S390_lowcore, 512); 211 215 memset((char *) lc + 512, 0, sizeof(*lc) - 512); 212 216 lc->async_stack = async_stack + ASYNC_FRAME_OFFSET; 213 217 lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET; 218 + lc->mcesad = mcesa_origin | mcesa_bits; 214 219 lc->cpu_nr = cpu; 215 220 lc->spinlock_lockval = arch_spin_lockval(cpu); 216 - if (MACHINE_HAS_VX) 217 - lc->vector_save_area_addr = 218 - (unsigned long) &lc->vector_save_area; 219 221 if (vdso_alloc_per_cpu(lc)) 220 222 goto out; 221 223 lowcore_ptr[cpu] = lc; ··· 230 218 return 0; 231 219 out: 232 220 if (pcpu != &pcpu_devices[0]) { 221 + if (mcesa_origin) 222 + kmem_cache_free(pcpu_mcesa_cache, 223 + (void *) mcesa_origin); 233 224 free_page(panic_stack); 234 225 free_pages(async_stack, ASYNC_ORDER); 235 226 free_pages((unsigned long) pcpu->lowcore, LC_ORDER); ··· 244 229 245 230 static void pcpu_free_lowcore(struct pcpu *pcpu) 246 231 { 232 + unsigned long mcesa_origin; 233 + 247 234 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0); 248 235 lowcore_ptr[pcpu - pcpu_devices] = NULL; 249 236 vdso_free_per_cpu(pcpu->lowcore); 250 237 if (pcpu == &pcpu_devices[0]) 251 238 return; 239 + if (MACHINE_HAS_VX || MACHINE_HAS_GS) { 240 + mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK; 241 + kmem_cache_free(pcpu_mcesa_cache, (void *) mcesa_origin); 242 + } 252 243 free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET); 253 244 free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER); 254 245 free_pages((unsigned long) pcpu->lowcore, LC_ORDER); ··· 571 550 if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS, 572 551 pa) != 
SIGP_CC_ORDER_CODE_ACCEPTED) 573 552 return -EIO; 574 - if (!MACHINE_HAS_VX) 553 + if (!MACHINE_HAS_VX && !MACHINE_HAS_GS) 575 554 return 0; 576 - pa = __pa(pcpu->lowcore->vector_save_area_addr); 555 + pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK); 556 + if (MACHINE_HAS_GS) 557 + pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK; 577 558 if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS, 578 559 pa) != SIGP_CC_ORDER_CODE_ACCEPTED) 579 560 return -EIO; ··· 920 897 921 898 void __init smp_prepare_cpus(unsigned int max_cpus) 922 899 { 900 + unsigned long size; 901 + 923 902 /* request the 0x1201 emergency signal external interrupt */ 924 903 if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt)) 925 904 panic("Couldn't request external interrupt 0x1201"); 926 905 /* request the 0x1202 external call external interrupt */ 927 906 if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt)) 928 907 panic("Couldn't request external interrupt 0x1202"); 908 + /* create slab cache for the machine-check-extended-save-areas */ 909 + if (MACHINE_HAS_VX || MACHINE_HAS_GS) { 910 + size = 1UL << (MACHINE_HAS_GS ? 11 : 10); 911 + pcpu_mcesa_cache = kmem_cache_create("nmi_save_areas", 912 + size, size, 0, NULL); 913 + if (!pcpu_mcesa_cache) 914 + panic("Couldn't create nmi save area cache"); 915 + } 929 916 } 930 917 931 918 void __init smp_prepare_boot_cpu(void)
+1 -1
arch/s390/kernel/syscalls.S
··· 386 386 SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */ 387 387 SYSCALL(sys_preadv2,compat_sys_preadv2) 388 388 SYSCALL(sys_pwritev2,compat_sys_pwritev2) 389 - NI_SYSCALL 389 + SYSCALL(sys_s390_guarded_storage,compat_sys_s390_guarded_storage) /* 378 */ 390 390 SYSCALL(sys_statx,compat_sys_statx)
+3 -3
arch/s390/kvm/gaccess.c
··· 262 262 263 263 int ipte_lock_held(struct kvm_vcpu *vcpu) 264 264 { 265 - if (vcpu->arch.sie_block->eca & 1) { 265 + if (vcpu->arch.sie_block->eca & ECA_SII) { 266 266 int rc; 267 267 268 268 read_lock(&vcpu->kvm->arch.sca_lock); ··· 361 361 362 362 void ipte_lock(struct kvm_vcpu *vcpu) 363 363 { 364 - if (vcpu->arch.sie_block->eca & 1) 364 + if (vcpu->arch.sie_block->eca & ECA_SII) 365 365 ipte_lock_siif(vcpu); 366 366 else 367 367 ipte_lock_simple(vcpu); ··· 369 369 370 370 void ipte_unlock(struct kvm_vcpu *vcpu) 371 371 { 372 - if (vcpu->arch.sie_block->eca & 1) 372 + if (vcpu->arch.sie_block->eca & ECA_SII) 373 373 ipte_unlock_siif(vcpu); 374 374 else 375 375 ipte_unlock_simple(vcpu);
+12 -12
arch/s390/kvm/intercept.c
··· 35 35 [0xb6] = kvm_s390_handle_stctl, 36 36 [0xb7] = kvm_s390_handle_lctl, 37 37 [0xb9] = kvm_s390_handle_b9, 38 + [0xe3] = kvm_s390_handle_e3, 38 39 [0xe5] = kvm_s390_handle_e5, 39 40 [0xeb] = kvm_s390_handle_eb, 40 41 }; ··· 369 368 trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa, 370 369 vcpu->arch.sie_block->ipb); 371 370 372 - if (vcpu->arch.sie_block->ipa == 0xb256 && 373 - test_kvm_facility(vcpu->kvm, 74)) 371 + if (vcpu->arch.sie_block->ipa == 0xb256) 374 372 return handle_sthyi(vcpu); 375 373 376 374 if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0) ··· 404 404 return -EOPNOTSUPP; 405 405 406 406 switch (vcpu->arch.sie_block->icptcode) { 407 - case 0x10: 408 - case 0x18: 407 + case ICPT_EXTREQ: 408 + case ICPT_IOREQ: 409 409 return handle_noop(vcpu); 410 - case 0x04: 410 + case ICPT_INST: 411 411 rc = handle_instruction(vcpu); 412 412 break; 413 - case 0x08: 413 + case ICPT_PROGI: 414 414 return handle_prog(vcpu); 415 - case 0x14: 415 + case ICPT_EXTINT: 416 416 return handle_external_interrupt(vcpu); 417 - case 0x1c: 417 + case ICPT_WAIT: 418 418 return kvm_s390_handle_wait(vcpu); 419 - case 0x20: 419 + case ICPT_VALIDITY: 420 420 return handle_validity(vcpu); 421 - case 0x28: 421 + case ICPT_STOP: 422 422 return handle_stop(vcpu); 423 - case 0x2c: 423 + case ICPT_OPEREXC: 424 424 rc = handle_operexc(vcpu); 425 425 break; 426 - case 0x38: 426 + case ICPT_PARTEXEC: 427 427 rc = handle_partial_execution(vcpu); 428 428 break; 429 429 default:
+125 -10
arch/s390/kvm/interrupt.c
··· 410 410 struct kvm_s390_mchk_info *mchk) 411 411 { 412 412 unsigned long ext_sa_addr; 413 + unsigned long lc; 413 414 freg_t fprs[NUM_FPRS]; 414 415 union mci mci; 415 416 int rc; ··· 421 420 save_access_regs(vcpu->run->s.regs.acrs); 422 421 423 422 /* Extended save area */ 424 - rc = read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, &ext_sa_addr, 425 - sizeof(unsigned long)); 426 - /* Only bits 0-53 are used for address formation */ 427 - ext_sa_addr &= ~0x3ffUL; 423 + rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr, 424 + sizeof(unsigned long)); 425 + /* Only bits 0 through 63-LC are used for address formation */ 426 + lc = ext_sa_addr & MCESA_LC_MASK; 427 + if (test_kvm_facility(vcpu->kvm, 133)) { 428 + switch (lc) { 429 + case 0: 430 + case 10: 431 + ext_sa_addr &= ~0x3ffUL; 432 + break; 433 + case 11: 434 + ext_sa_addr &= ~0x7ffUL; 435 + break; 436 + case 12: 437 + ext_sa_addr &= ~0xfffUL; 438 + break; 439 + default: 440 + ext_sa_addr = 0; 441 + break; 442 + } 443 + } else { 444 + ext_sa_addr &= ~0x3ffUL; 445 + } 446 + 428 447 if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) { 429 448 if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs, 430 449 512)) 431 450 mci.vr = 0; 432 451 } else { 433 452 mci.vr = 0; 453 + } 454 + if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133) 455 + && (lc == 11 || lc == 12)) { 456 + if (write_guest_abs(vcpu, ext_sa_addr + 1024, 457 + &vcpu->run->s.regs.gscb, 32)) 458 + mci.gs = 0; 459 + } else { 460 + mci.gs = 0; 434 461 } 435 462 436 463 /* General interruption information */ ··· 1997 1968 adapter->maskable = adapter_info.maskable; 1998 1969 adapter->masked = false; 1999 1970 adapter->swap = adapter_info.swap; 1971 + adapter->suppressible = (adapter_info.flags) & 1972 + KVM_S390_ADAPTER_SUPPRESSIBLE; 2000 1973 dev->kvm->arch.adapters[adapter->id] = adapter; 2001 1974 2002 1975 return 0; ··· 2152 2121 return 0; 2153 2122 } 2154 2123 2124 + static int modify_ais_mode(struct kvm *kvm, 
struct kvm_device_attr *attr) 2125 + { 2126 + struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 2127 + struct kvm_s390_ais_req req; 2128 + int ret = 0; 2129 + 2130 + if (!fi->ais_enabled) 2131 + return -ENOTSUPP; 2132 + 2133 + if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req))) 2134 + return -EFAULT; 2135 + 2136 + if (req.isc > MAX_ISC) 2137 + return -EINVAL; 2138 + 2139 + trace_kvm_s390_modify_ais_mode(req.isc, 2140 + (fi->simm & AIS_MODE_MASK(req.isc)) ? 2141 + (fi->nimm & AIS_MODE_MASK(req.isc)) ? 2142 + 2 : KVM_S390_AIS_MODE_SINGLE : 2143 + KVM_S390_AIS_MODE_ALL, req.mode); 2144 + 2145 + mutex_lock(&fi->ais_lock); 2146 + switch (req.mode) { 2147 + case KVM_S390_AIS_MODE_ALL: 2148 + fi->simm &= ~AIS_MODE_MASK(req.isc); 2149 + fi->nimm &= ~AIS_MODE_MASK(req.isc); 2150 + break; 2151 + case KVM_S390_AIS_MODE_SINGLE: 2152 + fi->simm |= AIS_MODE_MASK(req.isc); 2153 + fi->nimm &= ~AIS_MODE_MASK(req.isc); 2154 + break; 2155 + default: 2156 + ret = -EINVAL; 2157 + } 2158 + mutex_unlock(&fi->ais_lock); 2159 + 2160 + return ret; 2161 + } 2162 + 2163 + static int kvm_s390_inject_airq(struct kvm *kvm, 2164 + struct s390_io_adapter *adapter) 2165 + { 2166 + struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int; 2167 + struct kvm_s390_interrupt s390int = { 2168 + .type = KVM_S390_INT_IO(1, 0, 0, 0), 2169 + .parm = 0, 2170 + .parm64 = (adapter->isc << 27) | 0x80000000, 2171 + }; 2172 + int ret = 0; 2173 + 2174 + if (!fi->ais_enabled || !adapter->suppressible) 2175 + return kvm_s390_inject_vm(kvm, &s390int); 2176 + 2177 + mutex_lock(&fi->ais_lock); 2178 + if (fi->nimm & AIS_MODE_MASK(adapter->isc)) { 2179 + trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc); 2180 + goto out; 2181 + } 2182 + 2183 + ret = kvm_s390_inject_vm(kvm, &s390int); 2184 + if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) { 2185 + fi->nimm |= AIS_MODE_MASK(adapter->isc); 2186 + trace_kvm_s390_modify_ais_mode(adapter->isc, 2187 + KVM_S390_AIS_MODE_SINGLE, 2); 2188 + } 
2189 + out: 2190 + mutex_unlock(&fi->ais_lock); 2191 + return ret; 2192 + } 2193 + 2194 + static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr) 2195 + { 2196 + unsigned int id = attr->attr; 2197 + struct s390_io_adapter *adapter = get_io_adapter(kvm, id); 2198 + 2199 + if (!adapter) 2200 + return -EINVAL; 2201 + 2202 + return kvm_s390_inject_airq(kvm, adapter); 2203 + } 2204 + 2155 2205 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) 2156 2206 { 2157 2207 int r = 0; ··· 2269 2157 case KVM_DEV_FLIC_CLEAR_IO_IRQ: 2270 2158 r = clear_io_irq(dev->kvm, attr); 2271 2159 break; 2160 + case KVM_DEV_FLIC_AISM: 2161 + r = modify_ais_mode(dev->kvm, attr); 2162 + break; 2163 + case KVM_DEV_FLIC_AIRQ_INJECT: 2164 + r = flic_inject_airq(dev->kvm, attr); 2165 + break; 2272 2166 default: 2273 2167 r = -EINVAL; 2274 2168 } ··· 2294 2176 case KVM_DEV_FLIC_ADAPTER_REGISTER: 2295 2177 case KVM_DEV_FLIC_ADAPTER_MODIFY: 2296 2178 case KVM_DEV_FLIC_CLEAR_IO_IRQ: 2179 + case KVM_DEV_FLIC_AISM: 2180 + case KVM_DEV_FLIC_AIRQ_INJECT: 2297 2181 return 0; 2298 2182 } 2299 2183 return -ENXIO; ··· 2406 2286 ret = adapter_indicators_set(kvm, adapter, &e->adapter); 2407 2287 up_read(&adapter->maps_lock); 2408 2288 if ((ret > 0) && !adapter->masked) { 2409 - struct kvm_s390_interrupt s390int = { 2410 - .type = KVM_S390_INT_IO(1, 0, 0, 0), 2411 - .parm = 0, 2412 - .parm64 = (adapter->isc << 27) | 0x80000000, 2413 - }; 2414 - ret = kvm_s390_inject_vm(kvm, &s390int); 2289 + ret = kvm_s390_inject_airq(kvm, adapter); 2415 2290 if (ret == 0) 2416 2291 ret = 1; 2417 2292 }
+105 -22
arch/s390/kvm/kvm-s390.c
··· 380 380 case KVM_CAP_S390_SKEYS: 381 381 case KVM_CAP_S390_IRQ_STATE: 382 382 case KVM_CAP_S390_USER_INSTR0: 383 + case KVM_CAP_S390_AIS: 383 384 r = 1; 384 385 break; 385 386 case KVM_CAP_S390_MEM_OP: ··· 405 404 break; 406 405 case KVM_CAP_S390_RI: 407 406 r = test_facility(64); 407 + break; 408 + case KVM_CAP_S390_GS: 409 + r = test_facility(133); 408 410 break; 409 411 default: 410 412 r = 0; ··· 543 539 } 544 540 mutex_unlock(&kvm->lock); 545 541 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s", 542 + r ? "(not available)" : "(success)"); 543 + break; 544 + case KVM_CAP_S390_AIS: 545 + mutex_lock(&kvm->lock); 546 + if (kvm->created_vcpus) { 547 + r = -EBUSY; 548 + } else { 549 + set_kvm_facility(kvm->arch.model.fac_mask, 72); 550 + set_kvm_facility(kvm->arch.model.fac_list, 72); 551 + kvm->arch.float_int.ais_enabled = 1; 552 + r = 0; 553 + } 554 + mutex_unlock(&kvm->lock); 555 + VM_EVENT(kvm, 3, "ENABLE: AIS %s", 556 + r ? "(not available)" : "(success)"); 557 + break; 558 + case KVM_CAP_S390_GS: 559 + r = -EINVAL; 560 + mutex_lock(&kvm->lock); 561 + if (atomic_read(&kvm->online_vcpus)) { 562 + r = -EBUSY; 563 + } else if (test_facility(133)) { 564 + set_kvm_facility(kvm->arch.model.fac_mask, 133); 565 + set_kvm_facility(kvm->arch.model.fac_list, 133); 566 + r = 0; 567 + } 568 + mutex_unlock(&kvm->lock); 569 + VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s", 546 570 r ? 
"(not available)" : "(success)"); 547 571 break; 548 572 case KVM_CAP_S390_USER_STSI: ··· 1530 1498 1531 1499 kvm_s390_crypto_init(kvm); 1532 1500 1501 + mutex_init(&kvm->arch.float_int.ais_lock); 1502 + kvm->arch.float_int.simm = 0; 1503 + kvm->arch.float_int.nimm = 0; 1504 + kvm->arch.float_int.ais_enabled = 0; 1533 1505 spin_lock_init(&kvm->arch.float_int.lock); 1534 1506 for (i = 0; i < FIRQ_LIST_COUNT; i++) 1535 1507 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); ··· 1682 1646 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block; 1683 1647 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32); 1684 1648 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU; 1685 - vcpu->arch.sie_block->ecb2 |= 0x04U; 1649 + vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; 1686 1650 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); 1687 1651 } else { 1688 1652 struct bsca_block *sca = vcpu->kvm->arch.sca; ··· 1736 1700 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) { 1737 1701 vcpu->arch.sie_block->scaoh = scaoh; 1738 1702 vcpu->arch.sie_block->scaol = scaol; 1739 - vcpu->arch.sie_block->ecb2 |= 0x04U; 1703 + vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; 1740 1704 } 1741 1705 kvm->arch.sca = new_sca; 1742 1706 kvm->arch.use_esca = 1; ··· 1785 1749 kvm_s390_set_prefix(vcpu, 0); 1786 1750 if (test_kvm_facility(vcpu->kvm, 64)) 1787 1751 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; 1752 + if (test_kvm_facility(vcpu->kvm, 133)) 1753 + vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB; 1788 1754 /* fprs can be synchronized via vrs, even if the guest has no vx. With 1789 1755 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format. 
1790 1756 */ ··· 1977 1939 if (!vcpu->arch.sie_block->cbrlo) 1978 1940 return -ENOMEM; 1979 1941 1980 - vcpu->arch.sie_block->ecb2 |= 0x80; 1981 - vcpu->arch.sie_block->ecb2 &= ~0x08; 1942 + vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; 1943 + vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI; 1982 1944 return 0; 1983 1945 } 1984 1946 ··· 2008 1970 2009 1971 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */ 2010 1972 if (MACHINE_HAS_ESOP) 2011 - vcpu->arch.sie_block->ecb |= 0x02; 1973 + vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT; 2012 1974 if (test_kvm_facility(vcpu->kvm, 9)) 2013 - vcpu->arch.sie_block->ecb |= 0x04; 1975 + vcpu->arch.sie_block->ecb |= ECB_SRSI; 2014 1976 if (test_kvm_facility(vcpu->kvm, 73)) 2015 - vcpu->arch.sie_block->ecb |= 0x10; 1977 + vcpu->arch.sie_block->ecb |= ECB_TE; 2016 1978 2017 1979 if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi) 2018 - vcpu->arch.sie_block->ecb2 |= 0x08; 1980 + vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; 2019 1981 if (test_kvm_facility(vcpu->kvm, 130)) 2020 - vcpu->arch.sie_block->ecb2 |= 0x20; 2021 - vcpu->arch.sie_block->eca = 0x1002000U; 1982 + vcpu->arch.sie_block->ecb2 |= ECB2_IEP; 1983 + vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI; 2022 1984 if (sclp.has_cei) 2023 - vcpu->arch.sie_block->eca |= 0x80000000U; 1985 + vcpu->arch.sie_block->eca |= ECA_CEI; 2024 1986 if (sclp.has_ib) 2025 - vcpu->arch.sie_block->eca |= 0x40000000U; 1987 + vcpu->arch.sie_block->eca |= ECA_IB; 2026 1988 if (sclp.has_siif) 2027 - vcpu->arch.sie_block->eca |= 1; 1989 + vcpu->arch.sie_block->eca |= ECA_SII; 2028 1990 if (sclp.has_sigpif) 2029 - vcpu->arch.sie_block->eca |= 0x10000000U; 1991 + vcpu->arch.sie_block->eca |= ECA_SIGPI; 2030 1992 if (test_kvm_facility(vcpu->kvm, 129)) { 2031 - vcpu->arch.sie_block->eca |= 0x00020000; 2032 - vcpu->arch.sie_block->ecd |= 0x20000000; 1993 + vcpu->arch.sie_block->eca |= ECA_VX; 1994 + vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; 2033 1995 } 1996 + vcpu->arch.sie_block->sdnxo = 
((unsigned long) &vcpu->run->s.regs.sdnx) 1997 + | SDNXC; 2034 1998 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb; 2035 1999 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; 2036 2000 ··· 2759 2719 2760 2720 static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 2761 2721 { 2722 + struct runtime_instr_cb *riccb; 2723 + struct gs_cb *gscb; 2724 + 2725 + riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; 2726 + gscb = (struct gs_cb *) &kvm_run->s.regs.gscb; 2762 2727 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; 2763 2728 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; 2764 2729 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) ··· 2792 2747 * we should enable RI here instead of doing the lazy enablement. 2793 2748 */ 2794 2749 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) && 2795 - test_kvm_facility(vcpu->kvm, 64)) { 2796 - struct runtime_instr_cb *riccb = 2797 - (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; 2798 - 2799 - if (riccb->valid) 2800 - vcpu->arch.sie_block->ecb3 |= 0x01; 2750 + test_kvm_facility(vcpu->kvm, 64) && 2751 + riccb->valid && 2752 + !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) { 2753 + VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)"); 2754 + vcpu->arch.sie_block->ecb3 |= ECB3_RI; 2755 + } 2756 + /* 2757 + * If userspace sets the gscb (e.g. after migration) to non-zero, 2758 + * we should enable GS here instead of doing the lazy enablement. 
2759 + */ 2760 + if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) && 2761 + test_kvm_facility(vcpu->kvm, 133) && 2762 + gscb->gssm && 2763 + !vcpu->arch.gs_enabled) { 2764 + VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)"); 2765 + vcpu->arch.sie_block->ecb |= ECB_GS; 2766 + vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; 2767 + vcpu->arch.gs_enabled = 1; 2801 2768 } 2802 2769 save_access_regs(vcpu->arch.host_acrs); 2803 2770 restore_access_regs(vcpu->run->s.regs.acrs); ··· 2825 2768 if (test_fp_ctl(current->thread.fpu.fpc)) 2826 2769 /* User space provided an invalid FPC, let's clear it */ 2827 2770 current->thread.fpu.fpc = 0; 2771 + if (MACHINE_HAS_GS) { 2772 + preempt_disable(); 2773 + __ctl_set_bit(2, 4); 2774 + if (current->thread.gs_cb) { 2775 + vcpu->arch.host_gscb = current->thread.gs_cb; 2776 + save_gs_cb(vcpu->arch.host_gscb); 2777 + } 2778 + if (vcpu->arch.gs_enabled) { 2779 + current->thread.gs_cb = (struct gs_cb *) 2780 + &vcpu->run->s.regs.gscb; 2781 + restore_gs_cb(current->thread.gs_cb); 2782 + } 2783 + preempt_enable(); 2784 + } 2828 2785 2829 2786 kvm_run->kvm_dirty_regs = 0; 2830 2787 } ··· 2865 2794 /* Restore will be done lazily at return */ 2866 2795 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; 2867 2796 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; 2797 + if (MACHINE_HAS_GS) { 2798 + __ctl_set_bit(2, 4); 2799 + if (vcpu->arch.gs_enabled) 2800 + save_gs_cb(current->thread.gs_cb); 2801 + preempt_disable(); 2802 + current->thread.gs_cb = vcpu->arch.host_gscb; 2803 + restore_gs_cb(vcpu->arch.host_gscb); 2804 + preempt_enable(); 2805 + if (!vcpu->arch.host_gscb) 2806 + __ctl_clear_bit(2, 4); 2807 + vcpu->arch.host_gscb = NULL; 2808 + } 2868 2809 2869 2810 } 2870 2811
+2 -1
arch/s390/kvm/kvm-s390.h
··· 25 25 typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu); 26 26 27 27 /* Transactional Memory Execution related macros */ 28 - #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10)) 28 + #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE)) 29 29 #define TDB_FORMAT1 1 30 30 #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) 31 31 ··· 246 246 int is_valid_psw(psw_t *psw); 247 247 int kvm_s390_handle_aa(struct kvm_vcpu *vcpu); 248 248 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); 249 + int kvm_s390_handle_e3(struct kvm_vcpu *vcpu); 249 250 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu); 250 251 int kvm_s390_handle_01(struct kvm_vcpu *vcpu); 251 252 int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
+30 -1
arch/s390/kvm/priv.c
··· 37 37 static int handle_ri(struct kvm_vcpu *vcpu) 38 38 { 39 39 if (test_kvm_facility(vcpu->kvm, 64)) { 40 - vcpu->arch.sie_block->ecb3 |= 0x01; 40 + VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)"); 41 + vcpu->arch.sie_block->ecb3 |= ECB3_RI; 41 42 kvm_s390_retry_instr(vcpu); 42 43 return 0; 43 44 } else ··· 53 52 return -EOPNOTSUPP; 54 53 } 55 54 55 + static int handle_gs(struct kvm_vcpu *vcpu) 56 + { 57 + if (test_kvm_facility(vcpu->kvm, 133)) { 58 + VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)"); 59 + preempt_disable(); 60 + __ctl_set_bit(2, 4); 61 + current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb; 62 + restore_gs_cb(current->thread.gs_cb); 63 + preempt_enable(); 64 + vcpu->arch.sie_block->ecb |= ECB_GS; 65 + vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; 66 + vcpu->arch.gs_enabled = 1; 67 + kvm_s390_retry_instr(vcpu); 68 + return 0; 69 + } else 70 + return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); 71 + } 72 + 73 + int kvm_s390_handle_e3(struct kvm_vcpu *vcpu) 74 + { 75 + int code = vcpu->arch.sie_block->ipb & 0xff; 76 + 77 + if (code == 0x49 || code == 0x4d) 78 + return handle_gs(vcpu); 79 + else 80 + return -EOPNOTSUPP; 81 + } 56 82 /* Handle SCK (SET CLOCK) interception */ 57 83 static int handle_set_clock(struct kvm_vcpu *vcpu) 58 84 { ··· 787 759 [0x3b] = handle_io_inst, 788 760 [0x3c] = handle_io_inst, 789 761 [0x50] = handle_ipte_interlock, 762 + [0x56] = handle_sthyi, 790 763 [0x5f] = handle_io_inst, 791 764 [0x74] = handle_io_inst, 792 765 [0x76] = handle_io_inst,
+3
arch/s390/kvm/sthyi.c
··· 404 404 u64 code, addr, cc = 0; 405 405 struct sthyi_sctns *sctns = NULL; 406 406 407 + if (!test_kvm_facility(vcpu->kvm, 74)) 408 + return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); 409 + 407 410 /* 408 411 * STHYI requires extensive locking in the higher hypervisors 409 412 * and is very computational/memory expensive. Therefore we
+52
arch/s390/kvm/trace-s390.h
··· 280 280 __entry->state ? "enabling" : "disabling", __entry->id) 281 281 ); 282 282 283 + /* 284 + * Trace point for modifying ais mode for a given isc. 285 + */ 286 + TRACE_EVENT(kvm_s390_modify_ais_mode, 287 + TP_PROTO(__u8 isc, __u16 from, __u16 to), 288 + TP_ARGS(isc, from, to), 289 + 290 + TP_STRUCT__entry( 291 + __field(__u8, isc) 292 + __field(__u16, from) 293 + __field(__u16, to) 294 + ), 295 + 296 + TP_fast_assign( 297 + __entry->isc = isc; 298 + __entry->from = from; 299 + __entry->to = to; 300 + ), 301 + 302 + TP_printk("for isc %x, modifying interruption mode from %s to %s", 303 + __entry->isc, 304 + (__entry->from == KVM_S390_AIS_MODE_ALL) ? 305 + "ALL-Interruptions Mode" : 306 + (__entry->from == KVM_S390_AIS_MODE_SINGLE) ? 307 + "Single-Interruption Mode" : "No-Interruptions Mode", 308 + (__entry->to == KVM_S390_AIS_MODE_ALL) ? 309 + "ALL-Interruptions Mode" : 310 + (__entry->to == KVM_S390_AIS_MODE_SINGLE) ? 311 + "Single-Interruption Mode" : "No-Interruptions Mode") 312 + ); 313 + 314 + /* 315 + * Trace point for suppressed adapter I/O interrupt. 316 + */ 317 + TRACE_EVENT(kvm_s390_airq_suppressed, 318 + TP_PROTO(__u32 id, __u8 isc), 319 + TP_ARGS(id, isc), 320 + 321 + TP_STRUCT__entry( 322 + __field(__u32, id) 323 + __field(__u8, isc) 324 + ), 325 + 326 + TP_fast_assign( 327 + __entry->id = id; 328 + __entry->isc = isc; 329 + ), 330 + 331 + TP_printk("adapter I/O interrupt suppressed (id:%x isc:%x)", 332 + __entry->id, __entry->isc) 333 + ); 334 + 283 335 284 336 #endif /* _TRACE_KVMS390_H */ 285 337
+55 -17
arch/s390/kvm/vsie.c
··· 249 249 { 250 250 struct kvm_s390_sie_block *scb_o = vsie_page->scb_o; 251 251 struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s; 252 - bool had_tx = scb_s->ecb & 0x10U; 252 + bool had_tx = scb_s->ecb & ECB_TE; 253 253 unsigned long new_mso = 0; 254 254 int rc; 255 255 ··· 307 307 scb_s->ihcpu = scb_o->ihcpu; 308 308 309 309 /* MVPG and Protection Exception Interpretation are always available */ 310 - scb_s->eca |= scb_o->eca & 0x01002000U; 310 + scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI); 311 311 /* Host-protection-interruption introduced with ESOP */ 312 312 if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP)) 313 - scb_s->ecb |= scb_o->ecb & 0x02U; 313 + scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT; 314 314 /* transactional execution */ 315 315 if (test_kvm_facility(vcpu->kvm, 73)) { 316 316 /* remap the prefix is tx is toggled on */ 317 - if ((scb_o->ecb & 0x10U) && !had_tx) 317 + if ((scb_o->ecb & ECB_TE) && !had_tx) 318 318 prefix_unmapped(vsie_page); 319 - scb_s->ecb |= scb_o->ecb & 0x10U; 319 + scb_s->ecb |= scb_o->ecb & ECB_TE; 320 320 } 321 321 /* SIMD */ 322 322 if (test_kvm_facility(vcpu->kvm, 129)) { 323 - scb_s->eca |= scb_o->eca & 0x00020000U; 324 - scb_s->ecd |= scb_o->ecd & 0x20000000U; 323 + scb_s->eca |= scb_o->eca & ECA_VX; 324 + scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT; 325 325 } 326 326 /* Run-time-Instrumentation */ 327 327 if (test_kvm_facility(vcpu->kvm, 64)) 328 - scb_s->ecb3 |= scb_o->ecb3 & 0x01U; 328 + scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI; 329 329 /* Instruction Execution Prevention */ 330 330 if (test_kvm_facility(vcpu->kvm, 130)) 331 - scb_s->ecb2 |= scb_o->ecb2 & 0x20U; 331 + scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP; 332 + /* Guarded Storage */ 333 + if (test_kvm_facility(vcpu->kvm, 133)) { 334 + scb_s->ecb |= scb_o->ecb & ECB_GS; 335 + scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT; 336 + } 332 337 if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF)) 333 - scb_s->eca |= scb_o->eca & 0x00000001U; 338 + 
scb_s->eca |= scb_o->eca & ECA_SII; 334 339 if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB)) 335 - scb_s->eca |= scb_o->eca & 0x40000000U; 340 + scb_s->eca |= scb_o->eca & ECA_IB; 336 341 if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI)) 337 - scb_s->eca |= scb_o->eca & 0x80000000U; 342 + scb_s->eca |= scb_o->eca & ECA_CEI; 338 343 339 344 prepare_ibc(vcpu, vsie_page); 340 345 rc = shadow_crycb(vcpu, vsie_page); ··· 411 406 prefix += scb_s->mso; 412 407 413 408 rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix); 414 - if (!rc && (scb_s->ecb & 0x10U)) 409 + if (!rc && (scb_s->ecb & ECB_TE)) 415 410 rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, 416 411 prefix + PAGE_SIZE); 417 412 /* ··· 501 496 unpin_guest_page(vcpu->kvm, gpa, hpa); 502 497 scb_s->riccbd = 0; 503 498 } 499 + 500 + hpa = scb_s->sdnxo; 501 + if (hpa) { 502 + gpa = scb_o->sdnxo; 503 + unpin_guest_page(vcpu->kvm, gpa, hpa); 504 + scb_s->sdnxo = 0; 505 + } 504 506 } 505 507 506 508 /* ··· 555 543 } 556 544 557 545 gpa = scb_o->itdba & ~0xffUL; 558 - if (gpa && (scb_s->ecb & 0x10U)) { 546 + if (gpa && (scb_s->ecb & ECB_TE)) { 559 547 if (!(gpa & ~0x1fffU)) { 560 548 rc = set_validity_icpt(scb_s, 0x0080U); 561 549 goto unpin; ··· 570 558 } 571 559 572 560 gpa = scb_o->gvrd & ~0x1ffUL; 573 - if (gpa && (scb_s->eca & 0x00020000U) && 574 - !(scb_s->ecd & 0x20000000U)) { 561 + if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) { 575 562 if (!(gpa & ~0x1fffUL)) { 576 563 rc = set_validity_icpt(scb_s, 0x1310U); 577 564 goto unpin; ··· 588 577 } 589 578 590 579 gpa = scb_o->riccbd & ~0x3fUL; 591 - if (gpa && (scb_s->ecb3 & 0x01U)) { 580 + if (gpa && (scb_s->ecb3 & ECB3_RI)) { 592 581 if (!(gpa & ~0x1fffUL)) { 593 582 rc = set_validity_icpt(scb_s, 0x0043U); 594 583 goto unpin; ··· 601 590 if (rc) 602 591 goto unpin; 603 592 scb_s->riccbd = hpa; 593 + } 594 + if ((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) { 595 + unsigned long sdnxc; 596 + 597 + gpa = 
scb_o->sdnxo & ~0xfUL; 598 + sdnxc = scb_o->sdnxo & 0xfUL; 599 + if (!gpa || !(gpa & ~0x1fffUL)) { 600 + rc = set_validity_icpt(scb_s, 0x10b0U); 601 + goto unpin; 602 + } 603 + if (sdnxc < 6 || sdnxc > 12) { 604 + rc = set_validity_icpt(scb_s, 0x10b1U); 605 + goto unpin; 606 + } 607 + if (gpa & ((1 << sdnxc) - 1)) { 608 + rc = set_validity_icpt(scb_s, 0x10b2U); 609 + goto unpin; 610 + } 611 + /* Due to alignment rules (checked above) this cannot 612 + * cross page boundaries 613 + */ 614 + rc = pin_guest_page(vcpu->kvm, gpa, &hpa); 615 + if (rc == -EINVAL) 616 + rc = set_validity_icpt(scb_s, 0x10b0U); 617 + if (rc) 618 + goto unpin; 619 + scb_s->sdnxo = hpa; 604 620 } 605 621 return 0; 606 622 unpin:
+1 -1
drivers/gpio/gpio-altera-a10sr.c
··· 96 96 gpio->regmap = a10sr->regmap; 97 97 98 98 gpio->gp = altr_a10sr_gc; 99 - 99 + gpio->gp.parent = pdev->dev.parent; 100 100 gpio->gp.of_node = pdev->dev.of_node; 101 101 102 102 ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
+11 -15
drivers/gpio/gpio-altera.c
··· 90 90 91 91 altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d)); 92 92 93 - if (type == IRQ_TYPE_NONE) 93 + if (type == IRQ_TYPE_NONE) { 94 + irq_set_handler_locked(d, handle_bad_irq); 94 95 return 0; 95 - if (type == IRQ_TYPE_LEVEL_HIGH && 96 - altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH) 96 + } 97 + if (type == altera_gc->interrupt_trigger) { 98 + if (type == IRQ_TYPE_LEVEL_HIGH) 99 + irq_set_handler_locked(d, handle_level_irq); 100 + else 101 + irq_set_handler_locked(d, handle_simple_irq); 97 102 return 0; 98 - if (type == IRQ_TYPE_EDGE_RISING && 99 - altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING) 100 - return 0; 101 - if (type == IRQ_TYPE_EDGE_FALLING && 102 - altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING) 103 - return 0; 104 - if (type == IRQ_TYPE_EDGE_BOTH && 105 - altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH) 106 - return 0; 107 - 103 + } 104 + irq_set_handler_locked(d, handle_bad_irq); 108 105 return -EINVAL; 109 106 } 110 107 ··· 227 230 chained_irq_exit(chip, desc); 228 231 } 229 232 230 - 231 233 static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc) 232 234 { 233 235 struct altera_gpio_chip *altera_gc; ··· 306 310 altera_gc->interrupt_trigger = reg; 307 311 308 312 ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0, 309 - handle_simple_irq, IRQ_TYPE_NONE); 313 + handle_bad_irq, IRQ_TYPE_NONE); 310 314 311 315 if (ret) { 312 316 dev_err(&pdev->dev, "could not add irqchip\n");
+60 -5
drivers/gpio/gpio-mcp23s08.c
··· 270 270 static irqreturn_t mcp23s08_irq(int irq, void *data) 271 271 { 272 272 struct mcp23s08 *mcp = data; 273 - int intcap, intf, i; 273 + int intcap, intf, i, gpio, gpio_orig, intcap_mask; 274 274 unsigned int child_irq; 275 + bool intf_set, intcap_changed, gpio_bit_changed, 276 + defval_changed, gpio_set; 275 277 276 278 mutex_lock(&mcp->lock); 277 279 if (mcp_read(mcp, MCP_INTF, &intf) < 0) { ··· 289 287 } 290 288 291 289 mcp->cache[MCP_INTCAP] = intcap; 290 + 291 + /* This clears the interrupt(configurable on S18) */ 292 + if (mcp_read(mcp, MCP_GPIO, &gpio) < 0) { 293 + mutex_unlock(&mcp->lock); 294 + return IRQ_HANDLED; 295 + } 296 + gpio_orig = mcp->cache[MCP_GPIO]; 297 + mcp->cache[MCP_GPIO] = gpio; 292 298 mutex_unlock(&mcp->lock); 293 299 300 + if (mcp->cache[MCP_INTF] == 0) { 301 + /* There is no interrupt pending */ 302 + return IRQ_HANDLED; 303 + } 304 + 305 + dev_dbg(mcp->chip.parent, 306 + "intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n", 307 + intcap, intf, gpio_orig, gpio); 294 308 295 309 for (i = 0; i < mcp->chip.ngpio; i++) { 296 - if ((BIT(i) & mcp->cache[MCP_INTF]) && 297 - ((BIT(i) & intcap & mcp->irq_rise) || 298 - (mcp->irq_fall & ~intcap & BIT(i)) || 299 - (BIT(i) & mcp->cache[MCP_INTCON]))) { 310 + /* We must check all of the inputs on the chip, 311 + * otherwise we may not notice a change on >=2 pins. 312 + * 313 + * On at least the mcp23s17, INTCAP is only updated 314 + * one byte at a time(INTCAPA and INTCAPB are 315 + * not written to at the same time - only on a per-bank 316 + * basis). 317 + * 318 + * INTF only contains the single bit that caused the 319 + * interrupt per-bank. On the mcp23s17, there is 320 + * INTFA and INTFB. If two pins are changed on the A 321 + * side at the same time, INTF will only have one bit 322 + * set. If one pin on the A side and one pin on the B 323 + * side are changed at the same time, INTF will have 324 + * two bits set. 
Thus, INTF can't be the only check 325 + * to see if the input has changed. 326 + */ 327 + 328 + intf_set = BIT(i) & mcp->cache[MCP_INTF]; 329 + if (i < 8 && intf_set) 330 + intcap_mask = 0x00FF; 331 + else if (i >= 8 && intf_set) 332 + intcap_mask = 0xFF00; 333 + else 334 + intcap_mask = 0x00; 335 + 336 + intcap_changed = (intcap_mask & 337 + (BIT(i) & mcp->cache[MCP_INTCAP])) != 338 + (intcap_mask & (BIT(i) & gpio_orig)); 339 + gpio_set = BIT(i) & mcp->cache[MCP_GPIO]; 340 + gpio_bit_changed = (BIT(i) & gpio_orig) != 341 + (BIT(i) & mcp->cache[MCP_GPIO]); 342 + defval_changed = (BIT(i) & mcp->cache[MCP_INTCON]) && 343 + ((BIT(i) & mcp->cache[MCP_GPIO]) != 344 + (BIT(i) & mcp->cache[MCP_DEFVAL])); 345 + 346 + if (((gpio_bit_changed || intcap_changed) && 347 + (BIT(i) & mcp->irq_rise) && gpio_set) || 348 + ((gpio_bit_changed || intcap_changed) && 349 + (BIT(i) & mcp->irq_fall) && !gpio_set) || 350 + defval_changed) { 300 351 child_irq = irq_find_mapping(mcp->chip.irqdomain, i); 301 352 handle_nested_irq(child_irq); 302 353 }
+3 -4
drivers/gpio/gpio-mockup.c
··· 197 197 struct seq_file *sfile; 198 198 struct gpio_desc *desc; 199 199 struct gpio_chip *gc; 200 - int status, val; 200 + int val; 201 201 char buf; 202 202 203 203 sfile = file->private_data; ··· 206 206 chip = priv->chip; 207 207 gc = &chip->gc; 208 208 209 - status = copy_from_user(&buf, usr_buf, 1); 210 - if (status) 211 - return status; 209 + if (copy_from_user(&buf, usr_buf, 1)) 210 + return -EFAULT; 212 211 213 212 if (buf == '0') 214 213 val = 0;
+3 -10
drivers/gpio/gpio-xgene.c
··· 42 42 struct gpio_chip chip; 43 43 void __iomem *base; 44 44 spinlock_t lock; 45 - #ifdef CONFIG_PM 46 45 u32 set_dr_val[XGENE_MAX_GPIO_BANKS]; 47 - #endif 48 46 }; 49 47 50 48 static int xgene_gpio_get(struct gpio_chip *gc, unsigned int offset) ··· 136 138 return 0; 137 139 } 138 140 139 - #ifdef CONFIG_PM 140 - static int xgene_gpio_suspend(struct device *dev) 141 + static __maybe_unused int xgene_gpio_suspend(struct device *dev) 141 142 { 142 143 struct xgene_gpio *gpio = dev_get_drvdata(dev); 143 144 unsigned long bank_offset; ··· 149 152 return 0; 150 153 } 151 154 152 - static int xgene_gpio_resume(struct device *dev) 155 + static __maybe_unused int xgene_gpio_resume(struct device *dev) 153 156 { 154 157 struct xgene_gpio *gpio = dev_get_drvdata(dev); 155 158 unsigned long bank_offset; ··· 163 166 } 164 167 165 168 static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume); 166 - #define XGENE_GPIO_PM_OPS (&xgene_gpio_pm) 167 - #else 168 - #define XGENE_GPIO_PM_OPS NULL 169 - #endif 170 169 171 170 static int xgene_gpio_probe(struct platform_device *pdev) 172 171 { ··· 234 241 .name = "xgene-gpio", 235 242 .of_match_table = xgene_gpio_of_match, 236 243 .acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match), 237 - .pm = XGENE_GPIO_PM_OPS, 244 + .pm = &xgene_gpio_pm, 238 245 }, 239 246 .probe = xgene_gpio_probe, 240 247 };
+3 -2
drivers/hid/Kconfig
··· 175 175 Support for Cherry Cymotion keyboard. 176 176 177 177 config HID_CHICONY 178 - tristate "Chicony Tactical pad" 178 + tristate "Chicony devices" 179 179 depends on HID 180 180 default !EXPERT 181 181 ---help--- 182 - Support for Chicony Tactical pad. 182 + Support for Chicony Tactical pad and special keys on Chicony keyboards. 183 183 184 184 config HID_CORSAIR 185 185 tristate "Corsair devices" ··· 190 190 191 191 Supported devices: 192 192 - Vengeance K90 193 + - Scimitar PRO RGB 193 194 194 195 config HID_PRODIKEYS 195 196 tristate "Prodikeys PC-MIDI Keyboard support"
+1
drivers/hid/hid-chicony.c
··· 86 86 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, 87 87 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, 88 88 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, 89 + { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, 89 90 { } 90 91 }; 91 92 MODULE_DEVICE_TABLE(hid, ch_devices);
+2
drivers/hid/hid-core.c
··· 1870 1870 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, 1871 1871 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, 1872 1872 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) }, 1873 + { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, 1873 1874 { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, 1874 1875 { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, 1875 1876 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, ··· 1911 1910 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, 1912 1911 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, 1913 1912 { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, 1913 + { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, 1914 1914 { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, 1915 1915 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, 1916 1916 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
+47
drivers/hid/hid-corsair.c
··· 3 3 * 4 4 * Supported devices: 5 5 * - Vengeance K90 Keyboard 6 + * - Scimitar PRO RGB Gaming Mouse 6 7 * 7 8 * Copyright (c) 2015 Clement Vuchener 9 + * Copyright (c) 2017 Oscar Campos 8 10 */ 9 11 10 12 /* ··· 672 670 return 0; 673 671 } 674 672 673 + /* 674 + * The report descriptor of Corsair Scimitar RGB Pro gaming mouse is 675 + * non parseable as they define two consecutive Logical Minimum for 676 + * the Usage Page (Consumer) in rdescs bytes 75 and 77 being 77 0x16 677 + * that should be obviousy 0x26 for Logical Magimum of 16 bits. This 678 + * prevents poper parsing of the report descriptor due Logical 679 + * Minimum being larger than Logical Maximum. 680 + * 681 + * This driver fixes the report descriptor for: 682 + * - USB ID b1c:1b3e, sold as Scimitar RGB Pro Gaming mouse 683 + */ 684 + 685 + static __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, 686 + unsigned int *rsize) 687 + { 688 + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); 689 + 690 + if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { 691 + /* 692 + * Corsair Scimitar RGB Pro report descriptor is broken and 693 + * defines two different Logical Minimum for the Consumer 694 + * Application. 
The byte 77 should be a 0x26 defining a 16 695 + * bits integer for the Logical Maximum but it is a 0x16 696 + * instead (Logical Minimum) 697 + */ 698 + switch (hdev->product) { 699 + case USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB: 700 + if (*rsize >= 172 && rdesc[75] == 0x15 && rdesc[77] == 0x16 701 + && rdesc[78] == 0xff && rdesc[79] == 0x0f) { 702 + hid_info(hdev, "Fixing up report descriptor\n"); 703 + rdesc[77] = 0x26; 704 + } 705 + break; 706 + } 707 + 708 + } 709 + return rdesc; 710 + } 711 + 675 712 static const struct hid_device_id corsair_devices[] = { 676 713 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90), 677 714 .driver_data = CORSAIR_USE_K90_MACRO | 678 715 CORSAIR_USE_K90_BACKLIGHT }, 716 + { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, 717 + USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, 679 718 {} 680 719 }; 681 720 ··· 729 686 .event = corsair_event, 730 687 .remove = corsair_remove, 731 688 .input_mapping = corsair_input_mapping, 689 + .report_fixup = corsair_mouse_report_fixup, 732 690 }; 733 691 734 692 module_hid_driver(corsair_driver); 735 693 736 694 MODULE_LICENSE("GPL"); 695 + /* Original K90 driver author */ 737 696 MODULE_AUTHOR("Clement Vuchener"); 697 + /* Scimitar PRO RGB driver author */ 698 + MODULE_AUTHOR("Oscar Campos"); 738 699 MODULE_DESCRIPTION("HID driver for Corsair devices");
+4
drivers/hid/hid-ids.h
··· 278 278 #define USB_DEVICE_ID_CORSAIR_K70RGB 0x1b13 279 279 #define USB_DEVICE_ID_CORSAIR_STRAFE 0x1b15 280 280 #define USB_DEVICE_ID_CORSAIR_K65RGB 0x1b17 281 + #define USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE 0x1b38 282 + #define USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE 0x1b39 283 + #define USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB 0x1b3e 281 284 282 285 #define USB_VENDOR_ID_CREATIVELABS 0x041e 283 286 #define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c ··· 560 557 561 558 #define USB_VENDOR_ID_JESS 0x0c45 562 559 #define USB_DEVICE_ID_JESS_YUREX 0x1010 560 + #define USB_DEVICE_ID_JESS_ZEN_AIO_KBD 0x5112 563 561 564 562 #define USB_VENDOR_ID_JESS2 0x0f30 565 563 #define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111
+2
drivers/hid/hid-sony.c
··· 2632 2632 sony_leds_remove(sc); 2633 2633 if (sc->quirks & SONY_BATTERY_SUPPORT) 2634 2634 sony_battery_remove(sc); 2635 + if (sc->touchpad) 2636 + sony_unregister_touchpad(sc); 2635 2637 sony_cancel_work_sync(sc); 2636 2638 kfree(sc->output_report_dmabuf); 2637 2639 sony_remove_dev_list(sc);
+3
drivers/hid/usbhid/hid-quirks.c
··· 80 80 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS }, 81 81 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS }, 82 82 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 83 + { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 84 + { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 85 + { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 83 86 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, 84 87 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 85 88 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
+3 -1
drivers/hid/wacom_sys.c
··· 2579 2579 2580 2580 /* make sure we don't trigger the LEDs */ 2581 2581 wacom_led_groups_release(wacom); 2582 - wacom_release_resources(wacom); 2582 + 2583 + if (wacom->wacom_wac.features.type != REMOTE) 2584 + wacom_release_resources(wacom); 2583 2585 2584 2586 hid_set_drvdata(hdev, NULL); 2585 2587 }
+6 -4
drivers/hid/wacom_wac.c
··· 1959 1959 input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH); 1960 1960 input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL); 1961 1961 input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH); 1962 - input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE); 1963 - input_set_capability(input, EV_KEY, BTN_TOOL_LENS); 1962 + if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) { 1963 + input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE); 1964 + input_set_capability(input, EV_KEY, BTN_TOOL_LENS); 1965 + } 1964 1966 break; 1965 1967 case WACOM_HID_WD_FINGERWHEEL: 1966 1968 wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0); ··· 4199 4197 WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; 4200 4198 static const struct wacom_features wacom_features_0x360 = 4201 4199 { "Wacom Intuos Pro M", 44800, 29600, 8191, 63, 4202 - INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 }; 4200 + INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 }; 4203 4201 static const struct wacom_features wacom_features_0x361 = 4204 4202 { "Wacom Intuos Pro L", 62200, 43200, 8191, 63, 4205 - INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 }; 4203 + INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 }; 4206 4204 4207 4205 static const struct wacom_features wacom_features_HID_ANY_ID = 4208 4206 { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
+3 -3
drivers/remoteproc/Kconfig
··· 76 76 depends on OF && ARCH_QCOM 77 77 depends on REMOTEPROC 78 78 depends on QCOM_SMEM 79 - depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) 79 + depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) 80 80 select MFD_SYSCON 81 81 select QCOM_MDT_LOADER 82 82 select QCOM_RPROC_COMMON ··· 93 93 depends on OF && ARCH_QCOM 94 94 depends on QCOM_SMEM 95 95 depends on REMOTEPROC 96 - depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) 96 + depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) 97 97 select MFD_SYSCON 98 98 select QCOM_RPROC_COMMON 99 99 select QCOM_SCM ··· 104 104 config QCOM_WCNSS_PIL 105 105 tristate "Qualcomm WCNSS Peripheral Image Loader" 106 106 depends on OF && ARCH_QCOM 107 - depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) 107 + depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) 108 108 depends on QCOM_SMEM 109 109 depends on REMOTEPROC 110 110 select QCOM_MDT_LOADER
-14
drivers/scsi/Kconfig
··· 1253 1253 This makes debugging information from the lpfc driver 1254 1254 available via the debugfs filesystem. 1255 1255 1256 - config LPFC_NVME_INITIATOR 1257 - bool "Emulex LightPulse Fibre Channel NVME Initiator Support" 1258 - depends on SCSI_LPFC && NVME_FC 1259 - ---help--- 1260 - This enables NVME Initiator support in the Emulex lpfc driver. 1261 - 1262 - config LPFC_NVME_TARGET 1263 - bool "Emulex LightPulse Fibre Channel NVME Initiator Support" 1264 - depends on SCSI_LPFC && NVME_TARGET_FC 1265 - ---help--- 1266 - This enables NVME Target support in the Emulex lpfc driver. 1267 - Target enablement must still be enabled on a per adapter 1268 - basis by module parameters. 1269 - 1270 1256 config SCSI_SIM710 1271 1257 tristate "Simple 53c710 SCSI support (Compaq, NCR machines)" 1272 1258 depends on (EISA || MCA) && SCSI
+32 -21
drivers/scsi/hpsa.c
··· 2956 2956 /* fill_cmd can't fail here, no data buffer to map. */ 2957 2957 (void) fill_cmd(c, reset_type, h, NULL, 0, 0, 2958 2958 scsi3addr, TYPE_MSG); 2959 - rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); 2959 + rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); 2960 2960 if (rc) { 2961 2961 dev_warn(&h->pdev->dev, "Failed to send reset command\n"); 2962 2962 goto out; ··· 3714 3714 * # (integer code indicating one of several NOT READY states 3715 3715 * describing why a volume is to be kept offline) 3716 3716 */ 3717 - static int hpsa_volume_offline(struct ctlr_info *h, 3717 + static unsigned char hpsa_volume_offline(struct ctlr_info *h, 3718 3718 unsigned char scsi3addr[]) 3719 3719 { 3720 3720 struct CommandList *c; ··· 3735 3735 DEFAULT_TIMEOUT); 3736 3736 if (rc) { 3737 3737 cmd_free(h, c); 3738 - return 0; 3738 + return HPSA_VPD_LV_STATUS_UNSUPPORTED; 3739 3739 } 3740 3740 sense = c->err_info->SenseInfo; 3741 3741 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) ··· 3746 3746 cmd_status = c->err_info->CommandStatus; 3747 3747 scsi_status = c->err_info->ScsiStatus; 3748 3748 cmd_free(h, c); 3749 - /* Is the volume 'not ready'? */ 3750 - if (cmd_status != CMD_TARGET_STATUS || 3751 - scsi_status != SAM_STAT_CHECK_CONDITION || 3752 - sense_key != NOT_READY || 3753 - asc != ASC_LUN_NOT_READY) { 3754 - return 0; 3755 - } 3756 3749 3757 3750 /* Determine the reason for not ready state */ 3758 3751 ldstat = hpsa_get_volume_status(h, scsi3addr); 3759 3752 3760 3753 /* Keep volume offline in certain cases: */ 3761 3754 switch (ldstat) { 3755 + case HPSA_LV_FAILED: 3762 3756 case HPSA_LV_UNDERGOING_ERASE: 3763 3757 case HPSA_LV_NOT_AVAILABLE: 3764 3758 case HPSA_LV_UNDERGOING_RPI: ··· 3774 3780 default: 3775 3781 break; 3776 3782 } 3777 - return 0; 3783 + return HPSA_LV_OK; 3778 3784 } 3779 3785 3780 3786 /* ··· 3847 3853 /* Do an inquiry to the device to see what it is. 
*/ 3848 3854 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, 3849 3855 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { 3850 - /* Inquiry failed (msg printed already) */ 3851 3856 dev_err(&h->pdev->dev, 3852 - "hpsa_update_device_info: inquiry failed\n"); 3853 - rc = -EIO; 3857 + "%s: inquiry failed, device will be skipped.\n", 3858 + __func__); 3859 + rc = HPSA_INQUIRY_FAILED; 3854 3860 goto bail_out; 3855 3861 } 3856 3862 ··· 3879 3885 if ((this_device->devtype == TYPE_DISK || 3880 3886 this_device->devtype == TYPE_ZBC) && 3881 3887 is_logical_dev_addr_mode(scsi3addr)) { 3882 - int volume_offline; 3888 + unsigned char volume_offline; 3883 3889 3884 3890 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); 3885 3891 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) 3886 3892 hpsa_get_ioaccel_status(h, scsi3addr, this_device); 3887 3893 volume_offline = hpsa_volume_offline(h, scsi3addr); 3888 - if (volume_offline < 0 || volume_offline > 0xff) 3889 - volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED; 3890 - this_device->volume_offline = volume_offline & 0xff; 3894 + if (volume_offline == HPSA_LV_FAILED) { 3895 + rc = HPSA_LV_FAILED; 3896 + dev_err(&h->pdev->dev, 3897 + "%s: LV failed, device will be skipped.\n", 3898 + __func__); 3899 + goto bail_out; 3900 + } 3891 3901 } else { 3892 3902 this_device->raid_level = RAID_UNKNOWN; 3893 3903 this_device->offload_config = 0; ··· 4377 4379 goto out; 4378 4380 } 4379 4381 if (rc) { 4380 - dev_warn(&h->pdev->dev, 4381 - "Inquiry failed, skipping device.\n"); 4382 + h->drv_req_rescan = 1; 4382 4383 continue; 4383 4384 } 4384 4385 ··· 5555 5558 5556 5559 spin_lock_irqsave(&h->scan_lock, flags); 5557 5560 h->scan_finished = 1; 5558 - wake_up_all(&h->scan_wait_queue); 5561 + wake_up(&h->scan_wait_queue); 5559 5562 spin_unlock_irqrestore(&h->scan_lock, flags); 5560 5563 } 5561 5564 ··· 5573 5576 if (unlikely(lockup_detected(h))) 5574 5577 return hpsa_scan_complete(h); 5575 5578 5579 + /* 5580 + * If a scan is already waiting 
to run, no need to add another 5581 + */ 5582 + spin_lock_irqsave(&h->scan_lock, flags); 5583 + if (h->scan_waiting) { 5584 + spin_unlock_irqrestore(&h->scan_lock, flags); 5585 + return; 5586 + } 5587 + 5588 + spin_unlock_irqrestore(&h->scan_lock, flags); 5589 + 5576 5590 /* wait until any scan already in progress is finished. */ 5577 5591 while (1) { 5578 5592 spin_lock_irqsave(&h->scan_lock, flags); 5579 5593 if (h->scan_finished) 5580 5594 break; 5595 + h->scan_waiting = 1; 5581 5596 spin_unlock_irqrestore(&h->scan_lock, flags); 5582 5597 wait_event(h->scan_wait_queue, h->scan_finished); 5583 5598 /* Note: We don't need to worry about a race between this ··· 5599 5590 */ 5600 5591 } 5601 5592 h->scan_finished = 0; /* mark scan as in progress */ 5593 + h->scan_waiting = 0; 5602 5594 spin_unlock_irqrestore(&h->scan_lock, flags); 5603 5595 5604 5596 if (unlikely(lockup_detected(h))) ··· 8802 8792 init_waitqueue_head(&h->event_sync_wait_queue); 8803 8793 mutex_init(&h->reset_mutex); 8804 8794 h->scan_finished = 1; /* no scan currently in progress */ 8795 + h->scan_waiting = 0; 8805 8796 8806 8797 pci_set_drvdata(pdev, h); 8807 8798 h->ndevices = 0;
+1
drivers/scsi/hpsa.h
··· 201 201 dma_addr_t errinfo_pool_dhandle; 202 202 unsigned long *cmd_pool_bits; 203 203 int scan_finished; 204 + u8 scan_waiting : 1; 204 205 spinlock_t scan_lock; 205 206 wait_queue_head_t scan_wait_queue; 206 207
+2
drivers/scsi/hpsa_cmd.h
··· 156 156 #define CFGTBL_BusType_Fibre2G 0x00000200l 157 157 158 158 /* VPD Inquiry types */ 159 + #define HPSA_INQUIRY_FAILED 0x02 159 160 #define HPSA_VPD_SUPPORTED_PAGES 0x00 160 161 #define HPSA_VPD_LV_DEVICE_ID 0x83 161 162 #define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1 ··· 167 166 /* Logical volume states */ 168 167 #define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff 169 168 #define HPSA_LV_OK 0x0 169 + #define HPSA_LV_FAILED 0x01 170 170 #define HPSA_LV_NOT_AVAILABLE 0x0b 171 171 #define HPSA_LV_UNDERGOING_ERASE 0x0F 172 172 #define HPSA_LV_UNDERGOING_RPI 0x12
+2 -2
drivers/scsi/lpfc/lpfc_attr.c
··· 3315 3315 * lpfc_enable_fc4_type: Defines what FC4 types are supported. 3316 3316 * Supported Values: 1 - register just FCP 3317 3317 * 3 - register both FCP and NVME 3318 - * Supported values are [1,3]. Default value is 3 3318 + * Supported values are [1,3]. Default value is 1 3319 3319 */ 3320 - LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH, 3320 + LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP, 3321 3321 LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH, 3322 3322 "Define fc4 type to register with fabric."); 3323 3323
+7
drivers/scsi/lpfc/lpfc_init.c
··· 5891 5891 /* Check to see if it matches any module parameter */ 5892 5892 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { 5893 5893 if (wwn == lpfc_enable_nvmet[i]) { 5894 + #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 5894 5895 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5895 5896 "6017 NVME Target %016llx\n", 5896 5897 wwn); 5897 5898 phba->nvmet_support = 1; /* a match */ 5899 + #else 5900 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5901 + "6021 Can't enable NVME Target." 5902 + " NVME_TARGET_FC infrastructure" 5903 + " is not in kernel\n"); 5904 + #endif 5898 5905 } 5899 5906 } 5900 5907 }
+4 -4
drivers/scsi/lpfc/lpfc_nvme.c
··· 2149 2149 /* localport is allocated from the stack, but the registration 2150 2150 * call allocates heap memory as well as the private area. 2151 2151 */ 2152 - #ifdef CONFIG_LPFC_NVME_INITIATOR 2152 + #if (IS_ENABLED(CONFIG_NVME_FC)) 2153 2153 ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, 2154 2154 &vport->phba->pcidev->dev, &localport); 2155 2155 #else ··· 2190 2190 void 2191 2191 lpfc_nvme_destroy_localport(struct lpfc_vport *vport) 2192 2192 { 2193 - #ifdef CONFIG_LPFC_NVME_INITIATOR 2193 + #if (IS_ENABLED(CONFIG_NVME_FC)) 2194 2194 struct nvme_fc_local_port *localport; 2195 2195 struct lpfc_nvme_lport *lport; 2196 2196 struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL; ··· 2274 2274 int 2275 2275 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2276 2276 { 2277 - #ifdef CONFIG_LPFC_NVME_INITIATOR 2277 + #if (IS_ENABLED(CONFIG_NVME_FC)) 2278 2278 int ret = 0; 2279 2279 struct nvme_fc_local_port *localport; 2280 2280 struct lpfc_nvme_lport *lport; ··· 2403 2403 void 2404 2404 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2405 2405 { 2406 - #ifdef CONFIG_LPFC_NVME_INITIATOR 2406 + #if (IS_ENABLED(CONFIG_NVME_FC)) 2407 2407 int ret; 2408 2408 struct nvme_fc_local_port *localport; 2409 2409 struct lpfc_nvme_lport *lport;
+4 -4
drivers/scsi/lpfc/lpfc_nvmet.c
··· 671 671 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | 672 672 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED; 673 673 674 - #ifdef CONFIG_LPFC_NVME_TARGET 674 + #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 675 675 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate, 676 676 &phba->pcidev->dev, 677 677 &phba->targetport); ··· 756 756 void 757 757 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) 758 758 { 759 - #ifdef CONFIG_LPFC_NVME_TARGET 759 + #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 760 760 struct lpfc_nvmet_tgtport *tgtp; 761 761 762 762 if (phba->nvmet_support == 0) ··· 788 788 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 789 789 struct hbq_dmabuf *nvmebuf) 790 790 { 791 - #ifdef CONFIG_LPFC_NVME_TARGET 791 + #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 792 792 struct lpfc_nvmet_tgtport *tgtp; 793 793 struct fc_frame_header *fc_hdr; 794 794 struct lpfc_nvmet_rcv_ctx *ctxp; ··· 891 891 struct rqb_dmabuf *nvmebuf, 892 892 uint64_t isr_timestamp) 893 893 { 894 - #ifdef CONFIG_LPFC_NVME_TARGET 894 + #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) 895 895 struct lpfc_nvmet_rcv_ctx *ctxp; 896 896 struct lpfc_nvmet_tgtport *tgtp; 897 897 struct fc_frame_header *fc_hdr;
+2 -2
drivers/scsi/megaraid/megaraid_sas.h
··· 35 35 /* 36 36 * MegaRAID SAS Driver meta data 37 37 */ 38 - #define MEGASAS_VERSION "07.701.16.00-rc1" 39 - #define MEGASAS_RELDATE "February 2, 2017" 38 + #define MEGASAS_VERSION "07.701.17.00-rc1" 39 + #define MEGASAS_RELDATE "March 2, 2017" 40 40 41 41 /* 42 42 * Device IDs
+12 -5
drivers/scsi/megaraid/megaraid_sas_base.c
··· 1963 1963 if (!mr_device_priv_data) 1964 1964 return -ENOMEM; 1965 1965 sdev->hostdata = mr_device_priv_data; 1966 + 1967 + atomic_set(&mr_device_priv_data->r1_ldio_hint, 1968 + instance->r1_ldio_hint_default); 1966 1969 return 0; 1967 1970 } 1968 1971 ··· 5037 5034 &instance->irq_context[j]); 5038 5035 /* Retry irq register for IO_APIC*/ 5039 5036 instance->msix_vectors = 0; 5040 - if (is_probe) 5037 + if (is_probe) { 5038 + pci_free_irq_vectors(instance->pdev); 5041 5039 return megasas_setup_irqs_ioapic(instance); 5042 - else 5040 + } else { 5043 5041 return -1; 5042 + } 5044 5043 } 5045 5044 } 5046 5045 return 0; ··· 5282 5277 MPI2_REPLY_POST_HOST_INDEX_OFFSET); 5283 5278 } 5284 5279 5285 - i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); 5286 - if (i < 0) 5287 - goto fail_setup_irqs; 5280 + if (!instance->msix_vectors) { 5281 + i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); 5282 + if (i < 0) 5283 + goto fail_setup_irqs; 5284 + } 5288 5285 5289 5286 dev_info(&instance->pdev->dev, 5290 5287 "firmware supports msix\t: (%d)", fw_msix_count);
+2 -2
drivers/scsi/megaraid/megaraid_sas_fusion.c
··· 2159 2159 cpu_sel = MR_RAID_CTX_CPUSEL_1; 2160 2160 2161 2161 if (is_stream_detected(rctx_g35) && 2162 - (raid->level == 5) && 2162 + ((raid->level == 5) || (raid->level == 6)) && 2163 2163 (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) && 2164 2164 (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS)) 2165 2165 cpu_sel = MR_RAID_CTX_CPUSEL_0; ··· 2338 2338 fp_possible = false; 2339 2339 atomic_dec(&instance->fw_outstanding); 2340 2340 } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) || 2341 - atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) { 2341 + (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) { 2342 2342 fp_possible = false; 2343 2343 atomic_dec(&instance->fw_outstanding); 2344 2344 if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
+1 -1
drivers/scsi/ufs/ufshcd.c
··· 7642 7642 if (kstrtoul(buf, 0, &value)) 7643 7643 return -EINVAL; 7644 7644 7645 - if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX)) 7645 + if (value >= UFS_PM_LVL_MAX) 7646 7646 return -EINVAL; 7647 7647 7648 7648 spin_lock_irqsave(hba->host->host_lock, flags);
+6 -5
drivers/tty/serial/st-asc.c
··· 575 575 pinctrl_select_state(ascport->pinctrl, 576 576 ascport->states[NO_HW_FLOWCTRL]); 577 577 578 - gpiod = devm_get_gpiod_from_child(port->dev, "rts", 579 - &np->fwnode); 580 - if (!IS_ERR(gpiod)) { 581 - gpiod_direction_output(gpiod, 0); 578 + gpiod = devm_fwnode_get_gpiod_from_child(port->dev, 579 + "rts", 580 + &np->fwnode, 581 + GPIOD_OUT_LOW, 582 + np->name); 583 + if (!IS_ERR(gpiod)) 582 584 ascport->rts = gpiod; 583 - } 584 585 } 585 586 } 586 587
+1
fs/f2fs/debug.c
··· 196 196 si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS); 197 197 si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE; 198 198 si->base_mem += NM_I(sbi)->nat_blocks / 8; 199 + si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short); 199 200 200 201 get_cache: 201 202 si->cache_mem = 0;
+1 -1
fs/f2fs/dir.c
··· 750 750 dentry_blk = page_address(page); 751 751 bit_pos = dentry - dentry_blk->dentry; 752 752 for (i = 0; i < slots; i++) 753 - clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap); 753 + __clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap); 754 754 755 755 /* Let's check and deallocate this dentry page */ 756 756 bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
+2
fs/f2fs/f2fs.h
··· 561 561 struct mutex build_lock; /* lock for build free nids */ 562 562 unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE]; 563 563 unsigned char *nat_block_bitmap; 564 + unsigned short *free_nid_count; /* free nid count of NAT block */ 565 + spinlock_t free_nid_lock; /* protect updating of nid count */ 564 566 565 567 /* for checkpoint */ 566 568 char *nat_bitmap; /* NAT bitmap pointer */
+77 -86
fs/f2fs/node.c
··· 338 338 set_nat_flag(e, IS_CHECKPOINTED, false); 339 339 __set_nat_cache_dirty(nm_i, e); 340 340 341 - if (enabled_nat_bits(sbi, NULL) && new_blkaddr == NEW_ADDR) 342 - clear_bit_le(NAT_BLOCK_OFFSET(ni->nid), nm_i->empty_nat_bits); 343 - 344 341 /* update fsync_mark if its inode nat entry is still alive */ 345 342 if (ni->nid != ni->ino) 346 343 e = __lookup_nat_cache(nm_i, ni->ino); ··· 1820 1823 kmem_cache_free(free_nid_slab, i); 1821 1824 } 1822 1825 1823 - void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set) 1826 + static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, 1827 + bool set, bool build, bool locked) 1824 1828 { 1825 1829 struct f2fs_nm_info *nm_i = NM_I(sbi); 1826 1830 unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid); ··· 1831 1833 return; 1832 1834 1833 1835 if (set) 1834 - set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); 1836 + __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); 1835 1837 else 1836 - clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); 1838 + __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); 1839 + 1840 + if (!locked) 1841 + spin_lock(&nm_i->free_nid_lock); 1842 + if (set) 1843 + nm_i->free_nid_count[nat_ofs]++; 1844 + else if (!build) 1845 + nm_i->free_nid_count[nat_ofs]--; 1846 + if (!locked) 1847 + spin_unlock(&nm_i->free_nid_lock); 1837 1848 } 1838 1849 1839 1850 static void scan_nat_page(struct f2fs_sb_info *sbi, ··· 1854 1847 unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid); 1855 1848 int i; 1856 1849 1857 - set_bit_le(nat_ofs, nm_i->nat_block_bitmap); 1850 + if (test_bit_le(nat_ofs, nm_i->nat_block_bitmap)) 1851 + return; 1852 + 1853 + __set_bit_le(nat_ofs, nm_i->nat_block_bitmap); 1858 1854 1859 1855 i = start_nid % NAT_ENTRY_PER_BLOCK; 1860 1856 ··· 1871 1861 f2fs_bug_on(sbi, blk_addr == NEW_ADDR); 1872 1862 if (blk_addr == NULL_ADDR) 1873 1863 freed = add_free_nid(sbi, start_nid, true); 1874 - update_free_nid_bitmap(sbi, start_nid, freed); 1864 + 
update_free_nid_bitmap(sbi, start_nid, freed, true, false); 1875 1865 } 1876 1866 } 1877 1867 ··· 1886 1876 1887 1877 for (i = 0; i < nm_i->nat_blocks; i++) { 1888 1878 if (!test_bit_le(i, nm_i->nat_block_bitmap)) 1879 + continue; 1880 + if (!nm_i->free_nid_count[i]) 1889 1881 continue; 1890 1882 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) { 1891 1883 nid_t nid; ··· 1919 1907 up_read(&nm_i->nat_tree_lock); 1920 1908 } 1921 1909 1922 - static int scan_nat_bits(struct f2fs_sb_info *sbi) 1923 - { 1924 - struct f2fs_nm_info *nm_i = NM_I(sbi); 1925 - struct page *page; 1926 - unsigned int i = 0; 1927 - nid_t nid; 1928 - 1929 - if (!enabled_nat_bits(sbi, NULL)) 1930 - return -EAGAIN; 1931 - 1932 - down_read(&nm_i->nat_tree_lock); 1933 - check_empty: 1934 - i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i); 1935 - if (i >= nm_i->nat_blocks) { 1936 - i = 0; 1937 - goto check_partial; 1938 - } 1939 - 1940 - for (nid = i * NAT_ENTRY_PER_BLOCK; nid < (i + 1) * NAT_ENTRY_PER_BLOCK; 1941 - nid++) { 1942 - if (unlikely(nid >= nm_i->max_nid)) 1943 - break; 1944 - add_free_nid(sbi, nid, true); 1945 - } 1946 - 1947 - if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS) 1948 - goto out; 1949 - i++; 1950 - goto check_empty; 1951 - 1952 - check_partial: 1953 - i = find_next_zero_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i); 1954 - if (i >= nm_i->nat_blocks) { 1955 - disable_nat_bits(sbi, true); 1956 - up_read(&nm_i->nat_tree_lock); 1957 - return -EINVAL; 1958 - } 1959 - 1960 - nid = i * NAT_ENTRY_PER_BLOCK; 1961 - page = get_current_nat_page(sbi, nid); 1962 - scan_nat_page(sbi, page, nid); 1963 - f2fs_put_page(page, 1); 1964 - 1965 - if (nm_i->nid_cnt[FREE_NID_LIST] < MAX_FREE_NIDS) { 1966 - i++; 1967 - goto check_partial; 1968 - } 1969 - out: 1970 - up_read(&nm_i->nat_tree_lock); 1971 - return 0; 1972 - } 1973 - 1974 1910 static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) 1975 1911 { 1976 1912 struct f2fs_nm_info *nm_i = NM_I(sbi); ··· 
1940 1980 1941 1981 if (nm_i->nid_cnt[FREE_NID_LIST]) 1942 1982 return; 1943 - 1944 - /* try to find free nids with nat_bits */ 1945 - if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST]) 1946 - return; 1947 - } 1948 - 1949 - /* find next valid candidate */ 1950 - if (enabled_nat_bits(sbi, NULL)) { 1951 - int idx = find_next_zero_bit_le(nm_i->full_nat_bits, 1952 - nm_i->nat_blocks, 0); 1953 - 1954 - if (idx >= nm_i->nat_blocks) 1955 - set_sbi_flag(sbi, SBI_NEED_FSCK); 1956 - else 1957 - nid = idx * NAT_ENTRY_PER_BLOCK; 1958 1983 } 1959 1984 1960 1985 /* readahead nat pages to be scanned */ ··· 2026 2081 __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false); 2027 2082 nm_i->available_nids--; 2028 2083 2029 - update_free_nid_bitmap(sbi, *nid, false); 2084 + update_free_nid_bitmap(sbi, *nid, false, false, false); 2030 2085 2031 2086 spin_unlock(&nm_i->nid_list_lock); 2032 2087 return true; ··· 2082 2137 2083 2138 nm_i->available_nids++; 2084 2139 2085 - update_free_nid_bitmap(sbi, nid, true); 2140 + update_free_nid_bitmap(sbi, nid, true, false, false); 2086 2141 2087 2142 spin_unlock(&nm_i->nid_list_lock); 2088 2143 ··· 2328 2383 list_add_tail(&nes->set_list, head); 2329 2384 } 2330 2385 2331 - void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, 2386 + static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, 2332 2387 struct page *page) 2333 2388 { 2334 2389 struct f2fs_nm_info *nm_i = NM_I(sbi); ··· 2347 2402 valid++; 2348 2403 } 2349 2404 if (valid == 0) { 2350 - set_bit_le(nat_index, nm_i->empty_nat_bits); 2351 - clear_bit_le(nat_index, nm_i->full_nat_bits); 2405 + __set_bit_le(nat_index, nm_i->empty_nat_bits); 2406 + __clear_bit_le(nat_index, nm_i->full_nat_bits); 2352 2407 return; 2353 2408 } 2354 2409 2355 - clear_bit_le(nat_index, nm_i->empty_nat_bits); 2410 + __clear_bit_le(nat_index, nm_i->empty_nat_bits); 2356 2411 if (valid == NAT_ENTRY_PER_BLOCK) 2357 - set_bit_le(nat_index, nm_i->full_nat_bits); 2412 + 
__set_bit_le(nat_index, nm_i->full_nat_bits); 2358 2413 else 2359 - clear_bit_le(nat_index, nm_i->full_nat_bits); 2414 + __clear_bit_le(nat_index, nm_i->full_nat_bits); 2360 2415 } 2361 2416 2362 2417 static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, ··· 2412 2467 add_free_nid(sbi, nid, false); 2413 2468 spin_lock(&NM_I(sbi)->nid_list_lock); 2414 2469 NM_I(sbi)->available_nids++; 2415 - update_free_nid_bitmap(sbi, nid, true); 2470 + update_free_nid_bitmap(sbi, nid, true, false, false); 2416 2471 spin_unlock(&NM_I(sbi)->nid_list_lock); 2417 2472 } else { 2418 2473 spin_lock(&NM_I(sbi)->nid_list_lock); 2419 - update_free_nid_bitmap(sbi, nid, false); 2474 + update_free_nid_bitmap(sbi, nid, false, false, false); 2420 2475 spin_unlock(&NM_I(sbi)->nid_list_lock); 2421 2476 } 2422 2477 } ··· 2522 2577 return 0; 2523 2578 } 2524 2579 2580 + inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi) 2581 + { 2582 + struct f2fs_nm_info *nm_i = NM_I(sbi); 2583 + unsigned int i = 0; 2584 + nid_t nid, last_nid; 2585 + 2586 + if (!enabled_nat_bits(sbi, NULL)) 2587 + return; 2588 + 2589 + for (i = 0; i < nm_i->nat_blocks; i++) { 2590 + i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i); 2591 + if (i >= nm_i->nat_blocks) 2592 + break; 2593 + 2594 + __set_bit_le(i, nm_i->nat_block_bitmap); 2595 + 2596 + nid = i * NAT_ENTRY_PER_BLOCK; 2597 + last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK; 2598 + 2599 + spin_lock(&nm_i->free_nid_lock); 2600 + for (; nid < last_nid; nid++) 2601 + update_free_nid_bitmap(sbi, nid, true, true, true); 2602 + spin_unlock(&nm_i->free_nid_lock); 2603 + } 2604 + 2605 + for (i = 0; i < nm_i->nat_blocks; i++) { 2606 + i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i); 2607 + if (i >= nm_i->nat_blocks) 2608 + break; 2609 + 2610 + __set_bit_le(i, nm_i->nat_block_bitmap); 2611 + } 2612 + } 2613 + 2525 2614 static int init_node_manager(struct f2fs_sb_info *sbi) 2526 2615 { 2527 2616 struct f2fs_super_block *sb_raw = 
F2FS_RAW_SUPER(sbi); ··· 2617 2638 return 0; 2618 2639 } 2619 2640 2620 - int init_free_nid_cache(struct f2fs_sb_info *sbi) 2641 + static int init_free_nid_cache(struct f2fs_sb_info *sbi) 2621 2642 { 2622 2643 struct f2fs_nm_info *nm_i = NM_I(sbi); 2623 2644 ··· 2630 2651 GFP_KERNEL); 2631 2652 if (!nm_i->nat_block_bitmap) 2632 2653 return -ENOMEM; 2654 + 2655 + nm_i->free_nid_count = f2fs_kvzalloc(nm_i->nat_blocks * 2656 + sizeof(unsigned short), GFP_KERNEL); 2657 + if (!nm_i->free_nid_count) 2658 + return -ENOMEM; 2659 + 2660 + spin_lock_init(&nm_i->free_nid_lock); 2661 + 2633 2662 return 0; 2634 2663 } 2635 2664 ··· 2656 2669 err = init_free_nid_cache(sbi); 2657 2670 if (err) 2658 2671 return err; 2672 + 2673 + /* load free nid status from nat_bits table */ 2674 + load_free_nid_bitmap(sbi); 2659 2675 2660 2676 build_free_nids(sbi, true, true); 2661 2677 return 0; ··· 2720 2730 2721 2731 kvfree(nm_i->nat_block_bitmap); 2722 2732 kvfree(nm_i->free_nid_bitmap); 2733 + kvfree(nm_i->free_nid_count); 2723 2734 2724 2735 kfree(nm_i->nat_bitmap); 2725 2736 kfree(nm_i->nat_bits);
+6
fs/f2fs/segment.c
··· 1163 1163 if (f2fs_discard_en(sbi) && 1164 1164 !f2fs_test_and_set_bit(offset, se->discard_map)) 1165 1165 sbi->discard_blks--; 1166 + 1167 + /* don't overwrite by SSR to keep node chain */ 1168 + if (se->type == CURSEG_WARM_NODE) { 1169 + if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) 1170 + se->ckpt_valid_blocks++; 1171 + } 1166 1172 } else { 1167 1173 if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) { 1168 1174 #ifdef CONFIG_F2FS_CHECK_FS
-16
include/linux/gpio/consumer.h
··· 143 143 struct fwnode_handle *child, 144 144 enum gpiod_flags flags, 145 145 const char *label); 146 - /* FIXME: delete this helper when users are switched over */ 147 - static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, 148 - const char *con_id, struct fwnode_handle *child) 149 - { 150 - return devm_fwnode_get_index_gpiod_from_child(dev, con_id, 151 - 0, child, 152 - GPIOD_ASIS, 153 - "?"); 154 - } 155 146 156 147 #else /* CONFIG_GPIOLIB */ 157 148 ··· 431 440 struct fwnode_handle *child, 432 441 enum gpiod_flags flags, 433 442 const char *label) 434 - { 435 - return ERR_PTR(-ENOSYS); 436 - } 437 - 438 - /* FIXME: delete this when all users are switched over */ 439 - static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, 440 - const char *con_id, struct fwnode_handle *child) 441 443 { 442 444 return ERR_PTR(-ENOSYS); 443 445 }
+1
include/uapi/linux/elf.h
··· 409 409 #define NT_S390_TDB 0x308 /* s390 transaction diagnostic block */ 410 410 #define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 upper half */ 411 411 #define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */ 412 + #define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */ 412 413 #define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */ 413 414 #define NT_ARM_TLS 0x401 /* ARM TLS register */ 414 415 #define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
+2
include/uapi/linux/kvm.h
··· 890 890 #define KVM_CAP_MIPS_VZ 137 891 891 #define KVM_CAP_MIPS_TE 138 892 892 #define KVM_CAP_MIPS_64BIT 139 893 + #define KVM_CAP_S390_GS 140 894 + #define KVM_CAP_S390_AIS 141 893 895 894 896 #ifdef KVM_CAP_IRQ_ROUTING 895 897
-2
mm/swap_slots.c
··· 267 267 { 268 268 struct swap_slots_cache *cache; 269 269 270 - WARN_ON_ONCE(!swap_slot_cache_initialized); 271 - 272 270 cache = &get_cpu_var(swp_slots); 273 271 if (use_swap_slot_cache && cache->slots_ret) { 274 272 spin_lock_irq(&cache->free_lock);