Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:
"There is only one new feature in this pull for the 4.4 merge window,
most of it is small enhancements, cleanup and bug fixes:

- Add the s390 backend for the software dirty bit tracking. This
adds two new pgtable functions pte_clear_soft_dirty and
pmd_clear_soft_dirty which is why there is a hit to
arch/x86/include/asm/pgtable.h in this pull request.

- A series of cleanup patches for the AP bus, this includes the
removal of the support for two outdated crypto cards (PCICC and
PCICA).

- The irq handling / signaling on buffer full in the runtime
instrumentation code is dropped.

- Some micro optimizations: remove unnecessary memory barriers for a
couple of functions: [smp_]rmb, [smp_]wmb, atomics, bitops, and for
spin_unlock. Use the builtin bswap if available and make
test_and_set_bit_lock more cache friendly.

- Statistics and a tracepoint for the diagnose calls to the
hypervisor.

- The CPU measurement facility support to sample KVM guests is
improved.

- The vector instructions are now always enabled for user space
processes if the hardware has the vector facility. This simplifies
the FPU handling code. The fpu-internal.h header is split into fpu
internals, api and types just like x86.

- Cleanup and improvements for the common I/O layer.

- Rework udelay to solve a problem with kprobe. udelay has busy loop
semantics but still uses an idle processor state for the wait"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (66 commits)
s390: remove runtime instrumentation interrupts
s390/cio: de-duplicate subchannel validation
s390/css: unneeded initialization in for_each_subchannel
s390/Kconfig: use builtin bswap
s390/dasd: fix disconnected device with valid path mask
s390/dasd: fix invalid PAV assignment after suspend/resume
s390/dasd: fix double free in dasd_eckd_read_conf
s390/kernel: fix ptrace peek/poke for floating point registers
s390/cio: move ccw_device_stlck functions
s390/cio: move ccw_device_call_handler
s390/topology: reduce per_cpu() invocations
s390/nmi: reduce size of percpu variable
s390/nmi: fix terminology
s390/nmi: remove casts
s390/nmi: remove pointless error strings
s390: don't store registers on disabled wait anymore
s390: get rid of __set_psw_mask()
s390/fpu: split fpu-internal.h into fpu internals, api, and type headers
s390/dasd: fix list_del corruption after lcu changes
s390/spinlock: remove unneeded serializations at unlock
...

+2424 -3680
+2
arch/s390/Kconfig
··· 101 101 select ARCH_SAVE_PAGE_KEYS if HIBERNATION 102 102 select ARCH_SUPPORTS_ATOMIC_RMW 103 103 select ARCH_SUPPORTS_NUMA_BALANCING 104 + select ARCH_USE_BUILTIN_BSWAP 104 105 select ARCH_USE_CMPXCHG_LOCKREF 105 106 select ARCH_WANTS_PROT_NUMA_PROT_NONE 106 107 select ARCH_WANT_IPC_PARSE_VERSION ··· 119 118 select HAVE_ARCH_EARLY_PFN_TO_NID 120 119 select HAVE_ARCH_JUMP_LABEL 121 120 select HAVE_ARCH_SECCOMP_FILTER 121 + select HAVE_ARCH_SOFT_DIRTY 122 122 select HAVE_ARCH_TRACEHOOK 123 123 select HAVE_ARCH_TRANSPARENT_HUGEPAGE 124 124 select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
+9 -1
arch/s390/hypfs/hypfs_diag.c
··· 15 15 #include <linux/string.h> 16 16 #include <linux/vmalloc.h> 17 17 #include <linux/mm.h> 18 + #include <asm/diag.h> 18 19 #include <asm/ebcdic.h> 19 20 #include "hypfs.h" 20 21 ··· 337 336 338 337 /* Diagnose 204 functions */ 339 338 340 - static int diag204(unsigned long subcode, unsigned long size, void *addr) 339 + static inline int __diag204(unsigned long subcode, unsigned long size, void *addr) 341 340 { 342 341 register unsigned long _subcode asm("0") = subcode; 343 342 register unsigned long _size asm("1") = size; ··· 350 349 if (_subcode) 351 350 return -1; 352 351 return _size; 352 + } 353 + 354 + static int diag204(unsigned long subcode, unsigned long size, void *addr) 355 + { 356 + diag_stat_inc(DIAG_STAT_X204); 357 + return __diag204(subcode, size, addr); 353 358 } 354 359 355 360 /* ··· 512 505 { 513 506 int rc = -EOPNOTSUPP; 514 507 508 + diag_stat_inc(DIAG_STAT_X224); 515 509 asm volatile( 516 510 " diag %1,%2,0x224\n" 517 511 "0: lhi %0,0x0\n"
+2
arch/s390/hypfs/hypfs_diag0c.c
··· 8 8 9 9 #include <linux/slab.h> 10 10 #include <linux/cpu.h> 11 + #include <asm/diag.h> 11 12 #include <asm/hypfs.h> 12 13 #include "hypfs.h" 13 14 ··· 19 18 */ 20 19 static void diag0c(struct hypfs_diag0c_entry *entry) 21 20 { 21 + diag_stat_inc(DIAG_STAT_X00C); 22 22 asm volatile ( 23 23 " sam31\n" 24 24 " diag %0,%0,0x0c\n"
+8 -1
arch/s390/hypfs/hypfs_sprp.c
··· 13 13 #include <linux/types.h> 14 14 #include <linux/uaccess.h> 15 15 #include <asm/compat.h> 16 + #include <asm/diag.h> 16 17 #include <asm/sclp.h> 17 18 #include "hypfs.h" 18 19 ··· 23 22 24 23 #define DIAG304_CMD_MAX 2 25 24 26 - static unsigned long hypfs_sprp_diag304(void *data, unsigned long cmd) 25 + static inline unsigned long __hypfs_sprp_diag304(void *data, unsigned long cmd) 27 26 { 28 27 register unsigned long _data asm("2") = (unsigned long) data; 29 28 register unsigned long _rc asm("3"); ··· 33 32 : "=d" (_rc) : "d" (_data), "d" (_cmd) : "memory"); 34 33 35 34 return _rc; 35 + } 36 + 37 + static unsigned long hypfs_sprp_diag304(void *data, unsigned long cmd) 38 + { 39 + diag_stat_inc(DIAG_STAT_X304); 40 + return __hypfs_sprp_diag304(data, cmd); 36 41 } 37 42 38 43 static void hypfs_sprp_free(const void *data)
+2
arch/s390/hypfs/hypfs_vm.c
··· 9 9 #include <linux/errno.h> 10 10 #include <linux/string.h> 11 11 #include <linux/vmalloc.h> 12 + #include <asm/diag.h> 12 13 #include <asm/ebcdic.h> 13 14 #include <asm/timex.h> 14 15 #include "hypfs.h" ··· 67 66 memset(parm_list.aci_grp, 0x40, NAME_LEN); 68 67 rc = -1; 69 68 69 + diag_stat_inc(DIAG_STAT_X2FC); 70 70 asm volatile( 71 71 " diag %0,%1,0x2fc\n" 72 72 "0:\n"
+2
arch/s390/include/asm/appldata.h
··· 7 7 #ifndef _ASM_S390_APPLDATA_H 8 8 #define _ASM_S390_APPLDATA_H 9 9 10 + #include <asm/diag.h> 10 11 #include <asm/io.h> 11 12 12 13 #define APPLDATA_START_INTERVAL_REC 0x80 ··· 54 53 parm_list.buffer_length = length; 55 54 parm_list.product_id_addr = (unsigned long) id; 56 55 parm_list.buffer_addr = virt_to_phys(buffer); 56 + diag_stat_inc(DIAG_STAT_X0DC); 57 57 asm volatile( 58 58 " diag %1,%0,0xdc" 59 59 : "=d" (ry)
-2
arch/s390/include/asm/atomic.h
··· 36 36 \ 37 37 typecheck(atomic_t *, ptr); \ 38 38 asm volatile( \ 39 - __barrier \ 40 39 op_string " %0,%2,%1\n" \ 41 40 __barrier \ 42 41 : "=d" (old_val), "+Q" ((ptr)->counter) \ ··· 179 180 \ 180 181 typecheck(atomic64_t *, ptr); \ 181 182 asm volatile( \ 182 - __barrier \ 183 183 op_string " %0,%2,%1\n" \ 184 184 __barrier \ 185 185 : "=d" (old_val), "+Q" ((ptr)->counter) \
+4 -4
arch/s390/include/asm/barrier.h
··· 22 22 23 23 #define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0) 24 24 25 - #define rmb() mb() 26 - #define wmb() mb() 27 - #define dma_rmb() rmb() 28 - #define dma_wmb() wmb() 25 + #define rmb() barrier() 26 + #define wmb() barrier() 27 + #define dma_rmb() mb() 28 + #define dma_wmb() mb() 29 29 #define smp_mb() mb() 30 30 #define smp_rmb() rmb() 31 31 #define smp_wmb() wmb()
+34 -21
arch/s390/include/asm/bitops.h
··· 11 11 * big-endian system because, unlike little endian, the number of each 12 12 * bit depends on the word size. 13 13 * 14 - * The bitop functions are defined to work on unsigned longs, so for an 15 - * s390x system the bits end up numbered: 14 + * The bitop functions are defined to work on unsigned longs, so the bits 15 + * end up numbered: 16 16 * |63..............0|127............64|191...........128|255...........192| 17 - * and on s390: 18 - * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224| 19 17 * 20 18 * There are a few little-endian macros used mostly for filesystem 21 - * bitmaps, these work on similar bit arrays layouts, but 22 - * byte-oriented: 19 + * bitmaps, these work on similar bit array layouts, but byte-oriented: 23 20 * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56| 24 21 * 25 - * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit 26 - * number field needs to be reversed compared to the big-endian bit 27 - * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b). 22 + * The main difference is that bit 3-5 in the bit number field needs to be 23 + * reversed compared to the big-endian bit fields. This can be achieved by 24 + * XOR with 0x38. 28 25 * 29 - * We also have special functions which work with an MSB0 encoding: 30 - * on an s390x system the bits are numbered: 26 + * We also have special functions which work with an MSB0 encoding. 27 + * The bits are numbered: 31 28 * |0..............63|64............127|128...........191|192...........255| 32 - * and on s390: 33 - * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255| 34 29 * 35 - * The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit 36 - * number field needs to be reversed compared to the LSB0 encoded bit 37 - * fields. This can be achieved by XOR with 0x3f (64b) or 0x1f (32b). 
30 + * The main difference is that bit 0-63 in the bit number field needs to be 31 + * reversed compared to the LSB0 encoded bit fields. This can be achieved by 32 + * XOR with 0x3f. 38 33 * 39 34 */ 40 35 ··· 59 64 \ 60 65 typecheck(unsigned long *, (__addr)); \ 61 66 asm volatile( \ 62 - __barrier \ 63 67 __op_string " %0,%2,%1\n" \ 64 68 __barrier \ 65 69 : "=d" (__old), "+Q" (*(__addr)) \ ··· 270 276 return (*addr >> (nr & 7)) & 1; 271 277 } 272 278 279 + static inline int test_and_set_bit_lock(unsigned long nr, 280 + volatile unsigned long *ptr) 281 + { 282 + if (test_bit(nr, ptr)) 283 + return 1; 284 + return test_and_set_bit(nr, ptr); 285 + } 286 + 287 + static inline void clear_bit_unlock(unsigned long nr, 288 + volatile unsigned long *ptr) 289 + { 290 + smp_mb__before_atomic(); 291 + clear_bit(nr, ptr); 292 + } 293 + 294 + static inline void __clear_bit_unlock(unsigned long nr, 295 + volatile unsigned long *ptr) 296 + { 297 + smp_mb(); 298 + __clear_bit(nr, ptr); 299 + } 300 + 273 301 /* 274 302 * Functions which use MSB0 bit numbering. 275 - * On an s390x system the bits are numbered: 303 + * The bits are numbered: 276 304 * |0..............63|64............127|128...........191|192...........255| 277 - * and on s390: 278 - * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255| 279 305 */ 280 306 unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size); 281 307 unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size, ··· 460 446 #include <asm-generic/bitops/ffz.h> 461 447 #include <asm-generic/bitops/find.h> 462 448 #include <asm-generic/bitops/hweight.h> 463 - #include <asm-generic/bitops/lock.h> 464 449 #include <asm-generic/bitops/sched.h> 465 450 #include <asm-generic/bitops/le.h> 466 451 #include <asm-generic/bitops/ext2-atomic-setbit.h>
+10
arch/s390/include/asm/cio.h
··· 5 5 #define _ASM_S390_CIO_H_ 6 6 7 7 #include <linux/spinlock.h> 8 + #include <linux/bitops.h> 8 9 #include <asm/types.h> 9 10 10 11 #define LPM_ANYPATH 0xff ··· 295 294 (dev_id1->devno == dev_id2->devno)) 296 295 return 1; 297 296 return 0; 297 + } 298 + 299 + /** 300 + * pathmask_to_pos() - find the position of the left-most bit in a pathmask 301 + * @mask: pathmask with at least one bit set 302 + */ 303 + static inline u8 pathmask_to_pos(u8 mask) 304 + { 305 + return 8 - ffs(mask); 298 306 } 299 307 300 308 void channel_subsystem_reinit(void);
+1
arch/s390/include/asm/cmb.h
··· 6 6 struct ccw_device; 7 7 extern int enable_cmf(struct ccw_device *cdev); 8 8 extern int disable_cmf(struct ccw_device *cdev); 9 + extern int __disable_cmf(struct ccw_device *cdev); 9 10 extern u64 cmf_read(struct ccw_device *cdev, int index); 10 11 extern int cmf_readall(struct ccw_device *cdev, struct cmbdata *data); 11 12
+3 -27
arch/s390/include/asm/cmpxchg.h
··· 32 32 __old; \ 33 33 }) 34 34 35 - #define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \ 35 + #define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \ 36 36 ({ \ 37 37 register __typeof__(*(p1)) __old1 asm("2") = (o1); \ 38 38 register __typeof__(*(p2)) __old2 asm("3") = (o2); \ ··· 40 40 register __typeof__(*(p2)) __new2 asm("5") = (n2); \ 41 41 int cc; \ 42 42 asm volatile( \ 43 - insn " %[old],%[new],%[ptr]\n" \ 43 + " cdsg %[old],%[new],%[ptr]\n" \ 44 44 " ipm %[cc]\n" \ 45 45 " srl %[cc],28" \ 46 46 : [cc] "=d" (cc), [old] "+d" (__old1), "+d" (__old2) \ ··· 50 50 !cc; \ 51 51 }) 52 52 53 - #define __cmpxchg_double_4(p1, p2, o1, o2, n1, n2) \ 54 - __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cds") 55 - 56 - #define __cmpxchg_double_8(p1, p2, o1, o2, n1, n2) \ 57 - __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cdsg") 58 - 59 - extern void __cmpxchg_double_called_with_bad_pointer(void); 60 - 61 - #define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \ 62 - ({ \ 63 - int __ret; \ 64 - switch (sizeof(*(p1))) { \ 65 - case 4: \ 66 - __ret = __cmpxchg_double_4(p1, p2, o1, o2, n1, n2); \ 67 - break; \ 68 - case 8: \ 69 - __ret = __cmpxchg_double_8(p1, p2, o1, o2, n1, n2); \ 70 - break; \ 71 - default: \ 72 - __cmpxchg_double_called_with_bad_pointer(); \ 73 - } \ 74 - __ret; \ 75 - }) 76 - 77 53 #define cmpxchg_double(p1, p2, o1, o2, n1, n2) \ 78 54 ({ \ 79 55 __typeof__(p1) __p1 = (p1); \ ··· 57 81 BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \ 58 82 BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \ 59 83 VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\ 60 - __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \ 84 + __cmpxchg_double(__p1, __p2, o1, o2, n1, n2); \ 61 85 }) 62 86 63 87 #define system_has_cmpxchg_double() 1
-5
arch/s390/include/asm/cpu_mf.h
··· 22 22 #define CPU_MF_INT_SF_LSDA (1 << 22) /* loss of sample data alert */ 23 23 #define CPU_MF_INT_CF_CACA (1 << 7) /* counter auth. change alert */ 24 24 #define CPU_MF_INT_CF_LCDA (1 << 6) /* loss of counter data alert */ 25 - #define CPU_MF_INT_RI_HALTED (1 << 5) /* run-time instr. halted */ 26 - #define CPU_MF_INT_RI_BUF_FULL (1 << 4) /* run-time instr. program 27 - buffer full */ 28 - 29 25 #define CPU_MF_INT_CF_MASK (CPU_MF_INT_CF_CACA|CPU_MF_INT_CF_LCDA) 30 26 #define CPU_MF_INT_SF_MASK (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE| \ 31 27 CPU_MF_INT_SF_PRA|CPU_MF_INT_SF_SACA| \ 32 28 CPU_MF_INT_SF_LSDA) 33 - #define CPU_MF_INT_RI_MASK (CPU_MF_INT_RI_HALTED|CPU_MF_INT_RI_BUF_FULL) 34 29 35 30 /* CPU measurement facility support */ 36 31 static inline int cpum_cf_avail(void)
-2
arch/s390/include/asm/ctl_reg.h
··· 46 46 __ctl_load(reg, cr, cr); 47 47 } 48 48 49 - void __ctl_set_vx(void); 50 - 51 49 void smp_ctl_set_bit(int cr, int bit); 52 50 void smp_ctl_clear_bit(int cr, int bit); 53 51
+29
arch/s390/include/asm/diag.h
··· 8 8 #ifndef _ASM_S390_DIAG_H 9 9 #define _ASM_S390_DIAG_H 10 10 11 + #include <linux/percpu.h> 12 + 13 + enum diag_stat_enum { 14 + DIAG_STAT_X008, 15 + DIAG_STAT_X00C, 16 + DIAG_STAT_X010, 17 + DIAG_STAT_X014, 18 + DIAG_STAT_X044, 19 + DIAG_STAT_X064, 20 + DIAG_STAT_X09C, 21 + DIAG_STAT_X0DC, 22 + DIAG_STAT_X204, 23 + DIAG_STAT_X210, 24 + DIAG_STAT_X224, 25 + DIAG_STAT_X250, 26 + DIAG_STAT_X258, 27 + DIAG_STAT_X288, 28 + DIAG_STAT_X2C4, 29 + DIAG_STAT_X2FC, 30 + DIAG_STAT_X304, 31 + DIAG_STAT_X308, 32 + DIAG_STAT_X500, 33 + NR_DIAG_STAT 34 + }; 35 + 36 + void diag_stat_inc(enum diag_stat_enum nr); 37 + void diag_stat_inc_norecursion(enum diag_stat_enum nr); 38 + 11 39 /* 12 40 * Diagnose 10: Release page range 13 41 */ ··· 46 18 start_addr = start_pfn << PAGE_SHIFT; 47 19 end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT; 48 20 21 + diag_stat_inc(DIAG_STAT_X010); 49 22 asm volatile( 50 23 "0: diag %0,%1,0x10\n" 51 24 "1:\n"
+6 -4
arch/s390/include/asm/etr.h
··· 211 211 #define ETR_PTFF_SGS 0x43 /* set gross steering rate */ 212 212 213 213 /* Functions needed by the machine check handler */ 214 - void etr_switch_to_local(void); 215 - void etr_sync_check(void); 214 + int etr_switch_to_local(void); 215 + int etr_sync_check(void); 216 + void etr_queue_work(void); 216 217 217 218 /* notifier for syncs */ 218 219 extern struct atomic_notifier_head s390_epoch_delta_notifier; ··· 254 253 } __attribute__ ((packed)); 255 254 256 255 /* Functions needed by the machine check handler */ 257 - void stp_sync_check(void); 258 - void stp_island_check(void); 256 + int stp_sync_check(void); 257 + int stp_island_check(void); 258 + void stp_queue_work(void); 259 259 260 260 #endif /* __S390_ETR_H */
+4 -47
arch/s390/include/asm/fpu-internal.h arch/s390/include/asm/fpu/internal.h
··· 1 1 /* 2 - * General floating pointer and vector register helpers 2 + * FPU state and register content conversion primitives 3 3 * 4 4 * Copyright IBM Corp. 2015 5 5 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> ··· 8 8 #ifndef _ASM_S390_FPU_INTERNAL_H 9 9 #define _ASM_S390_FPU_INTERNAL_H 10 10 11 - #define FPU_USE_VX 1 /* Vector extension is active */ 12 - 13 - #ifndef __ASSEMBLY__ 14 - 15 - #include <linux/errno.h> 16 11 #include <linux/string.h> 17 - #include <asm/linkage.h> 18 12 #include <asm/ctl_reg.h> 19 - #include <asm/sigcontext.h> 20 - 21 - struct fpu { 22 - __u32 fpc; /* Floating-point control */ 23 - __u32 flags; 24 - union { 25 - void *regs; 26 - freg_t *fprs; /* Floating-point register save area */ 27 - __vector128 *vxrs; /* Vector register save area */ 28 - }; 29 - }; 30 - 31 - void save_fpu_regs(void); 32 - 33 - #define is_vx_fpu(fpu) (!!((fpu)->flags & FPU_USE_VX)) 34 - #define is_vx_task(tsk) (!!((tsk)->thread.fpu.flags & FPU_USE_VX)) 35 - 36 - /* VX array structure for address operand constraints in inline assemblies */ 37 - struct vx_array { __vector128 _[__NUM_VXRS]; }; 38 - 39 - static inline int test_fp_ctl(u32 fpc) 40 - { 41 - u32 orig_fpc; 42 - int rc; 43 - 44 - asm volatile( 45 - " efpc %1\n" 46 - " sfpc %2\n" 47 - "0: sfpc %1\n" 48 - " la %0,0\n" 49 - "1:\n" 50 - EX_TABLE(0b,1b) 51 - : "=d" (rc), "=d" (orig_fpc) 52 - : "d" (fpc), "0" (-EINVAL)); 53 - return rc; 54 - } 13 + #include <asm/fpu/types.h> 55 14 56 15 static inline void save_vx_regs_safe(__vector128 *vxrs) 57 16 { ··· 48 89 static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu) 49 90 { 50 91 fpregs->pad = 0; 51 - if (is_vx_fpu(fpu)) 92 + if (MACHINE_HAS_VX) 52 93 convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs); 53 94 else 54 95 memcpy((freg_t *)&fpregs->fprs, fpu->fprs, ··· 57 98 58 99 static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu) 59 100 { 60 - if (is_vx_fpu(fpu)) 101 + if (MACHINE_HAS_VX) 61 102 
convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs); 62 103 else 63 104 memcpy(fpu->fprs, (freg_t *)&fpregs->fprs, 64 105 sizeof(fpregs->fprs)); 65 106 } 66 - 67 - #endif 68 107 69 108 #endif /* _ASM_S390_FPU_INTERNAL_H */
+30
arch/s390/include/asm/fpu/api.h
··· 1 + /* 2 + * In-kernel FPU support functions 3 + * 4 + * Copyright IBM Corp. 2015 5 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 6 + */ 7 + 8 + #ifndef _ASM_S390_FPU_API_H 9 + #define _ASM_S390_FPU_API_H 10 + 11 + void save_fpu_regs(void); 12 + 13 + static inline int test_fp_ctl(u32 fpc) 14 + { 15 + u32 orig_fpc; 16 + int rc; 17 + 18 + asm volatile( 19 + " efpc %1\n" 20 + " sfpc %2\n" 21 + "0: sfpc %1\n" 22 + " la %0,0\n" 23 + "1:\n" 24 + EX_TABLE(0b,1b) 25 + : "=d" (rc), "=d" (orig_fpc) 26 + : "d" (fpc), "0" (-EINVAL)); 27 + return rc; 28 + } 29 + 30 + #endif /* _ASM_S390_FPU_API_H */
+25
arch/s390/include/asm/fpu/types.h
··· 1 + /* 2 + * FPU data structures 3 + * 4 + * Copyright IBM Corp. 2015 5 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 6 + */ 7 + 8 + #ifndef _ASM_S390_FPU_TYPES_H 9 + #define _ASM_S390_FPU_TYPES_H 10 + 11 + #include <asm/sigcontext.h> 12 + 13 + struct fpu { 14 + __u32 fpc; /* Floating-point control */ 15 + union { 16 + void *regs; 17 + freg_t *fprs; /* Floating-point register save area */ 18 + __vector128 *vxrs; /* Vector register save area */ 19 + }; 20 + }; 21 + 22 + /* VX array structure for address operand constraints in inline assemblies */ 23 + struct vx_array { __vector128 _[__NUM_VXRS]; }; 24 + 25 + #endif /* _ASM_S390_FPU_TYPES_H */
+2
arch/s390/include/asm/idle.h
··· 24 24 extern struct device_attribute dev_attr_idle_count; 25 25 extern struct device_attribute dev_attr_idle_time_us; 26 26 27 + void psw_idle(struct s390_idle_data *, unsigned long); 28 + 27 29 #endif /* _S390_IDLE_H */
+13 -1
arch/s390/include/asm/irq.h
··· 47 47 IRQEXT_IUC, 48 48 IRQEXT_CMS, 49 49 IRQEXT_CMC, 50 - IRQEXT_CMR, 51 50 IRQEXT_FTP, 52 51 IRQIO_CIO, 53 52 IRQIO_QAI, ··· 94 95 IRQ_SUBCLASS_MEASUREMENT_ALERT = 5, 95 96 IRQ_SUBCLASS_SERVICE_SIGNAL = 9, 96 97 }; 98 + 99 + #define CR0_IRQ_SUBCLASS_MASK \ 100 + ((1UL << (63 - 30)) /* Warning Track */ | \ 101 + (1UL << (63 - 48)) /* Malfunction Alert */ | \ 102 + (1UL << (63 - 49)) /* Emergency Signal */ | \ 103 + (1UL << (63 - 50)) /* External Call */ | \ 104 + (1UL << (63 - 52)) /* Clock Comparator */ | \ 105 + (1UL << (63 - 53)) /* CPU Timer */ | \ 106 + (1UL << (63 - 54)) /* Service Signal */ | \ 107 + (1UL << (63 - 57)) /* Interrupt Key */ | \ 108 + (1UL << (63 - 58)) /* Measurement Alert */ | \ 109 + (1UL << (63 - 59)) /* Timing Alert */ | \ 110 + (1UL << (63 - 62))) /* IUCV */ 97 111 98 112 void irq_subclass_register(enum irq_subclass subclass); 99 113 void irq_subclass_unregister(enum irq_subclass subclass);
+1 -1
arch/s390/include/asm/kvm_host.h
··· 22 22 #include <linux/kvm.h> 23 23 #include <asm/debug.h> 24 24 #include <asm/cpu.h> 25 - #include <asm/fpu-internal.h> 25 + #include <asm/fpu/api.h> 26 26 #include <asm/isc.h> 27 27 28 28 #define KVM_MAX_VCPUS 64
+58 -9
arch/s390/include/asm/kvm_para.h
··· 27 27 #define __S390_KVM_PARA_H 28 28 29 29 #include <uapi/asm/kvm_para.h> 30 + #include <asm/diag.h> 30 31 31 - 32 - 33 - static inline long kvm_hypercall0(unsigned long nr) 32 + static inline long __kvm_hypercall0(unsigned long nr) 34 33 { 35 34 register unsigned long __nr asm("1") = nr; 36 35 register long __rc asm("2"); ··· 39 40 return __rc; 40 41 } 41 42 42 - static inline long kvm_hypercall1(unsigned long nr, unsigned long p1) 43 + static inline long kvm_hypercall0(unsigned long nr) 44 + { 45 + diag_stat_inc(DIAG_STAT_X500); 46 + return __kvm_hypercall0(nr); 47 + } 48 + 49 + static inline long __kvm_hypercall1(unsigned long nr, unsigned long p1) 43 50 { 44 51 register unsigned long __nr asm("1") = nr; 45 52 register unsigned long __p1 asm("2") = p1; ··· 56 51 return __rc; 57 52 } 58 53 59 - static inline long kvm_hypercall2(unsigned long nr, unsigned long p1, 54 + static inline long kvm_hypercall1(unsigned long nr, unsigned long p1) 55 + { 56 + diag_stat_inc(DIAG_STAT_X500); 57 + return __kvm_hypercall1(nr, p1); 58 + } 59 + 60 + static inline long __kvm_hypercall2(unsigned long nr, unsigned long p1, 60 61 unsigned long p2) 61 62 { 62 63 register unsigned long __nr asm("1") = nr; ··· 76 65 return __rc; 77 66 } 78 67 79 - static inline long kvm_hypercall3(unsigned long nr, unsigned long p1, 68 + static inline long kvm_hypercall2(unsigned long nr, unsigned long p1, 69 + unsigned long p2) 70 + { 71 + diag_stat_inc(DIAG_STAT_X500); 72 + return __kvm_hypercall2(nr, p1, p2); 73 + } 74 + 75 + static inline long __kvm_hypercall3(unsigned long nr, unsigned long p1, 80 76 unsigned long p2, unsigned long p3) 81 77 { 82 78 register unsigned long __nr asm("1") = nr; ··· 98 80 return __rc; 99 81 } 100 82 83 + static inline long kvm_hypercall3(unsigned long nr, unsigned long p1, 84 + unsigned long p2, unsigned long p3) 85 + { 86 + diag_stat_inc(DIAG_STAT_X500); 87 + return __kvm_hypercall3(nr, p1, p2, p3); 88 + } 101 89 102 - static inline long kvm_hypercall4(unsigned 
long nr, unsigned long p1, 90 + static inline long __kvm_hypercall4(unsigned long nr, unsigned long p1, 103 91 unsigned long p2, unsigned long p3, 104 92 unsigned long p4) 105 93 { ··· 122 98 return __rc; 123 99 } 124 100 125 - static inline long kvm_hypercall5(unsigned long nr, unsigned long p1, 101 + static inline long kvm_hypercall4(unsigned long nr, unsigned long p1, 102 + unsigned long p2, unsigned long p3, 103 + unsigned long p4) 104 + { 105 + diag_stat_inc(DIAG_STAT_X500); 106 + return __kvm_hypercall4(nr, p1, p2, p3, p4); 107 + } 108 + 109 + static inline long __kvm_hypercall5(unsigned long nr, unsigned long p1, 126 110 unsigned long p2, unsigned long p3, 127 111 unsigned long p4, unsigned long p5) 128 112 { ··· 148 116 return __rc; 149 117 } 150 118 151 - static inline long kvm_hypercall6(unsigned long nr, unsigned long p1, 119 + static inline long kvm_hypercall5(unsigned long nr, unsigned long p1, 120 + unsigned long p2, unsigned long p3, 121 + unsigned long p4, unsigned long p5) 122 + { 123 + diag_stat_inc(DIAG_STAT_X500); 124 + return __kvm_hypercall5(nr, p1, p2, p3, p4, p5); 125 + } 126 + 127 + static inline long __kvm_hypercall6(unsigned long nr, unsigned long p1, 152 128 unsigned long p2, unsigned long p3, 153 129 unsigned long p4, unsigned long p5, 154 130 unsigned long p6) ··· 175 135 "d" (__p3), "d" (__p4), "d" (__p5), "d" (__p6) 176 136 : "memory", "cc"); 177 137 return __rc; 138 + } 139 + 140 + static inline long kvm_hypercall6(unsigned long nr, unsigned long p1, 141 + unsigned long p2, unsigned long p3, 142 + unsigned long p4, unsigned long p5, 143 + unsigned long p6) 144 + { 145 + diag_stat_inc(DIAG_STAT_X500); 146 + return __kvm_hypercall6(nr, p1, p2, p3, p4, p5, p6); 178 147 } 179 148 180 149 /* kvm on s390 is always paravirtualization enabled */
+9 -2
arch/s390/include/asm/lowcore.h
··· 67 67 __u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */ 68 68 __u32 stfl_fac_list; /* 0x00c8 */ 69 69 __u8 pad_0x00cc[0x00e8-0x00cc]; /* 0x00cc */ 70 - __u32 mcck_interruption_code[2]; /* 0x00e8 */ 70 + __u64 mcck_interruption_code; /* 0x00e8 */ 71 71 __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */ 72 72 __u32 external_damage_code; /* 0x00f4 */ 73 73 __u64 failing_storage_address; /* 0x00f8 */ ··· 132 132 /* Address space pointer. */ 133 133 __u64 kernel_asce; /* 0x0358 */ 134 134 __u64 user_asce; /* 0x0360 */ 135 - __u64 current_pid; /* 0x0368 */ 135 + 136 + /* 137 + * The lpp and current_pid fields form a 138 + * 64-bit value that is set as program 139 + * parameter with the LPP instruction. 140 + */ 141 + __u32 lpp; /* 0x0368 */ 142 + __u32 current_pid; /* 0x036c */ 136 143 137 144 /* SMP info area */ 138 145 __u32 cpu_nr; /* 0x0370 */
+55 -43
arch/s390/include/asm/nmi.h
··· 11 11 #ifndef _ASM_S390_NMI_H 12 12 #define _ASM_S390_NMI_H 13 13 14 + #include <linux/const.h> 14 15 #include <linux/types.h> 15 16 16 - struct mci { 17 - __u32 sd : 1; /* 00 system damage */ 18 - __u32 pd : 1; /* 01 instruction-processing damage */ 19 - __u32 sr : 1; /* 02 system recovery */ 20 - __u32 : 1; /* 03 */ 21 - __u32 cd : 1; /* 04 timing-facility damage */ 22 - __u32 ed : 1; /* 05 external damage */ 23 - __u32 : 1; /* 06 */ 24 - __u32 dg : 1; /* 07 degradation */ 25 - __u32 w : 1; /* 08 warning pending */ 26 - __u32 cp : 1; /* 09 channel-report pending */ 27 - __u32 sp : 1; /* 10 service-processor damage */ 28 - __u32 ck : 1; /* 11 channel-subsystem damage */ 29 - __u32 : 2; /* 12-13 */ 30 - __u32 b : 1; /* 14 backed up */ 31 - __u32 : 1; /* 15 */ 32 - __u32 se : 1; /* 16 storage error uncorrected */ 33 - __u32 sc : 1; /* 17 storage error corrected */ 34 - __u32 ke : 1; /* 18 storage-key error uncorrected */ 35 - __u32 ds : 1; /* 19 storage degradation */ 36 - __u32 wp : 1; /* 20 psw mwp validity */ 37 - __u32 ms : 1; /* 21 psw mask and key validity */ 38 - __u32 pm : 1; /* 22 psw program mask and cc validity */ 39 - __u32 ia : 1; /* 23 psw instruction address validity */ 40 - __u32 fa : 1; /* 24 failing storage address validity */ 41 - __u32 vr : 1; /* 25 vector register validity */ 42 - __u32 ec : 1; /* 26 external damage code validity */ 43 - __u32 fp : 1; /* 27 floating point register validity */ 44 - __u32 gr : 1; /* 28 general register validity */ 45 - __u32 cr : 1; /* 29 control register validity */ 46 - __u32 : 1; /* 30 */ 47 - __u32 st : 1; /* 31 storage logical validity */ 48 - __u32 ie : 1; /* 32 indirect storage error */ 49 - __u32 ar : 1; /* 33 access register validity */ 50 - __u32 da : 1; /* 34 delayed access exception */ 51 - __u32 : 7; /* 35-41 */ 52 - __u32 pr : 1; /* 42 tod programmable register validity */ 53 - __u32 fc : 1; /* 43 fp control register validity */ 54 - __u32 ap : 1; /* 44 ancillary report */ 55 - __u32 : 1; /* 45 
*/ 56 - __u32 ct : 1; /* 46 cpu timer validity */ 57 - __u32 cc : 1; /* 47 clock comparator validity */ 58 - __u32 : 16; /* 47-63 */ 17 + #define MCCK_CODE_SYSTEM_DAMAGE _BITUL(63) 18 + #define MCCK_CODE_CPU_TIMER_VALID _BITUL(63 - 46) 19 + #define MCCK_CODE_PSW_MWP_VALID _BITUL(63 - 20) 20 + #define MCCK_CODE_PSW_IA_VALID _BITUL(63 - 23) 21 + 22 + #ifndef __ASSEMBLY__ 23 + 24 + union mci { 25 + unsigned long val; 26 + struct { 27 + u64 sd : 1; /* 00 system damage */ 28 + u64 pd : 1; /* 01 instruction-processing damage */ 29 + u64 sr : 1; /* 02 system recovery */ 30 + u64 : 1; /* 03 */ 31 + u64 cd : 1; /* 04 timing-facility damage */ 32 + u64 ed : 1; /* 05 external damage */ 33 + u64 : 1; /* 06 */ 34 + u64 dg : 1; /* 07 degradation */ 35 + u64 w : 1; /* 08 warning pending */ 36 + u64 cp : 1; /* 09 channel-report pending */ 37 + u64 sp : 1; /* 10 service-processor damage */ 38 + u64 ck : 1; /* 11 channel-subsystem damage */ 39 + u64 : 2; /* 12-13 */ 40 + u64 b : 1; /* 14 backed up */ 41 + u64 : 1; /* 15 */ 42 + u64 se : 1; /* 16 storage error uncorrected */ 43 + u64 sc : 1; /* 17 storage error corrected */ 44 + u64 ke : 1; /* 18 storage-key error uncorrected */ 45 + u64 ds : 1; /* 19 storage degradation */ 46 + u64 wp : 1; /* 20 psw mwp validity */ 47 + u64 ms : 1; /* 21 psw mask and key validity */ 48 + u64 pm : 1; /* 22 psw program mask and cc validity */ 49 + u64 ia : 1; /* 23 psw instruction address validity */ 50 + u64 fa : 1; /* 24 failing storage address validity */ 51 + u64 vr : 1; /* 25 vector register validity */ 52 + u64 ec : 1; /* 26 external damage code validity */ 53 + u64 fp : 1; /* 27 floating point register validity */ 54 + u64 gr : 1; /* 28 general register validity */ 55 + u64 cr : 1; /* 29 control register validity */ 56 + u64 : 1; /* 30 */ 57 + u64 st : 1; /* 31 storage logical validity */ 58 + u64 ie : 1; /* 32 indirect storage error */ 59 + u64 ar : 1; /* 33 access register validity */ 60 + u64 da : 1; /* 34 delayed access exception */ 61 + 
u64 : 7; /* 35-41 */ 62 + u64 pr : 1; /* 42 tod programmable register validity */ 63 + u64 fc : 1; /* 43 fp control register validity */ 64 + u64 ap : 1; /* 44 ancillary report */ 65 + u64 : 1; /* 45 */ 66 + u64 ct : 1; /* 46 cpu timer validity */ 67 + u64 cc : 1; /* 47 clock comparator validity */ 68 + u64 : 16; /* 47-63 */ 69 + }; 59 70 }; 60 71 61 72 struct pt_regs; ··· 74 63 extern void s390_handle_mcck(void); 75 64 extern void s390_do_machine_check(struct pt_regs *regs); 76 65 66 + #endif /* __ASSEMBLY__ */ 77 67 #endif /* _ASM_S390_NMI_H */
+60 -6
arch/s390/include/asm/pgtable.h
··· 193 193 #define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */ 194 194 #define __HAVE_ARCH_PTE_SPECIAL 195 195 196 + #ifdef CONFIG_MEM_SOFT_DIRTY 197 + #define _PAGE_SOFT_DIRTY 0x002 /* SW pte soft dirty bit */ 198 + #else 199 + #define _PAGE_SOFT_DIRTY 0x000 200 + #endif 201 + 196 202 /* Set of bits not changed in pte_modify */ 197 203 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \ 198 - _PAGE_YOUNG) 204 + _PAGE_YOUNG | _PAGE_SOFT_DIRTY) 199 205 200 206 /* 201 207 * handle_pte_fault uses pte_present and pte_none to find out the pte type ··· 290 284 #define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */ 291 285 #define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */ 292 286 #define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */ 287 + 288 + #ifdef CONFIG_MEM_SOFT_DIRTY 289 + #define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */ 290 + #else 291 + #define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */ 292 + #endif 293 293 294 294 /* 295 295 * Segment table entry encoding (R = read-only, I = invalid, y = young bit): ··· 601 589 } 602 590 #endif 603 591 592 + static inline int pte_soft_dirty(pte_t pte) 593 + { 594 + return pte_val(pte) & _PAGE_SOFT_DIRTY; 595 + } 596 + #define pte_swp_soft_dirty pte_soft_dirty 597 + 598 + static inline pte_t pte_mksoft_dirty(pte_t pte) 599 + { 600 + pte_val(pte) |= _PAGE_SOFT_DIRTY; 601 + return pte; 602 + } 603 + #define pte_swp_mksoft_dirty pte_mksoft_dirty 604 + 605 + static inline pte_t pte_clear_soft_dirty(pte_t pte) 606 + { 607 + pte_val(pte) &= ~_PAGE_SOFT_DIRTY; 608 + return pte; 609 + } 610 + #define pte_swp_clear_soft_dirty pte_clear_soft_dirty 611 + 612 + static inline int pmd_soft_dirty(pmd_t pmd) 613 + { 614 + return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY; 615 + } 616 + 617 + static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) 618 + { 619 + pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY; 620 + return pmd; 621 + } 622 + 623 + static 
inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) 624 + { 625 + pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY; 626 + return pmd; 627 + } 628 + 604 629 static inline pgste_t pgste_get_lock(pte_t *ptep) 605 630 { 606 631 unsigned long new = 0; ··· 938 889 939 890 static inline pte_t pte_mkdirty(pte_t pte) 940 891 { 941 - pte_val(pte) |= _PAGE_DIRTY; 892 + pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY; 942 893 if (pte_val(pte) & _PAGE_WRITE) 943 894 pte_val(pte) &= ~_PAGE_PROTECT; 944 895 return pte; ··· 1267 1218 pte_t entry, int dirty) 1268 1219 { 1269 1220 pgste_t pgste; 1221 + pte_t oldpte; 1270 1222 1271 - if (pte_same(*ptep, entry)) 1223 + oldpte = *ptep; 1224 + if (pte_same(oldpte, entry)) 1272 1225 return 0; 1273 1226 if (mm_has_pgste(vma->vm_mm)) { 1274 1227 pgste = pgste_get_lock(ptep); ··· 1280 1229 ptep_flush_direct(vma->vm_mm, address, ptep); 1281 1230 1282 1231 if (mm_has_pgste(vma->vm_mm)) { 1283 - pgste_set_key(ptep, pgste, entry, vma->vm_mm); 1232 + if (pte_val(oldpte) & _PAGE_INVALID) 1233 + pgste_set_key(ptep, pgste, entry, vma->vm_mm); 1284 1234 pgste = pgste_set_pte(ptep, pgste, entry); 1285 1235 pgste_set_unlock(ptep, pgste); 1286 1236 } else ··· 1392 1340 static inline pmd_t pmd_mkdirty(pmd_t pmd) 1393 1341 { 1394 1342 if (pmd_large(pmd)) { 1395 - pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY; 1343 + pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | 1344 + _SEGMENT_ENTRY_SOFT_DIRTY; 1396 1345 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) 1397 1346 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; 1398 1347 } ··· 1424 1371 if (pmd_large(pmd)) { 1425 1372 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE | 1426 1373 _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG | 1427 - _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT; 1374 + _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT | 1375 + _SEGMENT_ENTRY_SOFT_DIRTY; 1428 1376 pmd_val(pmd) |= massage_pgprot_pmd(newprot); 1429 1377 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) 1430 1378 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+31 -66
arch/s390/include/asm/processor.h
··· 11 11 #ifndef __ASM_S390_PROCESSOR_H 12 12 #define __ASM_S390_PROCESSOR_H 13 13 14 + #include <linux/const.h> 15 + 14 16 #define CIF_MCCK_PENDING 0 /* machine check handling is pending */ 15 17 #define CIF_ASCE 1 /* user asce needs fixup / uaccess */ 16 18 #define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */ 17 - #define CIF_FPU 3 /* restore vector registers */ 19 + #define CIF_FPU 3 /* restore FPU registers */ 20 + #define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */ 18 21 19 - #define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING) 20 - #define _CIF_ASCE (1<<CIF_ASCE) 21 - #define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY) 22 - #define _CIF_FPU (1<<CIF_FPU) 22 + #define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING) 23 + #define _CIF_ASCE _BITUL(CIF_ASCE) 24 + #define _CIF_NOHZ_DELAY _BITUL(CIF_NOHZ_DELAY) 25 + #define _CIF_FPU _BITUL(CIF_FPU) 26 + #define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ) 23 27 24 28 #ifndef __ASSEMBLY__ 25 29 ··· 34 30 #include <asm/ptrace.h> 35 31 #include <asm/setup.h> 36 32 #include <asm/runtime_instr.h> 37 - #include <asm/fpu-internal.h> 33 + #include <asm/fpu/types.h> 34 + #include <asm/fpu/internal.h> 38 35 39 36 static inline void set_cpu_flag(int flag) 40 37 { 41 - S390_lowcore.cpu_flags |= (1U << flag); 38 + S390_lowcore.cpu_flags |= (1UL << flag); 42 39 } 43 40 44 41 static inline void clear_cpu_flag(int flag) 45 42 { 46 - S390_lowcore.cpu_flags &= ~(1U << flag); 43 + S390_lowcore.cpu_flags &= ~(1UL << flag); 47 44 } 48 45 49 46 static inline int test_cpu_flag(int flag) 50 47 { 51 - return !!(S390_lowcore.cpu_flags & (1U << flag)); 48 + return !!(S390_lowcore.cpu_flags & (1UL << flag)); 52 49 } 53 50 54 51 #define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY) ··· 107 102 struct list_head list; 108 103 /* cpu runtime instrumentation */ 109 104 struct runtime_instr_cb *ri_cb; 110 - int ri_signum; 111 105 unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ 112 106 }; 113 107 ··· 143 139 144 140 #define 
ARCH_MIN_TASKALIGN 8 145 141 142 + extern __vector128 init_task_fpu_regs[__NUM_VXRS]; 146 143 #define INIT_THREAD { \ 147 144 .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \ 145 + .fpu.regs = (void *)&init_task_fpu_regs, \ 148 146 } 149 147 150 148 /* ··· 223 217 * Set PSW mask to specified value, while leaving the 224 218 * PSW addr pointing to the next instruction. 225 219 */ 226 - static inline void __load_psw_mask (unsigned long mask) 220 + static inline void __load_psw_mask(unsigned long mask) 227 221 { 228 222 unsigned long addr; 229 223 psw_t psw; ··· 249 243 return (((unsigned long) reg1) << 32) | ((unsigned long) reg2); 250 244 } 251 245 246 + static inline void local_mcck_enable(void) 247 + { 248 + __load_psw_mask(__extract_psw() | PSW_MASK_MCHECK); 249 + } 250 + 251 + static inline void local_mcck_disable(void) 252 + { 253 + __load_psw_mask(__extract_psw() & ~PSW_MASK_MCHECK); 254 + } 255 + 252 256 /* 253 257 * Rewind PSW instruction address by specified number of bytes. 
254 258 */ ··· 282 266 */ 283 267 static inline void __noreturn disabled_wait(unsigned long code) 284 268 { 285 - unsigned long ctl_buf; 286 - psw_t dw_psw; 269 + psw_t psw; 287 270 288 - dw_psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA; 289 - dw_psw.addr = code; 290 - /* 291 - * Store status and then load disabled wait psw, 292 - * the processor is dead afterwards 293 - */ 294 - asm volatile( 295 - " stctg 0,0,0(%2)\n" 296 - " ni 4(%2),0xef\n" /* switch off protection */ 297 - " lctlg 0,0,0(%2)\n" 298 - " lghi 1,0x1000\n" 299 - " stpt 0x328(1)\n" /* store timer */ 300 - " stckc 0x330(1)\n" /* store clock comparator */ 301 - " stpx 0x318(1)\n" /* store prefix register */ 302 - " stam 0,15,0x340(1)\n"/* store access registers */ 303 - " stfpc 0x31c(1)\n" /* store fpu control */ 304 - " std 0,0x200(1)\n" /* store f0 */ 305 - " std 1,0x208(1)\n" /* store f1 */ 306 - " std 2,0x210(1)\n" /* store f2 */ 307 - " std 3,0x218(1)\n" /* store f3 */ 308 - " std 4,0x220(1)\n" /* store f4 */ 309 - " std 5,0x228(1)\n" /* store f5 */ 310 - " std 6,0x230(1)\n" /* store f6 */ 311 - " std 7,0x238(1)\n" /* store f7 */ 312 - " std 8,0x240(1)\n" /* store f8 */ 313 - " std 9,0x248(1)\n" /* store f9 */ 314 - " std 10,0x250(1)\n" /* store f10 */ 315 - " std 11,0x258(1)\n" /* store f11 */ 316 - " std 12,0x260(1)\n" /* store f12 */ 317 - " std 13,0x268(1)\n" /* store f13 */ 318 - " std 14,0x270(1)\n" /* store f14 */ 319 - " std 15,0x278(1)\n" /* store f15 */ 320 - " stmg 0,15,0x280(1)\n"/* store general registers */ 321 - " stctg 0,15,0x380(1)\n"/* store control registers */ 322 - " oi 0x384(1),0x10\n"/* fake protection bit */ 323 - " lpswe 0(%1)" 324 - : "=m" (ctl_buf) 325 - : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1"); 271 + psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA; 272 + psw.addr = code; 273 + __load_psw(psw); 326 274 while (1); 327 275 } 328 - 329 - /* 330 - * Use to set psw mask except for the first byte which 331 - * 
won't be changed by this function. 332 - */ 333 - static inline void 334 - __set_psw_mask(unsigned long mask) 335 - { 336 - __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8))); 337 - } 338 - 339 - #define local_mcck_enable() \ 340 - __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK) 341 - #define local_mcck_disable() \ 342 - __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT) 343 276 344 277 /* 345 278 * Basic Machine Check/Program Check Handler.
+6 -5
arch/s390/include/asm/ptrace.h
··· 6 6 #ifndef _S390_PTRACE_H 7 7 #define _S390_PTRACE_H 8 8 9 + #include <linux/const.h> 9 10 #include <uapi/asm/ptrace.h> 10 11 11 12 #define PIF_SYSCALL 0 /* inside a system call */ 12 13 #define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */ 13 14 14 - #define _PIF_SYSCALL (1<<PIF_SYSCALL) 15 - #define _PIF_PER_TRAP (1<<PIF_PER_TRAP) 15 + #define _PIF_SYSCALL _BITUL(PIF_SYSCALL) 16 + #define _PIF_PER_TRAP _BITUL(PIF_PER_TRAP) 16 17 17 18 #ifndef __ASSEMBLY__ 18 19 ··· 129 128 130 129 static inline void set_pt_regs_flag(struct pt_regs *regs, int flag) 131 130 { 132 - regs->flags |= (1U << flag); 131 + regs->flags |= (1UL << flag); 133 132 } 134 133 135 134 static inline void clear_pt_regs_flag(struct pt_regs *regs, int flag) 136 135 { 137 - regs->flags &= ~(1U << flag); 136 + regs->flags &= ~(1UL << flag); 138 137 } 139 138 140 139 static inline int test_pt_regs_flag(struct pt_regs *regs, int flag) 141 140 { 142 - return !!(regs->flags & (1U << flag)); 141 + return !!(regs->flags & (1UL << flag)); 143 142 } 144 143 145 144 /*
+27 -23
arch/s390/include/asm/setup.h
··· 5 5 #ifndef _ASM_S390_SETUP_H 6 6 #define _ASM_S390_SETUP_H 7 7 8 + #include <linux/const.h> 8 9 #include <uapi/asm/setup.h> 9 10 10 11 11 12 #define PARMAREA 0x10400 13 + 14 + /* 15 + * Machine features detected in head.S 16 + */ 17 + 18 + #define MACHINE_FLAG_VM _BITUL(0) 19 + #define MACHINE_FLAG_IEEE _BITUL(1) 20 + #define MACHINE_FLAG_CSP _BITUL(2) 21 + #define MACHINE_FLAG_MVPG _BITUL(3) 22 + #define MACHINE_FLAG_DIAG44 _BITUL(4) 23 + #define MACHINE_FLAG_IDTE _BITUL(5) 24 + #define MACHINE_FLAG_DIAG9C _BITUL(6) 25 + #define MACHINE_FLAG_KVM _BITUL(8) 26 + #define MACHINE_FLAG_ESOP _BITUL(9) 27 + #define MACHINE_FLAG_EDAT1 _BITUL(10) 28 + #define MACHINE_FLAG_EDAT2 _BITUL(11) 29 + #define MACHINE_FLAG_LPAR _BITUL(12) 30 + #define MACHINE_FLAG_LPP _BITUL(13) 31 + #define MACHINE_FLAG_TOPOLOGY _BITUL(14) 32 + #define MACHINE_FLAG_TE _BITUL(15) 33 + #define MACHINE_FLAG_TLB_LC _BITUL(17) 34 + #define MACHINE_FLAG_VX _BITUL(18) 35 + #define MACHINE_FLAG_CAD _BITUL(19) 36 + 37 + #define LPP_MAGIC _BITUL(31) 38 + #define LPP_PFAULT_PID_MASK _AC(0xffffffff, UL) 12 39 13 40 #ifndef __ASSEMBLY__ 14 41 ··· 54 27 extern unsigned long max_physmem_end; 55 28 56 29 extern void detect_memory_memblock(void); 57 - 58 - /* 59 - * Machine features detected in head.S 60 - */ 61 - 62 - #define MACHINE_FLAG_VM (1UL << 0) 63 - #define MACHINE_FLAG_IEEE (1UL << 1) 64 - #define MACHINE_FLAG_CSP (1UL << 2) 65 - #define MACHINE_FLAG_MVPG (1UL << 3) 66 - #define MACHINE_FLAG_DIAG44 (1UL << 4) 67 - #define MACHINE_FLAG_IDTE (1UL << 5) 68 - #define MACHINE_FLAG_DIAG9C (1UL << 6) 69 - #define MACHINE_FLAG_KVM (1UL << 8) 70 - #define MACHINE_FLAG_ESOP (1UL << 9) 71 - #define MACHINE_FLAG_EDAT1 (1UL << 10) 72 - #define MACHINE_FLAG_EDAT2 (1UL << 11) 73 - #define MACHINE_FLAG_LPAR (1UL << 12) 74 - #define MACHINE_FLAG_LPP (1UL << 13) 75 - #define MACHINE_FLAG_TOPOLOGY (1UL << 14) 76 - #define MACHINE_FLAG_TE (1UL << 15) 77 - #define MACHINE_FLAG_TLB_LC (1UL << 17) 78 - #define 
MACHINE_FLAG_VX (1UL << 18) 79 - #define MACHINE_FLAG_CAD (1UL << 19) 80 30 81 31 #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) 82 32 #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
-3
arch/s390/include/asm/spinlock.h
··· 87 87 { 88 88 typecheck(unsigned int, lp->lock); 89 89 asm volatile( 90 - __ASM_BARRIER 91 90 "st %1,%0\n" 92 91 : "+Q" (lp->lock) 93 92 : "d" (0) ··· 168 169 \ 169 170 typecheck(unsigned int *, ptr); \ 170 171 asm volatile( \ 171 - "bcr 14,0\n" \ 172 172 op_string " %0,%2,%1\n" \ 173 173 : "=d" (old_val), "+Q" (*ptr) \ 174 174 : "d" (op_val) \ ··· 241 243 242 244 rw->owner = 0; 243 245 asm volatile( 244 - __ASM_BARRIER 245 246 "st %1,%0\n" 246 247 : "+Q" (rw->lock) 247 248 : "d" (0)
+1 -1
arch/s390/include/asm/switch_to.h
··· 8 8 #define __ASM_SWITCH_TO_H 9 9 10 10 #include <linux/thread_info.h> 11 - #include <asm/fpu-internal.h> 11 + #include <asm/fpu/api.h> 12 12 #include <asm/ptrace.h> 13 13 14 14 extern struct task_struct *__switch_to(void *, void *);
+12 -10
arch/s390/include/asm/thread_info.h
··· 7 7 #ifndef _ASM_THREAD_INFO_H 8 8 #define _ASM_THREAD_INFO_H 9 9 10 + #include <linux/const.h> 11 + 10 12 /* 11 13 * Size of kernel stack for each process 12 14 */ ··· 85 83 #define TIF_BLOCK_STEP 20 /* This task is block stepped */ 86 84 #define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */ 87 85 88 - #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 89 - #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 90 - #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 91 - #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 92 - #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 93 - #define _TIF_SECCOMP (1<<TIF_SECCOMP) 94 - #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 95 - #define _TIF_UPROBE (1<<TIF_UPROBE) 96 - #define _TIF_31BIT (1<<TIF_31BIT) 97 - #define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) 86 + #define _TIF_NOTIFY_RESUME _BITUL(TIF_NOTIFY_RESUME) 87 + #define _TIF_SIGPENDING _BITUL(TIF_SIGPENDING) 88 + #define _TIF_NEED_RESCHED _BITUL(TIF_NEED_RESCHED) 89 + #define _TIF_SYSCALL_TRACE _BITUL(TIF_SYSCALL_TRACE) 90 + #define _TIF_SYSCALL_AUDIT _BITUL(TIF_SYSCALL_AUDIT) 91 + #define _TIF_SECCOMP _BITUL(TIF_SECCOMP) 92 + #define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT) 93 + #define _TIF_UPROBE _BITUL(TIF_UPROBE) 94 + #define _TIF_31BIT _BITUL(TIF_31BIT) 95 + #define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP) 98 96 99 97 #define is_32bit_task() (test_thread_flag(TIF_31BIT)) 100 98
+43
arch/s390/include/asm/trace/diag.h
··· 1 + /* 2 + * Tracepoint header for s390 diagnose calls 3 + * 4 + * Copyright IBM Corp. 2015 5 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 6 + */ 7 + 8 + #undef TRACE_SYSTEM 9 + #define TRACE_SYSTEM s390 10 + 11 + #if !defined(_TRACE_S390_DIAG_H) || defined(TRACE_HEADER_MULTI_READ) 12 + #define _TRACE_S390_DIAG_H 13 + 14 + #include <linux/tracepoint.h> 15 + 16 + #undef TRACE_INCLUDE_PATH 17 + #undef TRACE_INCLUDE_FILE 18 + 19 + #define TRACE_INCLUDE_PATH asm/trace 20 + #define TRACE_INCLUDE_FILE diag 21 + 22 + TRACE_EVENT(diagnose, 23 + TP_PROTO(unsigned short nr), 24 + TP_ARGS(nr), 25 + TP_STRUCT__entry( 26 + __field(unsigned short, nr) 27 + ), 28 + TP_fast_assign( 29 + __entry->nr = nr; 30 + ), 31 + TP_printk("nr=0x%x", __entry->nr) 32 + ); 33 + 34 + #ifdef CONFIG_TRACEPOINTS 35 + void trace_diagnose_norecursion(int diag_nr); 36 + #else 37 + static inline void trace_diagnose_norecursion(int diag_nr) { } 38 + #endif 39 + 40 + #endif /* _TRACE_S390_DIAG_H */ 41 + 42 + /* This part must be outside protection */ 43 + #include <trace/define_trace.h>
+2
arch/s390/kernel/Makefile
··· 66 66 obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o 67 67 obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o 68 68 69 + obj-$(CONFIG_TRACEPOINTS) += trace.o 70 + 69 71 # vdso 70 72 obj-y += vdso64/ 71 73 obj-$(CONFIG_COMPAT) += vdso32/
+149 -141
arch/s390/kernel/asm-offsets.c
··· 23 23 24 24 int main(void) 25 25 { 26 - DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack)); 27 - DEFINE(__TASK_thread, offsetof(struct task_struct, thread)); 28 - DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); 26 + /* task struct offsets */ 27 + OFFSET(__TASK_thread_info, task_struct, stack); 28 + OFFSET(__TASK_thread, task_struct, thread); 29 + OFFSET(__TASK_pid, task_struct, pid); 29 30 BLANK(); 30 - DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp)); 31 - DEFINE(__THREAD_FPU_fpc, offsetof(struct thread_struct, fpu.fpc)); 32 - DEFINE(__THREAD_FPU_flags, offsetof(struct thread_struct, fpu.flags)); 33 - DEFINE(__THREAD_FPU_regs, offsetof(struct thread_struct, fpu.regs)); 34 - DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause)); 35 - DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address)); 36 - DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid)); 37 - DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb)); 31 + /* thread struct offsets */ 32 + OFFSET(__THREAD_ksp, thread_struct, ksp); 33 + OFFSET(__THREAD_FPU_fpc, thread_struct, fpu.fpc); 34 + OFFSET(__THREAD_FPU_regs, thread_struct, fpu.regs); 35 + OFFSET(__THREAD_per_cause, thread_struct, per_event.cause); 36 + OFFSET(__THREAD_per_address, thread_struct, per_event.address); 37 + OFFSET(__THREAD_per_paid, thread_struct, per_event.paid); 38 + OFFSET(__THREAD_trap_tdb, thread_struct, trap_tdb); 38 39 BLANK(); 39 - DEFINE(__TI_task, offsetof(struct thread_info, task)); 40 - DEFINE(__TI_flags, offsetof(struct thread_info, flags)); 41 - DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table)); 42 - DEFINE(__TI_cpu, offsetof(struct thread_info, cpu)); 43 - DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count)); 44 - DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer)); 45 - DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer)); 46 - 
DEFINE(__TI_last_break, offsetof(struct thread_info, last_break)); 40 + /* thread info offsets */ 41 + OFFSET(__TI_task, thread_info, task); 42 + OFFSET(__TI_flags, thread_info, flags); 43 + OFFSET(__TI_sysc_table, thread_info, sys_call_table); 44 + OFFSET(__TI_cpu, thread_info, cpu); 45 + OFFSET(__TI_precount, thread_info, preempt_count); 46 + OFFSET(__TI_user_timer, thread_info, user_timer); 47 + OFFSET(__TI_system_timer, thread_info, system_timer); 48 + OFFSET(__TI_last_break, thread_info, last_break); 47 49 BLANK(); 48 - DEFINE(__PT_ARGS, offsetof(struct pt_regs, args)); 49 - DEFINE(__PT_PSW, offsetof(struct pt_regs, psw)); 50 - DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs)); 51 - DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2)); 52 - DEFINE(__PT_INT_CODE, offsetof(struct pt_regs, int_code)); 53 - DEFINE(__PT_INT_PARM, offsetof(struct pt_regs, int_parm)); 54 - DEFINE(__PT_INT_PARM_LONG, offsetof(struct pt_regs, int_parm_long)); 55 - DEFINE(__PT_FLAGS, offsetof(struct pt_regs, flags)); 50 + /* pt_regs offsets */ 51 + OFFSET(__PT_ARGS, pt_regs, args); 52 + OFFSET(__PT_PSW, pt_regs, psw); 53 + OFFSET(__PT_GPRS, pt_regs, gprs); 54 + OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2); 55 + OFFSET(__PT_INT_CODE, pt_regs, int_code); 56 + OFFSET(__PT_INT_PARM, pt_regs, int_parm); 57 + OFFSET(__PT_INT_PARM_LONG, pt_regs, int_parm_long); 58 + OFFSET(__PT_FLAGS, pt_regs, flags); 56 59 DEFINE(__PT_SIZE, sizeof(struct pt_regs)); 57 60 BLANK(); 58 - DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain)); 59 - DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs)); 60 - DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1)); 61 + /* stack_frame offsets */ 62 + OFFSET(__SF_BACKCHAIN, stack_frame, back_chain); 63 + OFFSET(__SF_GPRS, stack_frame, gprs); 64 + OFFSET(__SF_EMPTY, stack_frame, empty1); 61 65 BLANK(); 62 66 /* timeval/timezone offsets for use by vdso */ 63 - DEFINE(__VDSO_UPD_COUNT, offsetof(struct vdso_data, tb_update_count)); 64 - 
DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp)); 65 - DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec)); 66 - DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec)); 67 - DEFINE(__VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec)); 68 - DEFINE(__VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec)); 69 - DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec)); 70 - DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); 71 - DEFINE(__VDSO_WTOM_CRS_SEC, offsetof(struct vdso_data, wtom_coarse_sec)); 72 - DEFINE(__VDSO_WTOM_CRS_NSEC, offsetof(struct vdso_data, wtom_coarse_nsec)); 73 - DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); 74 - DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); 75 - DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult)); 76 - DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift)); 77 - DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base)); 78 - DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time)); 67 + OFFSET(__VDSO_UPD_COUNT, vdso_data, tb_update_count); 68 + OFFSET(__VDSO_XTIME_STAMP, vdso_data, xtime_tod_stamp); 69 + OFFSET(__VDSO_XTIME_SEC, vdso_data, xtime_clock_sec); 70 + OFFSET(__VDSO_XTIME_NSEC, vdso_data, xtime_clock_nsec); 71 + OFFSET(__VDSO_XTIME_CRS_SEC, vdso_data, xtime_coarse_sec); 72 + OFFSET(__VDSO_XTIME_CRS_NSEC, vdso_data, xtime_coarse_nsec); 73 + OFFSET(__VDSO_WTOM_SEC, vdso_data, wtom_clock_sec); 74 + OFFSET(__VDSO_WTOM_NSEC, vdso_data, wtom_clock_nsec); 75 + OFFSET(__VDSO_WTOM_CRS_SEC, vdso_data, wtom_coarse_sec); 76 + OFFSET(__VDSO_WTOM_CRS_NSEC, vdso_data, wtom_coarse_nsec); 77 + OFFSET(__VDSO_TIMEZONE, vdso_data, tz_minuteswest); 78 + OFFSET(__VDSO_ECTG_OK, vdso_data, ectg_available); 79 + OFFSET(__VDSO_TK_MULT, vdso_data, tk_mult); 80 + OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift); 81 + 
OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base); 82 + OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time); 83 + BLANK(); 79 84 /* constants used by the vdso */ 80 85 DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME); 81 86 DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC); ··· 91 86 DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC); 92 87 BLANK(); 93 88 /* idle data offsets */ 94 - DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter)); 95 - DEFINE(__CLOCK_IDLE_EXIT, offsetof(struct s390_idle_data, clock_idle_exit)); 96 - DEFINE(__TIMER_IDLE_ENTER, offsetof(struct s390_idle_data, timer_idle_enter)); 97 - DEFINE(__TIMER_IDLE_EXIT, offsetof(struct s390_idle_data, timer_idle_exit)); 98 - /* lowcore offsets */ 99 - DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params)); 100 - DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr)); 101 - DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code)); 102 - DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc)); 103 - DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code)); 104 - DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc)); 105 - DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code)); 106 - DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code)); 107 - DEFINE(__LC_MON_CLASS_NR, offsetof(struct _lowcore, mon_class_num)); 108 - DEFINE(__LC_PER_CODE, offsetof(struct _lowcore, per_code)); 109 - DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_atmid)); 110 - DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address)); 111 - DEFINE(__LC_EXC_ACCESS_ID, offsetof(struct _lowcore, exc_access_id)); 112 - DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id)); 113 - DEFINE(__LC_OP_ACCESS_ID, offsetof(struct _lowcore, op_access_id)); 114 - DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_mode_id)); 115 - DEFINE(__LC_MON_CODE, offsetof(struct _lowcore, monitor_code)); 116 - DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct 
_lowcore, subchannel_id)); 117 - DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr)); 118 - DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm)); 119 - DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word)); 120 - DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list)); 121 - DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code)); 122 - DEFINE(__LC_MCCK_EXT_DAM_CODE, offsetof(struct _lowcore, external_damage_code)); 123 - DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw)); 124 - DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw)); 125 - DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw)); 126 - DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw)); 127 - DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw)); 128 - DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw)); 129 - DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw)); 130 - DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw)); 131 - DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw)); 132 - DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw)); 133 - DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw)); 134 - DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw)); 89 + OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter); 90 + OFFSET(__CLOCK_IDLE_EXIT, s390_idle_data, clock_idle_exit); 91 + OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter); 92 + OFFSET(__TIMER_IDLE_EXIT, s390_idle_data, timer_idle_exit); 135 93 BLANK(); 136 - DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync)); 137 - DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async)); 138 - DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart)); 139 - DEFINE(__LC_CPU_FLAGS, offsetof(struct _lowcore, cpu_flags)); 140 - DEFINE(__LC_RETURN_PSW, 
offsetof(struct _lowcore, return_psw)); 141 - DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw)); 142 - DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer)); 143 - DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer)); 144 - DEFINE(__LC_MCCK_ENTER_TIMER, offsetof(struct _lowcore, mcck_enter_timer)); 145 - DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer)); 146 - DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer)); 147 - DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer)); 148 - DEFINE(__LC_STEAL_TIMER, offsetof(struct _lowcore, steal_timer)); 149 - DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer)); 150 - DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock)); 151 - DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task)); 152 - DEFINE(__LC_CURRENT_PID, offsetof(struct _lowcore, current_pid)); 153 - DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info)); 154 - DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack)); 155 - DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack)); 156 - DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack)); 157 - DEFINE(__LC_RESTART_STACK, offsetof(struct _lowcore, restart_stack)); 158 - DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn)); 159 - DEFINE(__LC_RESTART_DATA, offsetof(struct _lowcore, restart_data)); 160 - DEFINE(__LC_RESTART_SOURCE, offsetof(struct _lowcore, restart_source)); 161 - DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce)); 162 - DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce)); 163 - DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); 164 - DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); 165 - DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); 166 - DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib)); 94 + /* hardware defined 
lowcore locations 0x000 - 0x1ff */ 95 + OFFSET(__LC_EXT_PARAMS, _lowcore, ext_params); 96 + OFFSET(__LC_EXT_CPU_ADDR, _lowcore, ext_cpu_addr); 97 + OFFSET(__LC_EXT_INT_CODE, _lowcore, ext_int_code); 98 + OFFSET(__LC_SVC_ILC, _lowcore, svc_ilc); 99 + OFFSET(__LC_SVC_INT_CODE, _lowcore, svc_code); 100 + OFFSET(__LC_PGM_ILC, _lowcore, pgm_ilc); 101 + OFFSET(__LC_PGM_INT_CODE, _lowcore, pgm_code); 102 + OFFSET(__LC_DATA_EXC_CODE, _lowcore, data_exc_code); 103 + OFFSET(__LC_MON_CLASS_NR, _lowcore, mon_class_num); 104 + OFFSET(__LC_PER_CODE, _lowcore, per_code); 105 + OFFSET(__LC_PER_ATMID, _lowcore, per_atmid); 106 + OFFSET(__LC_PER_ADDRESS, _lowcore, per_address); 107 + OFFSET(__LC_EXC_ACCESS_ID, _lowcore, exc_access_id); 108 + OFFSET(__LC_PER_ACCESS_ID, _lowcore, per_access_id); 109 + OFFSET(__LC_OP_ACCESS_ID, _lowcore, op_access_id); 110 + OFFSET(__LC_AR_MODE_ID, _lowcore, ar_mode_id); 111 + OFFSET(__LC_TRANS_EXC_CODE, _lowcore, trans_exc_code); 112 + OFFSET(__LC_MON_CODE, _lowcore, monitor_code); 113 + OFFSET(__LC_SUBCHANNEL_ID, _lowcore, subchannel_id); 114 + OFFSET(__LC_SUBCHANNEL_NR, _lowcore, subchannel_nr); 115 + OFFSET(__LC_IO_INT_PARM, _lowcore, io_int_parm); 116 + OFFSET(__LC_IO_INT_WORD, _lowcore, io_int_word); 117 + OFFSET(__LC_STFL_FAC_LIST, _lowcore, stfl_fac_list); 118 + OFFSET(__LC_MCCK_CODE, _lowcore, mcck_interruption_code); 119 + OFFSET(__LC_MCCK_FAIL_STOR_ADDR, _lowcore, failing_storage_address); 120 + OFFSET(__LC_LAST_BREAK, _lowcore, breaking_event_addr); 121 + OFFSET(__LC_RST_OLD_PSW, _lowcore, restart_old_psw); 122 + OFFSET(__LC_EXT_OLD_PSW, _lowcore, external_old_psw); 123 + OFFSET(__LC_SVC_OLD_PSW, _lowcore, svc_old_psw); 124 + OFFSET(__LC_PGM_OLD_PSW, _lowcore, program_old_psw); 125 + OFFSET(__LC_MCK_OLD_PSW, _lowcore, mcck_old_psw); 126 + OFFSET(__LC_IO_OLD_PSW, _lowcore, io_old_psw); 127 + OFFSET(__LC_RST_NEW_PSW, _lowcore, restart_psw); 128 + OFFSET(__LC_EXT_NEW_PSW, _lowcore, external_new_psw); 129 + OFFSET(__LC_SVC_NEW_PSW, _lowcore, 
svc_new_psw); 130 + OFFSET(__LC_PGM_NEW_PSW, _lowcore, program_new_psw); 131 + OFFSET(__LC_MCK_NEW_PSW, _lowcore, mcck_new_psw); 132 + OFFSET(__LC_IO_NEW_PSW, _lowcore, io_new_psw); 133 + /* software defined lowcore locations 0x200 - 0xdff*/ 134 + OFFSET(__LC_SAVE_AREA_SYNC, _lowcore, save_area_sync); 135 + OFFSET(__LC_SAVE_AREA_ASYNC, _lowcore, save_area_async); 136 + OFFSET(__LC_SAVE_AREA_RESTART, _lowcore, save_area_restart); 137 + OFFSET(__LC_CPU_FLAGS, _lowcore, cpu_flags); 138 + OFFSET(__LC_RETURN_PSW, _lowcore, return_psw); 139 + OFFSET(__LC_RETURN_MCCK_PSW, _lowcore, return_mcck_psw); 140 + OFFSET(__LC_SYNC_ENTER_TIMER, _lowcore, sync_enter_timer); 141 + OFFSET(__LC_ASYNC_ENTER_TIMER, _lowcore, async_enter_timer); 142 + OFFSET(__LC_MCCK_ENTER_TIMER, _lowcore, mcck_enter_timer); 143 + OFFSET(__LC_EXIT_TIMER, _lowcore, exit_timer); 144 + OFFSET(__LC_USER_TIMER, _lowcore, user_timer); 145 + OFFSET(__LC_SYSTEM_TIMER, _lowcore, system_timer); 146 + OFFSET(__LC_STEAL_TIMER, _lowcore, steal_timer); 147 + OFFSET(__LC_LAST_UPDATE_TIMER, _lowcore, last_update_timer); 148 + OFFSET(__LC_LAST_UPDATE_CLOCK, _lowcore, last_update_clock); 149 + OFFSET(__LC_INT_CLOCK, _lowcore, int_clock); 150 + OFFSET(__LC_MCCK_CLOCK, _lowcore, mcck_clock); 151 + OFFSET(__LC_CURRENT, _lowcore, current_task); 152 + OFFSET(__LC_THREAD_INFO, _lowcore, thread_info); 153 + OFFSET(__LC_KERNEL_STACK, _lowcore, kernel_stack); 154 + OFFSET(__LC_ASYNC_STACK, _lowcore, async_stack); 155 + OFFSET(__LC_PANIC_STACK, _lowcore, panic_stack); 156 + OFFSET(__LC_RESTART_STACK, _lowcore, restart_stack); 157 + OFFSET(__LC_RESTART_FN, _lowcore, restart_fn); 158 + OFFSET(__LC_RESTART_DATA, _lowcore, restart_data); 159 + OFFSET(__LC_RESTART_SOURCE, _lowcore, restart_source); 160 + OFFSET(__LC_USER_ASCE, _lowcore, user_asce); 161 + OFFSET(__LC_LPP, _lowcore, lpp); 162 + OFFSET(__LC_CURRENT_PID, _lowcore, current_pid); 163 + OFFSET(__LC_PERCPU_OFFSET, _lowcore, percpu_offset); 164 + OFFSET(__LC_VDSO_PER_CPU, 
_lowcore, vdso_per_cpu_data); 165 + OFFSET(__LC_MACHINE_FLAGS, _lowcore, machine_flags); 166 + OFFSET(__LC_GMAP, _lowcore, gmap); 167 + OFFSET(__LC_PASTE, _lowcore, paste); 168 + /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ 169 + OFFSET(__LC_DUMP_REIPL, _lowcore, ipib); 170 + /* hardware defined lowcore locations 0x1000 - 0x18ff */ 171 + OFFSET(__LC_VX_SAVE_AREA_ADDR, _lowcore, vector_save_area_addr); 172 + OFFSET(__LC_EXT_PARAMS2, _lowcore, ext_params2); 173 + OFFSET(SAVE_AREA_BASE, _lowcore, floating_pt_save_area); 174 + OFFSET(__LC_FPREGS_SAVE_AREA, _lowcore, floating_pt_save_area); 175 + OFFSET(__LC_GPREGS_SAVE_AREA, _lowcore, gpregs_save_area); 176 + OFFSET(__LC_PSW_SAVE_AREA, _lowcore, psw_save_area); 177 + OFFSET(__LC_PREFIX_SAVE_AREA, _lowcore, prefixreg_save_area); 178 + OFFSET(__LC_FP_CREG_SAVE_AREA, _lowcore, fpt_creg_save_area); 179 + OFFSET(__LC_CPU_TIMER_SAVE_AREA, _lowcore, cpu_timer_save_area); 180 + OFFSET(__LC_CLOCK_COMP_SAVE_AREA, _lowcore, clock_comp_save_area); 181 + OFFSET(__LC_AREGS_SAVE_AREA, _lowcore, access_regs_save_area); 182 + OFFSET(__LC_CREGS_SAVE_AREA, _lowcore, cregs_save_area); 183 + OFFSET(__LC_PGM_TDB, _lowcore, pgm_tdb); 167 184 BLANK(); 168 - DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area)); 169 - DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area)); 170 - DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area)); 171 - DEFINE(__LC_PREFIX_SAVE_AREA, offsetof(struct _lowcore, prefixreg_save_area)); 172 - DEFINE(__LC_AREGS_SAVE_AREA, offsetof(struct _lowcore, access_regs_save_area)); 173 - DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area)); 174 - DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); 175 - DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); 176 - DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code)); 177 - 
DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address)); 178 - DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr)); 179 - DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2)); 180 - DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area)); 181 - DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); 182 - DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area)); 183 - DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); 184 - DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset)); 185 - DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); 186 - DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); 187 - DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb)); 188 - DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); 189 - DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); 190 - DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20)); 185 + /* gmap/sie offsets */ 186 + OFFSET(__GMAP_ASCE, gmap, asce); 187 + OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c); 188 + OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20); 191 189 return 0; 192 190 }
+3 -4
arch/s390/kernel/compat_signal.c
··· 249 249 return -EFAULT; 250 250 251 251 /* Save vector registers to signal stack */ 252 - if (is_vx_task(current)) { 252 + if (MACHINE_HAS_VX) { 253 253 for (i = 0; i < __NUM_VXRS_LOW; i++) 254 254 vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1); 255 255 if (__copy_to_user(&sregs_ext->vxrs_low, vxrs, ··· 277 277 *(__u32 *)&regs->gprs[i] = gprs_high[i]; 278 278 279 279 /* Restore vector registers from signal stack */ 280 - if (is_vx_task(current)) { 280 + if (MACHINE_HAS_VX) { 281 281 if (__copy_from_user(vxrs, &sregs_ext->vxrs_low, 282 282 sizeof(sregs_ext->vxrs_low)) || 283 283 __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW, ··· 470 470 */ 471 471 uc_flags = UC_GPRS_HIGH; 472 472 if (MACHINE_HAS_VX) { 473 - if (is_vx_task(current)) 474 - uc_flags |= UC_VXRS; 473 + uc_flags |= UC_VXRS; 475 474 } else 476 475 frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) + 477 476 sizeof(frame->uc.uc_mcontext_ext.vxrs_high);
+2
arch/s390/kernel/cpcmd.c
··· 14 14 #include <linux/spinlock.h> 15 15 #include <linux/stddef.h> 16 16 #include <linux/string.h> 17 + #include <asm/diag.h> 17 18 #include <asm/ebcdic.h> 18 19 #include <asm/cpcmd.h> 19 20 #include <asm/io.h> ··· 71 70 memcpy(cpcmd_buf, cmd, cmdlen); 72 71 ASCEBC(cpcmd_buf, cmdlen); 73 72 73 + diag_stat_inc(DIAG_STAT_X008); 74 74 if (response) { 75 75 memset(response, 0, rlen); 76 76 response_len = rlen;
+4 -12
arch/s390/kernel/crash_dump.c
··· 32 32 .regions = &oldmem_region, 33 33 }; 34 34 35 - #define for_each_dump_mem_range(i, nid, p_start, p_end, p_nid) \ 36 - for (i = 0, __next_mem_range(&i, nid, MEMBLOCK_NONE, \ 37 - &memblock.physmem, \ 38 - &oldmem_type, p_start, \ 39 - p_end, p_nid); \ 40 - i != (u64)ULLONG_MAX; \ 41 - __next_mem_range(&i, nid, MEMBLOCK_NONE, &memblock.physmem,\ 42 - &oldmem_type, \ 43 - p_start, p_end, p_nid)) 44 - 45 35 struct dump_save_areas dump_save_areas; 46 36 47 37 /* ··· 505 515 int cnt = 0; 506 516 u64 idx; 507 517 508 - for_each_dump_mem_range(idx, NUMA_NO_NODE, NULL, NULL, NULL) 518 + for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE, 519 + MEMBLOCK_NONE, NULL, NULL, NULL) 509 520 cnt++; 510 521 return cnt; 511 522 } ··· 519 528 phys_addr_t start, end; 520 529 u64 idx; 521 530 522 - for_each_dump_mem_range(idx, NUMA_NO_NODE, &start, &end, NULL) { 531 + for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE, 532 + MEMBLOCK_NONE, &start, &end, NULL) { 523 533 phdr->p_filesz = end - start; 524 534 phdr->p_type = PT_LOAD; 525 535 phdr->p_offset = start;
+133 -1
arch/s390/kernel/diag.c
··· 6 6 */ 7 7 8 8 #include <linux/module.h> 9 + #include <linux/cpu.h> 10 + #include <linux/seq_file.h> 11 + #include <linux/debugfs.h> 9 12 #include <asm/diag.h> 13 + #include <asm/trace/diag.h> 14 + 15 + struct diag_stat { 16 + unsigned int counter[NR_DIAG_STAT]; 17 + }; 18 + 19 + static DEFINE_PER_CPU(struct diag_stat, diag_stat); 20 + 21 + struct diag_desc { 22 + int code; 23 + char *name; 24 + }; 25 + 26 + static const struct diag_desc diag_map[NR_DIAG_STAT] = { 27 + [DIAG_STAT_X008] = { .code = 0x008, .name = "Console Function" }, 28 + [DIAG_STAT_X00C] = { .code = 0x00c, .name = "Pseudo Timer" }, 29 + [DIAG_STAT_X010] = { .code = 0x010, .name = "Release Pages" }, 30 + [DIAG_STAT_X014] = { .code = 0x014, .name = "Spool File Services" }, 31 + [DIAG_STAT_X044] = { .code = 0x044, .name = "Voluntary Timeslice End" }, 32 + [DIAG_STAT_X064] = { .code = 0x064, .name = "NSS Manipulation" }, 33 + [DIAG_STAT_X09C] = { .code = 0x09c, .name = "Relinquish Timeslice" }, 34 + [DIAG_STAT_X0DC] = { .code = 0x0dc, .name = "Appldata Control" }, 35 + [DIAG_STAT_X204] = { .code = 0x204, .name = "Logical-CPU Utilization" }, 36 + [DIAG_STAT_X210] = { .code = 0x210, .name = "Device Information" }, 37 + [DIAG_STAT_X224] = { .code = 0x224, .name = "EBCDIC-Name Table" }, 38 + [DIAG_STAT_X250] = { .code = 0x250, .name = "Block I/O" }, 39 + [DIAG_STAT_X258] = { .code = 0x258, .name = "Page-Reference Services" }, 40 + [DIAG_STAT_X288] = { .code = 0x288, .name = "Time Bomb" }, 41 + [DIAG_STAT_X2C4] = { .code = 0x2c4, .name = "FTP Services" }, 42 + [DIAG_STAT_X2FC] = { .code = 0x2fc, .name = "Guest Performance Data" }, 43 + [DIAG_STAT_X304] = { .code = 0x304, .name = "Partition-Resource Service" }, 44 + [DIAG_STAT_X308] = { .code = 0x308, .name = "List-Directed IPL" }, 45 + [DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" }, 46 + }; 47 + 48 + static int show_diag_stat(struct seq_file *m, void *v) 49 + { 50 + struct diag_stat *stat; 51 + unsigned long n = (unsigned long) v - 1; 
52 + int cpu, prec, tmp; 53 + 54 + get_online_cpus(); 55 + if (n == 0) { 56 + seq_puts(m, " "); 57 + 58 + for_each_online_cpu(cpu) { 59 + prec = 10; 60 + for (tmp = 10; cpu >= tmp; tmp *= 10) 61 + prec--; 62 + seq_printf(m, "%*s%d", prec, "CPU", cpu); 63 + } 64 + seq_putc(m, '\n'); 65 + } else if (n <= NR_DIAG_STAT) { 66 + seq_printf(m, "diag %03x:", diag_map[n-1].code); 67 + for_each_online_cpu(cpu) { 68 + stat = &per_cpu(diag_stat, cpu); 69 + seq_printf(m, " %10u", stat->counter[n-1]); 70 + } 71 + seq_printf(m, " %s\n", diag_map[n-1].name); 72 + } 73 + put_online_cpus(); 74 + return 0; 75 + } 76 + 77 + static void *show_diag_stat_start(struct seq_file *m, loff_t *pos) 78 + { 79 + return *pos <= nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL; 80 + } 81 + 82 + static void *show_diag_stat_next(struct seq_file *m, void *v, loff_t *pos) 83 + { 84 + ++*pos; 85 + return show_diag_stat_start(m, pos); 86 + } 87 + 88 + static void show_diag_stat_stop(struct seq_file *m, void *v) 89 + { 90 + } 91 + 92 + static const struct seq_operations show_diag_stat_sops = { 93 + .start = show_diag_stat_start, 94 + .next = show_diag_stat_next, 95 + .stop = show_diag_stat_stop, 96 + .show = show_diag_stat, 97 + }; 98 + 99 + static int show_diag_stat_open(struct inode *inode, struct file *file) 100 + { 101 + return seq_open(file, &show_diag_stat_sops); 102 + } 103 + 104 + static const struct file_operations show_diag_stat_fops = { 105 + .open = show_diag_stat_open, 106 + .read = seq_read, 107 + .llseek = seq_lseek, 108 + .release = seq_release, 109 + }; 110 + 111 + 112 + static int __init show_diag_stat_init(void) 113 + { 114 + debugfs_create_file("diag_stat", 0400, NULL, NULL, 115 + &show_diag_stat_fops); 116 + return 0; 117 + } 118 + 119 + device_initcall(show_diag_stat_init); 120 + 121 + void diag_stat_inc(enum diag_stat_enum nr) 122 + { 123 + this_cpu_inc(diag_stat.counter[nr]); 124 + trace_diagnose(diag_map[nr].code); 125 + } 126 + EXPORT_SYMBOL(diag_stat_inc); 127 + 128 + 
void diag_stat_inc_norecursion(enum diag_stat_enum nr) 129 + { 130 + this_cpu_inc(diag_stat.counter[nr]); 131 + trace_diagnose_norecursion(diag_map[nr].code); 132 + } 133 + EXPORT_SYMBOL(diag_stat_inc_norecursion); 10 134 11 135 /* 12 136 * Diagnose 14: Input spool file manipulation 13 137 */ 14 - int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) 138 + static inline int __diag14(unsigned long rx, unsigned long ry1, 139 + unsigned long subcode) 15 140 { 16 141 register unsigned long _ry1 asm("2") = ry1; 17 142 register unsigned long _ry2 asm("3") = subcode; ··· 153 28 : "cc"); 154 29 155 30 return rc; 31 + } 32 + 33 + int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) 34 + { 35 + diag_stat_inc(DIAG_STAT_X014); 36 + return __diag14(rx, ry1, subcode); 156 37 } 157 38 EXPORT_SYMBOL(diag14); 158 39 ··· 179 48 spin_lock_irqsave(&diag210_lock, flags); 180 49 diag210_tmp = *addr; 181 50 51 + diag_stat_inc(DIAG_STAT_X210); 182 52 asm volatile( 183 53 " lhi %0,-1\n" 184 54 " sam31\n"
+14 -1
arch/s390/kernel/early.c
··· 17 17 #include <linux/pfn.h> 18 18 #include <linux/uaccess.h> 19 19 #include <linux/kernel.h> 20 + #include <asm/diag.h> 20 21 #include <asm/ebcdic.h> 21 22 #include <asm/ipl.h> 22 23 #include <asm/lowcore.h> ··· 287 286 int rc; 288 287 289 288 cpu_address = stap(); 289 + diag_stat_inc(DIAG_STAT_X09C); 290 290 asm volatile( 291 291 " diag %2,0,0x9c\n" 292 292 "0: la %0,0\n" ··· 302 300 { 303 301 int rc; 304 302 303 + diag_stat_inc(DIAG_STAT_X044); 305 304 asm volatile( 306 305 " diag 0,0,0x44\n" 307 306 "0: la %0,0\n" ··· 329 326 S390_lowcore.machine_flags |= MACHINE_FLAG_TE; 330 327 if (test_facility(51)) 331 328 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; 332 - if (test_facility(129)) 329 + if (test_facility(129)) { 333 330 S390_lowcore.machine_flags |= MACHINE_FLAG_VX; 331 + __ctl_set_bit(0, 17); 332 + } 334 333 } 334 + 335 + static int __init disable_vector_extension(char *str) 336 + { 337 + S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX; 338 + __ctl_clear_bit(0, 17); 339 + return 1; 340 + } 341 + early_param("novx", disable_vector_extension); 335 342 336 343 static int __init cad_setup(char *str) 337 344 {
+95 -135
arch/s390/kernel/entry.S
··· 20 20 #include <asm/page.h> 21 21 #include <asm/sigp.h> 22 22 #include <asm/irq.h> 23 - #include <asm/fpu-internal.h> 24 23 #include <asm/vx-insn.h> 24 + #include <asm/setup.h> 25 + #include <asm/nmi.h> 25 26 26 27 __PT_R0 = __PT_GPRS 27 28 __PT_R1 = __PT_GPRS + 8 ··· 140 139 #endif 141 140 .endm 142 141 142 + /* 143 + * The TSTMSK macro generates a test-under-mask instruction by 144 + * calculating the memory offset for the specified mask value. 145 + * Mask value can be any constant. The macro shifts the mask 146 + * value to calculate the memory offset for the test-under-mask 147 + * instruction. 148 + */ 149 + .macro TSTMSK addr, mask, size=8, bytepos=0 150 + .if (\bytepos < \size) && (\mask >> 8) 151 + .if (\mask & 0xff) 152 + .error "Mask exceeds byte boundary" 153 + .endif 154 + TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)" 155 + .exitm 156 + .endif 157 + .ifeq \mask 158 + .error "Mask must not be zero" 159 + .endif 160 + off = \size - \bytepos - 1 161 + tm off+\addr, \mask 162 + .endm 163 + 143 164 .section .kprobes.text, "ax" 144 165 145 166 /* ··· 187 164 stg %r15,__LC_KERNEL_STACK # store end of kernel stack 188 165 lg %r15,__THREAD_ksp(%r1) # load kernel stack of next 189 166 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 190 - mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next 167 + mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next 191 168 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 169 + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP 170 + bzr %r14 171 + .insn s,0xb2800000,__LC_LPP # set program parameter 192 172 br %r14 193 173 194 174 .L__critical_start: ··· 206 180 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers 207 181 stg %r2,__SF_EMPTY(%r15) # save control block pointer 208 182 stg %r3,__SF_EMPTY+8(%r15) # save guest register save area 209 - xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason 210 - tm __LC_CPU_FLAGS+7,_CIF_FPU # load guest fp/vx registers ? 
183 + xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0 184 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ? 211 185 jno .Lsie_load_guest_gprs 212 186 brasl %r14,load_fpu_regs # load guest fp/vx regs 213 187 .Lsie_load_guest_gprs: ··· 221 195 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now 222 196 tm __SIE_PROG20+3(%r14),3 # last exit... 223 197 jnz .Lsie_skip 224 - tm __LC_CPU_FLAGS+7,_CIF_FPU 198 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU 225 199 jo .Lsie_skip # exit if fp/vx regs changed 226 - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP 227 - jz .Lsie_enter 228 - .insn s,0xb2800000,__LC_CURRENT_PID # set guest id to pid 229 - .Lsie_enter: 230 200 sie 0(%r14) 231 - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP 232 - jz .Lsie_skip 233 - .insn s,0xb2800000,__SF_EMPTY+16(%r15)# set host id 234 201 .Lsie_skip: 235 202 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE 236 203 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce ··· 240 221 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area 241 222 stmg %r0,%r13,0(%r14) # save guest gprs 0-13 242 223 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers 243 - lg %r2,__SF_EMPTY+24(%r15) # return exit reason code 224 + lg %r2,__SF_EMPTY+16(%r15) # return exit reason code 244 225 br %r14 245 226 .Lsie_fault: 246 227 lghi %r14,-EFAULT 247 - stg %r14,__SF_EMPTY+24(%r15) # set exit reason code 228 + stg %r14,__SF_EMPTY+16(%r15) # set exit reason code 248 229 j sie_exit 249 230 250 231 EX_TABLE(.Lrewind_pad,.Lsie_fault) ··· 290 271 stg %r2,__PT_ORIG_GPR2(%r11) 291 272 stg %r7,STACK_FRAME_OVERHEAD(%r15) 292 273 lgf %r9,0(%r8,%r10) # get system call add. 
293 - tm __TI_flags+7(%r12),_TIF_TRACE 274 + TSTMSK __TI_flags(%r12),_TIF_TRACE 294 275 jnz .Lsysc_tracesys 295 276 basr %r14,%r9 # call sys_xxxx 296 277 stg %r2,__PT_R2(%r11) # store return value ··· 298 279 .Lsysc_return: 299 280 LOCKDEP_SYS_EXIT 300 281 .Lsysc_tif: 301 - tm __PT_FLAGS+7(%r11),_PIF_WORK 282 + TSTMSK __PT_FLAGS(%r11),_PIF_WORK 302 283 jnz .Lsysc_work 303 - tm __TI_flags+7(%r12),_TIF_WORK 284 + TSTMSK __TI_flags(%r12),_TIF_WORK 304 285 jnz .Lsysc_work # check for work 305 - tm __LC_CPU_FLAGS+7,_CIF_WORK 286 + TSTMSK __LC_CPU_FLAGS,_CIF_WORK 306 287 jnz .Lsysc_work 307 288 .Lsysc_restore: 308 289 lg %r14,__LC_VDSO_PER_CPU ··· 318 299 # One of the work bits is on. Find out which one. 319 300 # 320 301 .Lsysc_work: 321 - tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING 302 + TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING 322 303 jo .Lsysc_mcck_pending 323 - tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 304 + TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 324 305 jo .Lsysc_reschedule 325 306 #ifdef CONFIG_UPROBES 326 - tm __TI_flags+7(%r12),_TIF_UPROBE 307 + TSTMSK __TI_flags(%r12),_TIF_UPROBE 327 308 jo .Lsysc_uprobe_notify 328 309 #endif 329 - tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP 310 + TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP 330 311 jo .Lsysc_singlestep 331 - tm __TI_flags+7(%r12),_TIF_SIGPENDING 312 + TSTMSK __TI_flags(%r12),_TIF_SIGPENDING 332 313 jo .Lsysc_sigpending 333 - tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME 314 + TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME 334 315 jo .Lsysc_notify_resume 335 - tm __LC_CPU_FLAGS+7,_CIF_FPU 316 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU 336 317 jo .Lsysc_vxrs 337 - tm __LC_CPU_FLAGS+7,_CIF_ASCE 318 + TSTMSK __LC_CPU_FLAGS,_CIF_ASCE 338 319 jo .Lsysc_uaccess 339 320 j .Lsysc_return # beware of critical section cleanup 340 321 ··· 373 354 .Lsysc_sigpending: 374 355 lgr %r2,%r11 # pass pointer to pt_regs 375 356 brasl %r14,do_signal 376 - tm __PT_FLAGS+7(%r11),_PIF_SYSCALL 357 + TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL 377 358 jno .Lsysc_return 378 359 lmg 
%r2,%r7,__PT_R2(%r11) # load svc arguments 379 360 lg %r10,__TI_sysc_table(%r12) # address of system call table ··· 433 414 basr %r14,%r9 # call sys_xxx 434 415 stg %r2,__PT_R2(%r11) # store return value 435 416 .Lsysc_tracenogo: 436 - tm __TI_flags+7(%r12),_TIF_TRACE 417 + TSTMSK __TI_flags(%r12),_TIF_TRACE 437 418 jz .Lsysc_return 438 419 lgr %r2,%r11 # pass pointer to pt_regs 439 420 larl %r14,.Lsysc_return ··· 563 544 stmg %r8,%r9,__PT_PSW(%r11) 564 545 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID 565 546 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 547 + TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ 548 + jo .Lio_restore 566 549 TRACE_IRQS_OFF 567 550 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 568 551 .Lio_loop: ··· 575 554 lghi %r3,THIN_INTERRUPT 576 555 .Lio_call: 577 556 brasl %r14,do_IRQ 578 - tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR 557 + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR 579 558 jz .Lio_return 580 559 tpi 0 581 560 jz .Lio_return ··· 585 564 LOCKDEP_SYS_EXIT 586 565 TRACE_IRQS_ON 587 566 .Lio_tif: 588 - tm __TI_flags+7(%r12),_TIF_WORK 567 + TSTMSK __TI_flags(%r12),_TIF_WORK 589 568 jnz .Lio_work # there is work to do (signals etc.) 590 - tm __LC_CPU_FLAGS+7,_CIF_WORK 569 + TSTMSK __LC_CPU_FLAGS,_CIF_WORK 591 570 jnz .Lio_work 592 571 .Lio_restore: 593 572 lg %r14,__LC_VDSO_PER_CPU ··· 615 594 # check for preemptive scheduling 616 595 icm %r0,15,__TI_precount(%r12) 617 596 jnz .Lio_restore # preemption is disabled 618 - tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 597 + TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 619 598 jno .Lio_restore 620 599 # switch to kernel stack 621 600 lg %r1,__PT_R15(%r11) ··· 647 626 # One of the work bits is on. Find out which one. 
648 627 # 649 628 .Lio_work_tif: 650 - tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING 629 + TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING 651 630 jo .Lio_mcck_pending 652 - tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 631 + TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 653 632 jo .Lio_reschedule 654 - tm __TI_flags+7(%r12),_TIF_SIGPENDING 633 + TSTMSK __TI_flags(%r12),_TIF_SIGPENDING 655 634 jo .Lio_sigpending 656 - tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME 635 + TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME 657 636 jo .Lio_notify_resume 658 - tm __LC_CPU_FLAGS+7,_CIF_FPU 637 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU 659 638 jo .Lio_vxrs 660 - tm __LC_CPU_FLAGS+7,_CIF_ASCE 639 + TSTMSK __LC_CPU_FLAGS,_CIF_ASCE 661 640 jo .Lio_uaccess 662 641 j .Lio_return # beware of critical section cleanup 663 642 ··· 740 719 mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS 741 720 mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) 742 721 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 722 + TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ 723 + jo .Lio_restore 743 724 TRACE_IRQS_OFF 744 725 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 745 726 lgr %r2,%r11 # pass pointer to pt_regs ··· 771 748 br %r14 772 749 .Lpsw_idle_end: 773 750 774 - /* Store floating-point controls and floating-point or vector extension 775 - * registers instead. A critical section cleanup assures that the registers 776 - * are stored even if interrupted for some other work. The register %r2 777 - * designates a struct fpu to store register contents. If the specified 778 - * structure does not contain a register save area, the register store is 779 - * omitted (see also comments in arch_dup_task_struct()). 780 - * 781 - * The CIF_FPU flag is set in any case. The CIF_FPU triggers a lazy restore 782 - * of the register contents at system call or io return. 751 + /* 752 + * Store floating-point controls and floating-point or vector register 753 + * depending whether the vector facility is available. 
A critical section 754 + * cleanup assures that the registers are stored even if interrupted for 755 + * some other work. The CIF_FPU flag is set to trigger a lazy restore 756 + * of the register contents at return from io or a system call. 783 757 */ 784 758 ENTRY(save_fpu_regs) 785 759 lg %r2,__LC_CURRENT 786 760 aghi %r2,__TASK_thread 787 - tm __LC_CPU_FLAGS+7,_CIF_FPU 761 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU 788 762 bor %r14 789 763 stfpc __THREAD_FPU_fpc(%r2) 790 764 .Lsave_fpu_regs_fpc_end: 791 765 lg %r3,__THREAD_FPU_regs(%r2) 792 - ltgr %r3,%r3 793 - jz .Lsave_fpu_regs_done # no save area -> set CIF_FPU 794 - tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX 766 + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX 795 767 jz .Lsave_fpu_regs_fp # no -> store FP regs 796 768 .Lsave_fpu_regs_vx_low: 797 769 VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) ··· 815 797 br %r14 816 798 .Lsave_fpu_regs_end: 817 799 818 - /* Load floating-point controls and floating-point or vector extension 819 - * registers. A critical section cleanup assures that the register contents 820 - * are loaded even if interrupted for some other work. Depending on the saved 821 - * FP/VX state, the vector-enablement control, CR0.46, is either set or cleared. 800 + /* 801 + * Load floating-point controls and floating-point or vector registers. 802 + * A critical section cleanup assures that the register contents are 803 + * loaded even if interrupted for some other work. 822 804 * 823 805 * There are special calling conventions to fit into sysc and io return work: 824 806 * %r15: <kernel stack> 825 807 * The function requires: 826 - * %r4 and __SF_EMPTY+32(%r15) 808 + * %r4 827 809 */ 828 810 load_fpu_regs: 829 811 lg %r4,__LC_CURRENT 830 812 aghi %r4,__TASK_thread 831 - tm __LC_CPU_FLAGS+7,_CIF_FPU 813 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU 832 814 bnor %r14 833 815 lfpc __THREAD_FPU_fpc(%r4) 834 - stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0 835 - tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ? 
816 + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX 836 817 lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area 837 - jz .Lload_fpu_regs_fp_ctl # -> no VX, load FP regs 838 - .Lload_fpu_regs_vx_ctl: 839 - tm __SF_EMPTY+32+5(%r15),2 # test VX control 840 - jo .Lload_fpu_regs_vx 841 - oi __SF_EMPTY+32+5(%r15),2 # set VX control 842 - lctlg %c0,%c0,__SF_EMPTY+32(%r15) 818 + jz .Lload_fpu_regs_fp # -> no VX, load FP regs 843 819 .Lload_fpu_regs_vx: 844 820 VLM %v0,%v15,0,%r4 845 821 .Lload_fpu_regs_vx_high: 846 822 VLM %v16,%v31,256,%r4 847 823 j .Lload_fpu_regs_done 848 - .Lload_fpu_regs_fp_ctl: 849 - tm __SF_EMPTY+32+5(%r15),2 # test VX control 850 - jz .Lload_fpu_regs_fp 851 - ni __SF_EMPTY+32+5(%r15),253 # clear VX control 852 - lctlg %c0,%c0,__SF_EMPTY+32(%r15) 853 824 .Lload_fpu_regs_fp: 854 825 ld 0,0(%r4) 855 826 ld 1,8(%r4) ··· 861 854 br %r14 862 855 .Lload_fpu_regs_end: 863 856 864 - /* Test and set the vector enablement control in CR0.46 */ 865 - ENTRY(__ctl_set_vx) 866 - stctg %c0,%c0,__SF_EMPTY(%r15) 867 - tm __SF_EMPTY+5(%r15),2 868 - bor %r14 869 - oi __SF_EMPTY+5(%r15),2 870 - lctlg %c0,%c0,__SF_EMPTY(%r15) 871 - br %r14 872 - .L__ctl_set_vx_end: 873 - 874 857 .L__critical_end: 875 858 876 859 /* ··· 875 878 lg %r12,__LC_THREAD_INFO 876 879 larl %r13,cleanup_critical 877 880 lmg %r8,%r9,__LC_MCK_OLD_PSW 878 - tm __LC_MCCK_CODE,0x80 # system damage? 881 + TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE 879 882 jo .Lmcck_panic # yes -> rest of mcck code invalid 880 883 lghi %r14,__LC_CPU_TIMER_SAVE_AREA 881 884 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 882 - tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? 885 + TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID 883 886 jo 3f 884 887 la %r14,__LC_SYNC_ENTER_TIMER 885 888 clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER ··· 893 896 la %r14,__LC_LAST_UPDATE_TIMER 894 897 2: spt 0(%r14) 895 898 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 896 - 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 
899 + 3: TSTMSK __LC_MCCK_CODE,(MCCK_CODE_PSW_MWP_VALID|MCCK_CODE_PSW_IA_VALID) 897 900 jno .Lmcck_panic # no -> skip cleanup critical 898 901 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER 899 902 .Lmcck_skip: ··· 913 916 la %r11,STACK_FRAME_OVERHEAD(%r1) 914 917 lgr %r15,%r1 915 918 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off 916 - tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING 919 + TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING 917 920 jno .Lmcck_return 918 921 TRACE_IRQS_OFF 919 922 brasl %r14,s390_handle_mcck ··· 938 941 # PSW restart interrupt handler 939 942 # 940 943 ENTRY(restart_int_handler) 941 - stg %r15,__LC_SAVE_AREA_RESTART 944 + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP 945 + jz 0f 946 + .insn s,0xb2800000,__LC_LPP 947 + 0: stg %r15,__LC_SAVE_AREA_RESTART 942 948 lg %r15,__LC_RESTART_STACK 943 949 aghi %r15,-__PT_SIZE # create pt_regs on stack 944 950 xc 0(__PT_SIZE,%r15),0(%r15) ··· 1019 1019 jl 0f 1020 1020 clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end 1021 1021 jl .Lcleanup_load_fpu_regs 1022 - clg %r9,BASED(.Lcleanup_table+112) # __ctl_set_vx 1023 - jl 0f 1024 - clg %r9,BASED(.Lcleanup_table+120) # .L__ctl_set_vx_end 1025 - jl .Lcleanup___ctl_set_vx 1026 1022 0: br %r14 1027 1023 1028 1024 .align 8 ··· 1037 1041 .quad .Lsave_fpu_regs_end 1038 1042 .quad load_fpu_regs 1039 1043 .quad .Lload_fpu_regs_end 1040 - .quad __ctl_set_vx 1041 - .quad .L__ctl_set_vx_end 1042 1044 1043 1045 #if IS_ENABLED(CONFIG_KVM) 1044 1046 .Lcleanup_table_sie: ··· 1045 1051 1046 1052 .Lcleanup_sie: 1047 1053 lg %r9,__SF_EMPTY(%r15) # get control block pointer 1048 - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP 1049 - jz 0f 1050 - .insn s,0xb2800000,__SF_EMPTY+16(%r15)# set host id 1051 - 0: ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE 1054 + ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE 1052 1055 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 1053 1056 larl %r9,sie_exit # skip forward to sie_exit 1054 1057 br %r14 ··· 1197 1206 .quad 
.Lpsw_idle_lpsw 1198 1207 1199 1208 .Lcleanup_save_fpu_regs: 1200 - tm __LC_CPU_FLAGS+7,_CIF_FPU 1209 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU 1201 1210 bor %r14 1202 1211 clg %r9,BASED(.Lcleanup_save_fpu_regs_done) 1203 1212 jhe 5f ··· 1215 1224 stfpc __THREAD_FPU_fpc(%r2) 1216 1225 1: # Load register save area and check if VX is active 1217 1226 lg %r3,__THREAD_FPU_regs(%r2) 1218 - ltgr %r3,%r3 1219 - jz 5f # no save area -> set CIF_FPU 1220 - tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX 1227 + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX 1221 1228 jz 4f # no VX -> store FP regs 1222 1229 2: # Store vector registers (V0-V15) 1223 1230 VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) ··· 1255 1266 .quad .Lsave_fpu_regs_done 1256 1267 1257 1268 .Lcleanup_load_fpu_regs: 1258 - tm __LC_CPU_FLAGS+7,_CIF_FPU 1269 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU 1259 1270 bnor %r14 1260 1271 clg %r9,BASED(.Lcleanup_load_fpu_regs_done) 1261 1272 jhe 1f 1262 1273 clg %r9,BASED(.Lcleanup_load_fpu_regs_fp) 1263 1274 jhe 2f 1264 - clg %r9,BASED(.Lcleanup_load_fpu_regs_fp_ctl) 1265 - jhe 3f 1266 1275 clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_high) 1267 - jhe 4f 1276 + jhe 3f 1268 1277 clg %r9,BASED(.Lcleanup_load_fpu_regs_vx) 1269 - jhe 5f 1270 - clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl) 1271 - jhe 6f 1278 + jhe 4f 1272 1279 lg %r4,__LC_CURRENT 1273 1280 aghi %r4,__TASK_thread 1274 1281 lfpc __THREAD_FPU_fpc(%r4) 1275 - tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ? 
1282 + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX 1276 1283 lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area 1277 - jz 3f # -> no VX, load FP regs 1278 - 6: # Set VX-enablement control 1279 - stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0 1280 - tm __SF_EMPTY+32+5(%r15),2 # test VX control 1281 - jo 5f 1282 - oi __SF_EMPTY+32+5(%r15),2 # set VX control 1283 - lctlg %c0,%c0,__SF_EMPTY+32(%r15) 1284 - 5: # Load V0 ..V15 registers 1284 + jz 2f # -> no VX, load FP regs 1285 + 4: # Load V0 ..V15 registers 1285 1286 VLM %v0,%v15,0,%r4 1286 - 4: # Load V16..V31 registers 1287 + 3: # Load V16..V31 registers 1287 1288 VLM %v16,%v31,256,%r4 1288 1289 j 1f 1289 - 3: # Clear VX-enablement control for FP 1290 - stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0 1291 - tm __SF_EMPTY+32+5(%r15),2 # test VX control 1292 - jz 2f 1293 - ni __SF_EMPTY+32+5(%r15),253 # clear VX control 1294 - lctlg %c0,%c0,__SF_EMPTY+32(%r15) 1295 1290 2: # Load floating-point registers 1296 1291 ld 0,0(%r4) 1297 1292 ld 1,8(%r4) ··· 1297 1324 ni __LC_CPU_FLAGS+7,255-_CIF_FPU 1298 1325 lg %r9,48(%r11) # return from load_fpu_regs 1299 1326 br %r14 1300 - .Lcleanup_load_fpu_regs_vx_ctl: 1301 - .quad .Lload_fpu_regs_vx_ctl 1302 1327 .Lcleanup_load_fpu_regs_vx: 1303 1328 .quad .Lload_fpu_regs_vx 1304 1329 .Lcleanup_load_fpu_regs_vx_high: 1305 1330 .quad .Lload_fpu_regs_vx_high 1306 - .Lcleanup_load_fpu_regs_fp_ctl: 1307 - .quad .Lload_fpu_regs_fp_ctl 1308 1331 .Lcleanup_load_fpu_regs_fp: 1309 1332 .quad .Lload_fpu_regs_fp 1310 1333 .Lcleanup_load_fpu_regs_done: 1311 1334 .quad .Lload_fpu_regs_done 1312 - 1313 - .Lcleanup___ctl_set_vx: 1314 - stctg %c0,%c0,__SF_EMPTY(%r15) 1315 - tm __SF_EMPTY+5(%r15),2 1316 - bor %r14 1317 - oi __SF_EMPTY+5(%r15),2 1318 - lctlg %c0,%c0,__SF_EMPTY(%r15) 1319 - lg %r9,48(%r11) # return from __ctl_set_vx 1320 - br %r14 1321 1335 1322 1336 /* 1323 1337 * Integer constants
-3
arch/s390/kernel/entry.h
··· 16 16 void mcck_int_handler(void); 17 17 void restart_int_handler(void); 18 18 void restart_call_handler(void); 19 - void psw_idle(struct s390_idle_data *, unsigned long); 20 19 21 20 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); 22 21 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); 23 - 24 - int alloc_vector_registers(struct task_struct *tsk); 25 22 26 23 void do_protection_exception(struct pt_regs *regs); 27 24 void do_dat_exception(struct pt_regs *regs);
+6 -1
arch/s390/kernel/head64.S
··· 16 16 17 17 __HEAD 18 18 ENTRY(startup_continue) 19 - larl %r1,sched_clock_base_cc 19 + tm __LC_STFL_FAC_LIST+6,0x80 # LPP available ? 20 + jz 0f 21 + xc __LC_LPP+1(7,0),__LC_LPP+1 # clear lpp and current_pid 22 + mvi __LC_LPP,0x80 # and set LPP_MAGIC 23 + .insn s,0xb2800000,__LC_LPP # load program parameter 24 + 0: larl %r1,sched_clock_base_cc 20 25 mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK 21 26 larl %r13,.LPG1 # get base 22 27 lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
+8 -1
arch/s390/kernel/ipl.c
··· 17 17 #include <linux/gfp.h> 18 18 #include <linux/crash_dump.h> 19 19 #include <linux/debug_locks.h> 20 + #include <asm/diag.h> 20 21 #include <asm/ipl.h> 21 22 #include <asm/smp.h> 22 23 #include <asm/setup.h> ··· 166 165 167 166 static struct sclp_ipl_info sclp_ipl_info; 168 167 169 - int diag308(unsigned long subcode, void *addr) 168 + static inline int __diag308(unsigned long subcode, void *addr) 170 169 { 171 170 register unsigned long _addr asm("0") = (unsigned long) addr; 172 171 register unsigned long _rc asm("1") = 0; ··· 178 177 : "+d" (_addr), "+d" (_rc) 179 178 : "d" (subcode) : "cc", "memory"); 180 179 return _rc; 180 + } 181 + 182 + int diag308(unsigned long subcode, void *addr) 183 + { 184 + diag_stat_inc(DIAG_STAT_X308); 185 + return __diag308(subcode, addr); 181 186 } 182 187 EXPORT_SYMBOL_GPL(diag308); 183 188
-1
arch/s390/kernel/irq.c
··· 69 69 {.irq = IRQEXT_IUC, .name = "IUC", .desc = "[EXT] IUCV"}, 70 70 {.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"}, 71 71 {.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"}, 72 - {.irq = IRQEXT_CMR, .name = "CMR", .desc = "[EXT] CPU-Measurement: RI"}, 73 72 {.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"}, 74 73 {.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"}, 75 74 {.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
+61 -59
arch/s390/kernel/nmi.c
··· 21 21 #include <asm/nmi.h> 22 22 #include <asm/crw.h> 23 23 #include <asm/switch_to.h> 24 - #include <asm/fpu-internal.h> 25 24 #include <asm/ctl_reg.h> 26 25 27 26 struct mcck_struct { 28 - int kill_task; 29 - int channel_report; 30 - int warning; 31 - unsigned long long mcck_code; 27 + unsigned int kill_task : 1; 28 + unsigned int channel_report : 1; 29 + unsigned int warning : 1; 30 + unsigned int etr_queue : 1; 31 + unsigned int stp_queue : 1; 32 + unsigned long mcck_code; 32 33 }; 33 34 34 35 static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck); 35 36 36 - static void s390_handle_damage(char *msg) 37 + static void s390_handle_damage(void) 37 38 { 38 39 smp_send_stop(); 39 40 disabled_wait((unsigned long) __builtin_return_address(0)); ··· 82 81 if (xchg(&mchchk_wng_posted, 1) == 0) 83 82 kill_cad_pid(SIGPWR, 1); 84 83 } 84 + if (mcck.etr_queue) 85 + etr_queue_work(); 86 + if (mcck.stp_queue) 87 + stp_queue_work(); 85 88 if (mcck.kill_task) { 86 89 local_irq_enable(); 87 90 printk(KERN_EMERG "mcck: Terminating task because of machine " 88 - "malfunction (code 0x%016llx).\n", mcck.mcck_code); 91 + "malfunction (code 0x%016lx).\n", mcck.mcck_code); 89 92 printk(KERN_EMERG "mcck: task: %s, pid: %d.\n", 90 93 current->comm, current->pid); 91 94 do_exit(SIGSEGV); ··· 101 96 * returns 0 if all registers could be validated 102 97 * returns 1 otherwise 103 98 */ 104 - static int notrace s390_revalidate_registers(struct mci *mci) 99 + static int notrace s390_validate_registers(union mci mci) 105 100 { 106 101 int kill_task; 107 102 u64 zero; ··· 110 105 kill_task = 0; 111 106 zero = 0; 112 107 113 - if (!mci->gr) { 108 + if (!mci.gr) { 114 109 /* 115 110 * General purpose registers couldn't be restored and have 116 111 * unknown contents. Process needs to be terminated. 
117 112 */ 118 113 kill_task = 1; 119 114 } 120 - if (!mci->fp) { 115 + if (!mci.fp) { 121 116 /* 122 117 * Floating point registers can't be restored and 123 118 * therefore the process needs to be terminated. ··· 126 121 } 127 122 fpt_save_area = &S390_lowcore.floating_pt_save_area; 128 123 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; 129 - if (!mci->fc) { 124 + if (!mci.fc) { 130 125 /* 131 126 * Floating point control register can't be restored. 132 127 * Task will be terminated. ··· 137 132 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); 138 133 139 134 if (!MACHINE_HAS_VX) { 140 - /* Revalidate floating point registers */ 135 + /* Validate floating point registers */ 141 136 asm volatile( 142 137 " ld 0,0(%0)\n" 143 138 " ld 1,8(%0)\n" ··· 157 152 " ld 15,120(%0)\n" 158 153 : : "a" (fpt_save_area)); 159 154 } else { 160 - /* Revalidate vector registers */ 155 + /* Validate vector registers */ 161 156 union ctlreg0 cr0; 162 157 163 - if (!mci->vr) { 158 + if (!mci.vr) { 164 159 /* 165 160 * Vector registers can't be restored and therefore 166 161 * the process needs to be terminated. ··· 178 173 &S390_lowcore.vector_save_area) : "1"); 179 174 __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0); 180 175 } 181 - /* Revalidate access registers */ 176 + /* Validate access registers */ 182 177 asm volatile( 183 178 " lam 0,15,0(%0)" 184 179 : : "a" (&S390_lowcore.access_regs_save_area)); 185 - if (!mci->ar) { 180 + if (!mci.ar) { 186 181 /* 187 182 * Access registers have unknown contents. 188 183 * Terminating task. 189 184 */ 190 185 kill_task = 1; 191 186 } 192 - /* Revalidate control registers */ 193 - if (!mci->cr) { 187 + /* Validate control registers */ 188 + if (!mci.cr) { 194 189 /* 195 190 * Control registers have unknown contents. 196 191 * Can't recover and therefore stopping machine. 
197 192 */ 198 - s390_handle_damage("invalid control registers."); 193 + s390_handle_damage(); 199 194 } else { 200 195 asm volatile( 201 196 " lctlg 0,15,0(%0)" 202 197 : : "a" (&S390_lowcore.cregs_save_area)); 203 198 } 204 199 /* 205 - * We don't even try to revalidate the TOD register, since we simply 200 + * We don't even try to validate the TOD register, since we simply 206 201 * can't write something sensible into that register. 207 202 */ 208 203 /* 209 - * See if we can revalidate the TOD programmable register with its 204 + * See if we can validate the TOD programmable register with its 210 205 * old contents (should be zero) otherwise set it to zero. 211 206 */ 212 - if (!mci->pr) 207 + if (!mci.pr) 213 208 asm volatile( 214 209 " sr 0,0\n" 215 210 " sckpf" ··· 220 215 " sckpf" 221 216 : : "a" (&S390_lowcore.tod_progreg_save_area) 222 217 : "0", "cc"); 223 - /* Revalidate clock comparator register */ 218 + /* Validate clock comparator register */ 224 219 set_clock_comparator(S390_lowcore.clock_comparator); 225 220 /* Check if old PSW is valid */ 226 - if (!mci->wp) 221 + if (!mci.wp) 227 222 /* 228 223 * Can't tell if we come from user or kernel mode 229 224 * -> stopping machine. 
230 225 */ 231 - s390_handle_damage("old psw invalid."); 226 + s390_handle_damage(); 232 227 233 - if (!mci->ms || !mci->pm || !mci->ia) 228 + if (!mci.ms || !mci.pm || !mci.ia) 234 229 kill_task = 1; 235 230 236 231 return kill_task; ··· 254 249 static unsigned long long last_ipd; 255 250 struct mcck_struct *mcck; 256 251 unsigned long long tmp; 257 - struct mci *mci; 252 + union mci mci; 258 253 int umode; 259 254 260 255 nmi_enter(); 261 256 inc_irq_stat(NMI_NMI); 262 - mci = (struct mci *) &S390_lowcore.mcck_interruption_code; 257 + mci.val = S390_lowcore.mcck_interruption_code; 263 258 mcck = this_cpu_ptr(&cpu_mcck); 264 259 umode = user_mode(regs); 265 260 266 - if (mci->sd) { 261 + if (mci.sd) { 267 262 /* System damage -> stopping machine */ 268 - s390_handle_damage("received system damage machine check."); 263 + s390_handle_damage(); 269 264 } 270 - if (mci->pd) { 271 - if (mci->b) { 265 + if (mci.pd) { 266 + if (mci.b) { 272 267 /* Processing backup -> verify if we can survive this */ 273 268 u64 z_mcic, o_mcic, t_mcic; 274 269 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); ··· 276 271 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | 277 272 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 | 278 273 1ULL<<16); 279 - t_mcic = *(u64 *)mci; 274 + t_mcic = mci.val; 280 275 281 276 if (((t_mcic & z_mcic) != 0) || 282 277 ((t_mcic & o_mcic) != o_mcic)) { 283 - s390_handle_damage("processing backup machine " 284 - "check with damage."); 278 + s390_handle_damage(); 285 279 } 286 280 287 281 /* ··· 295 291 ipd_count = 1; 296 292 last_ipd = tmp; 297 293 if (ipd_count == MAX_IPD_COUNT) 298 - s390_handle_damage("too many ipd retries."); 294 + s390_handle_damage(); 299 295 spin_unlock(&ipd_lock); 300 296 } else { 301 297 /* Processing damage -> stopping machine */ 302 - s390_handle_damage("received instruction processing " 303 - "damage machine check."); 298 + s390_handle_damage(); 304 299 } 305 300 } 306 - if (s390_revalidate_registers(mci)) { 301 + if (s390_validate_registers(mci)) 
{ 307 302 if (umode) { 308 303 /* 309 304 * Couldn't restore all register contents while in 310 305 * user mode -> mark task for termination. 311 306 */ 312 307 mcck->kill_task = 1; 313 - mcck->mcck_code = *(unsigned long long *) mci; 308 + mcck->mcck_code = mci.val; 314 309 set_cpu_flag(CIF_MCCK_PENDING); 315 310 } else { 316 311 /* 317 312 * Couldn't restore all register contents while in 318 313 * kernel mode -> stopping machine. 319 314 */ 320 - s390_handle_damage("unable to revalidate registers."); 315 + s390_handle_damage(); 321 316 } 322 317 } 323 - if (mci->cd) { 318 + if (mci.cd) { 324 319 /* Timing facility damage */ 325 - s390_handle_damage("TOD clock damaged"); 320 + s390_handle_damage(); 326 321 } 327 - if (mci->ed && mci->ec) { 322 + if (mci.ed && mci.ec) { 328 323 /* External damage */ 329 324 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC)) 330 - etr_sync_check(); 325 + mcck->etr_queue |= etr_sync_check(); 331 326 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) 332 - etr_switch_to_local(); 327 + mcck->etr_queue |= etr_switch_to_local(); 333 328 if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC)) 334 - stp_sync_check(); 329 + mcck->stp_queue |= stp_sync_check(); 335 330 if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND)) 336 - stp_island_check(); 331 + mcck->stp_queue |= stp_island_check(); 332 + if (mcck->etr_queue || mcck->stp_queue) 333 + set_cpu_flag(CIF_MCCK_PENDING); 337 334 } 338 - if (mci->se) 335 + if (mci.se) 339 336 /* Storage error uncorrected */ 340 - s390_handle_damage("received storage error uncorrected " 341 - "machine check."); 342 - if (mci->ke) 337 + s390_handle_damage(); 338 + if (mci.ke) 343 339 /* Storage key-error uncorrected */ 344 - s390_handle_damage("received storage key-error uncorrected " 345 - "machine check."); 346 - if (mci->ds && mci->fa) 340 + s390_handle_damage(); 341 + if (mci.ds && mci.fa) 347 342 /* Storage degradation */ 348 - s390_handle_damage("received storage 
degradation machine " 349 - "check."); 350 - if (mci->cp) { 343 + s390_handle_damage(); 344 + if (mci.cp) { 351 345 /* Channel report word pending */ 352 346 mcck->channel_report = 1; 353 347 set_cpu_flag(CIF_MCCK_PENDING); 354 348 } 355 - if (mci->w) { 349 + if (mci.w) { 356 350 /* Warning pending */ 357 351 mcck->warning = 1; 358 352 set_cpu_flag(CIF_MCCK_PENDING);
+6 -4
arch/s390/kernel/perf_cpum_sf.c
··· 1019 1019 break; 1020 1020 } 1021 1021 1022 - /* The host-program-parameter (hpp) contains the pid of 1023 - * the CPU thread as set by sie64a() in entry.S. 1024 - * If non-zero assume a guest sample. 1022 + /* 1023 + * A non-zero guest program parameter indicates a guest 1024 + * sample. 1025 + * Note that some early samples might be misaccounted to 1026 + * the host. 1025 1027 */ 1026 - if (sfr->basic.hpp) 1028 + if (sfr->basic.gpp) 1027 1029 sde_regs->in_guest = 1; 1028 1030 1029 1031 overflow = 0;
+19 -18
arch/s390/kernel/process.c
··· 23 23 #include <linux/kprobes.h> 24 24 #include <linux/random.h> 25 25 #include <linux/module.h> 26 + #include <linux/init_task.h> 26 27 #include <asm/io.h> 27 28 #include <asm/processor.h> 28 29 #include <asm/vtimer.h> ··· 36 35 #include "entry.h" 37 36 38 37 asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); 38 + 39 + /* FPU save area for the init task */ 40 + __vector128 init_task_fpu_regs[__NUM_VXRS] __init_task_data; 39 41 40 42 /* 41 43 * Return saved PC of a blocked thread. used in kernel/sched. ··· 91 87 92 88 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) 93 89 { 90 + size_t fpu_regs_size; 91 + 94 92 *dst = *src; 95 93 96 - /* Set up a new floating-point register save area */ 97 - dst->thread.fpu.fpc = 0; 98 - dst->thread.fpu.flags = 0; /* Always start with VX disabled */ 99 - dst->thread.fpu.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS, 100 - GFP_KERNEL|__GFP_REPEAT); 101 - if (!dst->thread.fpu.fprs) 94 + /* 95 + * If the vector extension is available, it is enabled for all tasks, 96 + * and, thus, the FPU register save area must be allocated accordingly. 97 + */ 98 + fpu_regs_size = MACHINE_HAS_VX ? sizeof(__vector128) * __NUM_VXRS 99 + : sizeof(freg_t) * __NUM_FPRS; 100 + dst->thread.fpu.regs = kzalloc(fpu_regs_size, GFP_KERNEL|__GFP_REPEAT); 101 + if (!dst->thread.fpu.regs) 102 102 return -ENOMEM; 103 103 104 104 /* 105 105 * Save the floating-point or vector register state of the current 106 - * task. The state is not saved for early kernel threads, for example, 107 - * the init_task, which do not have an allocated save area. 108 - * The CIF_FPU flag is set in any case to lazy clear or restore a saved 109 - * state when switching to a different task or returning to user space. 106 + * task and set the CIF_FPU flag to lazy restore the FPU register 107 + * state when returning to user space. 
110 108 */ 111 109 save_fpu_regs(); 112 110 dst->thread.fpu.fpc = current->thread.fpu.fpc; 113 - if (is_vx_task(current)) 114 - convert_vx_to_fp(dst->thread.fpu.fprs, 115 - current->thread.fpu.vxrs); 116 - else 117 - memcpy(dst->thread.fpu.fprs, current->thread.fpu.fprs, 118 - sizeof(freg_t) * __NUM_FPRS); 111 + memcpy(dst->thread.fpu.regs, current->thread.fpu.regs, fpu_regs_size); 112 + 119 113 return 0; 120 114 } 121 115 ··· 171 169 172 170 /* Don't copy runtime instrumentation info */ 173 171 p->thread.ri_cb = NULL; 174 - p->thread.ri_signum = 0; 175 172 frame->childregs.psw.mask &= ~PSW_MASK_RI; 176 173 177 174 /* Set a new TLS ? */ ··· 200 199 save_fpu_regs(); 201 200 fpregs->fpc = current->thread.fpu.fpc; 202 201 fpregs->pad = 0; 203 - if (is_vx_task(current)) 202 + if (MACHINE_HAS_VX) 204 203 convert_vx_to_fp((freg_t *)&fpregs->fprs, 205 204 current->thread.fpu.vxrs); 206 205 else
+4 -1
arch/s390/kernel/processor.c
··· 11 11 #include <linux/seq_file.h> 12 12 #include <linux/delay.h> 13 13 #include <linux/cpu.h> 14 + #include <asm/diag.h> 14 15 #include <asm/elf.h> 15 16 #include <asm/lowcore.h> 16 17 #include <asm/param.h> ··· 21 20 22 21 void notrace cpu_relax(void) 23 22 { 24 - if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) 23 + if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) { 24 + diag_stat_inc(DIAG_STAT_X044); 25 25 asm volatile("diag 0,0,0x44"); 26 + } 26 27 barrier(); 27 28 } 28 29 EXPORT_SYMBOL(cpu_relax);
+19 -33
arch/s390/kernel/ptrace.c
··· 239 239 * or the child->thread.fpu.vxrs array 240 240 */ 241 241 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; 242 - if (is_vx_task(child)) 242 + if (MACHINE_HAS_VX) 243 243 tmp = *(addr_t *) 244 244 ((addr_t) child->thread.fpu.vxrs + 2*offset); 245 245 else 246 246 tmp = *(addr_t *) 247 - ((addr_t) &child->thread.fpu.fprs + offset); 247 + ((addr_t) child->thread.fpu.fprs + offset); 248 248 249 249 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { 250 250 /* ··· 383 383 * or the child->thread.fpu.vxrs array 384 384 */ 385 385 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; 386 - if (is_vx_task(child)) 386 + if (MACHINE_HAS_VX) 387 387 *(addr_t *)((addr_t) 388 388 child->thread.fpu.vxrs + 2*offset) = data; 389 389 else 390 390 *(addr_t *)((addr_t) 391 - &child->thread.fpu.fprs + offset) = data; 391 + child->thread.fpu.fprs + offset) = data; 392 392 393 393 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { 394 394 /* ··· 617 617 * or the child->thread.fpu.vxrs array 618 618 */ 619 619 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; 620 - if (is_vx_task(child)) 620 + if (MACHINE_HAS_VX) 621 621 tmp = *(__u32 *) 622 622 ((addr_t) child->thread.fpu.vxrs + 2*offset); 623 623 else 624 624 tmp = *(__u32 *) 625 - ((addr_t) &child->thread.fpu.fprs + offset); 625 + ((addr_t) child->thread.fpu.fprs + offset); 626 626 627 627 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { 628 628 /* ··· 742 742 * or the child->thread.fpu.vxrs array 743 743 */ 744 744 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; 745 - if (is_vx_task(child)) 745 + if (MACHINE_HAS_VX) 746 746 *(__u32 *)((addr_t) 747 747 child->thread.fpu.vxrs + 2*offset) = tmp; 748 748 else 749 749 *(__u32 *)((addr_t) 750 - &child->thread.fpu.fprs + offset) = tmp; 750 + child->thread.fpu.fprs + offset) = tmp; 751 751 752 752 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { 753 753 /* ··· 981 981 if (rc) 982 982 return rc; 983 983 984 - if (is_vx_task(target)) 984 
+ if (MACHINE_HAS_VX) 985 985 convert_fp_to_vx(target->thread.fpu.vxrs, fprs); 986 986 else 987 987 memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs)); ··· 1047 1047 1048 1048 if (!MACHINE_HAS_VX) 1049 1049 return -ENODEV; 1050 - if (is_vx_task(target)) { 1051 - if (target == current) 1052 - save_fpu_regs(); 1053 - for (i = 0; i < __NUM_VXRS_LOW; i++) 1054 - vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); 1055 - } else 1056 - memset(vxrs, 0, sizeof(vxrs)); 1050 + if (target == current) 1051 + save_fpu_regs(); 1052 + for (i = 0; i < __NUM_VXRS_LOW; i++) 1053 + vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); 1057 1054 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); 1058 1055 } 1059 1056 ··· 1064 1067 1065 1068 if (!MACHINE_HAS_VX) 1066 1069 return -ENODEV; 1067 - if (!is_vx_task(target)) { 1068 - rc = alloc_vector_registers(target); 1069 - if (rc) 1070 - return rc; 1071 - } else if (target == current) 1070 + if (target == current) 1072 1071 save_fpu_regs(); 1073 1072 1074 1073 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); ··· 1084 1091 1085 1092 if (!MACHINE_HAS_VX) 1086 1093 return -ENODEV; 1087 - if (is_vx_task(target)) { 1088 - if (target == current) 1089 - save_fpu_regs(); 1090 - memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, 1091 - sizeof(vxrs)); 1092 - } else 1093 - memset(vxrs, 0, sizeof(vxrs)); 1094 + if (target == current) 1095 + save_fpu_regs(); 1096 + memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs)); 1097 + 1094 1098 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); 1095 1099 } 1096 1100 ··· 1100 1110 1101 1111 if (!MACHINE_HAS_VX) 1102 1112 return -ENODEV; 1103 - if (!is_vx_task(target)) { 1104 - rc = alloc_vector_registers(target); 1105 - if (rc) 1106 - return rc; 1107 - } else if (target == current) 1113 + if (target == current) 1108 1114 save_fpu_regs(); 1109 1115 1110 1116 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+3 -61
arch/s390/kernel/runtime_instr.c
··· 18 18 /* empty control block to disable RI by loading it */ 19 19 struct runtime_instr_cb runtime_instr_empty_cb; 20 20 21 - static int runtime_instr_avail(void) 22 - { 23 - return test_facility(64); 24 - } 25 - 26 21 static void disable_runtime_instr(void) 27 22 { 28 23 struct pt_regs *regs = task_pt_regs(current); ··· 35 40 static void init_runtime_instr_cb(struct runtime_instr_cb *cb) 36 41 { 37 42 cb->buf_limit = 0xfff; 38 - cb->int_requested = 1; 39 43 cb->pstate = 1; 40 44 cb->pstate_set_buf = 1; 41 45 cb->pstate_sample = 1; ··· 51 57 return; 52 58 disable_runtime_instr(); 53 59 kfree(task->thread.ri_cb); 54 - task->thread.ri_signum = 0; 55 60 task->thread.ri_cb = NULL; 56 61 } 57 62 58 - static void runtime_instr_int_handler(struct ext_code ext_code, 59 - unsigned int param32, unsigned long param64) 60 - { 61 - struct siginfo info; 62 - 63 - if (!(param32 & CPU_MF_INT_RI_MASK)) 64 - return; 65 - 66 - inc_irq_stat(IRQEXT_CMR); 67 - 68 - if (!current->thread.ri_cb) 69 - return; 70 - if (current->thread.ri_signum < SIGRTMIN || 71 - current->thread.ri_signum > SIGRTMAX) { 72 - WARN_ON_ONCE(1); 73 - return; 74 - } 75 - 76 - memset(&info, 0, sizeof(info)); 77 - info.si_signo = current->thread.ri_signum; 78 - info.si_code = SI_QUEUE; 79 - if (param32 & CPU_MF_INT_RI_BUF_FULL) 80 - info.si_int = ENOBUFS; 81 - else if (param32 & CPU_MF_INT_RI_HALTED) 82 - info.si_int = ECANCELED; 83 - else 84 - return; /* unknown reason */ 85 - 86 - send_sig_info(current->thread.ri_signum, &info, current); 87 - } 88 - 89 - SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum) 63 + SYSCALL_DEFINE1(s390_runtime_instr, int, command) 90 64 { 91 65 struct runtime_instr_cb *cb; 92 66 93 - if (!runtime_instr_avail()) 67 + if (!test_facility(64)) 94 68 return -EOPNOTSUPP; 95 69 96 70 if (command == S390_RUNTIME_INSTR_STOP) { ··· 68 106 return 0; 69 107 } 70 108 71 - if (command != S390_RUNTIME_INSTR_START || 72 - (signum < SIGRTMIN || signum > SIGRTMAX)) 109 + if (command != 
S390_RUNTIME_INSTR_START) 73 110 return -EINVAL; 74 111 75 112 if (!current->thread.ri_cb) { ··· 81 120 } 82 121 83 122 init_runtime_instr_cb(cb); 84 - current->thread.ri_signum = signum; 85 123 86 124 /* now load the control block to make it available */ 87 125 preempt_disable(); ··· 89 129 preempt_enable(); 90 130 return 0; 91 131 } 92 - 93 - static int __init runtime_instr_init(void) 94 - { 95 - int rc; 96 - 97 - if (!runtime_instr_avail()) 98 - return 0; 99 - 100 - irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); 101 - rc = register_external_irq(EXT_IRQ_MEASURE_ALERT, 102 - runtime_instr_int_handler); 103 - if (rc) 104 - irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); 105 - else 106 - pr_info("Runtime instrumentation facility initialized\n"); 107 - return rc; 108 - } 109 - device_initcall(runtime_instr_init);
+1 -2
arch/s390/kernel/s390_ksyms.c
··· 1 1 #include <linux/module.h> 2 2 #include <linux/kvm_host.h> 3 - #include <asm/fpu-internal.h> 3 + #include <asm/fpu/api.h> 4 4 #include <asm/ftrace.h> 5 5 6 6 #ifdef CONFIG_FUNCTION_TRACER ··· 10 10 EXPORT_SYMBOL(sie64a); 11 11 EXPORT_SYMBOL(sie_exit); 12 12 EXPORT_SYMBOL(save_fpu_regs); 13 - EXPORT_SYMBOL(__ctl_set_vx); 14 13 #endif 15 14 EXPORT_SYMBOL(memcpy); 16 15 EXPORT_SYMBOL(memset);
+3 -4
arch/s390/kernel/signal.c
··· 179 179 int i; 180 180 181 181 /* Save vector registers to signal stack */ 182 - if (is_vx_task(current)) { 182 + if (MACHINE_HAS_VX) { 183 183 for (i = 0; i < __NUM_VXRS_LOW; i++) 184 184 vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1); 185 185 if (__copy_to_user(&sregs_ext->vxrs_low, vxrs, ··· 199 199 int i; 200 200 201 201 /* Restore vector registers from signal stack */ 202 - if (is_vx_task(current)) { 202 + if (MACHINE_HAS_VX) { 203 203 if (__copy_from_user(vxrs, &sregs_ext->vxrs_low, 204 204 sizeof(sregs_ext->vxrs_low)) || 205 205 __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW, ··· 381 381 uc_flags = 0; 382 382 if (MACHINE_HAS_VX) { 383 383 frame_size += sizeof(_sigregs_ext); 384 - if (is_vx_task(current)) 385 - uc_flags |= UC_VXRS; 384 + uc_flags |= UC_VXRS; 386 385 } 387 386 frame = get_sigframe(&ksig->ka, regs, frame_size); 388 387 if (frame == (void __user *) -1UL)
+8 -2
arch/s390/kernel/smp.c
··· 33 33 #include <linux/crash_dump.h> 34 34 #include <linux/memblock.h> 35 35 #include <asm/asm-offsets.h> 36 + #include <asm/diag.h> 36 37 #include <asm/switch_to.h> 37 38 #include <asm/facility.h> 38 39 #include <asm/ipl.h> ··· 262 261 + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); 263 262 lc->thread_info = (unsigned long) task_thread_info(tsk); 264 263 lc->current_task = (unsigned long) tsk; 264 + lc->lpp = LPP_MAGIC; 265 + lc->current_pid = tsk->pid; 265 266 lc->user_timer = ti->user_timer; 266 267 lc->system_timer = ti->system_timer; 267 268 lc->steal_timer = 0; ··· 378 375 379 376 void smp_yield_cpu(int cpu) 380 377 { 381 - if (MACHINE_HAS_DIAG9C) 378 + if (MACHINE_HAS_DIAG9C) { 379 + diag_stat_inc_norecursion(DIAG_STAT_X09C); 382 380 asm volatile("diag %0,0,0x9c" 383 381 : : "d" (pcpu_devices[cpu].address)); 384 - else if (MACHINE_HAS_DIAG44) 382 + } else if (MACHINE_HAS_DIAG44) { 383 + diag_stat_inc_norecursion(DIAG_STAT_X044); 385 384 asm volatile("diag 0,0,0x44"); 385 + } 386 386 } 387 387 388 388 /*
+21 -10
arch/s390/kernel/time.c
··· 542 542 * Switch to local machine check. This is called when the last usable 543 543 * ETR port goes inactive. After switch to local the clock is not in sync. 544 544 */ 545 - void etr_switch_to_local(void) 545 + int etr_switch_to_local(void) 546 546 { 547 547 if (!etr_eacr.sl) 548 - return; 548 + return 0; 549 549 disable_sync_clock(NULL); 550 550 if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) { 551 551 etr_eacr.es = etr_eacr.sl = 0; 552 552 etr_setr(&etr_eacr); 553 - queue_work(time_sync_wq, &etr_work); 553 + return 1; 554 554 } 555 + return 0; 555 556 } 556 557 557 558 /* ··· 561 560 * After a ETR sync check the clock is not in sync. The machine check 562 561 * is broadcasted to all cpus at the same time. 563 562 */ 564 - void etr_sync_check(void) 563 + int etr_sync_check(void) 565 564 { 566 565 if (!etr_eacr.es) 567 - return; 566 + return 0; 568 567 disable_sync_clock(NULL); 569 568 if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) { 570 569 etr_eacr.es = 0; 571 570 etr_setr(&etr_eacr); 572 - queue_work(time_sync_wq, &etr_work); 571 + return 1; 573 572 } 573 + return 0; 574 + } 575 + 576 + void etr_queue_work(void) 577 + { 578 + queue_work(time_sync_wq, &etr_work); 574 579 } 575 580 576 581 /* ··· 1511 1504 * After a STP sync check the clock is not in sync. The machine check 1512 1505 * is broadcasted to all cpus at the same time. 1513 1506 */ 1514 - void stp_sync_check(void) 1507 + int stp_sync_check(void) 1515 1508 { 1516 1509 disable_sync_clock(NULL); 1517 - queue_work(time_sync_wq, &stp_work); 1510 + return 1; 1518 1511 } 1519 1512 1520 1513 /* ··· 1523 1516 * have matching CTN ids and have a valid stratum-1 configuration 1524 1517 * but the configurations do not match. 
1525 1518 */ 1526 - void stp_island_check(void) 1519 + int stp_island_check(void) 1527 1520 { 1528 1521 disable_sync_clock(NULL); 1529 - queue_work(time_sync_wq, &stp_work); 1522 + return 1; 1530 1523 } 1531 1524 1525 + void stp_queue_work(void) 1526 + { 1527 + queue_work(time_sync_wq, &stp_work); 1528 + } 1532 1529 1533 1530 static int stp_sync_clock(void *data) 1534 1531 {
+16 -12
arch/s390/kernel/topology.c
··· 84 84 struct mask_info *socket, 85 85 int one_socket_per_cpu) 86 86 { 87 + struct cpu_topology_s390 *topo; 87 88 unsigned int core; 88 89 89 90 for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) { ··· 96 95 if (lcpu < 0) 97 96 continue; 98 97 for (i = 0; i <= smp_cpu_mtid; i++) { 99 - per_cpu(cpu_topology, lcpu + i).book_id = book->id; 100 - per_cpu(cpu_topology, lcpu + i).core_id = rcore; 101 - per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i; 98 + topo = &per_cpu(cpu_topology, lcpu + i); 99 + topo->book_id = book->id; 100 + topo->core_id = rcore; 101 + topo->thread_id = lcpu + i; 102 102 cpumask_set_cpu(lcpu + i, &book->mask); 103 103 cpumask_set_cpu(lcpu + i, &socket->mask); 104 104 if (one_socket_per_cpu) 105 - per_cpu(cpu_topology, lcpu + i).socket_id = rcore; 105 + topo->socket_id = rcore; 106 106 else 107 - per_cpu(cpu_topology, lcpu + i).socket_id = socket->id; 107 + topo->socket_id = socket->id; 108 108 smp_cpu_set_polarization(lcpu + i, tl_core->pp); 109 109 } 110 110 if (one_socket_per_cpu) ··· 249 247 250 248 static void update_cpu_masks(void) 251 249 { 250 + struct cpu_topology_s390 *topo; 252 251 int cpu; 253 252 254 253 for_each_possible_cpu(cpu) { 255 - per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu); 256 - per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu); 257 - per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu); 254 + topo = &per_cpu(cpu_topology, cpu); 255 + topo->thread_mask = cpu_thread_map(cpu); 256 + topo->core_mask = cpu_group_map(&socket_info, cpu); 257 + topo->book_mask = cpu_group_map(&book_info, cpu); 258 258 if (!MACHINE_HAS_TOPOLOGY) { 259 - per_cpu(cpu_topology, cpu).thread_id = cpu; 260 - per_cpu(cpu_topology, cpu).core_id = cpu; 261 - per_cpu(cpu_topology, cpu).socket_id = cpu; 262 - per_cpu(cpu_topology, cpu).book_id = cpu; 259 + topo->thread_id = cpu; 260 + topo->core_id = cpu; 261 + topo->socket_id = cpu; 262 + topo->book_id = cpu; 263 263 } 264 264 } 265 
265 numa_update_cpu_topology();
+29
arch/s390/kernel/trace.c
··· 1 + /* 2 + * Tracepoint definitions for s390 3 + * 4 + * Copyright IBM Corp. 2015 5 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 6 + */ 7 + 8 + #include <linux/percpu.h> 9 + #define CREATE_TRACE_POINTS 10 + #include <asm/trace/diag.h> 11 + 12 + EXPORT_TRACEPOINT_SYMBOL(diagnose); 13 + 14 + static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth); 15 + 16 + void trace_diagnose_norecursion(int diag_nr) 17 + { 18 + unsigned long flags; 19 + unsigned int *depth; 20 + 21 + local_irq_save(flags); 22 + depth = this_cpu_ptr(&diagnose_trace_depth); 23 + if (*depth == 0) { 24 + (*depth)++; 25 + trace_diagnose(diag_nr); 26 + (*depth)--; 27 + } 28 + local_irq_restore(flags); 29 + }
+1 -40
arch/s390/kernel/traps.c
··· 19 19 #include <linux/sched.h> 20 20 #include <linux/mm.h> 21 21 #include <linux/slab.h> 22 - #include <asm/fpu-internal.h> 22 + #include <asm/fpu/api.h> 23 23 #include "entry.h" 24 24 25 25 int show_unhandled_signals = 1; ··· 224 224 DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, 225 225 "specification exception"); 226 226 227 - int alloc_vector_registers(struct task_struct *tsk) 228 - { 229 - __vector128 *vxrs; 230 - freg_t *fprs; 231 - 232 - /* Allocate vector register save area. */ 233 - vxrs = kzalloc(sizeof(__vector128) * __NUM_VXRS, 234 - GFP_KERNEL|__GFP_REPEAT); 235 - if (!vxrs) 236 - return -ENOMEM; 237 - preempt_disable(); 238 - if (tsk == current) 239 - save_fpu_regs(); 240 - /* Copy the 16 floating point registers */ 241 - convert_fp_to_vx(vxrs, tsk->thread.fpu.fprs); 242 - fprs = tsk->thread.fpu.fprs; 243 - tsk->thread.fpu.vxrs = vxrs; 244 - tsk->thread.fpu.flags |= FPU_USE_VX; 245 - kfree(fprs); 246 - preempt_enable(); 247 - return 0; 248 - } 249 - 250 227 void vector_exception(struct pt_regs *regs) 251 228 { 252 229 int si_code, vic; ··· 258 281 do_trap(regs, SIGFPE, si_code, "vector exception"); 259 282 } 260 283 261 - static int __init disable_vector_extension(char *str) 262 - { 263 - S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX; 264 - return 1; 265 - } 266 - __setup("novx", disable_vector_extension); 267 - 268 284 void data_exception(struct pt_regs *regs) 269 285 { 270 286 __u16 __user *location; ··· 266 296 location = get_trap_ip(regs); 267 297 268 298 save_fpu_regs(); 269 - /* Check for vector register enablement */ 270 - if (MACHINE_HAS_VX && !is_vx_task(current) && 271 - (current->thread.fpu.fpc & FPC_DXC_MASK) == 0xfe00) { 272 - alloc_vector_registers(current); 273 - /* Vector data exception is suppressing, rewind psw. 
*/ 274 - regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); 275 - clear_pt_regs_flag(regs, PIF_PER_TRAP); 276 - return; 277 - } 278 299 if (current->thread.fpu.fpc & FPC_DXC_MASK) 279 300 signal = SIGFPE; 280 301 else
+1 -1
arch/s390/kernel/vdso.c
··· 299 299 300 300 get_page(virt_to_page(vdso_data)); 301 301 302 - smp_wmb(); 302 + smp_mb(); 303 303 304 304 return 0; 305 305 }
-6
arch/s390/kvm/kvm-s390.c
··· 1292 1292 static inline void save_fpu_to(struct fpu *dst) 1293 1293 { 1294 1294 dst->fpc = current->thread.fpu.fpc; 1295 - dst->flags = current->thread.fpu.flags; 1296 1295 dst->regs = current->thread.fpu.regs; 1297 1296 } 1298 1297 ··· 1302 1303 static inline void load_fpu_from(struct fpu *from) 1303 1304 { 1304 1305 current->thread.fpu.fpc = from->fpc; 1305 - current->thread.fpu.flags = from->flags; 1306 1306 current->thread.fpu.regs = from->regs; 1307 1307 } 1308 1308 ··· 1313 1315 1314 1316 if (test_kvm_facility(vcpu->kvm, 129)) { 1315 1317 current->thread.fpu.fpc = vcpu->run->s.regs.fpc; 1316 - current->thread.fpu.flags = FPU_USE_VX; 1317 1318 /* 1318 1319 * Use the register save area in the SIE-control block 1319 1320 * for register restore and save in kvm_arch_vcpu_put() 1320 1321 */ 1321 1322 current->thread.fpu.vxrs = 1322 1323 (__vector128 *)&vcpu->run->s.regs.vrs; 1323 - /* Always enable the vector extension for KVM */ 1324 - __ctl_set_vx(); 1325 1324 } else 1326 1325 load_fpu_from(&vcpu->arch.guest_fpregs); 1327 1326 ··· 2321 2326 * registers and the FPC value and store them in the 2322 2327 * guest_fpregs structure. 2323 2328 */ 2324 - WARN_ON(!is_vx_task(current)); /* XXX remove later */ 2325 2329 vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc; 2326 2330 convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs, 2327 2331 current->thread.fpu.vxrs);
+14 -16
arch/s390/lib/delay.c
··· 12 12 #include <linux/module.h> 13 13 #include <linux/irqflags.h> 14 14 #include <linux/interrupt.h> 15 + #include <linux/irq.h> 15 16 #include <asm/vtimer.h> 16 17 #include <asm/div64.h> 18 + #include <asm/idle.h> 17 19 18 20 void __delay(unsigned long loops) 19 21 { ··· 32 30 33 31 static void __udelay_disabled(unsigned long long usecs) 34 32 { 35 - unsigned long cr0, cr6, new; 36 - u64 clock_saved, end; 33 + unsigned long cr0, cr0_new, psw_mask; 34 + struct s390_idle_data idle; 35 + u64 end; 37 36 38 37 end = get_tod_clock() + (usecs << 12); 39 - clock_saved = local_tick_disable(); 40 38 __ctl_store(cr0, 0, 0); 41 - __ctl_store(cr6, 6, 6); 42 - new = (cr0 & 0xffff00e0) | 0x00000800; 43 - __ctl_load(new , 0, 0); 44 - new = 0; 45 - __ctl_load(new, 6, 6); 46 - lockdep_off(); 47 - do { 48 - set_clock_comparator(end); 49 - enabled_wait(); 50 - } while (get_tod_clock_fast() < end); 51 - lockdep_on(); 39 + cr0_new = cr0 & ~CR0_IRQ_SUBCLASS_MASK; 40 + cr0_new |= (1UL << (63 - 52)); /* enable clock comparator irq */ 41 + __ctl_load(cr0_new, 0, 0); 42 + psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT; 43 + set_clock_comparator(end); 44 + set_cpu_flag(CIF_IGNORE_IRQ); 45 + psw_idle(&idle, psw_mask); 46 + clear_cpu_flag(CIF_IGNORE_IRQ); 47 + set_clock_comparator(S390_lowcore.clock_comparator); 52 48 __ctl_load(cr0, 0, 0); 53 - __ctl_load(cr6, 6, 6); 54 - local_tick_enable(clock_saved); 55 49 } 56 50 57 51 static void __udelay_enabled(unsigned long long usecs)
+1 -3
arch/s390/lib/find.c
··· 1 1 /* 2 2 * MSB0 numbered special bitops handling. 3 3 * 4 - * On s390x the bits are numbered: 4 + * The bits are numbered: 5 5 * |0..............63|64............127|128...........191|192...........255| 6 - * and on s390: 7 - * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255| 8 6 * 9 7 * The reason for this bit numbering is the fact that the hardware sets bits 10 8 * in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap
+2 -2
arch/s390/lib/spinlock.c
··· 197 197 } 198 198 old = ACCESS_ONCE(rw->lock); 199 199 owner = ACCESS_ONCE(rw->owner); 200 - smp_rmb(); 200 + smp_mb(); 201 201 if ((int) old >= 0) { 202 202 prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR); 203 203 old = prev; ··· 231 231 _raw_compare_and_swap(&rw->lock, old, old | 0x80000000)) 232 232 prev = old; 233 233 else 234 - smp_rmb(); 234 + smp_mb(); 235 235 if ((old & 0x7fffffff) == 0 && (int) prev >= 0) 236 236 break; 237 237 if (MACHINE_HAS_CAD)
+3
arch/s390/mm/extmem.c
··· 18 18 #include <linux/bootmem.h> 19 19 #include <linux/ctype.h> 20 20 #include <linux/ioport.h> 21 + #include <asm/diag.h> 21 22 #include <asm/page.h> 22 23 #include <asm/pgtable.h> 23 24 #include <asm/ebcdic.h> ··· 113 112 ry = DCSS_FINDSEGX; 114 113 115 114 strcpy(name, "dummy"); 115 + diag_stat_inc(DIAG_STAT_X064); 116 116 asm volatile( 117 117 " diag %0,%1,0x64\n" 118 118 "0: ipm %2\n" ··· 207 205 ry = (unsigned long) *func; 208 206 209 207 /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */ 208 + diag_stat_inc(DIAG_STAT_X064); 210 209 if (*func > DCSS_SEGEXT) 211 210 asm volatile( 212 211 " diag %0,%1,0x64\n"
+5 -2
arch/s390/mm/fault.c
··· 30 30 #include <linux/uaccess.h> 31 31 #include <linux/hugetlb.h> 32 32 #include <asm/asm-offsets.h> 33 + #include <asm/diag.h> 33 34 #include <asm/pgtable.h> 34 35 #include <asm/irq.h> 35 36 #include <asm/mmu_context.h> ··· 590 589 .reffcode = 0, 591 590 .refdwlen = 5, 592 591 .refversn = 2, 593 - .refgaddr = __LC_CURRENT_PID, 592 + .refgaddr = __LC_LPP, 594 593 .refselmk = 1ULL << 48, 595 594 .refcmpmk = 1ULL << 48, 596 595 .reserved = __PF_RES_FIELD }; ··· 598 597 599 598 if (pfault_disable) 600 599 return -1; 600 + diag_stat_inc(DIAG_STAT_X258); 601 601 asm volatile( 602 602 " diag %1,%0,0x258\n" 603 603 "0: j 2f\n" ··· 620 618 621 619 if (pfault_disable) 622 620 return; 621 + diag_stat_inc(DIAG_STAT_X258); 623 622 asm volatile( 624 623 " diag %0,0,0x258\n" 625 624 "0:\n" ··· 649 646 return; 650 647 inc_irq_stat(IRQEXT_PFL); 651 648 /* Get the token (= pid of the affected task). */ 652 - pid = param64; 649 + pid = param64 & LPP_PFAULT_PID_MASK; 653 650 rcu_read_lock(); 654 651 tsk = find_task_by_pid_ns(pid, &init_pid_ns); 655 652 if (tsk)
+2
arch/s390/mm/hugetlbpage.c
··· 40 40 pmd_val(pmd) |= (pte_val(pte) & _PAGE_PROTECT); 41 41 pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10; 42 42 pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10; 43 + pmd_val(pmd) |= (pte_val(pte) & _PAGE_SOFT_DIRTY) << 13; 43 44 } else 44 45 pmd_val(pmd) = _SEGMENT_ENTRY_INVALID; 45 46 return pmd; ··· 79 78 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT); 80 79 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10; 81 80 pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10; 81 + pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY) >> 13; 82 82 } else 83 83 pte_val(pte) = _PAGE_INVALID; 84 84 return pte;
+8 -2
arch/s390/numa/mode_emu.c
··· 436 436 */ 437 437 static unsigned long emu_setup_size_adjust(unsigned long size) 438 438 { 439 + unsigned long size_new; 440 + 439 441 size = size ? : CONFIG_EMU_SIZE; 440 - size = roundup(size, memory_block_size_bytes()); 441 - return size; 442 + size_new = roundup(size, memory_block_size_bytes()); 443 + if (size_new == size) 444 + return size; 445 + pr_warn("Increasing memory stripe size from %ld MB to %ld MB\n", 446 + size >> 20, size_new >> 20); 447 + return size_new; 442 448 } 443 449 444 450 /*
+3 -3
arch/s390/pci/pci_insn.c
··· 16 16 static inline void zpci_err_insn(u8 cc, u8 status, u64 req, u64 offset) 17 17 { 18 18 struct { 19 - u8 cc; 20 - u8 status; 21 19 u64 req; 22 20 u64 offset; 23 - } data = {cc, status, req, offset}; 21 + u8 cc; 22 + u8 status; 23 + } __packed data = {req, offset, cc, status}; 24 24 25 25 zpci_err_hex(&data, sizeof(data)); 26 26 }
+10
arch/x86/include/asm/pgtable.h
··· 325 325 return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); 326 326 } 327 327 328 + static inline pte_t pte_clear_soft_dirty(pte_t pte) 329 + { 330 + return pte_clear_flags(pte, _PAGE_SOFT_DIRTY); 331 + } 332 + 333 + static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) 334 + { 335 + return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY); 336 + } 337 + 328 338 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ 329 339 330 340 /*
+1
drivers/s390/block/dasd.c
··· 3030 3030 } else { 3031 3031 max = block->base->discipline->max_blocks << block->s2b_shift; 3032 3032 } 3033 + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue); 3033 3034 blk_queue_logical_block_size(block->request_queue, 3034 3035 block->bp_block); 3035 3036 blk_queue_max_hw_sectors(block->request_queue, max);
+4 -1
drivers/s390/block/dasd_alias.c
··· 824 824 * were waiting for the flush 825 825 */ 826 826 if (device == list_first_entry(&active, 827 - struct dasd_device, alias_list)) 827 + struct dasd_device, alias_list)) { 828 828 list_move(&device->alias_list, &lcu->active_devices); 829 + private = (struct dasd_eckd_private *) device->private; 830 + private->pavgroup = NULL; 831 + } 829 832 } 830 833 spin_unlock_irqrestore(&lcu->lock, flags); 831 834 }
+2
drivers/s390/block/dasd_diag.c
··· 21 21 22 22 #include <asm/dasd.h> 23 23 #include <asm/debug.h> 24 + #include <asm/diag.h> 24 25 #include <asm/ebcdic.h> 25 26 #include <asm/io.h> 26 27 #include <asm/irq.h> ··· 77 76 int rc; 78 77 79 78 rc = 3; 79 + diag_stat_inc(DIAG_STAT_X250); 80 80 asm volatile( 81 81 " diag 2,%2,0x250\n" 82 82 "0: ipm %0\n"
+50 -23
drivers/s390/block/dasd_eckd.c
··· 1032 1032 return 0; 1033 1033 } 1034 1034 1035 + static void dasd_eckd_clear_conf_data(struct dasd_device *device) 1036 + { 1037 + struct dasd_eckd_private *private; 1038 + int i; 1039 + 1040 + private = (struct dasd_eckd_private *) device->private; 1041 + private->conf_data = NULL; 1042 + private->conf_len = 0; 1043 + for (i = 0; i < 8; i++) { 1044 + kfree(private->path_conf_data[i]); 1045 + private->path_conf_data[i] = NULL; 1046 + } 1047 + } 1048 + 1049 + 1035 1050 static int dasd_eckd_read_conf(struct dasd_device *device) 1036 1051 { 1037 1052 void *conf_data; ··· 1083 1068 path_data->opm |= lpm; 1084 1069 continue; /* no error */ 1085 1070 } 1086 - /* translate path mask to position in mask */ 1087 - pos = 8 - ffs(lpm); 1088 - kfree(private->path_conf_data[pos]); 1089 - if ((__u8 *)private->path_conf_data[pos] == 1090 - private->conf_data) { 1091 - private->conf_data = NULL; 1092 - private->conf_len = 0; 1093 - conf_data_saved = 0; 1094 - } 1095 - private->path_conf_data[pos] = 1096 - (struct dasd_conf_data *) conf_data; 1097 1071 /* save first valid configuration data */ 1098 1072 if (!conf_data_saved) { 1099 - kfree(private->conf_data); 1073 + /* initially clear previously stored conf_data */ 1074 + dasd_eckd_clear_conf_data(device); 1100 1075 private->conf_data = conf_data; 1101 1076 private->conf_len = conf_len; 1102 1077 if (dasd_eckd_identify_conf_parts(private)) { ··· 1095 1090 kfree(conf_data); 1096 1091 continue; 1097 1092 } 1093 + pos = pathmask_to_pos(lpm); 1094 + /* store per path conf_data */ 1095 + private->path_conf_data[pos] = 1096 + (struct dasd_conf_data *) conf_data; 1098 1097 /* 1099 1098 * build device UID that other path data 1100 1099 * can be compared to it ··· 1156 1147 path_data->cablepm |= lpm; 1157 1148 continue; 1158 1149 } 1159 - 1150 + pos = pathmask_to_pos(lpm); 1151 + /* store per path conf_data */ 1152 + private->path_conf_data[pos] = 1153 + (struct dasd_conf_data *) conf_data; 1160 1154 path_private.conf_data = NULL; 1161 
1155 path_private.conf_len = 0; 1162 1156 } ··· 1171 1159 path_data->ppm |= lpm; 1172 1160 break; 1173 1161 } 1174 - path_data->opm |= lpm; 1162 + if (!path_data->opm) { 1163 + path_data->opm = lpm; 1164 + dasd_generic_path_operational(device); 1165 + } else { 1166 + path_data->opm |= lpm; 1167 + } 1175 1168 /* 1176 1169 * if the path is used 1177 1170 * it should not be in one of the negative lists ··· 4440 4423 private = (struct dasd_eckd_private *) device->private; 4441 4424 4442 4425 /* Read Configuration Data */ 4443 - dasd_eckd_read_conf(device); 4426 + rc = dasd_eckd_read_conf(device); 4427 + if (rc) { 4428 + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 4429 + "Read configuration data failed, rc=%d", rc); 4430 + goto out_err; 4431 + } 4444 4432 4445 4433 dasd_eckd_get_uid(device, &temp_uid); 4446 4434 /* Generate device unique id */ ··· 4461 4439 /* register lcu with alias handling, enable PAV if this is a new lcu */ 4462 4440 rc = dasd_alias_make_device_known_to_lcu(device); 4463 4441 if (rc) 4464 - return rc; 4442 + goto out_err; 4465 4443 4466 4444 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags); 4467 4445 dasd_eckd_validate_server(device, cqr_flags); 4468 4446 4469 4447 /* RE-Read Configuration Data */ 4470 - dasd_eckd_read_conf(device); 4448 + rc = dasd_eckd_read_conf(device); 4449 + if (rc) { 4450 + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 4451 + "Read configuration data failed, rc=%d", rc); 4452 + goto out_err2; 4453 + } 4471 4454 4472 4455 /* Read Feature Codes */ 4473 4456 dasd_eckd_read_features(device); ··· 4483 4456 if (rc) { 4484 4457 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 4485 4458 "Read device characteristic failed, rc=%d", rc); 4486 - goto out_err; 4459 + goto out_err2; 4487 4460 } 4488 4461 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 4489 4462 memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data)); ··· 4494 4467 4495 4468 return 0; 4496 4469 4470 + out_err2: 4471 + dasd_alias_disconnect_device_from_lcu(device); 4497 
4472 out_err: 4498 4473 return -1; 4499 4474 } ··· 4700 4671 return conf_data; 4701 4672 } 4702 4673 out: 4703 - return private->path_conf_data[8 - ffs(lpum)]; 4674 + return private->path_conf_data[pathmask_to_pos(lpum)]; 4704 4675 } 4705 4676 4706 4677 /* ··· 4745 4716 for (path = 0x80; path; path >>= 1) { 4746 4717 /* initialise data per path */ 4747 4718 bitmask = mask; 4748 - pos = 8 - ffs(path); 4719 + pos = pathmask_to_pos(path); 4749 4720 conf_data = private->path_conf_data[pos]; 4750 4721 pos = 8 - ffs(cuir->ned_map); 4751 4722 ned = (char *) &conf_data->neds[pos]; ··· 4966 4937 ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2], 4967 4938 ((u32 *)cuir)[3]); 4968 4939 ccw_device_get_schid(device->cdev, &sch_id); 4969 - /* get position of path in mask */ 4970 - pos = 8 - ffs(lpum); 4971 - /* get channel path descriptor from this position */ 4940 + pos = pathmask_to_pos(lpum); 4972 4941 desc = ccw_device_get_chp_desc(device->cdev, pos); 4973 4942 4974 4943 if (cuir->code == CUIR_QUIESCE) {
+2
drivers/s390/char/diag_ftp.c
··· 15 15 #include <linux/wait.h> 16 16 #include <linux/string.h> 17 17 #include <asm/ctl_reg.h> 18 + #include <asm/diag.h> 18 19 19 20 #include "hmcdrv_ftp.h" 20 21 #include "diag_ftp.h" ··· 103 102 { 104 103 int rc; 105 104 105 + diag_stat_inc(DIAG_STAT_X2C4); 106 106 asm volatile( 107 107 " diag %[addr],%[cmd],0x2c4\n" 108 108 "0: j 2f\n"
+67 -69
drivers/s390/char/sclp_rw.c
··· 47 47 sclp_make_buffer(void *page, unsigned short columns, unsigned short htab) 48 48 { 49 49 struct sclp_buffer *buffer; 50 - struct write_sccb *sccb; 50 + struct sccb_header *sccb; 51 51 52 - sccb = (struct write_sccb *) page; 52 + sccb = (struct sccb_header *) page; 53 53 /* 54 54 * We keep the struct sclp_buffer structure at the end 55 55 * of the sccb page. ··· 57 57 buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1; 58 58 buffer->sccb = sccb; 59 59 buffer->retry_count = 0; 60 - buffer->mto_number = 0; 61 - buffer->mto_char_sum = 0; 60 + buffer->messages = 0; 61 + buffer->char_sum = 0; 62 62 buffer->current_line = NULL; 63 63 buffer->current_length = 0; 64 64 buffer->columns = columns; 65 65 buffer->htab = htab; 66 66 67 67 /* initialize sccb */ 68 - memset(sccb, 0, sizeof(struct write_sccb)); 69 - sccb->header.length = sizeof(struct write_sccb); 70 - sccb->msg_buf.header.length = sizeof(struct msg_buf); 71 - sccb->msg_buf.header.type = EVTYP_MSG; 72 - sccb->msg_buf.mdb.header.length = sizeof(struct mdb); 73 - sccb->msg_buf.mdb.header.type = 1; 74 - sccb->msg_buf.mdb.header.tag = 0xD4C4C240; /* ebcdic "MDB " */ 75 - sccb->msg_buf.mdb.header.revision_code = 1; 76 - sccb->msg_buf.mdb.go.length = sizeof(struct go); 77 - sccb->msg_buf.mdb.go.type = 1; 68 + memset(sccb, 0, sizeof(struct sccb_header)); 69 + sccb->length = sizeof(struct sccb_header); 78 70 79 71 return buffer; 80 72 } ··· 82 90 } 83 91 84 92 /* 85 - * Initialize a new Message Text Object (MTO) at the end of the provided buffer 86 - * with enough room for max_len characters. Return 0 on success. 93 + * Initialize a new message the end of the provided buffer with 94 + * enough room for max_len characters. Return 0 on success. 
87 95 */ 88 96 static int 89 97 sclp_initialize_mto(struct sclp_buffer *buffer, int max_len) 90 98 { 91 - struct write_sccb *sccb; 99 + struct sccb_header *sccb; 100 + struct msg_buf *msg; 101 + struct mdb *mdb; 102 + struct go *go; 92 103 struct mto *mto; 93 - int mto_size; 104 + int msg_size; 94 105 95 - /* max size of new Message Text Object including message text */ 96 - mto_size = sizeof(struct mto) + max_len; 106 + /* max size of new message including message text */ 107 + msg_size = sizeof(struct msg_buf) + max_len; 97 108 98 109 /* check if current buffer sccb can contain the mto */ 99 110 sccb = buffer->sccb; 100 - if ((MAX_SCCB_ROOM - sccb->header.length) < mto_size) 111 + if ((MAX_SCCB_ROOM - sccb->length) < msg_size) 101 112 return -ENOMEM; 102 113 103 - /* find address of new message text object */ 104 - mto = (struct mto *)(((addr_t) sccb) + sccb->header.length); 114 + msg = (struct msg_buf *)((addr_t) sccb + sccb->length); 115 + memset(msg, 0, sizeof(struct msg_buf)); 116 + msg->header.length = sizeof(struct msg_buf); 117 + msg->header.type = EVTYP_MSG; 105 118 106 - /* 107 - * fill the new Message-Text Object, 108 - * starting behind the former last byte of the SCCB 109 - */ 110 - memset(mto, 0, sizeof(struct mto)); 119 + mdb = &msg->mdb; 120 + mdb->header.length = sizeof(struct mdb); 121 + mdb->header.type = 1; 122 + mdb->header.tag = 0xD4C4C240; /* ebcdic "MDB " */ 123 + mdb->header.revision_code = 1; 124 + 125 + go = &mdb->go; 126 + go->length = sizeof(struct go); 127 + go->type = 1; 128 + 129 + mto = &mdb->mto; 111 130 mto->length = sizeof(struct mto); 112 131 mto->type = 4; /* message text object */ 113 132 mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */ 114 133 115 134 /* set pointer to first byte after struct mto. 
*/ 135 + buffer->current_msg = msg; 116 136 buffer->current_line = (char *) (mto + 1); 117 137 buffer->current_length = 0; 118 138 ··· 132 128 } 133 129 134 130 /* 135 - * Finalize MTO initialized by sclp_initialize_mto(), updating the sizes of 136 - * MTO, enclosing MDB, event buffer and SCCB. 131 + * Finalize message initialized by sclp_initialize_mto(), 132 + * updating the sizes of MTO, enclosing MDB, event buffer and SCCB. 137 133 */ 138 134 static void 139 135 sclp_finalize_mto(struct sclp_buffer *buffer) 140 136 { 141 - struct write_sccb *sccb; 142 - struct mto *mto; 143 - int str_len, mto_size; 144 - 145 - str_len = buffer->current_length; 146 - buffer->current_line = NULL; 147 - buffer->current_length = 0; 148 - 149 - /* real size of new Message Text Object including message text */ 150 - mto_size = sizeof(struct mto) + str_len; 151 - 152 - /* find address of new message text object */ 153 - sccb = buffer->sccb; 154 - mto = (struct mto *)(((addr_t) sccb) + sccb->header.length); 155 - 156 - /* set size of message text object */ 157 - mto->length = mto_size; 137 + struct sccb_header *sccb; 138 + struct msg_buf *msg; 158 139 159 140 /* 160 141 * update values of sizes 161 142 * (SCCB, Event(Message) Buffer, Message Data Block) 162 143 */ 163 - sccb->header.length += mto_size; 164 - sccb->msg_buf.header.length += mto_size; 165 - sccb->msg_buf.mdb.header.length += mto_size; 144 + sccb = buffer->sccb; 145 + msg = buffer->current_msg; 146 + msg->header.length += buffer->current_length; 147 + msg->mdb.header.length += buffer->current_length; 148 + msg->mdb.mto.length += buffer->current_length; 149 + sccb->length += msg->header.length; 166 150 167 151 /* 168 152 * count number of buffered messages (= number of Message Text 169 153 * Objects) and number of buffered characters 170 154 * for the SCCB currently used for buffering and at all 171 155 */ 172 - buffer->mto_number++; 173 - buffer->mto_char_sum += str_len; 156 + buffer->messages++; 157 + buffer->char_sum += 
buffer->current_length; 158 + 159 + buffer->current_line = NULL; 160 + buffer->current_length = 0; 161 + buffer->current_msg = NULL; 174 162 } 175 163 176 164 /* ··· 214 218 break; 215 219 case '\a': /* bell, one for several times */ 216 220 /* set SCLP sound alarm bit in General Object */ 217 - buffer->sccb->msg_buf.mdb.go.general_msg_flags |= 221 + if (buffer->current_line == NULL) { 222 + rc = sclp_initialize_mto(buffer, 223 + buffer->columns); 224 + if (rc) 225 + return i_msg; 226 + } 227 + buffer->current_msg->mdb.go.general_msg_flags |= 218 228 GNRLMSGFLGS_SNDALRM; 219 229 break; 220 230 case '\t': /* horizontal tabulator */ ··· 311 309 int 312 310 sclp_buffer_space(struct sclp_buffer *buffer) 313 311 { 312 + struct sccb_header *sccb; 314 313 int count; 315 314 316 - count = MAX_SCCB_ROOM - buffer->sccb->header.length; 315 + sccb = buffer->sccb; 316 + count = MAX_SCCB_ROOM - sccb->length; 317 317 if (buffer->current_line != NULL) 318 - count -= sizeof(struct mto) + buffer->current_length; 318 + count -= sizeof(struct msg_buf) + buffer->current_length; 319 319 return count; 320 320 } 321 321 ··· 329 325 { 330 326 int count; 331 327 332 - count = buffer->mto_char_sum; 328 + count = buffer->char_sum; 333 329 if (buffer->current_line != NULL) 334 330 count += buffer->current_length; 335 331 return count; ··· 382 378 { 383 379 int rc; 384 380 struct sclp_buffer *buffer; 385 - struct write_sccb *sccb; 381 + struct sccb_header *sccb; 386 382 387 383 buffer = (struct sclp_buffer *) data; 388 384 sccb = buffer->sccb; ··· 393 389 return; 394 390 } 395 391 /* check SCLP response code and choose suitable action */ 396 - switch (sccb->header.response_code) { 392 + switch (sccb->response_code) { 397 393 case 0x0020 : 398 394 /* Normal completion, buffer processed, message(s) sent */ 399 395 rc = 0; ··· 407 403 /* remove processed buffers and requeue rest */ 408 404 if (sclp_remove_processed((struct sccb_header *) sccb) > 0) { 409 405 /* not all buffers were processed */ 
410 - sccb->header.response_code = 0x0000; 406 + sccb->response_code = 0x0000; 411 407 buffer->request.status = SCLP_REQ_FILLED; 412 408 rc = sclp_add_request(request); 413 409 if (rc == 0) ··· 423 419 break; 424 420 } 425 421 /* retry request */ 426 - sccb->header.response_code = 0x0000; 422 + sccb->response_code = 0x0000; 427 423 buffer->request.status = SCLP_REQ_FILLED; 428 424 rc = sclp_add_request(request); 429 425 if (rc == 0) 430 426 return; 431 427 break; 432 428 default: 433 - if (sccb->header.response_code == 0x71f0) 429 + if (sccb->response_code == 0x71f0) 434 430 rc = -ENOMEM; 435 431 else 436 432 rc = -EINVAL; ··· 449 445 sclp_emit_buffer(struct sclp_buffer *buffer, 450 446 void (*callback)(struct sclp_buffer *, int)) 451 447 { 452 - struct write_sccb *sccb; 453 - 454 448 /* add current line if there is one */ 455 449 if (buffer->current_line != NULL) 456 450 sclp_finalize_mto(buffer); 457 451 458 452 /* Are there messages in the output buffer ? */ 459 - if (buffer->mto_number == 0) 453 + if (buffer->messages == 0) 460 454 return -EIO; 461 - 462 - sccb = buffer->sccb; 463 - /* Use normal write message */ 464 - sccb->msg_buf.header.type = EVTYP_MSG; 465 455 466 456 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; 467 457 buffer->request.status = SCLP_REQ_FILLED; 468 458 buffer->request.callback = sclp_writedata_callback; 469 459 buffer->request.callback_data = buffer; 470 - buffer->request.sccb = sccb; 460 + buffer->request.sccb = buffer->sccb; 471 461 buffer->callback = callback; 472 462 return sclp_add_request(&buffer->request); 473 463 }
+7 -10
drivers/s390/char/sclp_rw.h
··· 45 45 struct mdb { 46 46 struct mdb_header header; 47 47 struct go go; 48 + struct mto mto; 48 49 } __attribute__((packed)); 49 50 50 51 struct msg_buf { ··· 53 52 struct mdb mdb; 54 53 } __attribute__((packed)); 55 54 56 - struct write_sccb { 57 - struct sccb_header header; 58 - struct msg_buf msg_buf; 59 - } __attribute__((packed)); 60 - 61 55 /* The number of empty mto buffers that can be contained in a single sccb. */ 62 - #define NR_EMPTY_MTO_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \ 63 - sizeof(struct write_sccb)) / sizeof(struct mto)) 56 + #define NR_EMPTY_MSG_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \ 57 + sizeof(struct sccb_header)) / sizeof(struct msg_buf)) 64 58 65 59 /* 66 60 * data structure for information about list of SCCBs (only for writing), ··· 64 68 struct sclp_buffer { 65 69 struct list_head list; /* list_head for sccb_info chain */ 66 70 struct sclp_req request; 67 - struct write_sccb *sccb; 71 + void *sccb; 72 + struct msg_buf *current_msg; 68 73 char *current_line; 69 74 int current_length; 70 75 int retry_count; ··· 73 76 unsigned short columns; 74 77 unsigned short htab; 75 78 /* statistics about this buffer */ 76 - unsigned int mto_char_sum; /* # chars in sccb */ 77 - unsigned int mto_number; /* # mtos in sccb */ 79 + unsigned int char_sum; /* # chars in sccb */ 80 + unsigned int messages; /* # messages in sccb */ 78 81 /* Callback that is called after reaching final status. */ 79 82 void (*callback)(struct sclp_buffer *, int); 80 83 };
+4 -4
drivers/s390/char/sclp_tty.c
··· 84 84 * to change as output buffers get emptied, or if the output flow 85 85 * control is acted. This is not an exact number because not every 86 86 * character needs the same space in the sccb. The worst case is 87 - * a string of newlines. Every newlines creates a new mto which 88 - * needs 8 bytes. 87 + * a string of newlines. Every newline creates a new message which 88 + * needs 82 bytes. 89 89 */ 90 90 static int 91 91 sclp_tty_write_room (struct tty_struct *tty) ··· 97 97 spin_lock_irqsave(&sclp_tty_lock, flags); 98 98 count = 0; 99 99 if (sclp_ttybuf != NULL) 100 - count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct mto); 100 + count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct msg_buf); 101 101 list_for_each(l, &sclp_tty_pages) 102 - count += NR_EMPTY_MTO_PER_SCCB; 102 + count += NR_EMPTY_MSG_PER_SCCB; 103 103 spin_unlock_irqrestore(&sclp_tty_lock, flags); 104 104 return count; 105 105 }
+7 -26
drivers/s390/cio/cio.c
··· 476 476 return 0; 477 477 } 478 478 479 - static int cio_validate_io_subchannel(struct subchannel *sch) 480 - { 481 - /* Initialization for io subchannels. */ 482 - if (!css_sch_is_valid(&sch->schib)) 483 - return -ENODEV; 484 - 485 - /* Devno is valid. */ 486 - return cio_check_devno_blacklisted(sch); 487 - } 488 - 489 - static int cio_validate_msg_subchannel(struct subchannel *sch) 490 - { 491 - /* Initialization for message subchannels. */ 492 - if (!css_sch_is_valid(&sch->schib)) 493 - return -ENODEV; 494 - 495 - /* Devno is valid. */ 496 - return cio_check_devno_blacklisted(sch); 497 - } 498 - 499 479 /** 500 480 * cio_validate_subchannel - basic validation of subchannel 501 481 * @sch: subchannel structure to be filled out ··· 513 533 514 534 switch (sch->st) { 515 535 case SUBCHANNEL_TYPE_IO: 516 - err = cio_validate_io_subchannel(sch); 517 - break; 518 536 case SUBCHANNEL_TYPE_MSG: 519 - err = cio_validate_msg_subchannel(sch); 537 + if (!css_sch_is_valid(&sch->schib)) 538 + err = -ENODEV; 539 + else 540 + err = cio_check_devno_blacklisted(sch); 520 541 break; 521 542 default: 522 543 err = 0; ··· 807 826 static void s390_reset_chpids_mcck_handler(void) 808 827 { 809 828 struct crw crw; 810 - struct mci *mci; 829 + union mci mci; 811 830 812 831 /* Check for pending channel report word. */ 813 - mci = (struct mci *)&S390_lowcore.mcck_interruption_code; 814 - if (!mci->cp) 832 + mci.val = S390_lowcore.mcck_interruption_code; 833 + if (!mci.cp) 815 834 return; 816 835 /* Process channel report words. */ 817 836 while (stcrw(&crw) == 0) {
+132 -90
drivers/s390/cio/cmf.c
··· 113 113 * @readall: read a measurement block in a common format 114 114 * @reset: clear the data in the associated measurement block and 115 115 * reset its time stamp 116 - * @align: align an allocated block so that the hardware can use it 117 116 */ 118 117 struct cmb_operations { 119 118 int (*alloc) (struct ccw_device *); ··· 121 122 u64 (*read) (struct ccw_device *, int); 122 123 int (*readall)(struct ccw_device *, struct cmbdata *); 123 124 void (*reset) (struct ccw_device *); 124 - void *(*align) (void *); 125 125 /* private: */ 126 126 struct attribute_group *attr_group; 127 127 }; ··· 184 186 static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc, 185 187 unsigned long address) 186 188 { 187 - struct subchannel *sch; 188 - 189 - sch = to_subchannel(cdev->dev.parent); 189 + struct subchannel *sch = to_subchannel(cdev->dev.parent); 190 + int ret; 190 191 191 192 sch->config.mme = mme; 192 193 sch->config.mbfc = mbfc; ··· 195 198 else 196 199 sch->config.mbi = address; 197 200 198 - return cio_commit_config(sch); 201 + ret = cio_commit_config(sch); 202 + if (!mme && ret == -ENODEV) { 203 + /* 204 + * The task was to disable measurement block updates but 205 + * the subchannel is already gone. Report success. 206 + */ 207 + ret = 0; 208 + } 209 + return ret; 199 210 } 200 211 201 212 struct set_schib_struct { ··· 319 314 return -EBUSY; 320 315 } 321 316 cmb_data = cdev->private->cmb; 322 - hw_block = cmbops->align(cmb_data->hw_block); 317 + hw_block = cmb_data->hw_block; 323 318 if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size)) 324 319 /* No need to copy. */ 325 320 return 0; ··· 430 425 * Need to reset hw block as well to make the hardware start 431 426 * from 0 again. 
432 427 */ 433 - memset(cmbops->align(cmb_data->hw_block), 0, cmb_data->size); 428 + memset(cmb_data->hw_block, 0, cmb_data->size); 434 429 cmb_data->last_update = 0; 435 430 } 436 431 cdev->private->cmb_start_time = get_tod_clock(); ··· 611 606 spin_lock_irq(cdev->ccwlock); 612 607 613 608 priv = cdev->private; 614 - 615 - if (list_empty(&priv->cmb_list)) { 616 - /* already freed */ 617 - goto out; 618 - } 619 - 620 609 cmb_data = priv->cmb; 621 610 priv->cmb = NULL; 622 611 if (cmb_data) ··· 625 626 free_pages((unsigned long)cmb_area.mem, get_order(size)); 626 627 cmb_area.mem = NULL; 627 628 } 628 - out: 629 629 spin_unlock_irq(cdev->ccwlock); 630 630 spin_unlock(&cmb_area.lock); 631 631 } ··· 753 755 cmf_generic_reset(cdev); 754 756 } 755 757 756 - static void * align_cmb(void *area) 757 - { 758 - return area; 759 - } 760 - 761 758 static struct attribute_group cmf_attr_group; 762 759 763 760 static struct cmb_operations cmbops_basic = { ··· 762 769 .read = read_cmb, 763 770 .readall = readall_cmb, 764 771 .reset = reset_cmb, 765 - .align = align_cmb, 766 772 .attr_group = &cmf_attr_group, 767 773 }; 768 774 ··· 796 804 u32 device_busy_time; 797 805 u32 initial_command_response_time; 798 806 u32 reserved[7]; 799 - }; 807 + } __packed __aligned(64); 800 808 801 - /* 802 - * kmalloc only guarantees 8 byte alignment, but we need cmbe 803 - * pointers to be naturally aligned. Make sure to allocate 804 - * enough space for two cmbes. 
805 - */ 806 - static inline struct cmbe *cmbe_align(struct cmbe *c) 807 - { 808 - unsigned long addr; 809 - addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) & 810 - ~(sizeof (struct cmbe) - sizeof(long)); 811 - return (struct cmbe*)addr; 812 - } 809 + static struct kmem_cache *cmbe_cache; 813 810 814 811 static int alloc_cmbe(struct ccw_device *cdev) 815 812 { 816 - struct cmbe *cmbe; 817 813 struct cmb_data *cmb_data; 818 - int ret; 814 + struct cmbe *cmbe; 815 + int ret = -ENOMEM; 819 816 820 - cmbe = kzalloc (sizeof (*cmbe) * 2, GFP_KERNEL); 817 + cmbe = kmem_cache_zalloc(cmbe_cache, GFP_KERNEL); 821 818 if (!cmbe) 822 - return -ENOMEM; 823 - cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL); 824 - if (!cmb_data) { 825 - ret = -ENOMEM; 819 + return ret; 820 + 821 + cmb_data = kzalloc(sizeof(*cmb_data), GFP_KERNEL); 822 + if (!cmb_data) 826 823 goto out_free; 827 - } 824 + 828 825 cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL); 829 - if (!cmb_data->last_block) { 830 - ret = -ENOMEM; 826 + if (!cmb_data->last_block) 831 827 goto out_free; 832 - } 833 - cmb_data->size = sizeof(struct cmbe); 834 - spin_lock_irq(cdev->ccwlock); 835 - if (cdev->private->cmb) { 836 - spin_unlock_irq(cdev->ccwlock); 837 - ret = -EBUSY; 838 - goto out_free; 839 - } 828 + 829 + cmb_data->size = sizeof(*cmbe); 840 830 cmb_data->hw_block = cmbe; 831 + 832 + spin_lock(&cmb_area.lock); 833 + spin_lock_irq(cdev->ccwlock); 834 + if (cdev->private->cmb) 835 + goto out_unlock; 836 + 841 837 cdev->private->cmb = cmb_data; 842 - spin_unlock_irq(cdev->ccwlock); 843 838 844 839 /* activate global measurement if this is the first channel */ 845 - spin_lock(&cmb_area.lock); 846 840 if (list_empty(&cmb_area.list)) 847 841 cmf_activate(NULL, 1); 848 842 list_add_tail(&cdev->private->cmb_list, &cmb_area.list); 849 - spin_unlock(&cmb_area.lock); 850 843 844 + spin_unlock_irq(cdev->ccwlock); 845 + spin_unlock(&cmb_area.lock); 851 846 return 0; 847 + 848 + out_unlock: 
849 + spin_unlock_irq(cdev->ccwlock); 850 + spin_unlock(&cmb_area.lock); 851 + ret = -EBUSY; 852 852 out_free: 853 853 if (cmb_data) 854 854 kfree(cmb_data->last_block); 855 855 kfree(cmb_data); 856 - kfree(cmbe); 856 + kmem_cache_free(cmbe_cache, cmbe); 857 + 857 858 return ret; 858 859 } 859 860 ··· 854 869 { 855 870 struct cmb_data *cmb_data; 856 871 872 + spin_lock(&cmb_area.lock); 857 873 spin_lock_irq(cdev->ccwlock); 858 874 cmb_data = cdev->private->cmb; 859 875 cdev->private->cmb = NULL; 860 - if (cmb_data) 876 + if (cmb_data) { 861 877 kfree(cmb_data->last_block); 878 + kmem_cache_free(cmbe_cache, cmb_data->hw_block); 879 + } 862 880 kfree(cmb_data); 863 - spin_unlock_irq(cdev->ccwlock); 864 881 865 882 /* deactivate global measurement if this is the last channel */ 866 - spin_lock(&cmb_area.lock); 867 883 list_del_init(&cdev->private->cmb_list); 868 884 if (list_empty(&cmb_area.list)) 869 885 cmf_activate(NULL, 0); 886 + spin_unlock_irq(cdev->ccwlock); 870 887 spin_unlock(&cmb_area.lock); 871 888 } 872 889 ··· 884 897 return -EINVAL; 885 898 } 886 899 cmb_data = cdev->private->cmb; 887 - mba = mme ? (unsigned long) cmbe_align(cmb_data->hw_block) : 0; 900 + mba = mme ? (unsigned long) cmb_data->hw_block : 0; 888 901 spin_unlock_irqrestore(cdev->ccwlock, flags); 889 902 890 903 return set_schib_wait(cdev, mme, 1, mba); ··· 1009 1022 cmf_generic_reset(cdev); 1010 1023 } 1011 1024 1012 - static void * align_cmbe(void *area) 1013 - { 1014 - return cmbe_align(area); 1015 - } 1016 - 1017 1025 static struct attribute_group cmf_attr_group_ext; 1018 1026 1019 1027 static struct cmb_operations cmbops_extended = { ··· 1018 1036 .read = read_cmbe, 1019 1037 .readall = readall_cmbe, 1020 1038 .reset = reset_cmbe, 1021 - .align = align_cmbe, 1022 1039 .attr_group = &cmf_attr_group_ext, 1023 1040 }; 1024 1041 ··· 1152 1171 struct device_attribute *attr, 1153 1172 char *buf) 1154 1173 { 1155 - return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 
1 : 0); 1174 + struct ccw_device *cdev = to_ccwdev(dev); 1175 + int enabled; 1176 + 1177 + spin_lock_irq(cdev->ccwlock); 1178 + enabled = !!cdev->private->cmb; 1179 + spin_unlock_irq(cdev->ccwlock); 1180 + 1181 + return sprintf(buf, "%d\n", enabled); 1156 1182 } 1157 1183 1158 1184 static ssize_t cmb_enable_store(struct device *dev, 1159 1185 struct device_attribute *attr, const char *buf, 1160 1186 size_t c) 1161 1187 { 1162 - struct ccw_device *cdev; 1163 - int ret; 1188 + struct ccw_device *cdev = to_ccwdev(dev); 1164 1189 unsigned long val; 1190 + int ret; 1165 1191 1166 1192 ret = kstrtoul(buf, 16, &val); 1167 1193 if (ret) 1168 1194 return ret; 1169 - 1170 - cdev = to_ccwdev(dev); 1171 1195 1172 1196 switch (val) { 1173 1197 case 0: ··· 1181 1195 case 1: 1182 1196 ret = enable_cmf(cdev); 1183 1197 break; 1198 + default: 1199 + ret = -EINVAL; 1184 1200 } 1185 1201 1186 - return c; 1202 + return ret ? ret : c; 1187 1203 } 1188 - 1189 - DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store); 1204 + DEVICE_ATTR_RW(cmb_enable); 1190 1205 1191 1206 int ccw_set_cmf(struct ccw_device *cdev, int enable) 1192 1207 { ··· 1207 1220 { 1208 1221 int ret; 1209 1222 1223 + device_lock(&cdev->dev); 1224 + get_device(&cdev->dev); 1210 1225 ret = cmbops->alloc(cdev); 1211 - cmbops->reset(cdev); 1212 1226 if (ret) 1213 - return ret; 1214 - ret = cmbops->set(cdev, 2); 1227 + goto out; 1228 + cmbops->reset(cdev); 1229 + ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group); 1215 1230 if (ret) { 1216 1231 cmbops->free(cdev); 1217 - return ret; 1232 + goto out; 1218 1233 } 1219 - ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group); 1220 - if (!ret) 1221 - return 0; 1222 - cmbops->set(cdev, 0); //FIXME: this can fail 1234 + ret = cmbops->set(cdev, 2); 1235 + if (ret) { 1236 + sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group); 1237 + cmbops->free(cdev); 1238 + } 1239 + out: 1240 + if (ret) 1241 + put_device(&cdev->dev); 1242 + 1243 + 
device_unlock(&cdev->dev); 1244 + return ret; 1245 + } 1246 + 1247 + /** 1248 + * __disable_cmf() - switch off the channel measurement for a specific device 1249 + * @cdev: The ccw device to be disabled 1250 + * 1251 + * Returns %0 for success or a negative error value. 1252 + * 1253 + * Context: 1254 + * non-atomic, device_lock() held. 1255 + */ 1256 + int __disable_cmf(struct ccw_device *cdev) 1257 + { 1258 + int ret; 1259 + 1260 + ret = cmbops->set(cdev, 0); 1261 + if (ret) 1262 + return ret; 1263 + 1264 + sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group); 1223 1265 cmbops->free(cdev); 1266 + put_device(&cdev->dev); 1267 + 1224 1268 return ret; 1225 1269 } 1226 1270 ··· 1268 1250 { 1269 1251 int ret; 1270 1252 1271 - ret = cmbops->set(cdev, 0); 1272 - if (ret) 1273 - return ret; 1274 - cmbops->free(cdev); 1275 - sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group); 1253 + device_lock(&cdev->dev); 1254 + ret = __disable_cmf(cdev); 1255 + device_unlock(&cdev->dev); 1256 + 1276 1257 return ret; 1277 1258 } 1278 1259 ··· 1312 1295 return cmbops->set(cdev, 2); 1313 1296 } 1314 1297 1298 + /** 1299 + * cmf_reactivate() - reactivate measurement block updates 1300 + * 1301 + * Use this during resume from hibernate. 1302 + */ 1303 + void cmf_reactivate(void) 1304 + { 1305 + spin_lock(&cmb_area.lock); 1306 + if (!list_empty(&cmb_area.list)) 1307 + cmf_activate(cmb_area.mem, 1); 1308 + spin_unlock(&cmb_area.lock); 1309 + } 1310 + 1311 + static int __init init_cmbe(void) 1312 + { 1313 + cmbe_cache = kmem_cache_create("cmbe_cache", sizeof(struct cmbe), 1314 + __alignof__(struct cmbe), 0, NULL); 1315 + 1316 + return cmbe_cache ? 
0 : -ENOMEM; 1317 + } 1318 + 1315 1319 static int __init init_cmf(void) 1316 1320 { 1317 1321 char *format_string; 1318 - char *detect_string = "parameter"; 1322 + char *detect_string; 1323 + int ret; 1319 1324 1320 1325 /* 1321 1326 * If the user did not give a parameter, see if we are running on a ··· 1363 1324 case CMF_EXTENDED: 1364 1325 format_string = "extended"; 1365 1326 cmbops = &cmbops_extended; 1327 + 1328 + ret = init_cmbe(); 1329 + if (ret) 1330 + return ret; 1366 1331 break; 1367 1332 default: 1368 - return 1; 1333 + return -EINVAL; 1369 1334 } 1370 1335 pr_info("Channel measurement facility initialized using format " 1371 1336 "%s (mode %s)\n", format_string, detect_string); 1372 1337 return 0; 1373 1338 } 1374 - 1375 1339 module_init(init_cmf); 1376 1340 1377 1341
+1 -1
drivers/s390/cio/css.c
··· 44 44 int ret; 45 45 46 46 init_subchannel_id(&schid); 47 - ret = -ENODEV; 48 47 do { 49 48 do { 50 49 ret = fn(schid, data); ··· 1088 1089 if (chp) 1089 1090 chp_update_desc(chp); 1090 1091 } 1092 + cmf_reactivate(); 1091 1093 } 1092 1094 1093 1095 #ifdef CONFIG_PROC_FS
+3 -1
drivers/s390/cio/device.c
··· 1787 1787 cdev->drv = NULL; 1788 1788 cdev->private->int_class = IRQIO_CIO; 1789 1789 spin_unlock_irq(cdev->ccwlock); 1790 + __disable_cmf(cdev); 1791 + 1790 1792 return 0; 1791 1793 } 1792 1794 ··· 1799 1797 cdev = to_ccwdev(dev); 1800 1798 if (cdev->drv && cdev->drv->shutdown) 1801 1799 cdev->drv->shutdown(cdev); 1802 - disable_cmf(cdev); 1800 + __disable_cmf(cdev); 1803 1801 } 1804 1802 1805 1803 static int ccw_device_pm_prepare(struct device *dev)
+1 -5
drivers/s390/cio/device.h
··· 125 125 void ccw_device_disband_start(struct ccw_device *); 126 126 void ccw_device_disband_done(struct ccw_device *, int); 127 127 128 - void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *); 129 - void ccw_device_stlck_done(struct ccw_device *, void *, int); 130 - 131 - int ccw_device_call_handler(struct ccw_device *); 132 - 133 128 int ccw_device_stlck(struct ccw_device *); 134 129 135 130 /* Helper function for machine check handling. */ ··· 140 145 void retry_set_schib(struct ccw_device *cdev); 141 146 void cmf_retry_copy_block(struct ccw_device *); 142 147 int cmf_reenable(struct ccw_device *); 148 + void cmf_reactivate(void); 143 149 int ccw_set_cmf(struct ccw_device *cdev, int enable); 144 150 extern struct device_attribute dev_attr_cmb_enable; 145 151 #endif
+38
drivers/s390/cio/device_fsm.c
··· 731 731 } 732 732 733 733 /* 734 + * Pass interrupt to device driver. 735 + */ 736 + static int ccw_device_call_handler(struct ccw_device *cdev) 737 + { 738 + unsigned int stctl; 739 + int ending_status; 740 + 741 + /* 742 + * we allow for the device action handler if . 743 + * - we received ending status 744 + * - the action handler requested to see all interrupts 745 + * - we received an intermediate status 746 + * - fast notification was requested (primary status) 747 + * - unsolicited interrupts 748 + */ 749 + stctl = scsw_stctl(&cdev->private->irb.scsw); 750 + ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || 751 + (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || 752 + (stctl == SCSW_STCTL_STATUS_PEND); 753 + if (!ending_status && 754 + !cdev->private->options.repall && 755 + !(stctl & SCSW_STCTL_INTER_STATUS) && 756 + !(cdev->private->options.fast && 757 + (stctl & SCSW_STCTL_PRIM_STATUS))) 758 + return 0; 759 + 760 + if (ending_status) 761 + ccw_device_set_timeout(cdev, 0); 762 + 763 + if (cdev->handler) 764 + cdev->handler(cdev, cdev->private->intparm, 765 + &cdev->private->irb); 766 + 767 + memset(&cdev->private->irb, 0, sizeof(struct irb)); 768 + return 1; 769 + } 770 + 771 + /* 734 772 * Got an interrupt for a normal io (state online). 735 773 */ 736 774 static void
-107
drivers/s390/cio/device_ops.c
··· 412 412 return cio_resume(sch); 413 413 } 414 414 415 - /* 416 - * Pass interrupt to device driver. 417 - */ 418 - int 419 - ccw_device_call_handler(struct ccw_device *cdev) 420 - { 421 - unsigned int stctl; 422 - int ending_status; 423 - 424 - /* 425 - * we allow for the device action handler if . 426 - * - we received ending status 427 - * - the action handler requested to see all interrupts 428 - * - we received an intermediate status 429 - * - fast notification was requested (primary status) 430 - * - unsolicited interrupts 431 - */ 432 - stctl = scsw_stctl(&cdev->private->irb.scsw); 433 - ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || 434 - (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || 435 - (stctl == SCSW_STCTL_STATUS_PEND); 436 - if (!ending_status && 437 - !cdev->private->options.repall && 438 - !(stctl & SCSW_STCTL_INTER_STATUS) && 439 - !(cdev->private->options.fast && 440 - (stctl & SCSW_STCTL_PRIM_STATUS))) 441 - return 0; 442 - 443 - /* Clear pending timers for device driver initiated I/O. */ 444 - if (ending_status) 445 - ccw_device_set_timeout(cdev, 0); 446 - /* 447 - * Now we are ready to call the device driver interrupt handler. 448 - */ 449 - if (cdev->handler) 450 - cdev->handler(cdev, cdev->private->intparm, 451 - &cdev->private->irb); 452 - 453 - /* 454 - * Clear the old and now useless interrupt response block. 455 - */ 456 - memset(&cdev->private->irb, 0, sizeof(struct irb)); 457 - 458 - return 1; 459 - } 460 - 461 415 /** 462 416 * ccw_device_get_ciw() - Search for CIW command in extended sense data. 
463 417 * @cdev: ccw device to inspect ··· 454 500 455 501 sch = to_subchannel(cdev->dev.parent); 456 502 return sch->lpm; 457 - } 458 - 459 - struct stlck_data { 460 - struct completion done; 461 - int rc; 462 - }; 463 - 464 - void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc) 465 - { 466 - struct stlck_data *sdata = data; 467 - 468 - sdata->rc = rc; 469 - complete(&sdata->done); 470 - } 471 - 472 - /* 473 - * Perform unconditional reserve + release. 474 - */ 475 - int ccw_device_stlck(struct ccw_device *cdev) 476 - { 477 - struct subchannel *sch = to_subchannel(cdev->dev.parent); 478 - struct stlck_data data; 479 - u8 *buffer; 480 - int rc; 481 - 482 - /* Check if steal lock operation is valid for this device. */ 483 - if (cdev->drv) { 484 - if (!cdev->private->options.force) 485 - return -EINVAL; 486 - } 487 - buffer = kzalloc(64, GFP_DMA | GFP_KERNEL); 488 - if (!buffer) 489 - return -ENOMEM; 490 - init_completion(&data.done); 491 - data.rc = -EIO; 492 - spin_lock_irq(sch->lock); 493 - rc = cio_enable_subchannel(sch, (u32) (addr_t) sch); 494 - if (rc) 495 - goto out_unlock; 496 - /* Perform operation. */ 497 - cdev->private->state = DEV_STATE_STEAL_LOCK; 498 - ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]); 499 - spin_unlock_irq(sch->lock); 500 - /* Wait for operation to finish. */ 501 - if (wait_for_completion_interruptible(&data.done)) { 502 - /* Got a signal. */ 503 - spin_lock_irq(sch->lock); 504 - ccw_request_cancel(cdev); 505 - spin_unlock_irq(sch->lock); 506 - wait_for_completion(&data.done); 507 - } 508 - rc = data.rc; 509 - /* Check results. */ 510 - spin_lock_irq(sch->lock); 511 - cio_disable_subchannel(sch); 512 - cdev->private->state = DEV_STATE_BOXED; 513 - out_unlock: 514 - spin_unlock_irq(sch->lock); 515 - kfree(buffer); 516 - 517 - return rc; 518 503 } 519 504 520 505 /**
+62 -8
drivers/s390/cio/device_pgid.c
··· 9 9 10 10 #include <linux/kernel.h> 11 11 #include <linux/string.h> 12 + #include <linux/bitops.h> 12 13 #include <linux/types.h> 13 14 #include <linux/errno.h> 14 - #include <linux/bitops.h> 15 + #include <linux/slab.h> 15 16 #include <asm/ccwdev.h> 16 17 #include <asm/cio.h> 17 18 ··· 134 133 { 135 134 struct ccw_request *req = &cdev->private->req; 136 135 struct ccw1 *cp = cdev->private->iccws; 137 - int i = 8 - ffs(req->lpm); 136 + int i = pathmask_to_pos(req->lpm); 138 137 struct pgid *pgid = &cdev->private->pgid[i]; 139 138 140 139 pgid->inf.fc = fn; ··· 435 434 { 436 435 struct ccw_request *req = &cdev->private->req; 437 436 struct ccw1 *cp = cdev->private->iccws; 438 - int i = 8 - ffs(req->lpm); 437 + int i = pathmask_to_pos(req->lpm); 439 438 440 439 /* Channel program setup. */ 441 440 cp->cmd_code = CCW_CMD_SENSE_PGID; ··· 617 616 ccw_request_start(cdev); 618 617 } 619 618 619 + struct stlck_data { 620 + struct completion done; 621 + int rc; 622 + }; 623 + 620 624 static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2) 621 625 { 622 626 struct ccw_request *req = &cdev->private->req; ··· 640 634 641 635 static void stlck_callback(struct ccw_device *cdev, void *data, int rc) 642 636 { 643 - ccw_device_stlck_done(cdev, data, rc); 637 + struct stlck_data *sdata = data; 638 + 639 + sdata->rc = rc; 640 + complete(&sdata->done); 644 641 } 645 642 646 643 /** ··· 654 645 * @buf2: data pointer used in channel program 655 646 * 656 647 * Execute a channel program on @cdev to release an existing PGID reservation. 657 - * When finished, call ccw_device_stlck_done with a return code specifying the 658 - * result. 
659 648 */ 660 - void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1, 661 - void *buf2) 649 + static void ccw_device_stlck_start(struct ccw_device *cdev, void *data, 650 + void *buf1, void *buf2) 662 651 { 663 652 struct subchannel *sch = to_subchannel(cdev->dev.parent); 664 653 struct ccw_request *req = &cdev->private->req; ··· 674 667 ccw_request_start(cdev); 675 668 } 676 669 670 + /* 671 + * Perform unconditional reserve + release. 672 + */ 673 + int ccw_device_stlck(struct ccw_device *cdev) 674 + { 675 + struct subchannel *sch = to_subchannel(cdev->dev.parent); 676 + struct stlck_data data; 677 + u8 *buffer; 678 + int rc; 679 + 680 + /* Check if steal lock operation is valid for this device. */ 681 + if (cdev->drv) { 682 + if (!cdev->private->options.force) 683 + return -EINVAL; 684 + } 685 + buffer = kzalloc(64, GFP_DMA | GFP_KERNEL); 686 + if (!buffer) 687 + return -ENOMEM; 688 + init_completion(&data.done); 689 + data.rc = -EIO; 690 + spin_lock_irq(sch->lock); 691 + rc = cio_enable_subchannel(sch, (u32) (addr_t) sch); 692 + if (rc) 693 + goto out_unlock; 694 + /* Perform operation. */ 695 + cdev->private->state = DEV_STATE_STEAL_LOCK; 696 + ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]); 697 + spin_unlock_irq(sch->lock); 698 + /* Wait for operation to finish. */ 699 + if (wait_for_completion_interruptible(&data.done)) { 700 + /* Got a signal. */ 701 + spin_lock_irq(sch->lock); 702 + ccw_request_cancel(cdev); 703 + spin_unlock_irq(sch->lock); 704 + wait_for_completion(&data.done); 705 + } 706 + rc = data.rc; 707 + /* Check results. */ 708 + spin_lock_irq(sch->lock); 709 + cio_disable_subchannel(sch); 710 + cdev->private->state = DEV_STATE_BOXED; 711 + out_unlock: 712 + spin_unlock_irq(sch->lock); 713 + kfree(buffer); 714 + 715 + return rc; 716 + }
+2 -2
drivers/s390/crypto/Makefile
··· 3 3 # 4 4 5 5 ap-objs := ap_bus.o 6 - obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o 7 - obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o zcrypt_cex4.o 6 + obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcixcc.o 7 + obj-$(CONFIG_ZCRYPT) += zcrypt_cex2a.o zcrypt_cex4.o 8 8 obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o
+775 -1059
drivers/s390/crypto/ap_bus.c
··· 37 37 #include <linux/notifier.h> 38 38 #include <linux/kthread.h> 39 39 #include <linux/mutex.h> 40 + #include <linux/suspend.h> 40 41 #include <asm/reset.h> 41 42 #include <asm/airq.h> 42 43 #include <linux/atomic.h> ··· 48 47 #include <linux/crypto.h> 49 48 50 49 #include "ap_bus.h" 51 - 52 - /* Some prototypes. */ 53 - static void ap_scan_bus(struct work_struct *); 54 - static void ap_poll_all(unsigned long); 55 - static enum hrtimer_restart ap_poll_timeout(struct hrtimer *); 56 - static int ap_poll_thread_start(void); 57 - static void ap_poll_thread_stop(void); 58 - static void ap_request_timeout(unsigned long); 59 - static inline void ap_schedule_poll_timer(void); 60 - static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags); 61 - static int ap_device_remove(struct device *dev); 62 - static int ap_device_probe(struct device *dev); 63 - static void ap_interrupt_handler(struct airq_struct *airq); 64 - static void ap_reset(struct ap_device *ap_dev, unsigned long *flags); 65 - static void ap_config_timeout(unsigned long ptr); 66 - static int ap_select_domain(void); 67 - static void ap_query_configuration(void); 68 50 69 51 /* 70 52 * Module description. ··· 76 92 static LIST_HEAD(ap_device_list); 77 93 78 94 /* 79 - * Workqueue & timer for bus rescan. 95 + * Workqueue timer for bus rescan. 
80 96 */ 81 - static struct workqueue_struct *ap_work_queue; 82 97 static struct timer_list ap_config_timer; 83 98 static int ap_config_time = AP_CONFIG_TIME; 84 - static DECLARE_WORK(ap_config_work, ap_scan_bus); 99 + static void ap_scan_bus(struct work_struct *); 100 + static DECLARE_WORK(ap_scan_work, ap_scan_bus); 85 101 86 102 /* 87 103 * Tasklet & timer for AP request polling and interrupts 88 104 */ 89 - static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); 105 + static void ap_tasklet_fn(unsigned long); 106 + static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0); 90 107 static atomic_t ap_poll_requests = ATOMIC_INIT(0); 91 108 static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); 92 109 static struct task_struct *ap_poll_kthread = NULL; ··· 100 115 101 116 /* Suspend flag */ 102 117 static int ap_suspend_flag; 118 + /* Maximum domain id */ 119 + static int ap_max_domain_id; 103 120 /* Flag to check if domain was set through module parameter domain=. This is 104 121 * important when supsend and resume is done in a z/VM environment where the 105 122 * domain might change. */ ··· 109 122 static struct bus_type ap_bus_type; 110 123 111 124 /* Adapter interrupt definitions */ 125 + static void ap_interrupt_handler(struct airq_struct *airq); 126 + 112 127 static int ap_airq_flag; 113 128 114 129 static struct airq_struct ap_airq = { ··· 171 182 /** 172 183 * ap_test_queue(): Test adjunct processor queue. 173 184 * @qid: The AP queue number 174 - * @queue_depth: Pointer to queue depth value 175 - * @device_type: Pointer to device type value 185 + * @info: Pointer to queue descriptor 176 186 * 177 187 * Returns AP queue status structure. 
178 188 */ 179 189 static inline struct ap_queue_status 180 - ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type) 190 + ap_test_queue(ap_qid_t qid, unsigned long *info) 181 191 { 182 192 register unsigned long reg0 asm ("0") = qid; 183 193 register struct ap_queue_status reg1 asm ("1"); 184 194 register unsigned long reg2 asm ("2") = 0UL; 185 195 196 + if (test_facility(15)) 197 + reg0 |= 1UL << 23; /* set APFT T bit*/ 186 198 asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */ 187 199 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); 188 - *device_type = (int) (reg2 >> 24); 189 - *queue_depth = (int) (reg2 & 0xff); 200 + if (info) 201 + *info = reg2; 190 202 return reg1; 191 - } 192 - 193 - /** 194 - * ap_query_facilities(): PQAP(TAPQ) query facilities. 195 - * @qid: The AP queue number 196 - * 197 - * Returns content of general register 2 after the PQAP(TAPQ) 198 - * instruction was called. 199 - */ 200 - static inline unsigned long ap_query_facilities(ap_qid_t qid) 201 - { 202 - register unsigned long reg0 asm ("0") = qid | 0x00800000UL; 203 - register unsigned long reg1 asm ("1"); 204 - register unsigned long reg2 asm ("2") = 0UL; 205 - 206 - asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */ 207 - : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); 208 - return reg2; 209 203 } 210 204 211 205 /** ··· 231 259 return reg1_out; 232 260 } 233 261 234 - static inline struct ap_queue_status 235 - __ap_query_functions(ap_qid_t qid, unsigned int *functions) 236 - { 237 - register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23); 238 - register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID; 239 - register unsigned long reg2 asm ("2"); 240 - 241 - asm volatile( 242 - ".long 0xb2af0000\n" /* PQAP(TAPQ) */ 243 - "0:\n" 244 - EX_TABLE(0b, 0b) 245 - : "+d" (reg0), "+d" (reg1), "=d" (reg2) 246 - : 247 - : "cc"); 248 - 249 - *functions = (unsigned int)(reg2 >> 32); 250 - return reg1; 251 - } 252 - 253 - static inline int 
__ap_query_configuration(struct ap_config_info *config) 262 + /** 263 + * ap_query_configuration(): Get AP configuration data 264 + * 265 + * Returns 0 on success, or -EOPNOTSUPP. 266 + */ 267 + static inline int ap_query_configuration(void) 254 268 { 255 269 register unsigned long reg0 asm ("0") = 0x04000000UL; 256 270 register unsigned long reg1 asm ("1") = -EINVAL; 257 - register unsigned char *reg2 asm ("2") = (unsigned char *)config; 271 + register void *reg2 asm ("2") = (void *) ap_configuration; 258 272 273 + if (!ap_configuration) 274 + return -EOPNOTSUPP; 259 275 asm volatile( 260 276 ".long 0xb2af0000\n" /* PQAP(QCI) */ 261 277 "0: la %1,0\n" ··· 257 297 } 258 298 259 299 /** 260 - * ap_query_functions(): Query supported functions. 261 - * @qid: The AP queue number 262 - * @functions: Pointer to functions field. 263 - * 264 - * Returns 265 - * 0 on success. 266 - * -ENODEV if queue not valid. 267 - * -EBUSY if device busy. 268 - * -EINVAL if query function is not supported 300 + * ap_init_configuration(): Allocate and query configuration array. 
269 301 */ 270 - static int ap_query_functions(ap_qid_t qid, unsigned int *functions) 302 + static void ap_init_configuration(void) 271 303 { 272 - struct ap_queue_status status; 304 + if (!ap_configuration_available()) 305 + return; 273 306 274 - status = __ap_query_functions(qid, functions); 275 - 276 - if (ap_queue_status_invalid_test(&status)) 277 - return -ENODEV; 278 - 279 - switch (status.response_code) { 280 - case AP_RESPONSE_NORMAL: 281 - return 0; 282 - case AP_RESPONSE_Q_NOT_AVAIL: 283 - case AP_RESPONSE_DECONFIGURED: 284 - case AP_RESPONSE_CHECKSTOPPED: 285 - case AP_RESPONSE_INVALID_ADDRESS: 286 - return -ENODEV; 287 - case AP_RESPONSE_RESET_IN_PROGRESS: 288 - case AP_RESPONSE_BUSY: 289 - case AP_RESPONSE_OTHERWISE_CHANGED: 290 - default: 291 - return -EBUSY; 307 + ap_configuration = kzalloc(sizeof(*ap_configuration), GFP_KERNEL); 308 + if (!ap_configuration) 309 + return; 310 + if (ap_query_configuration() != 0) { 311 + kfree(ap_configuration); 312 + ap_configuration = NULL; 313 + return; 292 314 } 315 + } 316 + 317 + /* 318 + * ap_test_config(): helper function to extract the nrth bit 319 + * within the unsigned int array field. 320 + */ 321 + static inline int ap_test_config(unsigned int *field, unsigned int nr) 322 + { 323 + return ap_test_bit((field + (nr >> 5)), (nr & 0x1f)); 324 + } 325 + 326 + /* 327 + * ap_test_config_card_id(): Test, whether an AP card ID is configured. 328 + * @id AP card ID 329 + * 330 + * Returns 0 if the card is not configured 331 + * 1 if the card is configured or 332 + * if the configuration information is not available 333 + */ 334 + static inline int ap_test_config_card_id(unsigned int id) 335 + { 336 + if (!ap_configuration) /* QCI not supported */ 337 + return 1; 338 + return ap_test_config(ap_configuration->apm, id); 339 + } 340 + 341 + /* 342 + * ap_test_config_domain(): Test, whether an AP usage domain is configured. 
343 + * @domain AP usage domain ID 344 + * 345 + * Returns 0 if the usage domain is not configured 346 + * 1 if the usage domain is configured or 347 + * if the configuration information is not available 348 + */ 349 + static inline int ap_test_config_domain(unsigned int domain) 350 + { 351 + if (!ap_configuration) /* QCI not supported */ 352 + return domain < 16; 353 + return ap_test_config(ap_configuration->aqm, domain); 293 354 } 294 355 295 356 /** ··· 335 354 case AP_RESPONSE_DECONFIGURED: 336 355 case AP_RESPONSE_CHECKSTOPPED: 337 356 case AP_RESPONSE_INVALID_ADDRESS: 338 - return -ENODEV; 357 + pr_err("Registering adapter interrupts for AP %d failed\n", 358 + AP_QID_DEVICE(ap_dev->qid)); 359 + return -EOPNOTSUPP; 339 360 case AP_RESPONSE_RESET_IN_PROGRESS: 340 361 case AP_RESPONSE_BUSY: 341 362 default: ··· 463 480 EXPORT_SYMBOL(ap_recv); 464 481 465 482 /** 466 - * __ap_schedule_poll_timer(): Schedule poll timer. 467 - * 468 - * Set up the timer to run the poll tasklet 469 - */ 470 - static inline void __ap_schedule_poll_timer(void) 471 - { 472 - ktime_t hr_time; 473 - 474 - spin_lock_bh(&ap_poll_timer_lock); 475 - if (!hrtimer_is_queued(&ap_poll_timer) && !ap_suspend_flag) { 476 - hr_time = ktime_set(0, poll_timeout); 477 - hrtimer_forward_now(&ap_poll_timer, hr_time); 478 - hrtimer_restart(&ap_poll_timer); 479 - } 480 - spin_unlock_bh(&ap_poll_timer_lock); 481 - } 482 - 483 - /** 484 - * ap_schedule_poll_timer(): Schedule poll timer. 485 - * 486 - * Set up the timer to run the poll tasklet 487 - */ 488 - static inline void ap_schedule_poll_timer(void) 489 - { 490 - if (ap_using_interrupts()) 491 - return; 492 - __ap_schedule_poll_timer(); 493 - } 494 - 495 - 496 - /** 497 483 * ap_query_queue(): Check if an AP queue is available. 
498 484 * @qid: The AP queue number 499 485 * @queue_depth: Pointer to queue depth value 500 486 * @device_type: Pointer to device type value 487 + * @facilities: Pointer to facility indicator 501 488 */ 502 - static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type) 489 + static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type, 490 + unsigned int *facilities) 503 491 { 504 492 struct ap_queue_status status; 505 - int t_depth, t_device_type; 493 + unsigned long info; 494 + int nd; 506 495 507 - status = ap_test_queue(qid, &t_depth, &t_device_type); 496 + if (!ap_test_config_card_id(AP_QID_DEVICE(qid))) 497 + return -ENODEV; 498 + 499 + status = ap_test_queue(qid, &info); 508 500 switch (status.response_code) { 509 501 case AP_RESPONSE_NORMAL: 510 - *queue_depth = t_depth + 1; 511 - *device_type = t_device_type; 502 + *queue_depth = (int)(info & 0xff); 503 + *device_type = (int)((info >> 24) & 0xff); 504 + *facilities = (unsigned int)(info >> 32); 505 + /* Update maximum domain id */ 506 + nd = (info >> 16) & 0xff; 507 + if ((info & (1UL << 57)) && nd > 0) 508 + ap_max_domain_id = nd; 512 509 return 0; 513 510 case AP_RESPONSE_Q_NOT_AVAIL: 514 511 case AP_RESPONSE_DECONFIGURED: ··· 504 541 } 505 542 } 506 543 544 + /* State machine definitions and helpers */ 545 + 546 + static void ap_sm_wait(enum ap_wait wait) 547 + { 548 + ktime_t hr_time; 549 + 550 + switch (wait) { 551 + case AP_WAIT_AGAIN: 552 + case AP_WAIT_INTERRUPT: 553 + if (ap_using_interrupts()) 554 + break; 555 + if (ap_poll_kthread) { 556 + wake_up(&ap_poll_wait); 557 + break; 558 + } 559 + /* Fall through */ 560 + case AP_WAIT_TIMEOUT: 561 + spin_lock_bh(&ap_poll_timer_lock); 562 + if (!hrtimer_is_queued(&ap_poll_timer)) { 563 + hr_time = ktime_set(0, poll_timeout); 564 + hrtimer_forward_now(&ap_poll_timer, hr_time); 565 + hrtimer_restart(&ap_poll_timer); 566 + } 567 + spin_unlock_bh(&ap_poll_timer_lock); 568 + break; 569 + case AP_WAIT_NONE: 570 + default: 571 + 
break; 572 + } 573 + } 574 + 575 + static enum ap_wait ap_sm_nop(struct ap_device *ap_dev) 576 + { 577 + return AP_WAIT_NONE; 578 + } 579 + 507 580 /** 508 - * ap_init_queue(): Reset an AP queue. 581 + * ap_sm_recv(): Receive pending reply messages from an AP device but do 582 + * not change the state of the device. 583 + * @ap_dev: pointer to the AP device 584 + * 585 + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 586 + */ 587 + static struct ap_queue_status ap_sm_recv(struct ap_device *ap_dev) 588 + { 589 + struct ap_queue_status status; 590 + struct ap_message *ap_msg; 591 + 592 + status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid, 593 + ap_dev->reply->message, ap_dev->reply->length); 594 + switch (status.response_code) { 595 + case AP_RESPONSE_NORMAL: 596 + atomic_dec(&ap_poll_requests); 597 + ap_dev->queue_count--; 598 + if (ap_dev->queue_count > 0) 599 + mod_timer(&ap_dev->timeout, 600 + jiffies + ap_dev->drv->request_timeout); 601 + list_for_each_entry(ap_msg, &ap_dev->pendingq, list) { 602 + if (ap_msg->psmid != ap_dev->reply->psmid) 603 + continue; 604 + list_del_init(&ap_msg->list); 605 + ap_dev->pendingq_count--; 606 + ap_msg->receive(ap_dev, ap_msg, ap_dev->reply); 607 + break; 608 + } 609 + case AP_RESPONSE_NO_PENDING_REPLY: 610 + if (!status.queue_empty || ap_dev->queue_count <= 0) 611 + break; 612 + /* The card shouldn't forget requests but who knows. */ 613 + atomic_sub(ap_dev->queue_count, &ap_poll_requests); 614 + ap_dev->queue_count = 0; 615 + list_splice_init(&ap_dev->pendingq, &ap_dev->requestq); 616 + ap_dev->requestq_count += ap_dev->pendingq_count; 617 + ap_dev->pendingq_count = 0; 618 + break; 619 + default: 620 + break; 621 + } 622 + return status; 623 + } 624 + 625 + /** 626 + * ap_sm_read(): Receive pending reply messages from an AP device. 
627 + * @ap_dev: pointer to the AP device 628 + * 629 + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 630 + */ 631 + static enum ap_wait ap_sm_read(struct ap_device *ap_dev) 632 + { 633 + struct ap_queue_status status; 634 + 635 + status = ap_sm_recv(ap_dev); 636 + switch (status.response_code) { 637 + case AP_RESPONSE_NORMAL: 638 + if (ap_dev->queue_count > 0) 639 + return AP_WAIT_AGAIN; 640 + ap_dev->state = AP_STATE_IDLE; 641 + return AP_WAIT_NONE; 642 + case AP_RESPONSE_NO_PENDING_REPLY: 643 + if (ap_dev->queue_count > 0) 644 + return AP_WAIT_INTERRUPT; 645 + ap_dev->state = AP_STATE_IDLE; 646 + return AP_WAIT_NONE; 647 + default: 648 + ap_dev->state = AP_STATE_BORKED; 649 + return AP_WAIT_NONE; 650 + } 651 + } 652 + 653 + /** 654 + * ap_sm_write(): Send messages from the request queue to an AP device. 655 + * @ap_dev: pointer to the AP device 656 + * 657 + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 658 + */ 659 + static enum ap_wait ap_sm_write(struct ap_device *ap_dev) 660 + { 661 + struct ap_queue_status status; 662 + struct ap_message *ap_msg; 663 + 664 + if (ap_dev->requestq_count <= 0) 665 + return AP_WAIT_NONE; 666 + /* Start the next request on the queue. 
*/ 667 + ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list); 668 + status = __ap_send(ap_dev->qid, ap_msg->psmid, 669 + ap_msg->message, ap_msg->length, ap_msg->special); 670 + switch (status.response_code) { 671 + case AP_RESPONSE_NORMAL: 672 + atomic_inc(&ap_poll_requests); 673 + ap_dev->queue_count++; 674 + if (ap_dev->queue_count == 1) 675 + mod_timer(&ap_dev->timeout, 676 + jiffies + ap_dev->drv->request_timeout); 677 + list_move_tail(&ap_msg->list, &ap_dev->pendingq); 678 + ap_dev->requestq_count--; 679 + ap_dev->pendingq_count++; 680 + if (ap_dev->queue_count < ap_dev->queue_depth) { 681 + ap_dev->state = AP_STATE_WORKING; 682 + return AP_WAIT_AGAIN; 683 + } 684 + /* fall through */ 685 + case AP_RESPONSE_Q_FULL: 686 + ap_dev->state = AP_STATE_QUEUE_FULL; 687 + return AP_WAIT_INTERRUPT; 688 + case AP_RESPONSE_RESET_IN_PROGRESS: 689 + ap_dev->state = AP_STATE_RESET_WAIT; 690 + return AP_WAIT_TIMEOUT; 691 + case AP_RESPONSE_MESSAGE_TOO_BIG: 692 + case AP_RESPONSE_REQ_FAC_NOT_INST: 693 + list_del_init(&ap_msg->list); 694 + ap_dev->requestq_count--; 695 + ap_msg->rc = -EINVAL; 696 + ap_msg->receive(ap_dev, ap_msg, NULL); 697 + return AP_WAIT_AGAIN; 698 + default: 699 + ap_dev->state = AP_STATE_BORKED; 700 + return AP_WAIT_NONE; 701 + } 702 + } 703 + 704 + /** 705 + * ap_sm_read_write(): Send and receive messages to/from an AP device. 706 + * @ap_dev: pointer to the AP device 707 + * 708 + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 709 + */ 710 + static enum ap_wait ap_sm_read_write(struct ap_device *ap_dev) 711 + { 712 + return min(ap_sm_read(ap_dev), ap_sm_write(ap_dev)); 713 + } 714 + 715 + /** 716 + * ap_sm_reset(): Reset an AP queue. 509 717 * @qid: The AP queue number 510 718 * 511 719 * Submit the Reset command to an AP queue. 512 - * Since the reset is asynchron set the state to 'RESET_IN_PROGRESS' 513 - * and check later via ap_poll_queue() if the reset is done. 
514 720 */ 515 - static int ap_init_queue(struct ap_device *ap_dev) 721 + static enum ap_wait ap_sm_reset(struct ap_device *ap_dev) 516 722 { 517 723 struct ap_queue_status status; 518 724 519 725 status = ap_reset_queue(ap_dev->qid); 520 726 switch (status.response_code) { 521 727 case AP_RESPONSE_NORMAL: 522 - ap_dev->interrupt = AP_INTR_DISABLED; 523 - ap_dev->reset = AP_RESET_IN_PROGRESS; 524 - return 0; 525 728 case AP_RESPONSE_RESET_IN_PROGRESS: 729 + ap_dev->state = AP_STATE_RESET_WAIT; 730 + ap_dev->interrupt = AP_INTR_DISABLED; 731 + return AP_WAIT_TIMEOUT; 526 732 case AP_RESPONSE_BUSY: 527 - return -EBUSY; 733 + return AP_WAIT_TIMEOUT; 528 734 case AP_RESPONSE_Q_NOT_AVAIL: 529 735 case AP_RESPONSE_DECONFIGURED: 530 736 case AP_RESPONSE_CHECKSTOPPED: 531 737 default: 532 - return -ENODEV; 738 + ap_dev->state = AP_STATE_BORKED; 739 + return AP_WAIT_NONE; 533 740 } 534 741 } 535 742 536 743 /** 537 - * ap_increase_queue_count(): Arm request timeout. 538 - * @ap_dev: Pointer to an AP device. 744 + * ap_sm_reset_wait(): Test queue for completion of the reset operation 745 + * @ap_dev: pointer to the AP device 539 746 * 540 - * Arm request timeout if an AP device was idle and a new request is submitted. 747 + * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0. 541 748 */ 542 - static void ap_increase_queue_count(struct ap_device *ap_dev) 749 + static enum ap_wait ap_sm_reset_wait(struct ap_device *ap_dev) 543 750 { 544 - int timeout = ap_dev->drv->request_timeout; 751 + struct ap_queue_status status; 752 + unsigned long info; 545 753 546 - ap_dev->queue_count++; 547 - if (ap_dev->queue_count == 1) { 548 - mod_timer(&ap_dev->timeout, jiffies + timeout); 549 - ap_dev->reset = AP_RESET_ARMED; 550 - } 551 - } 552 - 553 - /** 554 - * ap_decrease_queue_count(): Decrease queue count. 555 - * @ap_dev: Pointer to an AP device. 556 - * 557 - * If AP device is still alive, re-schedule request timeout if there are still 558 - * pending requests. 
559 - */ 560 - static void ap_decrease_queue_count(struct ap_device *ap_dev) 561 - { 562 - int timeout = ap_dev->drv->request_timeout; 563 - 564 - ap_dev->queue_count--; 565 754 if (ap_dev->queue_count > 0) 566 - mod_timer(&ap_dev->timeout, jiffies + timeout); 755 + /* Try to read a completed message and get the status */ 756 + status = ap_sm_recv(ap_dev); 567 757 else 568 - /* 569 - * The timeout timer should to be disabled now - since 570 - * del_timer_sync() is very expensive, we just tell via the 571 - * reset flag to ignore the pending timeout timer. 572 - */ 573 - ap_dev->reset = AP_RESET_IGNORE; 758 + /* Get the status with TAPQ */ 759 + status = ap_test_queue(ap_dev->qid, &info); 760 + 761 + switch (status.response_code) { 762 + case AP_RESPONSE_NORMAL: 763 + if (ap_using_interrupts() && 764 + ap_queue_enable_interruption(ap_dev, 765 + ap_airq.lsi_ptr) == 0) 766 + ap_dev->state = AP_STATE_SETIRQ_WAIT; 767 + else 768 + ap_dev->state = (ap_dev->queue_count > 0) ? 769 + AP_STATE_WORKING : AP_STATE_IDLE; 770 + return AP_WAIT_AGAIN; 771 + case AP_RESPONSE_BUSY: 772 + case AP_RESPONSE_RESET_IN_PROGRESS: 773 + return AP_WAIT_TIMEOUT; 774 + case AP_RESPONSE_Q_NOT_AVAIL: 775 + case AP_RESPONSE_DECONFIGURED: 776 + case AP_RESPONSE_CHECKSTOPPED: 777 + default: 778 + ap_dev->state = AP_STATE_BORKED; 779 + return AP_WAIT_NONE; 780 + } 574 781 } 782 + 783 + /** 784 + * ap_sm_setirq_wait(): Test queue for completion of the irq enablement 785 + * @ap_dev: pointer to the AP device 786 + * 787 + * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0. 
788 + */ 789 + static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev) 790 + { 791 + struct ap_queue_status status; 792 + unsigned long info; 793 + 794 + if (ap_dev->queue_count > 0) 795 + /* Try to read a completed message and get the status */ 796 + status = ap_sm_recv(ap_dev); 797 + else 798 + /* Get the status with TAPQ */ 799 + status = ap_test_queue(ap_dev->qid, &info); 800 + 801 + if (status.int_enabled == 1) { 802 + /* Irqs are now enabled */ 803 + ap_dev->interrupt = AP_INTR_ENABLED; 804 + ap_dev->state = (ap_dev->queue_count > 0) ? 805 + AP_STATE_WORKING : AP_STATE_IDLE; 806 + } 807 + 808 + switch (status.response_code) { 809 + case AP_RESPONSE_NORMAL: 810 + if (ap_dev->queue_count > 0) 811 + return AP_WAIT_AGAIN; 812 + /* fallthrough */ 813 + case AP_RESPONSE_NO_PENDING_REPLY: 814 + return AP_WAIT_TIMEOUT; 815 + default: 816 + ap_dev->state = AP_STATE_BORKED; 817 + return AP_WAIT_NONE; 818 + } 819 + } 820 + 821 + /* 822 + * AP state machine jump table 823 + */ 824 + ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = { 825 + [AP_STATE_RESET_START] = { 826 + [AP_EVENT_POLL] = ap_sm_reset, 827 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 828 + }, 829 + [AP_STATE_RESET_WAIT] = { 830 + [AP_EVENT_POLL] = ap_sm_reset_wait, 831 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 832 + }, 833 + [AP_STATE_SETIRQ_WAIT] = { 834 + [AP_EVENT_POLL] = ap_sm_setirq_wait, 835 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 836 + }, 837 + [AP_STATE_IDLE] = { 838 + [AP_EVENT_POLL] = ap_sm_write, 839 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 840 + }, 841 + [AP_STATE_WORKING] = { 842 + [AP_EVENT_POLL] = ap_sm_read_write, 843 + [AP_EVENT_TIMEOUT] = ap_sm_reset, 844 + }, 845 + [AP_STATE_QUEUE_FULL] = { 846 + [AP_EVENT_POLL] = ap_sm_read, 847 + [AP_EVENT_TIMEOUT] = ap_sm_reset, 848 + }, 849 + [AP_STATE_SUSPEND_WAIT] = { 850 + [AP_EVENT_POLL] = ap_sm_read, 851 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 852 + }, 853 + [AP_STATE_BORKED] = { 854 + [AP_EVENT_POLL] = ap_sm_nop, 855 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 856 + }, 857 + }; 
858 + 859 + static inline enum ap_wait ap_sm_event(struct ap_device *ap_dev, 860 + enum ap_event event) 861 + { 862 + return ap_jumptable[ap_dev->state][event](ap_dev); 863 + } 864 + 865 + static inline enum ap_wait ap_sm_event_loop(struct ap_device *ap_dev, 866 + enum ap_event event) 867 + { 868 + enum ap_wait wait; 869 + 870 + while ((wait = ap_sm_event(ap_dev, event)) == AP_WAIT_AGAIN) 871 + ; 872 + return wait; 873 + } 874 + 875 + /** 876 + * ap_request_timeout(): Handling of request timeouts 877 + * @data: Holds the AP device. 878 + * 879 + * Handles request timeouts. 880 + */ 881 + static void ap_request_timeout(unsigned long data) 882 + { 883 + struct ap_device *ap_dev = (struct ap_device *) data; 884 + 885 + if (ap_suspend_flag) 886 + return; 887 + spin_lock_bh(&ap_dev->lock); 888 + ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_TIMEOUT)); 889 + spin_unlock_bh(&ap_dev->lock); 890 + } 891 + 892 + /** 893 + * ap_poll_timeout(): AP receive polling for finished AP requests. 894 + * @unused: Unused pointer. 895 + * 896 + * Schedules the AP tasklet using a high resolution timer. 897 + */ 898 + static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused) 899 + { 900 + if (!ap_suspend_flag) 901 + tasklet_schedule(&ap_tasklet); 902 + return HRTIMER_NORESTART; 903 + } 904 + 905 + /** 906 + * ap_interrupt_handler() - Schedule ap_tasklet on interrupt 907 + * @airq: pointer to adapter interrupt descriptor 908 + */ 909 + static void ap_interrupt_handler(struct airq_struct *airq) 910 + { 911 + inc_irq_stat(IRQIO_APB); 912 + if (!ap_suspend_flag) 913 + tasklet_schedule(&ap_tasklet); 914 + } 915 + 916 + /** 917 + * ap_tasklet_fn(): Tasklet to poll all AP devices. 918 + * @dummy: Unused variable 919 + * 920 + * Poll all AP devices on the bus. 921 + */ 922 + static void ap_tasklet_fn(unsigned long dummy) 923 + { 924 + struct ap_device *ap_dev; 925 + enum ap_wait wait = AP_WAIT_NONE; 926 + 927 + /* Reset the indicator if interrupts are used. 
Thus new interrupts can 928 + * be received. Doing it in the beginning of the tasklet is therefor 929 + * important that no requests on any AP get lost. 930 + */ 931 + if (ap_using_interrupts()) 932 + xchg(ap_airq.lsi_ptr, 0); 933 + 934 + spin_lock(&ap_device_list_lock); 935 + list_for_each_entry(ap_dev, &ap_device_list, list) { 936 + spin_lock_bh(&ap_dev->lock); 937 + wait = min(wait, ap_sm_event_loop(ap_dev, AP_EVENT_POLL)); 938 + spin_unlock_bh(&ap_dev->lock); 939 + } 940 + spin_unlock(&ap_device_list_lock); 941 + ap_sm_wait(wait); 942 + } 943 + 944 + /** 945 + * ap_poll_thread(): Thread that polls for finished requests. 946 + * @data: Unused pointer 947 + * 948 + * AP bus poll thread. The purpose of this thread is to poll for 949 + * finished requests in a loop if there is a "free" cpu - that is 950 + * a cpu that doesn't have anything better to do. The polling stops 951 + * as soon as there is another task or if all messages have been 952 + * delivered. 953 + */ 954 + static int ap_poll_thread(void *data) 955 + { 956 + DECLARE_WAITQUEUE(wait, current); 957 + 958 + set_user_nice(current, MAX_NICE); 959 + set_freezable(); 960 + while (!kthread_should_stop()) { 961 + add_wait_queue(&ap_poll_wait, &wait); 962 + set_current_state(TASK_INTERRUPTIBLE); 963 + if (ap_suspend_flag || 964 + atomic_read(&ap_poll_requests) <= 0) { 965 + schedule(); 966 + try_to_freeze(); 967 + } 968 + set_current_state(TASK_RUNNING); 969 + remove_wait_queue(&ap_poll_wait, &wait); 970 + if (need_resched()) { 971 + schedule(); 972 + try_to_freeze(); 973 + continue; 974 + } 975 + ap_tasklet_fn(0); 976 + } while (!kthread_should_stop()); 977 + return 0; 978 + } 979 + 980 + static int ap_poll_thread_start(void) 981 + { 982 + int rc; 983 + 984 + if (ap_using_interrupts() || ap_poll_kthread) 985 + return 0; 986 + mutex_lock(&ap_poll_thread_mutex); 987 + ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll"); 988 + rc = PTR_RET(ap_poll_kthread); 989 + if (rc) 990 + ap_poll_kthread = NULL; 
991 + mutex_unlock(&ap_poll_thread_mutex); 992 + return rc; 993 + } 994 + 995 + static void ap_poll_thread_stop(void) 996 + { 997 + if (!ap_poll_kthread) 998 + return; 999 + mutex_lock(&ap_poll_thread_mutex); 1000 + kthread_stop(ap_poll_kthread); 1001 + ap_poll_kthread = NULL; 1002 + mutex_unlock(&ap_poll_thread_mutex); 1003 + } 1004 + 1005 + /** 1006 + * ap_queue_message(): Queue a request to an AP device. 1007 + * @ap_dev: The AP device to queue the message to 1008 + * @ap_msg: The message that is to be added 1009 + */ 1010 + void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 1011 + { 1012 + /* For asynchronous message handling a valid receive-callback 1013 + * is required. */ 1014 + BUG_ON(!ap_msg->receive); 1015 + 1016 + spin_lock_bh(&ap_dev->lock); 1017 + /* Queue the message. */ 1018 + list_add_tail(&ap_msg->list, &ap_dev->requestq); 1019 + ap_dev->requestq_count++; 1020 + ap_dev->total_request_count++; 1021 + /* Send/receive as many request from the queue as possible. */ 1022 + ap_sm_wait(ap_sm_event_loop(ap_dev, AP_EVENT_POLL)); 1023 + spin_unlock_bh(&ap_dev->lock); 1024 + } 1025 + EXPORT_SYMBOL(ap_queue_message); 1026 + 1027 + /** 1028 + * ap_cancel_message(): Cancel a crypto request. 1029 + * @ap_dev: The AP device that has the message queued 1030 + * @ap_msg: The message that is to be removed 1031 + * 1032 + * Cancel a crypto request. This is done by removing the request 1033 + * from the device pending or request queue. Note that the 1034 + * request stays on the AP queue. When it finishes the message 1035 + * reply will be discarded because the psmid can't be found. 
1036 + */ 1037 + void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 1038 + { 1039 + struct ap_message *tmp; 1040 + 1041 + spin_lock_bh(&ap_dev->lock); 1042 + if (!list_empty(&ap_msg->list)) { 1043 + list_for_each_entry(tmp, &ap_dev->pendingq, list) 1044 + if (tmp->psmid == ap_msg->psmid) { 1045 + ap_dev->pendingq_count--; 1046 + goto found; 1047 + } 1048 + ap_dev->requestq_count--; 1049 + found: 1050 + list_del_init(&ap_msg->list); 1051 + } 1052 + spin_unlock_bh(&ap_dev->lock); 1053 + } 1054 + EXPORT_SYMBOL(ap_cancel_message); 575 1055 576 1056 /* 577 1057 * AP device related attributes. ··· 1096 690 int rc = 0; 1097 691 1098 692 spin_lock_bh(&ap_dev->lock); 1099 - switch (ap_dev->reset) { 1100 - case AP_RESET_IGNORE: 1101 - rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n"); 1102 - break; 1103 - case AP_RESET_ARMED: 1104 - rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n"); 1105 - break; 1106 - case AP_RESET_DO: 1107 - rc = snprintf(buf, PAGE_SIZE, "Reset Timer expired.\n"); 1108 - break; 1109 - case AP_RESET_IN_PROGRESS: 693 + switch (ap_dev->state) { 694 + case AP_STATE_RESET_START: 695 + case AP_STATE_RESET_WAIT: 1110 696 rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n"); 1111 697 break; 1112 - default: 698 + case AP_STATE_WORKING: 699 + case AP_STATE_QUEUE_FULL: 700 + rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n"); 1113 701 break; 702 + default: 703 + rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n"); 1114 704 } 1115 705 spin_unlock_bh(&ap_dev->lock); 1116 706 return rc; ··· 1121 719 int rc = 0; 1122 720 1123 721 spin_lock_bh(&ap_dev->lock); 1124 - switch (ap_dev->interrupt) { 1125 - case AP_INTR_DISABLED: 1126 - rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n"); 1127 - break; 1128 - case AP_INTR_ENABLED: 1129 - rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n"); 1130 - break; 1131 - case AP_INTR_IN_PROGRESS: 722 + if (ap_dev->state == AP_STATE_SETIRQ_WAIT) 1132 723 rc = snprintf(buf, PAGE_SIZE, "Enable 
Interrupt pending.\n"); 1133 - break; 1134 - } 724 + else if (ap_dev->interrupt == AP_INTR_ENABLED) 725 + rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n"); 726 + else 727 + rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n"); 1135 728 spin_unlock_bh(&ap_dev->lock); 1136 729 return rc; 1137 730 } ··· 1220 823 return retval; 1221 824 } 1222 825 1223 - static int ap_bus_suspend(struct device *dev, pm_message_t state) 826 + static int ap_dev_suspend(struct device *dev, pm_message_t state) 1224 827 { 1225 828 struct ap_device *ap_dev = to_ap_dev(dev); 1226 - unsigned long flags; 1227 829 1228 - if (!ap_suspend_flag) { 1229 - ap_suspend_flag = 1; 1230 - 1231 - /* Disable scanning for devices, thus we do not want to scan 1232 - * for them after removing. 1233 - */ 1234 - del_timer_sync(&ap_config_timer); 1235 - if (ap_work_queue != NULL) { 1236 - destroy_workqueue(ap_work_queue); 1237 - ap_work_queue = NULL; 1238 - } 1239 - 1240 - tasklet_disable(&ap_tasklet); 1241 - } 1242 830 /* Poll on the device until all requests are finished. */ 1243 - do { 1244 - flags = 0; 1245 - spin_lock_bh(&ap_dev->lock); 1246 - __ap_poll_device(ap_dev, &flags); 1247 - spin_unlock_bh(&ap_dev->lock); 1248 - } while ((flags & 1) || (flags & 2)); 1249 - 1250 831 spin_lock_bh(&ap_dev->lock); 1251 - ap_dev->unregistered = 1; 832 + ap_dev->state = AP_STATE_SUSPEND_WAIT; 833 + while (ap_sm_event(ap_dev, AP_EVENT_POLL) != AP_WAIT_NONE) 834 + ; 835 + ap_dev->state = AP_STATE_BORKED; 1252 836 spin_unlock_bh(&ap_dev->lock); 1253 - 1254 837 return 0; 1255 838 } 1256 839 1257 - static int ap_bus_resume(struct device *dev) 840 + static int ap_dev_resume(struct device *dev) 1258 841 { 1259 - struct ap_device *ap_dev = to_ap_dev(dev); 842 + return 0; 843 + } 844 + 845 + static void ap_bus_suspend(void) 846 + { 847 + ap_suspend_flag = 1; 848 + /* 849 + * Disable scanning for devices, thus we do not want to scan 850 + * for them after removing. 
851 + */ 852 + flush_work(&ap_scan_work); 853 + tasklet_disable(&ap_tasklet); 854 + } 855 + 856 + static int __ap_devices_unregister(struct device *dev, void *dummy) 857 + { 858 + device_unregister(dev); 859 + return 0; 860 + } 861 + 862 + static void ap_bus_resume(void) 863 + { 1260 864 int rc; 1261 865 1262 - if (ap_suspend_flag) { 1263 - ap_suspend_flag = 0; 1264 - if (ap_interrupts_available()) { 1265 - if (!ap_using_interrupts()) { 1266 - rc = register_adapter_interrupt(&ap_airq); 1267 - ap_airq_flag = (rc == 0); 1268 - } 1269 - } else { 1270 - if (ap_using_interrupts()) { 1271 - unregister_adapter_interrupt(&ap_airq); 1272 - ap_airq_flag = 0; 1273 - } 1274 - } 1275 - ap_query_configuration(); 1276 - if (!user_set_domain) { 1277 - ap_domain_index = -1; 1278 - ap_select_domain(); 1279 - } 1280 - init_timer(&ap_config_timer); 1281 - ap_config_timer.function = ap_config_timeout; 1282 - ap_config_timer.data = 0; 1283 - ap_config_timer.expires = jiffies + ap_config_time * HZ; 1284 - add_timer(&ap_config_timer); 1285 - ap_work_queue = create_singlethread_workqueue("kapwork"); 1286 - if (!ap_work_queue) 1287 - return -ENOMEM; 1288 - tasklet_enable(&ap_tasklet); 1289 - if (!ap_using_interrupts()) 1290 - ap_schedule_poll_timer(); 1291 - else 1292 - tasklet_schedule(&ap_tasklet); 1293 - if (ap_thread_flag) 1294 - rc = ap_poll_thread_start(); 1295 - else 1296 - rc = 0; 1297 - } else 1298 - rc = 0; 1299 - if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) { 1300 - spin_lock_bh(&ap_dev->lock); 1301 - ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid), 1302 - ap_domain_index); 1303 - spin_unlock_bh(&ap_dev->lock); 866 + /* Unconditionally remove all AP devices */ 867 + bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister); 868 + /* Reset thin interrupt setting */ 869 + if (ap_interrupts_available() && !ap_using_interrupts()) { 870 + rc = register_adapter_interrupt(&ap_airq); 871 + ap_airq_flag = (rc == 0); 1304 872 } 1305 - queue_work(ap_work_queue, 
&ap_config_work); 1306 - 1307 - return rc; 873 + if (!ap_interrupts_available() && ap_using_interrupts()) { 874 + unregister_adapter_interrupt(&ap_airq); 875 + ap_airq_flag = 0; 876 + } 877 + /* Reset domain */ 878 + if (!user_set_domain) 879 + ap_domain_index = -1; 880 + /* Get things going again */ 881 + ap_suspend_flag = 0; 882 + if (ap_airq_flag) 883 + xchg(ap_airq.lsi_ptr, 0); 884 + tasklet_enable(&ap_tasklet); 885 + queue_work(system_long_wq, &ap_scan_work); 1308 886 } 887 + 888 + static int ap_power_event(struct notifier_block *this, unsigned long event, 889 + void *ptr) 890 + { 891 + switch (event) { 892 + case PM_HIBERNATION_PREPARE: 893 + case PM_SUSPEND_PREPARE: 894 + ap_bus_suspend(); 895 + break; 896 + case PM_POST_HIBERNATION: 897 + case PM_POST_SUSPEND: 898 + ap_bus_resume(); 899 + break; 900 + default: 901 + break; 902 + } 903 + return NOTIFY_DONE; 904 + } 905 + static struct notifier_block ap_power_notifier = { 906 + .notifier_call = ap_power_event, 907 + }; 1309 908 1310 909 static struct bus_type ap_bus_type = { 1311 910 .name = "ap", 1312 911 .match = &ap_bus_match, 1313 912 .uevent = &ap_uevent, 1314 - .suspend = ap_bus_suspend, 1315 - .resume = ap_bus_resume 913 + .suspend = ap_dev_suspend, 914 + .resume = ap_dev_resume, 1316 915 }; 1317 916 1318 917 static int ap_device_probe(struct device *dev) ··· 1318 925 int rc; 1319 926 1320 927 ap_dev->drv = ap_drv; 1321 - 1322 - spin_lock_bh(&ap_device_list_lock); 1323 - list_add(&ap_dev->list, &ap_device_list); 1324 - spin_unlock_bh(&ap_device_list_lock); 1325 - 1326 928 rc = ap_drv->probe ? 
ap_drv->probe(ap_dev) : -ENODEV; 1327 - if (rc) { 1328 - spin_lock_bh(&ap_device_list_lock); 1329 - list_del_init(&ap_dev->list); 1330 - spin_unlock_bh(&ap_device_list_lock); 1331 - } else { 1332 - if (ap_dev->reset == AP_RESET_IN_PROGRESS || 1333 - ap_dev->interrupt == AP_INTR_IN_PROGRESS) 1334 - __ap_schedule_poll_timer(); 1335 - } 929 + if (rc) 930 + ap_dev->drv = NULL; 1336 931 return rc; 1337 932 } 1338 933 ··· 1337 956 list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) { 1338 957 list_del_init(&ap_msg->list); 1339 958 ap_dev->pendingq_count--; 1340 - ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); 959 + ap_msg->rc = -EAGAIN; 960 + ap_msg->receive(ap_dev, ap_msg, NULL); 1341 961 } 1342 962 list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) { 1343 963 list_del_init(&ap_msg->list); 1344 964 ap_dev->requestq_count--; 1345 - ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); 965 + ap_msg->rc = -EAGAIN; 966 + ap_msg->receive(ap_dev, ap_msg, NULL); 1346 967 } 1347 968 } 1348 969 ··· 1374 991 return 0; 1375 992 } 1376 993 994 + static void ap_device_release(struct device *dev) 995 + { 996 + kfree(to_ap_dev(dev)); 997 + } 998 + 1377 999 int ap_driver_register(struct ap_driver *ap_drv, struct module *owner, 1378 1000 char *name) 1379 1001 { ··· 1401 1013 1402 1014 void ap_bus_force_rescan(void) 1403 1015 { 1404 - /* reconfigure the AP bus rescan timer. */ 1405 - mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); 1016 + if (ap_suspend_flag) 1017 + return; 1406 1018 /* processing a asynchronous bus rescan */ 1407 - queue_work(ap_work_queue, &ap_config_work); 1408 - flush_work(&ap_config_work); 1019 + del_timer(&ap_config_timer); 1020 + queue_work(system_long_wq, &ap_scan_work); 1021 + flush_work(&ap_scan_work); 1409 1022 } 1410 1023 EXPORT_SYMBOL(ap_bus_force_rescan); 1411 - 1412 - /* 1413 - * ap_test_config(): helper function to extract the nrth bit 1414 - * within the unsigned int array field. 
1415 - */ 1416 - static inline int ap_test_config(unsigned int *field, unsigned int nr) 1417 - { 1418 - if (nr > 0xFFu) 1419 - return 0; 1420 - return ap_test_bit((field + (nr >> 5)), (nr & 0x1f)); 1421 - } 1422 - 1423 - /* 1424 - * ap_test_config_card_id(): Test, whether an AP card ID is configured. 1425 - * @id AP card ID 1426 - * 1427 - * Returns 0 if the card is not configured 1428 - * 1 if the card is configured or 1429 - * if the configuration information is not available 1430 - */ 1431 - static inline int ap_test_config_card_id(unsigned int id) 1432 - { 1433 - if (!ap_configuration) 1434 - return 1; 1435 - return ap_test_config(ap_configuration->apm, id); 1436 - } 1437 - 1438 - /* 1439 - * ap_test_config_domain(): Test, whether an AP usage domain is configured. 1440 - * @domain AP usage domain ID 1441 - * 1442 - * Returns 0 if the usage domain is not configured 1443 - * 1 if the usage domain is configured or 1444 - * if the configuration information is not available 1445 - */ 1446 - static inline int ap_test_config_domain(unsigned int domain) 1447 - { 1448 - if (!ap_configuration) /* QCI not supported */ 1449 - if (domain < 16) 1450 - return 1; /* then domains 0...15 are configured */ 1451 - else 1452 - return 0; 1453 - else 1454 - return ap_test_config(ap_configuration->aqm, domain); 1455 - } 1456 1024 1457 1025 /* 1458 1026 * AP bus attributes. 
··· 1422 1078 1423 1079 static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf) 1424 1080 { 1425 - if (ap_configuration != NULL) { /* QCI not supported */ 1426 - if (test_facility(76)) { /* format 1 - 256 bit domain field */ 1427 - return snprintf(buf, PAGE_SIZE, 1428 - "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 1081 + if (!ap_configuration) /* QCI not supported */ 1082 + return snprintf(buf, PAGE_SIZE, "not supported\n"); 1083 + if (!test_facility(76)) 1084 + /* format 0 - 16 bit domain field */ 1085 + return snprintf(buf, PAGE_SIZE, "%08x%08x\n", 1086 + ap_configuration->adm[0], 1087 + ap_configuration->adm[1]); 1088 + /* format 1 - 256 bit domain field */ 1089 + return snprintf(buf, PAGE_SIZE, 1090 + "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 1429 1091 ap_configuration->adm[0], ap_configuration->adm[1], 1430 1092 ap_configuration->adm[2], ap_configuration->adm[3], 1431 1093 ap_configuration->adm[4], ap_configuration->adm[5], 1432 1094 ap_configuration->adm[6], ap_configuration->adm[7]); 1433 - } else { /* format 0 - 16 bit domain field */ 1434 - return snprintf(buf, PAGE_SIZE, "%08x%08x\n", 1435 - ap_configuration->adm[0], ap_configuration->adm[1]); 1436 - } 1437 - } else { 1438 - return snprintf(buf, PAGE_SIZE, "not supported\n"); 1439 - } 1440 1095 } 1441 1096 1442 1097 static BUS_ATTR(ap_control_domain_mask, 0444, ··· 1462 1119 if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120) 1463 1120 return -EINVAL; 1464 1121 ap_config_time = time; 1465 - if (!timer_pending(&ap_config_timer) || 1466 - !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) { 1467 - ap_config_timer.expires = jiffies + ap_config_time * HZ; 1468 - add_timer(&ap_config_timer); 1469 - } 1122 + mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); 1470 1123 return count; 1471 1124 } 1472 1125 ··· 1483 1144 if (flag) { 1484 1145 rc = ap_poll_thread_start(); 1485 1146 if (rc) 1486 - return rc; 1487 - } 1488 - else 1147 + count = rc; 1148 + } else 1489 1149 
ap_poll_thread_stop(); 1490 1150 return count; 1491 1151 } ··· 1522 1184 1523 1185 static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf) 1524 1186 { 1525 - ap_qid_t qid; 1526 - int i, nd, max_domain_id = -1; 1527 - unsigned long fbits; 1187 + int max_domain_id; 1528 1188 1529 - if (ap_configuration) { 1530 - if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS) { 1531 - for (i = 0; i < AP_DEVICES; i++) { 1532 - if (!ap_test_config_card_id(i)) 1533 - continue; 1534 - qid = AP_MKQID(i, ap_domain_index); 1535 - fbits = ap_query_facilities(qid); 1536 - if (fbits & (1UL << 57)) { 1537 - /* the N bit is 0, Nd field is filled */ 1538 - nd = (int)((fbits & 0x00FF0000UL)>>16); 1539 - if (nd > 0) 1540 - max_domain_id = nd; 1541 - else 1542 - max_domain_id = 15; 1543 - } else { 1544 - /* N bit is 1, max 16 domains */ 1545 - max_domain_id = 15; 1546 - } 1547 - break; 1548 - } 1549 - } 1550 - } else { 1551 - /* no APXA support, older machines with max 16 domains */ 1189 + if (ap_configuration) 1190 + max_domain_id = ap_max_domain_id ? : -1; 1191 + else 1552 1192 max_domain_id = 15; 1553 - } 1554 1193 return snprintf(buf, PAGE_SIZE, "%d\n", max_domain_id); 1555 1194 } 1556 1195 ··· 1545 1230 }; 1546 1231 1547 1232 /** 1548 - * ap_query_configuration(): Query AP configuration information. 1549 - * 1550 - * Query information of installed cards and configured domains from AP. 1551 - */ 1552 - static void ap_query_configuration(void) 1553 - { 1554 - if (ap_configuration_available()) { 1555 - if (!ap_configuration) 1556 - ap_configuration = 1557 - kzalloc(sizeof(struct ap_config_info), 1558 - GFP_KERNEL); 1559 - if (ap_configuration) 1560 - __ap_query_configuration(ap_configuration); 1561 - } else 1562 - ap_configuration = NULL; 1563 - } 1564 - 1565 - /** 1566 1233 * ap_select_domain(): Select an AP domain. 1567 1234 * 1568 1235 * Pick one of the 16 AP domains. 
1569 1236 */ 1570 1237 static int ap_select_domain(void) 1571 1238 { 1572 - int queue_depth, device_type, count, max_count, best_domain; 1573 - ap_qid_t qid; 1574 - int rc, i, j; 1575 - 1576 - /* IF APXA isn't installed, only 16 domains could be defined */ 1577 - if (!ap_configuration->ap_extended && (ap_domain_index > 15)) 1578 - return -EINVAL; 1239 + int count, max_count, best_domain; 1240 + struct ap_queue_status status; 1241 + int i, j; 1579 1242 1580 1243 /* 1581 1244 * We want to use a single domain. Either the one specified with 1582 1245 * the "domain=" parameter or the domain with the maximum number 1583 1246 * of devices. 1584 1247 */ 1585 - if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS) 1248 + if (ap_domain_index >= 0) 1586 1249 /* Domain has already been selected. */ 1587 1250 return 0; 1588 1251 best_domain = -1; ··· 1572 1279 for (j = 0; j < AP_DEVICES; j++) { 1573 1280 if (!ap_test_config_card_id(j)) 1574 1281 continue; 1575 - qid = AP_MKQID(j, i); 1576 - rc = ap_query_queue(qid, &queue_depth, &device_type); 1577 - if (rc) 1282 + status = ap_test_queue(AP_MKQID(j, i), NULL); 1283 + if (status.response_code != AP_RESPONSE_NORMAL) 1578 1284 continue; 1579 1285 count++; 1580 1286 } ··· 1590 1298 } 1591 1299 1592 1300 /** 1593 - * ap_probe_device_type(): Find the device type of an AP. 1594 - * @ap_dev: pointer to the AP device. 1595 - * 1596 - * Find the device type if query queue returned a device type of 0. 
1597 - */ 1598 - static int ap_probe_device_type(struct ap_device *ap_dev) 1599 - { 1600 - static unsigned char msg[] = { 1601 - 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00, 1602 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1603 - 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00, 1604 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1605 - 0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50, 1606 - 0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01, 1607 - 0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00, 1608 - 0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00, 1609 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1610 - 0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00, 1611 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1612 - 0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00, 1613 - 0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00, 1614 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1615 - 0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00, 1616 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1617 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1618 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1619 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1620 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1621 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1622 - 0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00, 1623 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1624 - 0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00, 1625 - 0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20, 1626 - 0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53, 1627 - 0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22, 1628 - 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00, 1629 - 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88, 1630 - 0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66, 1631 - 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44, 1632 - 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22, 1633 - 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00, 1634 - 0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77, 1635 - 0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00, 1636 - 0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00, 1637 - 0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01, 1638 - 0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c, 1639 - 0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68, 1640 
- 0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66, 1641 - 0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0, 1642 - 0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8, 1643 - 0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04, 1644 - 0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57, 1645 - 0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d, 1646 - }; 1647 - struct ap_queue_status status; 1648 - unsigned long long psmid; 1649 - char *reply; 1650 - int rc, i; 1651 - 1652 - reply = (void *) get_zeroed_page(GFP_KERNEL); 1653 - if (!reply) { 1654 - rc = -ENOMEM; 1655 - goto out; 1656 - } 1657 - 1658 - status = __ap_send(ap_dev->qid, 0x0102030405060708ULL, 1659 - msg, sizeof(msg), 0); 1660 - if (status.response_code != AP_RESPONSE_NORMAL) { 1661 - rc = -ENODEV; 1662 - goto out_free; 1663 - } 1664 - 1665 - /* Wait for the test message to complete. */ 1666 - for (i = 0; i < 6; i++) { 1667 - msleep(300); 1668 - status = __ap_recv(ap_dev->qid, &psmid, reply, 4096); 1669 - if (status.response_code == AP_RESPONSE_NORMAL && 1670 - psmid == 0x0102030405060708ULL) 1671 - break; 1672 - } 1673 - if (i < 6) { 1674 - /* Got an answer. */ 1675 - if (reply[0] == 0x00 && reply[1] == 0x86) 1676 - ap_dev->device_type = AP_DEVICE_TYPE_PCICC; 1677 - else 1678 - ap_dev->device_type = AP_DEVICE_TYPE_PCICA; 1679 - rc = 0; 1680 - } else 1681 - rc = -ENODEV; 1682 - 1683 - out_free: 1684 - free_page((unsigned long) reply); 1685 - out: 1686 - return rc; 1687 - } 1688 - 1689 - static void ap_interrupt_handler(struct airq_struct *airq) 1690 - { 1691 - inc_irq_stat(IRQIO_APB); 1692 - tasklet_schedule(&ap_tasklet); 1693 - } 1694 - 1695 - /** 1696 1301 * __ap_scan_bus(): Scan the AP bus. 
1697 1302 * @dev: Pointer to device 1698 1303 * @data: Pointer to data ··· 1601 1412 return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data; 1602 1413 } 1603 1414 1604 - static void ap_device_release(struct device *dev) 1605 - { 1606 - struct ap_device *ap_dev = to_ap_dev(dev); 1607 - 1608 - kfree(ap_dev); 1609 - } 1610 - 1611 1415 static void ap_scan_bus(struct work_struct *unused) 1612 1416 { 1613 1417 struct ap_device *ap_dev; 1614 1418 struct device *dev; 1615 1419 ap_qid_t qid; 1616 1420 int queue_depth = 0, device_type = 0; 1617 - unsigned int device_functions; 1618 - int rc, i; 1421 + unsigned int device_functions = 0; 1422 + int rc, i, borked; 1619 1423 1620 1424 ap_query_configuration(); 1621 - if (ap_select_domain() != 0) { 1622 - return; 1623 - } 1425 + if (ap_select_domain() != 0) 1426 + goto out; 1427 + 1624 1428 for (i = 0; i < AP_DEVICES; i++) { 1625 1429 qid = AP_MKQID(i, ap_domain_index); 1626 1430 dev = bus_find_device(&ap_bus_type, NULL, 1627 1431 (void *)(unsigned long)qid, 1628 1432 __ap_scan_bus); 1629 - if (ap_test_config_card_id(i)) 1630 - rc = ap_query_queue(qid, &queue_depth, &device_type); 1631 - else 1632 - rc = -ENODEV; 1433 + rc = ap_query_queue(qid, &queue_depth, &device_type, 1434 + &device_functions); 1633 1435 if (dev) { 1634 1436 ap_dev = to_ap_dev(dev); 1635 1437 spin_lock_bh(&ap_dev->lock); 1636 - if (rc == -ENODEV || ap_dev->unregistered) { 1637 - spin_unlock_bh(&ap_dev->lock); 1638 - if (ap_dev->unregistered) 1639 - i--; 1640 - device_unregister(dev); 1641 - put_device(dev); 1642 - continue; 1643 - } 1438 + if (rc == -ENODEV) 1439 + ap_dev->state = AP_STATE_BORKED; 1440 + borked = ap_dev->state == AP_STATE_BORKED; 1644 1441 spin_unlock_bh(&ap_dev->lock); 1442 + if (borked) /* Remove broken device */ 1443 + device_unregister(dev); 1645 1444 put_device(dev); 1646 - continue; 1445 + if (!borked) 1446 + continue; 1647 1447 } 1648 1448 if (rc) 1649 1449 continue; ··· 1640 1462 if (!ap_dev) 1641 1463 break; 1642 1464 
ap_dev->qid = qid; 1643 - rc = ap_init_queue(ap_dev); 1644 - if ((rc != 0) && (rc != -EBUSY)) { 1645 - kfree(ap_dev); 1646 - continue; 1647 - } 1465 + ap_dev->state = AP_STATE_RESET_START; 1466 + ap_dev->interrupt = AP_INTR_DISABLED; 1648 1467 ap_dev->queue_depth = queue_depth; 1649 - ap_dev->unregistered = 1; 1468 + ap_dev->raw_hwtype = device_type; 1469 + ap_dev->device_type = device_type; 1470 + ap_dev->functions = device_functions; 1650 1471 spin_lock_init(&ap_dev->lock); 1651 1472 INIT_LIST_HEAD(&ap_dev->pendingq); 1652 1473 INIT_LIST_HEAD(&ap_dev->requestq); 1653 1474 INIT_LIST_HEAD(&ap_dev->list); 1654 1475 setup_timer(&ap_dev->timeout, ap_request_timeout, 1655 1476 (unsigned long) ap_dev); 1656 - switch (device_type) { 1657 - case 0: 1658 - /* device type probing for old cards */ 1659 - if (ap_probe_device_type(ap_dev)) { 1660 - kfree(ap_dev); 1661 - continue; 1662 - } 1663 - break; 1664 - default: 1665 - ap_dev->device_type = device_type; 1666 - } 1667 - ap_dev->raw_hwtype = device_type; 1668 - 1669 - rc = ap_query_functions(qid, &device_functions); 1670 - if (!rc) 1671 - ap_dev->functions = device_functions; 1672 - else 1673 - ap_dev->functions = 0u; 1674 1477 1675 1478 ap_dev->device.bus = &ap_bus_type; 1676 1479 ap_dev->device.parent = ap_root_device; 1677 - if (dev_set_name(&ap_dev->device, "card%02x", 1678 - AP_QID_DEVICE(ap_dev->qid))) { 1480 + rc = dev_set_name(&ap_dev->device, "card%02x", 1481 + AP_QID_DEVICE(ap_dev->qid)); 1482 + if (rc) { 1679 1483 kfree(ap_dev); 1680 1484 continue; 1681 1485 } 1486 + /* Add to list of devices */ 1487 + spin_lock_bh(&ap_device_list_lock); 1488 + list_add(&ap_dev->list, &ap_device_list); 1489 + spin_unlock_bh(&ap_device_list_lock); 1490 + /* Start with a device reset */ 1491 + spin_lock_bh(&ap_dev->lock); 1492 + ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL)); 1493 + spin_unlock_bh(&ap_dev->lock); 1494 + /* Register device */ 1682 1495 ap_dev->device.release = ap_device_release; 1683 1496 rc = 
device_register(&ap_dev->device); 1684 1497 if (rc) { 1498 + spin_lock_bh(&ap_dev->lock); 1499 + list_del_init(&ap_dev->list); 1500 + spin_unlock_bh(&ap_dev->lock); 1685 1501 put_device(&ap_dev->device); 1686 1502 continue; 1687 1503 } 1688 1504 /* Add device attributes. */ 1689 1505 rc = sysfs_create_group(&ap_dev->device.kobj, 1690 1506 &ap_dev_attr_group); 1691 - if (!rc) { 1692 - spin_lock_bh(&ap_dev->lock); 1693 - ap_dev->unregistered = 0; 1694 - spin_unlock_bh(&ap_dev->lock); 1695 - } 1696 - else 1507 + if (rc) { 1697 1508 device_unregister(&ap_dev->device); 1698 - } 1699 - } 1700 - 1701 - static void 1702 - ap_config_timeout(unsigned long ptr) 1703 - { 1704 - queue_work(ap_work_queue, &ap_config_work); 1705 - ap_config_timer.expires = jiffies + ap_config_time * HZ; 1706 - add_timer(&ap_config_timer); 1707 - } 1708 - 1709 - /** 1710 - * ap_poll_read(): Receive pending reply messages from an AP device. 1711 - * @ap_dev: pointer to the AP device 1712 - * @flags: pointer to control flags, bit 2^0 is set if another poll is 1713 - * required, bit 2^1 is set if the poll timer needs to get armed 1714 - * 1715 - * Returns 0 if the device is still present, -ENODEV if not. 
1716 - */ 1717 - static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) 1718 - { 1719 - struct ap_queue_status status; 1720 - struct ap_message *ap_msg; 1721 - 1722 - if (ap_dev->queue_count <= 0) 1723 - return 0; 1724 - status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid, 1725 - ap_dev->reply->message, ap_dev->reply->length); 1726 - switch (status.response_code) { 1727 - case AP_RESPONSE_NORMAL: 1728 - ap_dev->interrupt = status.int_enabled; 1729 - atomic_dec(&ap_poll_requests); 1730 - ap_decrease_queue_count(ap_dev); 1731 - list_for_each_entry(ap_msg, &ap_dev->pendingq, list) { 1732 - if (ap_msg->psmid != ap_dev->reply->psmid) 1733 - continue; 1734 - list_del_init(&ap_msg->list); 1735 - ap_dev->pendingq_count--; 1736 - ap_msg->receive(ap_dev, ap_msg, ap_dev->reply); 1737 - break; 1738 - } 1739 - if (ap_dev->queue_count > 0) 1740 - *flags |= 1; 1741 - break; 1742 - case AP_RESPONSE_NO_PENDING_REPLY: 1743 - ap_dev->interrupt = status.int_enabled; 1744 - if (status.queue_empty) { 1745 - /* The card shouldn't forget requests but who knows. */ 1746 - atomic_sub(ap_dev->queue_count, &ap_poll_requests); 1747 - ap_dev->queue_count = 0; 1748 - list_splice_init(&ap_dev->pendingq, &ap_dev->requestq); 1749 - ap_dev->requestq_count += ap_dev->pendingq_count; 1750 - ap_dev->pendingq_count = 0; 1751 - } else 1752 - *flags |= 2; 1753 - break; 1754 - default: 1755 - return -ENODEV; 1756 - } 1757 - return 0; 1758 - } 1759 - 1760 - /** 1761 - * ap_poll_write(): Send messages from the request queue to an AP device. 1762 - * @ap_dev: pointer to the AP device 1763 - * @flags: pointer to control flags, bit 2^0 is set if another poll is 1764 - * required, bit 2^1 is set if the poll timer needs to get armed 1765 - * 1766 - * Returns 0 if the device is still present, -ENODEV if not. 
1767 - */ 1768 - static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) 1769 - { 1770 - struct ap_queue_status status; 1771 - struct ap_message *ap_msg; 1772 - 1773 - if (ap_dev->requestq_count <= 0 || 1774 - (ap_dev->queue_count >= ap_dev->queue_depth) || 1775 - (ap_dev->reset == AP_RESET_IN_PROGRESS)) 1776 - return 0; 1777 - /* Start the next request on the queue. */ 1778 - ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list); 1779 - status = __ap_send(ap_dev->qid, ap_msg->psmid, 1780 - ap_msg->message, ap_msg->length, ap_msg->special); 1781 - switch (status.response_code) { 1782 - case AP_RESPONSE_NORMAL: 1783 - atomic_inc(&ap_poll_requests); 1784 - ap_increase_queue_count(ap_dev); 1785 - list_move_tail(&ap_msg->list, &ap_dev->pendingq); 1786 - ap_dev->requestq_count--; 1787 - ap_dev->pendingq_count++; 1788 - if (ap_dev->queue_count < ap_dev->queue_depth && 1789 - ap_dev->requestq_count > 0) 1790 - *flags |= 1; 1791 - *flags |= 2; 1792 - break; 1793 - case AP_RESPONSE_RESET_IN_PROGRESS: 1794 - __ap_schedule_poll_timer(); 1795 - case AP_RESPONSE_Q_FULL: 1796 - *flags |= 2; 1797 - break; 1798 - case AP_RESPONSE_MESSAGE_TOO_BIG: 1799 - case AP_RESPONSE_REQ_FAC_NOT_INST: 1800 - return -EINVAL; 1801 - default: 1802 - return -ENODEV; 1803 - } 1804 - return 0; 1805 - } 1806 - 1807 - /** 1808 - * ap_poll_queue(): Poll AP device for pending replies and send new messages. 1809 - * Check if the queue has a pending reset. In case it's done re-enable 1810 - * interrupts, otherwise reschedule the poll_timer for another attempt. 1811 - * @ap_dev: pointer to the bus device 1812 - * @flags: pointer to control flags, bit 2^0 is set if another poll is 1813 - * required, bit 2^1 is set if the poll timer needs to get armed 1814 - * 1815 - * Poll AP device for pending replies and send new messages. If either 1816 - * ap_poll_read or ap_poll_write returns -ENODEV unregister the device. 1817 - * Returns 0. 
1818 - */ 1819 - static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags) 1820 - { 1821 - int rc, depth, type; 1822 - struct ap_queue_status status; 1823 - 1824 - 1825 - if (ap_dev->reset == AP_RESET_IN_PROGRESS) { 1826 - status = ap_test_queue(ap_dev->qid, &depth, &type); 1827 - switch (status.response_code) { 1828 - case AP_RESPONSE_NORMAL: 1829 - ap_dev->reset = AP_RESET_IGNORE; 1830 - if (ap_using_interrupts()) { 1831 - rc = ap_queue_enable_interruption( 1832 - ap_dev, ap_airq.lsi_ptr); 1833 - if (!rc) 1834 - ap_dev->interrupt = AP_INTR_IN_PROGRESS; 1835 - else if (rc == -ENODEV) { 1836 - pr_err("Registering adapter interrupts for " 1837 - "AP %d failed\n", AP_QID_DEVICE(ap_dev->qid)); 1838 - return rc; 1839 - } 1840 - } 1841 - /* fall through */ 1842 - case AP_RESPONSE_BUSY: 1843 - case AP_RESPONSE_RESET_IN_PROGRESS: 1844 - *flags |= AP_POLL_AFTER_TIMEOUT; 1845 - break; 1846 - case AP_RESPONSE_Q_NOT_AVAIL: 1847 - case AP_RESPONSE_DECONFIGURED: 1848 - case AP_RESPONSE_CHECKSTOPPED: 1849 - return -ENODEV; 1850 - default: 1851 - break; 1852 - } 1853 - } 1854 - 1855 - if ((ap_dev->reset != AP_RESET_IN_PROGRESS) && 1856 - (ap_dev->interrupt == AP_INTR_IN_PROGRESS)) { 1857 - status = ap_test_queue(ap_dev->qid, &depth, &type); 1858 - if (ap_using_interrupts()) { 1859 - if (status.int_enabled == 1) 1860 - ap_dev->interrupt = AP_INTR_ENABLED; 1861 - else 1862 - *flags |= AP_POLL_AFTER_TIMEOUT; 1863 - } else 1864 - ap_dev->interrupt = AP_INTR_DISABLED; 1865 - } 1866 - 1867 - rc = ap_poll_read(ap_dev, flags); 1868 - if (rc) 1869 - return rc; 1870 - return ap_poll_write(ap_dev, flags); 1871 - } 1872 - 1873 - /** 1874 - * __ap_queue_message(): Queue a message to a device. 1875 - * @ap_dev: pointer to the AP device 1876 - * @ap_msg: the message to be queued 1877 - * 1878 - * Queue a message to a device. Returns 0 if successful. 
1879 - */ 1880 - static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 1881 - { 1882 - struct ap_queue_status status; 1883 - 1884 - if (list_empty(&ap_dev->requestq) && 1885 - (ap_dev->queue_count < ap_dev->queue_depth) && 1886 - (ap_dev->reset != AP_RESET_IN_PROGRESS)) { 1887 - status = __ap_send(ap_dev->qid, ap_msg->psmid, 1888 - ap_msg->message, ap_msg->length, 1889 - ap_msg->special); 1890 - switch (status.response_code) { 1891 - case AP_RESPONSE_NORMAL: 1892 - list_add_tail(&ap_msg->list, &ap_dev->pendingq); 1893 - atomic_inc(&ap_poll_requests); 1894 - ap_dev->pendingq_count++; 1895 - ap_increase_queue_count(ap_dev); 1896 - ap_dev->total_request_count++; 1897 - break; 1898 - case AP_RESPONSE_Q_FULL: 1899 - case AP_RESPONSE_RESET_IN_PROGRESS: 1900 - list_add_tail(&ap_msg->list, &ap_dev->requestq); 1901 - ap_dev->requestq_count++; 1902 - ap_dev->total_request_count++; 1903 - return -EBUSY; 1904 - case AP_RESPONSE_REQ_FAC_NOT_INST: 1905 - case AP_RESPONSE_MESSAGE_TOO_BIG: 1906 - ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL)); 1907 - return -EINVAL; 1908 - default: /* Device is gone. */ 1909 - ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); 1910 - return -ENODEV; 1911 - } 1912 - } else { 1913 - list_add_tail(&ap_msg->list, &ap_dev->requestq); 1914 - ap_dev->requestq_count++; 1915 - ap_dev->total_request_count++; 1916 - return -EBUSY; 1917 - } 1918 - ap_schedule_poll_timer(); 1919 - return 0; 1920 - } 1921 - 1922 - void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 1923 - { 1924 - unsigned long flags; 1925 - int rc; 1926 - 1927 - /* For asynchronous message handling a valid receive-callback 1928 - * is required. */ 1929 - BUG_ON(!ap_msg->receive); 1930 - 1931 - spin_lock_bh(&ap_dev->lock); 1932 - if (!ap_dev->unregistered) { 1933 - /* Make room on the queue by polling for finished requests. 
*/ 1934 - rc = ap_poll_queue(ap_dev, &flags); 1935 - if (!rc) 1936 - rc = __ap_queue_message(ap_dev, ap_msg); 1937 - if (!rc) 1938 - wake_up(&ap_poll_wait); 1939 - if (rc == -ENODEV) 1940 - ap_dev->unregistered = 1; 1941 - } else { 1942 - ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); 1943 - rc = -ENODEV; 1944 - } 1945 - spin_unlock_bh(&ap_dev->lock); 1946 - if (rc == -ENODEV) 1947 - device_unregister(&ap_dev->device); 1948 - } 1949 - EXPORT_SYMBOL(ap_queue_message); 1950 - 1951 - /** 1952 - * ap_cancel_message(): Cancel a crypto request. 1953 - * @ap_dev: The AP device that has the message queued 1954 - * @ap_msg: The message that is to be removed 1955 - * 1956 - * Cancel a crypto request. This is done by removing the request 1957 - * from the device pending or request queue. Note that the 1958 - * request stays on the AP queue. When it finishes the message 1959 - * reply will be discarded because the psmid can't be found. 1960 - */ 1961 - void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 1962 - { 1963 - struct ap_message *tmp; 1964 - 1965 - spin_lock_bh(&ap_dev->lock); 1966 - if (!list_empty(&ap_msg->list)) { 1967 - list_for_each_entry(tmp, &ap_dev->pendingq, list) 1968 - if (tmp->psmid == ap_msg->psmid) { 1969 - ap_dev->pendingq_count--; 1970 - goto found; 1971 - } 1972 - ap_dev->requestq_count--; 1973 - found: 1974 - list_del_init(&ap_msg->list); 1975 - } 1976 - spin_unlock_bh(&ap_dev->lock); 1977 - } 1978 - EXPORT_SYMBOL(ap_cancel_message); 1979 - 1980 - /** 1981 - * ap_poll_timeout(): AP receive polling for finished AP requests. 1982 - * @unused: Unused pointer. 1983 - * 1984 - * Schedules the AP tasklet using a high resolution timer. 1985 - */ 1986 - static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused) 1987 - { 1988 - tasklet_schedule(&ap_tasklet); 1989 - return HRTIMER_NORESTART; 1990 - } 1991 - 1992 - /** 1993 - * ap_reset(): Reset a not responding AP device. 
1994 - * @ap_dev: Pointer to the AP device 1995 - * 1996 - * Reset a not responding AP device and move all requests from the 1997 - * pending queue to the request queue. 1998 - */ 1999 - static void ap_reset(struct ap_device *ap_dev, unsigned long *flags) 2000 - { 2001 - int rc; 2002 - 2003 - atomic_sub(ap_dev->queue_count, &ap_poll_requests); 2004 - ap_dev->queue_count = 0; 2005 - list_splice_init(&ap_dev->pendingq, &ap_dev->requestq); 2006 - ap_dev->requestq_count += ap_dev->pendingq_count; 2007 - ap_dev->pendingq_count = 0; 2008 - rc = ap_init_queue(ap_dev); 2009 - if (rc == -ENODEV) 2010 - ap_dev->unregistered = 1; 2011 - else 2012 - *flags |= AP_POLL_AFTER_TIMEOUT; 2013 - } 2014 - 2015 - static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags) 2016 - { 2017 - if (!ap_dev->unregistered) { 2018 - if (ap_poll_queue(ap_dev, flags)) 2019 - ap_dev->unregistered = 1; 2020 - if (ap_dev->reset == AP_RESET_DO) 2021 - ap_reset(ap_dev, flags); 2022 - } 2023 - return 0; 2024 - } 2025 - 2026 - /** 2027 - * ap_poll_all(): Poll all AP devices. 2028 - * @dummy: Unused variable 2029 - * 2030 - * Poll all AP devices on the bus in a round robin fashion. Continue 2031 - * polling until bit 2^0 of the control flags is not set. If bit 2^1 2032 - * of the control flags has been set arm the poll timer. 2033 - */ 2034 - static void ap_poll_all(unsigned long dummy) 2035 - { 2036 - unsigned long flags; 2037 - struct ap_device *ap_dev; 2038 - 2039 - /* Reset the indicator if interrupts are used. Thus new interrupts can 2040 - * be received. Doing it in the beginning of the tasklet is therefor 2041 - * important that no requests on any AP get lost. 
2042 - */ 2043 - if (ap_using_interrupts()) 2044 - xchg(ap_airq.lsi_ptr, 0); 2045 - do { 2046 - flags = 0; 2047 - spin_lock(&ap_device_list_lock); 2048 - list_for_each_entry(ap_dev, &ap_device_list, list) { 2049 - spin_lock(&ap_dev->lock); 2050 - __ap_poll_device(ap_dev, &flags); 2051 - spin_unlock(&ap_dev->lock); 2052 - } 2053 - spin_unlock(&ap_device_list_lock); 2054 - } while (flags & AP_POLL_IMMEDIATELY); 2055 - if (flags & AP_POLL_AFTER_TIMEOUT) 2056 - __ap_schedule_poll_timer(); 2057 - } 2058 - 2059 - /** 2060 - * ap_poll_thread(): Thread that polls for finished requests. 2061 - * @data: Unused pointer 2062 - * 2063 - * AP bus poll thread. The purpose of this thread is to poll for 2064 - * finished requests in a loop if there is a "free" cpu - that is 2065 - * a cpu that doesn't have anything better to do. The polling stops 2066 - * as soon as there is another task or if all messages have been 2067 - * delivered. 2068 - */ 2069 - static int ap_poll_thread(void *data) 2070 - { 2071 - DECLARE_WAITQUEUE(wait, current); 2072 - unsigned long flags; 2073 - int requests; 2074 - struct ap_device *ap_dev; 2075 - 2076 - set_user_nice(current, MAX_NICE); 2077 - while (1) { 2078 - if (ap_suspend_flag) 2079 - return 0; 2080 - if (need_resched()) { 2081 - schedule(); 2082 1509 continue; 2083 1510 } 2084 - add_wait_queue(&ap_poll_wait, &wait); 2085 - set_current_state(TASK_INTERRUPTIBLE); 2086 - if (kthread_should_stop()) 2087 - break; 2088 - requests = atomic_read(&ap_poll_requests); 2089 - if (requests <= 0) 2090 - schedule(); 2091 - set_current_state(TASK_RUNNING); 2092 - remove_wait_queue(&ap_poll_wait, &wait); 2093 - 2094 - flags = 0; 2095 - spin_lock_bh(&ap_device_list_lock); 2096 - list_for_each_entry(ap_dev, &ap_device_list, list) { 2097 - spin_lock(&ap_dev->lock); 2098 - __ap_poll_device(ap_dev, &flags); 2099 - spin_unlock(&ap_dev->lock); 2100 - } 2101 - spin_unlock_bh(&ap_device_list_lock); 2102 1511 } 2103 - set_current_state(TASK_RUNNING); 2104 - 
remove_wait_queue(&ap_poll_wait, &wait); 2105 - return 0; 1512 + out: 1513 + mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); 2106 1514 } 2107 1515 2108 - static int ap_poll_thread_start(void) 1516 + static void ap_config_timeout(unsigned long ptr) 2109 1517 { 2110 - int rc; 2111 - 2112 - if (ap_using_interrupts() || ap_suspend_flag) 2113 - return 0; 2114 - mutex_lock(&ap_poll_thread_mutex); 2115 - if (!ap_poll_kthread) { 2116 - ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll"); 2117 - rc = PTR_RET(ap_poll_kthread); 2118 - if (rc) 2119 - ap_poll_kthread = NULL; 2120 - } 2121 - else 2122 - rc = 0; 2123 - mutex_unlock(&ap_poll_thread_mutex); 2124 - return rc; 2125 - } 2126 - 2127 - static void ap_poll_thread_stop(void) 2128 - { 2129 - mutex_lock(&ap_poll_thread_mutex); 2130 - if (ap_poll_kthread) { 2131 - kthread_stop(ap_poll_kthread); 2132 - ap_poll_kthread = NULL; 2133 - } 2134 - mutex_unlock(&ap_poll_thread_mutex); 2135 - } 2136 - 2137 - /** 2138 - * ap_request_timeout(): Handling of request timeouts 2139 - * @data: Holds the AP device. 2140 - * 2141 - * Handles request timeouts. 
2142 - */ 2143 - static void ap_request_timeout(unsigned long data) 2144 - { 2145 - struct ap_device *ap_dev = (struct ap_device *) data; 2146 - 2147 - if (ap_dev->reset == AP_RESET_ARMED) { 2148 - ap_dev->reset = AP_RESET_DO; 2149 - 2150 - if (ap_using_interrupts()) 2151 - tasklet_schedule(&ap_tasklet); 2152 - } 1518 + if (ap_suspend_flag) 1519 + return; 1520 + queue_work(system_long_wq, &ap_scan_work); 2153 1521 } 2154 1522 2155 1523 static void ap_reset_domain(void) 2156 1524 { 2157 1525 int i; 2158 1526 2159 - if ((ap_domain_index != -1) && (ap_test_config_domain(ap_domain_index))) 2160 - for (i = 0; i < AP_DEVICES; i++) 2161 - ap_reset_queue(AP_MKQID(i, ap_domain_index)); 1527 + if (ap_domain_index == -1 || !ap_test_config_domain(ap_domain_index)) 1528 + return; 1529 + for (i = 0; i < AP_DEVICES; i++) 1530 + ap_reset_queue(AP_MKQID(i, ap_domain_index)); 2162 1531 } 2163 1532 2164 1533 static void ap_reset_all(void) ··· 1734 2009 */ 1735 2010 int __init ap_module_init(void) 1736 2011 { 2012 + int max_domain_id; 1737 2013 int rc, i; 1738 2014 1739 - if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) { 1740 - pr_warning("%d is not a valid cryptographic domain\n", 1741 - ap_domain_index); 2015 + if (ap_instructions_available() != 0) { 2016 + pr_warn("The hardware system does not support AP instructions\n"); 2017 + return -ENODEV; 2018 + } 2019 + 2020 + /* Get AP configuration data if available */ 2021 + ap_init_configuration(); 2022 + 2023 + if (ap_configuration) 2024 + max_domain_id = ap_max_domain_id ? : (AP_DOMAINS - 1); 2025 + else 2026 + max_domain_id = 15; 2027 + if (ap_domain_index < -1 || ap_domain_index > max_domain_id) { 2028 + pr_warn("%d is not a valid cryptographic domain\n", 2029 + ap_domain_index); 1742 2030 return -EINVAL; 1743 2031 } 1744 2032 /* In resume callback we need to know if the user had set the domain. 
··· 1760 2022 if (ap_domain_index >= 0) 1761 2023 user_set_domain = 1; 1762 2024 1763 - if (ap_instructions_available() != 0) { 1764 - pr_warning("The hardware system does not support " 1765 - "AP instructions\n"); 1766 - return -ENODEV; 1767 - } 1768 2025 if (ap_interrupts_available()) { 1769 2026 rc = register_adapter_interrupt(&ap_airq); 1770 2027 ap_airq_flag = (rc == 0); ··· 1783 2050 if (rc) 1784 2051 goto out_bus; 1785 2052 1786 - ap_work_queue = create_singlethread_workqueue("kapwork"); 1787 - if (!ap_work_queue) { 1788 - rc = -ENOMEM; 1789 - goto out_root; 1790 - } 1791 - 1792 - ap_query_configuration(); 1793 - if (ap_select_domain() == 0) 1794 - ap_scan_bus(NULL); 1795 - 1796 2053 /* Setup the AP bus rescan timer. */ 1797 - init_timer(&ap_config_timer); 1798 - ap_config_timer.function = ap_config_timeout; 1799 - ap_config_timer.data = 0; 1800 - ap_config_timer.expires = jiffies + ap_config_time * HZ; 1801 - add_timer(&ap_config_timer); 2054 + setup_timer(&ap_config_timer, ap_config_timeout, 0); 1802 2055 1803 - /* Setup the high resultion poll timer. 2056 + /* 2057 + * Setup the high resultion poll timer. 1804 2058 * If we are running under z/VM adjust polling to z/VM polling rate. 
1805 2059 */ 1806 2060 if (MACHINE_IS_VM) ··· 1803 2083 goto out_work; 1804 2084 } 1805 2085 2086 + rc = register_pm_notifier(&ap_power_notifier); 2087 + if (rc) 2088 + goto out_pm; 2089 + 2090 + queue_work(system_long_wq, &ap_scan_work); 2091 + 1806 2092 return 0; 1807 2093 2094 + out_pm: 2095 + ap_poll_thread_stop(); 1808 2096 out_work: 1809 - del_timer_sync(&ap_config_timer); 1810 2097 hrtimer_cancel(&ap_poll_timer); 1811 - destroy_workqueue(ap_work_queue); 1812 - out_root: 1813 2098 root_device_unregister(ap_root_device); 1814 2099 out_bus: 1815 2100 while (i--) ··· 1824 2099 unregister_reset_call(&ap_reset_call); 1825 2100 if (ap_using_interrupts()) 1826 2101 unregister_adapter_interrupt(&ap_airq); 2102 + kfree(ap_configuration); 1827 2103 return rc; 1828 - } 1829 - 1830 - static int __ap_match_all(struct device *dev, void *data) 1831 - { 1832 - return 1; 1833 2104 } 1834 2105 1835 2106 /** ··· 1836 2115 void ap_module_exit(void) 1837 2116 { 1838 2117 int i; 1839 - struct device *dev; 1840 2118 1841 2119 ap_reset_domain(); 1842 2120 ap_poll_thread_stop(); 1843 2121 del_timer_sync(&ap_config_timer); 1844 2122 hrtimer_cancel(&ap_poll_timer); 1845 - destroy_workqueue(ap_work_queue); 1846 2123 tasklet_kill(&ap_tasklet); 1847 - while ((dev = bus_find_device(&ap_bus_type, NULL, NULL, 1848 - __ap_match_all))) 1849 - { 1850 - device_unregister(dev); 1851 - put_device(dev); 1852 - } 2124 + bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister); 1853 2125 for (i = 0; ap_bus_attrs[i]; i++) 1854 2126 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); 2127 + unregister_pm_notifier(&ap_power_notifier); 1855 2128 root_device_unregister(ap_root_device); 1856 2129 bus_unregister(&ap_bus_type); 2130 + kfree(ap_configuration); 1857 2131 unregister_reset_call(&ap_reset_call); 1858 2132 if (ap_using_interrupts()) 1859 2133 unregister_adapter_interrupt(&ap_airq);
+41 -26
drivers/s390/crypto/ap_bus.h
··· 36 36 #define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */ 37 37 #define AP_POLL_TIME 1 /* Time in ticks between receive polls. */ 38 38 39 - #define AP_POLL_IMMEDIATELY 1 /* continue running poll tasklet */ 40 - #define AP_POLL_AFTER_TIMEOUT 2 /* run poll tasklet again after timout */ 41 - 42 39 extern int ap_domain_index; 43 40 44 41 /** ··· 72 75 unsigned int pad2 : 16; 73 76 } __packed; 74 77 75 - #define AP_QUEUE_STATUS_INVALID \ 76 - { 1, 1, 1, 0xF, 1, 0xFF, 0xFFFF } 77 78 78 - static inline 79 - int ap_queue_status_invalid_test(struct ap_queue_status *status) 80 - { 81 - struct ap_queue_status invalid = AP_QUEUE_STATUS_INVALID; 82 - return !(memcmp(status, &invalid, sizeof(struct ap_queue_status))); 83 - } 84 - 85 - #define AP_MAX_BITS 31 86 79 static inline int ap_test_bit(unsigned int *ptr, unsigned int nr) 87 80 { 88 - if (nr > AP_MAX_BITS) 89 - return 0; 90 81 return (*ptr & (0x80000000u >> nr)) != 0; 91 82 } 92 83 ··· 117 132 #define AP_FUNC_APXA 6 118 133 119 134 /* 120 - * AP reset flag states 121 - */ 122 - #define AP_RESET_IGNORE 0 /* request timeout will be ignored */ 123 - #define AP_RESET_ARMED 1 /* request timeout timer is active */ 124 - #define AP_RESET_DO 2 /* AP reset required */ 125 - #define AP_RESET_IN_PROGRESS 3 /* AP reset in progress */ 126 - 127 - /* 128 135 * AP interrupt states 129 136 */ 130 137 #define AP_INTR_DISABLED 0 /* AP interrupt disabled */ 131 138 #define AP_INTR_ENABLED 1 /* AP interrupt enabled */ 132 - #define AP_INTR_IN_PROGRESS 3 /* AP interrupt in progress */ 139 + 140 + /* 141 + * AP device states 142 + */ 143 + enum ap_state { 144 + AP_STATE_RESET_START, 145 + AP_STATE_RESET_WAIT, 146 + AP_STATE_SETIRQ_WAIT, 147 + AP_STATE_IDLE, 148 + AP_STATE_WORKING, 149 + AP_STATE_QUEUE_FULL, 150 + AP_STATE_SUSPEND_WAIT, 151 + AP_STATE_BORKED, 152 + NR_AP_STATES 153 + }; 154 + 155 + /* 156 + * AP device events 157 + */ 158 + enum ap_event { 159 + AP_EVENT_POLL, 160 + AP_EVENT_TIMEOUT, 161 + NR_AP_EVENTS 
162 + }; 163 + 164 + /* 165 + * AP wait behaviour 166 + */ 167 + enum ap_wait { 168 + AP_WAIT_AGAIN, /* retry immediately */ 169 + AP_WAIT_TIMEOUT, /* wait for timeout */ 170 + AP_WAIT_INTERRUPT, /* wait for thin interrupt (if available) */ 171 + AP_WAIT_NONE, /* no wait */ 172 + NR_AP_WAIT 173 + }; 133 174 134 175 struct ap_device; 135 176 struct ap_message; ··· 174 163 int ap_driver_register(struct ap_driver *, struct module *, char *); 175 164 void ap_driver_unregister(struct ap_driver *); 176 165 166 + typedef enum ap_wait (ap_func_t)(struct ap_device *ap_dev); 167 + 177 168 struct ap_device { 178 169 struct device device; 179 170 struct ap_driver *drv; /* Pointer to AP device driver. */ 180 171 spinlock_t lock; /* Per device lock. */ 181 172 struct list_head list; /* private list of all AP devices. */ 182 173 174 + enum ap_state state; /* State of the AP device. */ 175 + 183 176 ap_qid_t qid; /* AP queue id. */ 184 177 int queue_depth; /* AP queue depth.*/ 185 178 int device_type; /* AP device type. */ 186 179 int raw_hwtype; /* AP raw hardware type. */ 187 180 unsigned int functions; /* AP device function bitfield. */ 188 - int unregistered; /* marks AP device as unregistered */ 189 181 struct timer_list timeout; /* Timer for request timeouts. */ 190 - int reset; /* Reset required after req. timeout. */ 191 182 192 183 int interrupt; /* indicate if interrupts are enabled */ 193 184 int queue_count; /* # messages currently on AP queue. */ ··· 212 199 unsigned long long psmid; /* Message id. */ 213 200 void *message; /* Pointer to message buffer. */ 214 201 size_t length; /* Message length. */ 202 + int rc; /* Return code for this message */ 215 203 216 204 void *private; /* ap driver private pointer. */ 217 205 unsigned int special:1; /* Used for special commands. */ ··· 245 231 { 246 232 ap_msg->psmid = 0; 247 233 ap_msg->length = 0; 234 + ap_msg->rc = 0; 248 235 ap_msg->special = 0; 249 236 ap_msg->receive = NULL; 250 237 }
+1 -2
drivers/s390/crypto/zcrypt_api.c
··· 472 472 unsigned long long z1, z2, z3; 473 473 int rc, copied; 474 474 475 - if (crt->outputdatalength < crt->inputdatalength || 476 - (crt->inputdatalength & 1)) 475 + if (crt->outputdatalength < crt->inputdatalength) 477 476 return -EINVAL; 478 477 /* 479 478 * As long as outputdatalength is big enough, we can set the
+1 -1
drivers/s390/crypto/zcrypt_cca_key.h
··· 291 291 292 292 memset(key, 0, sizeof(*key)); 293 293 294 - short_len = crt->inputdatalength / 2; 294 + short_len = (crt->inputdatalength + 1) / 2; 295 295 long_len = short_len + 8; 296 296 pad_len = -(3*long_len + 2*short_len) & 7; 297 297 key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength;
+15 -13
drivers/s390/crypto/zcrypt_msgtype50.c
··· 248 248 unsigned char *p, *q, *dp, *dq, *u, *inp; 249 249 250 250 mod_len = crt->inputdatalength; 251 - short_len = mod_len / 2; 251 + short_len = (mod_len + 1) / 2; 252 252 253 253 /* 254 254 * CEX2A and CEX3A w/o FW update can handle requests up to ··· 395 395 int length; 396 396 397 397 /* Copy the reply message to the request message buffer. */ 398 - if (IS_ERR(reply)) { 399 - memcpy(msg->message, &error_reply, sizeof(error_reply)); 400 - goto out; 401 - } 398 + if (!reply) 399 + goto out; /* ap_msg->rc indicates the error */ 402 400 t80h = reply->message; 403 401 if (t80h->type == TYPE80_RSP_CODE) { 404 402 if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A) ··· 447 449 init_completion(&work); 448 450 ap_queue_message(zdev->ap_dev, &ap_msg); 449 451 rc = wait_for_completion_interruptible(&work); 450 - if (rc == 0) 451 - rc = convert_response(zdev, &ap_msg, mex->outputdata, 452 - mex->outputdatalength); 453 - else 452 + if (rc == 0) { 453 + rc = ap_msg.rc; 454 + if (rc == 0) 455 + rc = convert_response(zdev, &ap_msg, mex->outputdata, 456 + mex->outputdatalength); 457 + } else 454 458 /* Signal pending. */ 455 459 ap_cancel_message(zdev->ap_dev, &ap_msg); 456 460 out_free: ··· 493 493 init_completion(&work); 494 494 ap_queue_message(zdev->ap_dev, &ap_msg); 495 495 rc = wait_for_completion_interruptible(&work); 496 - if (rc == 0) 497 - rc = convert_response(zdev, &ap_msg, crt->outputdata, 498 - crt->outputdatalength); 499 - else 496 + if (rc == 0) { 497 + rc = ap_msg.rc; 498 + if (rc == 0) 499 + rc = convert_response(zdev, &ap_msg, crt->outputdata, 500 + crt->outputdatalength); 501 + } else 500 502 /* Signal pending. */ 501 503 ap_cancel_message(zdev->ap_dev, &ap_msg); 502 504 out_free:
+34 -25
drivers/s390/crypto/zcrypt_msgtype6.c
··· 829 829 int length; 830 830 831 831 /* Copy the reply message to the request message buffer. */ 832 - if (IS_ERR(reply)) { 833 - memcpy(msg->message, &error_reply, sizeof(error_reply)); 834 - goto out; 835 - } 832 + if (!reply) 833 + goto out; /* ap_msg->rc indicates the error */ 836 834 t86r = reply->message; 837 835 if (t86r->hdr.type == TYPE86_RSP_CODE && 838 836 t86r->cprbx.cprb_ver_id == 0x02) { ··· 878 880 int length; 879 881 880 882 /* Copy the reply message to the request message buffer. */ 881 - if (IS_ERR(reply)) { 882 - memcpy(msg->message, &error_reply, sizeof(error_reply)); 883 - goto out; 884 - } 883 + if (!reply) 884 + goto out; /* ap_msg->rc indicates the error */ 885 885 t86r = reply->message; 886 886 if (t86r->hdr.type == TYPE86_RSP_CODE && 887 887 t86r->cprbx.cprb_ver_id == 0x04) { ··· 931 935 init_completion(&resp_type.work); 932 936 ap_queue_message(zdev->ap_dev, &ap_msg); 933 937 rc = wait_for_completion_interruptible(&resp_type.work); 934 - if (rc == 0) 935 - rc = convert_response_ica(zdev, &ap_msg, mex->outputdata, 936 - mex->outputdatalength); 937 - else 938 + if (rc == 0) { 939 + rc = ap_msg.rc; 940 + if (rc == 0) 941 + rc = convert_response_ica(zdev, &ap_msg, 942 + mex->outputdata, 943 + mex->outputdatalength); 944 + } else 938 945 /* Signal pending. */ 939 946 ap_cancel_message(zdev->ap_dev, &ap_msg); 940 947 out_free: ··· 975 976 init_completion(&resp_type.work); 976 977 ap_queue_message(zdev->ap_dev, &ap_msg); 977 978 rc = wait_for_completion_interruptible(&resp_type.work); 978 - if (rc == 0) 979 - rc = convert_response_ica(zdev, &ap_msg, crt->outputdata, 980 - crt->outputdatalength); 981 - else 979 + if (rc == 0) { 980 + rc = ap_msg.rc; 981 + if (rc == 0) 982 + rc = convert_response_ica(zdev, &ap_msg, 983 + crt->outputdata, 984 + crt->outputdatalength); 985 + } else 982 986 /* Signal pending. 
*/ 983 987 ap_cancel_message(zdev->ap_dev, &ap_msg); 984 988 out_free: ··· 1019 1017 init_completion(&resp_type.work); 1020 1018 ap_queue_message(zdev->ap_dev, &ap_msg); 1021 1019 rc = wait_for_completion_interruptible(&resp_type.work); 1022 - if (rc == 0) 1023 - rc = convert_response_xcrb(zdev, &ap_msg, xcRB); 1024 - else 1020 + if (rc == 0) { 1021 + rc = ap_msg.rc; 1022 + if (rc == 0) 1023 + rc = convert_response_xcrb(zdev, &ap_msg, xcRB); 1024 + } else 1025 1025 /* Signal pending. */ 1026 1026 ap_cancel_message(zdev->ap_dev, &ap_msg); 1027 1027 out_free: ··· 1061 1057 init_completion(&resp_type.work); 1062 1058 ap_queue_message(zdev->ap_dev, &ap_msg); 1063 1059 rc = wait_for_completion_interruptible(&resp_type.work); 1064 - if (rc == 0) 1065 - rc = convert_response_ep11_xcrb(zdev, &ap_msg, xcrb); 1066 - else /* Signal pending. */ 1060 + if (rc == 0) { 1061 + rc = ap_msg.rc; 1062 + if (rc == 0) 1063 + rc = convert_response_ep11_xcrb(zdev, &ap_msg, xcrb); 1064 + } else 1065 + /* Signal pending. */ 1067 1066 ap_cancel_message(zdev->ap_dev, &ap_msg); 1068 1067 1069 1068 out_free: ··· 1103 1096 init_completion(&resp_type.work); 1104 1097 ap_queue_message(zdev->ap_dev, &ap_msg); 1105 1098 rc = wait_for_completion_interruptible(&resp_type.work); 1106 - if (rc == 0) 1107 - rc = convert_response_rng(zdev, &ap_msg, buffer); 1108 - else 1099 + if (rc == 0) { 1100 + rc = ap_msg.rc; 1101 + if (rc == 0) 1102 + rc = convert_response_rng(zdev, &ap_msg, buffer); 1103 + } else 1109 1104 /* Signal pending. */ 1110 1105 ap_cancel_message(zdev->ap_dev, &ap_msg); 1111 1106 kfree(ap_msg.message);
-420
drivers/s390/crypto/zcrypt_pcica.c
··· 1 - /* 2 - * zcrypt 2.1.0 3 - * 4 - * Copyright IBM Corp. 2001, 2006 5 - * Author(s): Robert Burroughs 6 - * Eric Rossman (edrossma@us.ibm.com) 7 - * 8 - * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 9 - * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 10 - * Ralph Wuerthner <rwuerthn@de.ibm.com> 11 - * 12 - * This program is free software; you can redistribute it and/or modify 13 - * it under the terms of the GNU General Public License as published by 14 - * the Free Software Foundation; either version 2, or (at your option) 15 - * any later version. 16 - * 17 - * This program is distributed in the hope that it will be useful, 18 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 - * GNU General Public License for more details. 21 - * 22 - * You should have received a copy of the GNU General Public License 23 - * along with this program; if not, write to the Free Software 24 - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
25 - */ 26 - 27 - #define KMSG_COMPONENT "zcrypt" 28 - #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 29 - 30 - #include <linux/module.h> 31 - #include <linux/slab.h> 32 - #include <linux/init.h> 33 - #include <linux/err.h> 34 - #include <linux/atomic.h> 35 - #include <asm/uaccess.h> 36 - 37 - #include "ap_bus.h" 38 - #include "zcrypt_api.h" 39 - #include "zcrypt_error.h" 40 - #include "zcrypt_pcica.h" 41 - 42 - #define PCICA_MIN_MOD_SIZE 1 /* 8 bits */ 43 - #define PCICA_MAX_MOD_SIZE 256 /* 2048 bits */ 44 - 45 - #define PCICA_SPEED_RATING 2800 46 - 47 - #define PCICA_MAX_MESSAGE_SIZE 0x3a0 /* sizeof(struct type4_lcr) */ 48 - #define PCICA_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */ 49 - 50 - #define PCICA_CLEANUP_TIME (15*HZ) 51 - 52 - static struct ap_device_id zcrypt_pcica_ids[] = { 53 - { AP_DEVICE(AP_DEVICE_TYPE_PCICA) }, 54 - { /* end of list */ }, 55 - }; 56 - 57 - MODULE_DEVICE_TABLE(ap, zcrypt_pcica_ids); 58 - MODULE_AUTHOR("IBM Corporation"); 59 - MODULE_DESCRIPTION("PCICA Cryptographic Coprocessor device driver, " 60 - "Copyright IBM Corp. 2001, 2006"); 61 - MODULE_LICENSE("GPL"); 62 - 63 - static int zcrypt_pcica_probe(struct ap_device *ap_dev); 64 - static void zcrypt_pcica_remove(struct ap_device *ap_dev); 65 - static void zcrypt_pcica_receive(struct ap_device *, struct ap_message *, 66 - struct ap_message *); 67 - 68 - static struct ap_driver zcrypt_pcica_driver = { 69 - .probe = zcrypt_pcica_probe, 70 - .remove = zcrypt_pcica_remove, 71 - .ids = zcrypt_pcica_ids, 72 - .request_timeout = PCICA_CLEANUP_TIME, 73 - }; 74 - 75 - /** 76 - * Convert a ICAMEX message to a type4 MEX message. 77 - * 78 - * @zdev: crypto device pointer 79 - * @zreq: crypto request pointer 80 - * @mex: pointer to user input data 81 - * 82 - * Returns 0 on success or -EFAULT. 
83 - */ 84 - static int ICAMEX_msg_to_type4MEX_msg(struct zcrypt_device *zdev, 85 - struct ap_message *ap_msg, 86 - struct ica_rsa_modexpo *mex) 87 - { 88 - unsigned char *modulus, *exponent, *message; 89 - int mod_len; 90 - 91 - mod_len = mex->inputdatalength; 92 - 93 - if (mod_len <= 128) { 94 - struct type4_sme *sme = ap_msg->message; 95 - memset(sme, 0, sizeof(*sme)); 96 - ap_msg->length = sizeof(*sme); 97 - sme->header.msg_fmt = TYPE4_SME_FMT; 98 - sme->header.msg_len = sizeof(*sme); 99 - sme->header.msg_type_code = TYPE4_TYPE_CODE; 100 - sme->header.request_code = TYPE4_REQU_CODE; 101 - modulus = sme->modulus + sizeof(sme->modulus) - mod_len; 102 - exponent = sme->exponent + sizeof(sme->exponent) - mod_len; 103 - message = sme->message + sizeof(sme->message) - mod_len; 104 - } else { 105 - struct type4_lme *lme = ap_msg->message; 106 - memset(lme, 0, sizeof(*lme)); 107 - ap_msg->length = sizeof(*lme); 108 - lme->header.msg_fmt = TYPE4_LME_FMT; 109 - lme->header.msg_len = sizeof(*lme); 110 - lme->header.msg_type_code = TYPE4_TYPE_CODE; 111 - lme->header.request_code = TYPE4_REQU_CODE; 112 - modulus = lme->modulus + sizeof(lme->modulus) - mod_len; 113 - exponent = lme->exponent + sizeof(lme->exponent) - mod_len; 114 - message = lme->message + sizeof(lme->message) - mod_len; 115 - } 116 - 117 - if (copy_from_user(modulus, mex->n_modulus, mod_len) || 118 - copy_from_user(exponent, mex->b_key, mod_len) || 119 - copy_from_user(message, mex->inputdata, mod_len)) 120 - return -EFAULT; 121 - return 0; 122 - } 123 - 124 - /** 125 - * Convert a ICACRT message to a type4 CRT message. 126 - * 127 - * @zdev: crypto device pointer 128 - * @zreq: crypto request pointer 129 - * @crt: pointer to user input data 130 - * 131 - * Returns 0 on success or -EFAULT. 
132 - */ 133 - static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev, 134 - struct ap_message *ap_msg, 135 - struct ica_rsa_modexpo_crt *crt) 136 - { 137 - unsigned char *p, *q, *dp, *dq, *u, *inp; 138 - int mod_len, short_len, long_len; 139 - 140 - mod_len = crt->inputdatalength; 141 - short_len = mod_len / 2; 142 - long_len = mod_len / 2 + 8; 143 - 144 - if (mod_len <= 128) { 145 - struct type4_scr *scr = ap_msg->message; 146 - memset(scr, 0, sizeof(*scr)); 147 - ap_msg->length = sizeof(*scr); 148 - scr->header.msg_type_code = TYPE4_TYPE_CODE; 149 - scr->header.request_code = TYPE4_REQU_CODE; 150 - scr->header.msg_fmt = TYPE4_SCR_FMT; 151 - scr->header.msg_len = sizeof(*scr); 152 - p = scr->p + sizeof(scr->p) - long_len; 153 - q = scr->q + sizeof(scr->q) - short_len; 154 - dp = scr->dp + sizeof(scr->dp) - long_len; 155 - dq = scr->dq + sizeof(scr->dq) - short_len; 156 - u = scr->u + sizeof(scr->u) - long_len; 157 - inp = scr->message + sizeof(scr->message) - mod_len; 158 - } else { 159 - struct type4_lcr *lcr = ap_msg->message; 160 - memset(lcr, 0, sizeof(*lcr)); 161 - ap_msg->length = sizeof(*lcr); 162 - lcr->header.msg_type_code = TYPE4_TYPE_CODE; 163 - lcr->header.request_code = TYPE4_REQU_CODE; 164 - lcr->header.msg_fmt = TYPE4_LCR_FMT; 165 - lcr->header.msg_len = sizeof(*lcr); 166 - p = lcr->p + sizeof(lcr->p) - long_len; 167 - q = lcr->q + sizeof(lcr->q) - short_len; 168 - dp = lcr->dp + sizeof(lcr->dp) - long_len; 169 - dq = lcr->dq + sizeof(lcr->dq) - short_len; 170 - u = lcr->u + sizeof(lcr->u) - long_len; 171 - inp = lcr->message + sizeof(lcr->message) - mod_len; 172 - } 173 - 174 - if (copy_from_user(p, crt->np_prime, long_len) || 175 - copy_from_user(q, crt->nq_prime, short_len) || 176 - copy_from_user(dp, crt->bp_key, long_len) || 177 - copy_from_user(dq, crt->bq_key, short_len) || 178 - copy_from_user(u, crt->u_mult_inv, long_len) || 179 - copy_from_user(inp, crt->inputdata, mod_len)) 180 - return -EFAULT; 181 - return 0; 182 - } 183 - 
184 - /** 185 - * Copy results from a type 84 reply message back to user space. 186 - * 187 - * @zdev: crypto device pointer 188 - * @reply: reply AP message. 189 - * @data: pointer to user output data 190 - * @length: size of user output data 191 - * 192 - * Returns 0 on success or -EFAULT. 193 - */ 194 - static int convert_type84(struct zcrypt_device *zdev, 195 - struct ap_message *reply, 196 - char __user *outputdata, 197 - unsigned int outputdatalength) 198 - { 199 - struct type84_hdr *t84h = reply->message; 200 - char *data; 201 - 202 - if (t84h->len < sizeof(*t84h) + outputdatalength) { 203 - /* The result is too short, the PCICA card may not do that.. */ 204 - zdev->online = 0; 205 - pr_err("Cryptographic device %x failed and was set offline\n", 206 - zdev->ap_dev->qid); 207 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 208 - zdev->ap_dev->qid, zdev->online, t84h->code); 209 - return -EAGAIN; /* repeat the request on a different device. */ 210 - } 211 - BUG_ON(t84h->len > PCICA_MAX_RESPONSE_SIZE); 212 - data = reply->message + t84h->len - outputdatalength; 213 - if (copy_to_user(outputdata, data, outputdatalength)) 214 - return -EFAULT; 215 - return 0; 216 - } 217 - 218 - static int convert_response(struct zcrypt_device *zdev, 219 - struct ap_message *reply, 220 - char __user *outputdata, 221 - unsigned int outputdatalength) 222 - { 223 - /* Response type byte is the second byte in the response. 
*/ 224 - switch (((unsigned char *) reply->message)[1]) { 225 - case TYPE82_RSP_CODE: 226 - case TYPE88_RSP_CODE: 227 - return convert_error(zdev, reply); 228 - case TYPE84_RSP_CODE: 229 - return convert_type84(zdev, reply, 230 - outputdata, outputdatalength); 231 - default: /* Unknown response type, this should NEVER EVER happen */ 232 - zdev->online = 0; 233 - pr_err("Cryptographic device %x failed and was set offline\n", 234 - zdev->ap_dev->qid); 235 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 236 - zdev->ap_dev->qid, zdev->online); 237 - return -EAGAIN; /* repeat the request on a different device. */ 238 - } 239 - } 240 - 241 - /** 242 - * This function is called from the AP bus code after a crypto request 243 - * "msg" has finished with the reply message "reply". 244 - * It is called from tasklet context. 245 - * @ap_dev: pointer to the AP device 246 - * @msg: pointer to the AP message 247 - * @reply: pointer to the AP reply message 248 - */ 249 - static void zcrypt_pcica_receive(struct ap_device *ap_dev, 250 - struct ap_message *msg, 251 - struct ap_message *reply) 252 - { 253 - static struct error_hdr error_reply = { 254 - .type = TYPE82_RSP_CODE, 255 - .reply_code = REP82_ERROR_MACHINE_FAILURE, 256 - }; 257 - struct type84_hdr *t84h; 258 - int length; 259 - 260 - /* Copy the reply message to the request message buffer. */ 261 - if (IS_ERR(reply)) { 262 - memcpy(msg->message, &error_reply, sizeof(error_reply)); 263 - goto out; 264 - } 265 - t84h = reply->message; 266 - if (t84h->code == TYPE84_RSP_CODE) { 267 - length = min(PCICA_MAX_RESPONSE_SIZE, (int) t84h->len); 268 - memcpy(msg->message, reply->message, length); 269 - } else 270 - memcpy(msg->message, reply->message, sizeof error_reply); 271 - out: 272 - complete((struct completion *) msg->private); 273 - } 274 - 275 - static atomic_t zcrypt_step = ATOMIC_INIT(0); 276 - 277 - /** 278 - * The request distributor calls this function if it picked the PCICA 279 - * device to handle a modexpo request. 
280 - * @zdev: pointer to zcrypt_device structure that identifies the 281 - * PCICA device to the request distributor 282 - * @mex: pointer to the modexpo request buffer 283 - */ 284 - static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev, 285 - struct ica_rsa_modexpo *mex) 286 - { 287 - struct ap_message ap_msg; 288 - struct completion work; 289 - int rc; 290 - 291 - ap_init_message(&ap_msg); 292 - ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); 293 - if (!ap_msg.message) 294 - return -ENOMEM; 295 - ap_msg.receive = zcrypt_pcica_receive; 296 - ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 297 - atomic_inc_return(&zcrypt_step); 298 - ap_msg.private = &work; 299 - rc = ICAMEX_msg_to_type4MEX_msg(zdev, &ap_msg, mex); 300 - if (rc) 301 - goto out_free; 302 - init_completion(&work); 303 - ap_queue_message(zdev->ap_dev, &ap_msg); 304 - rc = wait_for_completion_interruptible(&work); 305 - if (rc == 0) 306 - rc = convert_response(zdev, &ap_msg, mex->outputdata, 307 - mex->outputdatalength); 308 - else 309 - /* Signal pending. */ 310 - ap_cancel_message(zdev->ap_dev, &ap_msg); 311 - out_free: 312 - kfree(ap_msg.message); 313 - return rc; 314 - } 315 - 316 - /** 317 - * The request distributor calls this function if it picked the PCICA 318 - * device to handle a modexpo_crt request. 
319 - * @zdev: pointer to zcrypt_device structure that identifies the 320 - * PCICA device to the request distributor 321 - * @crt: pointer to the modexpoc_crt request buffer 322 - */ 323 - static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev, 324 - struct ica_rsa_modexpo_crt *crt) 325 - { 326 - struct ap_message ap_msg; 327 - struct completion work; 328 - int rc; 329 - 330 - ap_init_message(&ap_msg); 331 - ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); 332 - if (!ap_msg.message) 333 - return -ENOMEM; 334 - ap_msg.receive = zcrypt_pcica_receive; 335 - ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 336 - atomic_inc_return(&zcrypt_step); 337 - ap_msg.private = &work; 338 - rc = ICACRT_msg_to_type4CRT_msg(zdev, &ap_msg, crt); 339 - if (rc) 340 - goto out_free; 341 - init_completion(&work); 342 - ap_queue_message(zdev->ap_dev, &ap_msg); 343 - rc = wait_for_completion_interruptible(&work); 344 - if (rc == 0) 345 - rc = convert_response(zdev, &ap_msg, crt->outputdata, 346 - crt->outputdatalength); 347 - else 348 - /* Signal pending. */ 349 - ap_cancel_message(zdev->ap_dev, &ap_msg); 350 - out_free: 351 - kfree(ap_msg.message); 352 - return rc; 353 - } 354 - 355 - /** 356 - * The crypto operations for a PCICA card. 357 - */ 358 - static struct zcrypt_ops zcrypt_pcica_ops = { 359 - .rsa_modexpo = zcrypt_pcica_modexpo, 360 - .rsa_modexpo_crt = zcrypt_pcica_modexpo_crt, 361 - }; 362 - 363 - /** 364 - * Probe function for PCICA cards. It always accepts the AP device 365 - * since the bus_match already checked the hardware type. 366 - * @ap_dev: pointer to the AP device. 
367 - */ 368 - static int zcrypt_pcica_probe(struct ap_device *ap_dev) 369 - { 370 - struct zcrypt_device *zdev; 371 - int rc; 372 - 373 - zdev = zcrypt_device_alloc(PCICA_MAX_RESPONSE_SIZE); 374 - if (!zdev) 375 - return -ENOMEM; 376 - zdev->ap_dev = ap_dev; 377 - zdev->ops = &zcrypt_pcica_ops; 378 - zdev->online = 1; 379 - zdev->user_space_type = ZCRYPT_PCICA; 380 - zdev->type_string = "PCICA"; 381 - zdev->min_mod_size = PCICA_MIN_MOD_SIZE; 382 - zdev->max_mod_size = PCICA_MAX_MOD_SIZE; 383 - zdev->speed_rating = PCICA_SPEED_RATING; 384 - zdev->max_exp_bit_length = PCICA_MAX_MOD_SIZE; 385 - ap_dev->reply = &zdev->reply; 386 - ap_dev->private = zdev; 387 - rc = zcrypt_device_register(zdev); 388 - if (rc) 389 - goto out_free; 390 - return 0; 391 - 392 - out_free: 393 - ap_dev->private = NULL; 394 - zcrypt_device_free(zdev); 395 - return rc; 396 - } 397 - 398 - /** 399 - * This is called to remove the extended PCICA driver information 400 - * if an AP device is removed. 401 - */ 402 - static void zcrypt_pcica_remove(struct ap_device *ap_dev) 403 - { 404 - struct zcrypt_device *zdev = ap_dev->private; 405 - 406 - zcrypt_device_unregister(zdev); 407 - } 408 - 409 - int __init zcrypt_pcica_init(void) 410 - { 411 - return ap_driver_register(&zcrypt_pcica_driver, THIS_MODULE, "pcica"); 412 - } 413 - 414 - void zcrypt_pcica_exit(void) 415 - { 416 - ap_driver_unregister(&zcrypt_pcica_driver); 417 - } 418 - 419 - module_init(zcrypt_pcica_init); 420 - module_exit(zcrypt_pcica_exit);
-115
drivers/s390/crypto/zcrypt_pcica.h
/*
 * zcrypt 2.1.0
 *
 * Copyright IBM Corp. 2001, 2006
 * Author(s): Robert Burroughs
 *	      Eric Rossman (edrossma@us.ibm.com)
 *
 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef _ZCRYPT_PCICA_H_
#define _ZCRYPT_PCICA_H_

/**
 * The type 4 message family is associated with a PCICA card.
 *
 * The four members of the family are described below.
 *
 * Note that all unsigned char arrays are right-justified and left-padded
 * with zeroes.
 *
 * Note that all reserved fields must be zeroes.
 */
struct type4_hdr {
	unsigned char  reserved1;
	unsigned char  msg_type_code;	/* 0x04 */
	unsigned short msg_len;
	unsigned char  request_code;	/* 0x40 */
	unsigned char  msg_fmt;		/* one of the TYPE4_*_FMT values */
	unsigned short reserved2;
} __attribute__((packed));

#define TYPE4_TYPE_CODE 0x04
#define TYPE4_REQU_CODE 0x40

/* Message format codes for the four type4 variants below. */
#define TYPE4_SME_FMT 0x00
#define TYPE4_LME_FMT 0x10
#define TYPE4_SCR_FMT 0x40
#define TYPE4_LCR_FMT 0x50

/* Mod-Exp, with a small modulus (up to 128 bytes / 1024 bits) */
struct type4_sme {
	struct type4_hdr header;
	unsigned char	 message[128];
	unsigned char	 exponent[128];
	unsigned char	 modulus[128];
} __attribute__((packed));

/* Mod-Exp, with a large modulus (up to 256 bytes / 2048 bits) */
struct type4_lme {
	struct type4_hdr header;
	unsigned char	 message[256];
	unsigned char	 exponent[256];
	unsigned char	 modulus[256];
} __attribute__((packed));

/* CRT, with a small modulus (up to 128 bytes / 1024 bits) */
struct type4_scr {
	struct type4_hdr header;
	unsigned char	 message[128];
	unsigned char	 dp[72];	/* mod_len/2 + 8 bytes */
	unsigned char	 dq[64];	/* mod_len/2 bytes */
	unsigned char	 p[72];
	unsigned char	 q[64];
	unsigned char	 u[72];
} __attribute__((packed));

/* CRT, with a large modulus (up to 256 bytes / 2048 bits) */
struct type4_lcr {
	struct type4_hdr header;
	unsigned char	 message[256];
	unsigned char	 dp[136];	/* mod_len/2 + 8 bytes */
	unsigned char	 dq[128];	/* mod_len/2 bytes */
	unsigned char	 p[136];
	unsigned char	 q[128];
	unsigned char	 u[136];
} __attribute__((packed));

/**
 * The type 84 response family is associated with a PCICA card.
 *
 * Note that all unsigned char arrays are right-justified and left-padded
 * with zeroes.
 *
 * Note that all reserved fields must be zeroes.
 */

struct type84_hdr {
	unsigned char  reserved1;
	unsigned char  code;		/* 0x84 */
	unsigned short len;		/* total length of the reply */
	unsigned char  reserved2[4];
} __attribute__((packed));

#define TYPE84_RSP_CODE 0x84

int zcrypt_pcica_init(void);
void zcrypt_pcica_exit(void);

#endif /* _ZCRYPT_PCICA_H_ */
-627
drivers/s390/crypto/zcrypt_pcicc.c
/*
 * zcrypt 2.1.0
 *
 * Copyright IBM Corp. 2001, 2006
 * Author(s): Robert Burroughs
 *	      Eric Rossman (edrossma@us.ibm.com)
 *
 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *			      Ralph Wuerthner <rwuerthn@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define KMSG_COMPONENT "zcrypt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/err.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>

#include "ap_bus.h"
#include "zcrypt_api.h"
#include "zcrypt_error.h"
#include "zcrypt_pcicc.h"
#include "zcrypt_cca_key.h"

/* Modulus limits accepted by a PCICC card, in bytes. */
#define PCICC_MIN_MOD_SIZE	 64	/*  512 bits */
#define PCICC_MAX_MOD_SIZE_OLD	128	/* 1024 bits */
#define PCICC_MAX_MOD_SIZE	256	/* 2048 bits */

/*
 * PCICC cards need a speed rating of 0. This keeps them at the end of
 * the zcrypt device list (see zcrypt_api.c). PCICC cards are only
 * used if no other cards are present because they are slow and can only
 * cope with PKCS12 padded requests. The logic is queer. PKCS11 padded
 * requests are rejected. The modexpo function encrypts PKCS12 padded data
 * and decrypts any non-PKCS12 padded data (except PKCS11) in the assumption
 * that it's encrypted PKCS12 data. The modexpo_crt function always decrypts
 * the data in the assumption that its PKCS12 encrypted data.
 */
#define PCICC_SPEED_RATING	0

#define PCICC_MAX_MESSAGE_SIZE	0x710	/* max size type6 v1 crt message */
#define PCICC_MAX_RESPONSE_SIZE 0x710	/* max size type86 v1 reply	 */

/* Passed to the AP bus as per-request timeout (.request_timeout below). */
#define PCICC_CLEANUP_TIME	(15*HZ)

/* AP bus match table: this driver binds PCICC type devices only. */
static struct ap_device_id zcrypt_pcicc_ids[] = {
	{ AP_DEVICE(AP_DEVICE_TYPE_PCICC) },
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, "
		   "Copyright IBM Corp. 2001, 2006");
MODULE_LICENSE("GPL");

static int zcrypt_pcicc_probe(struct ap_device *ap_dev);
static void zcrypt_pcicc_remove(struct ap_device *ap_dev);
static void zcrypt_pcicc_receive(struct ap_device *, struct ap_message *,
				 struct ap_message *);

static struct ap_driver zcrypt_pcicc_driver = {
	.probe = zcrypt_pcicc_probe,
	.remove = zcrypt_pcicc_remove,
	.ids = zcrypt_pcicc_ids,
	.request_timeout = PCICC_CLEANUP_TIME,
};

/**
 * The following is used to initialize the CPRB passed to the PCICC card
 * in a type6 message. The 3 fields that must be filled in at execution
 * time are req_parml, rpl_parml and usage_domain. Note that all three
 * fields are *little*-endian. Actually, everything about this interface
 * is ascii/little-endian, since the device has 'Intel inside'.
 *
 * The CPRB is followed immediately by the parm block.
95 - * The parm block contains: 96 - * - function code ('PD' 0x5044 or 'PK' 0x504B) 97 - * - rule block (0x0A00 'PKCS-1.2' or 0x0A00 'ZERO-PAD') 98 - * - VUD block 99 - */ 100 - static struct CPRB static_cprb = { 101 - .cprb_len = cpu_to_le16(0x0070), 102 - .cprb_ver_id = 0x41, 103 - .func_id = {0x54,0x32}, 104 - .checkpoint_flag= 0x01, 105 - .svr_namel = cpu_to_le16(0x0008), 106 - .svr_name = {'I','C','S','F',' ',' ',' ',' '} 107 - }; 108 - 109 - /** 110 - * Check the message for PKCS11 padding. 111 - */ 112 - static inline int is_PKCS11_padded(unsigned char *buffer, int length) 113 - { 114 - int i; 115 - if ((buffer[0] != 0x00) || (buffer[1] != 0x01)) 116 - return 0; 117 - for (i = 2; i < length; i++) 118 - if (buffer[i] != 0xFF) 119 - break; 120 - if (i < 10 || i == length) 121 - return 0; 122 - if (buffer[i] != 0x00) 123 - return 0; 124 - return 1; 125 - } 126 - 127 - /** 128 - * Check the message for PKCS12 padding. 129 - */ 130 - static inline int is_PKCS12_padded(unsigned char *buffer, int length) 131 - { 132 - int i; 133 - if ((buffer[0] != 0x00) || (buffer[1] != 0x02)) 134 - return 0; 135 - for (i = 2; i < length; i++) 136 - if (buffer[i] == 0x00) 137 - break; 138 - if ((i < 10) || (i == length)) 139 - return 0; 140 - if (buffer[i] != 0x00) 141 - return 0; 142 - return 1; 143 - } 144 - 145 - /** 146 - * Convert a ICAMEX message to a type6 MEX message. 147 - * 148 - * @zdev: crypto device pointer 149 - * @zreq: crypto request pointer 150 - * @mex: pointer to user input data 151 - * 152 - * Returns 0 on success or -EFAULT. 
 */
static int ICAMEX_msg_to_type6MEX_msg(struct zcrypt_device *zdev,
				      struct ap_message *ap_msg,
				      struct ica_rsa_modexpo *mex)
{
	static struct type6_hdr static_type6_hdr = {
		.type		=  0x06,
		.offset1	=  0x00000058,
		.agent_id	= {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
				   0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
		.function_code	= {'P','K'},	/* PK = encipher */
	};
	static struct function_and_rules_block static_pke_function_and_rules ={
		.function_code	= {'P','K'},
		.ulen		= cpu_to_le16(10),
		.only_rule	= {'P','K','C','S','-','1','.','2'}
	};
	/* On-the-wire layout of the type6 request being built. */
	struct {
		struct type6_hdr hdr;
		struct CPRB cprb;
		struct function_and_rules_block fr;
		unsigned short length;
		char text[0];
	} __attribute__((packed)) *msg = ap_msg->message;
	int vud_len, pad_len, size;

	/* VUD.ciphertext */
	if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
		return -EFAULT;

	/* PKCS11 padded requests are rejected (see PCICC_SPEED_RATING). */
	if (is_PKCS11_padded(msg->text, mex->inputdatalength))
		return -EINVAL;

	/* static message header and f&r */
	msg->hdr = static_type6_hdr;
	msg->fr = static_pke_function_and_rules;

	if (is_PKCS12_padded(msg->text, mex->inputdatalength)) {
		/* strip the padding and adjust the data length */
		pad_len = strnlen(msg->text + 2, mex->inputdatalength - 2) + 3;
		if (pad_len <= 9 || pad_len >= mex->inputdatalength)
			return -ENODEV;
		vud_len = mex->inputdatalength - pad_len;
		memmove(msg->text, msg->text + pad_len, vud_len);
		msg->length = cpu_to_le16(vud_len + 2);

		/* Set up key after the variable length text. */
		size = zcrypt_type6_mex_key_en(mex, msg->text + vud_len, 0);
		if (size < 0)
			return size;
		size += sizeof(*msg) + vud_len;	/* total size of msg */
	} else {
		/* Non-PKCS12 data: assume ciphertext and decipher instead. */
		vud_len = mex->inputdatalength;
		msg->length = cpu_to_le16(2 + vud_len);

		/* 'PK' -> 'PD' switches the card to decipher. */
		msg->hdr.function_code[1] = 'D';
		msg->fr.function_code[1] = 'D';

		/* Set up key after the variable length text. */
		size = zcrypt_type6_mex_key_de(mex, msg->text + vud_len, 0);
		if (size < 0)
			return size;
		size += sizeof(*msg) + vud_len;	/* total size of msg */
	}

	/* message header, cprb and f&r */
	msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4;
	msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);

	msg->cprb = static_cprb;
	msg->cprb.usage_domain[0]= AP_QID_QUEUE(zdev->ap_dev->qid);
	msg->cprb.req_parml = cpu_to_le16(size - sizeof(msg->hdr) -
					  sizeof(msg->cprb));
	msg->cprb.rpl_parml = cpu_to_le16(msg->hdr.FromCardLen1);

	/* AP message length is rounded up to a multiple of 4. */
	ap_msg->length = (size + 3) & -4;
	return 0;
}

/**
 * Convert a ICACRT message to a type6 CRT message.
 *
 * @zdev: crypto device pointer
 * @zreq: crypto request pointer
 * @crt: pointer to user input data
 *
 * Returns 0 on success or -EFAULT.
 */
static int ICACRT_msg_to_type6CRT_msg(struct zcrypt_device *zdev,
				      struct ap_message *ap_msg,
				      struct ica_rsa_modexpo_crt *crt)
{
	static struct type6_hdr static_type6_hdr = {
		.type		=  0x06,
		.offset1	=  0x00000058,
		.agent_id	= {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
				   0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
		.function_code	= {'P','D'},	/* PD = decipher */
	};
	static struct function_and_rules_block static_pkd_function_and_rules ={
		.function_code	= {'P','D'},
		.ulen		= cpu_to_le16(10),
		.only_rule	= {'P','K','C','S','-','1','.','2'}
	};
	/* On-the-wire layout of the type6 request being built. */
	struct {
		struct type6_hdr hdr;
		struct CPRB cprb;
		struct function_and_rules_block fr;
		unsigned short length;
		char text[0];
	} __attribute__((packed)) *msg = ap_msg->message;
	int size;

	/* VUD.ciphertext */
	msg->length = cpu_to_le16(2 + crt->inputdatalength);
	if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
		return -EFAULT;

	/* PKCS11 padded requests are rejected (see PCICC_SPEED_RATING). */
	if (is_PKCS11_padded(msg->text, crt->inputdatalength))
		return -EINVAL;

	/* Set up key after the variable length text. */
	size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 0);
	if (size < 0)
		return size;
	size += sizeof(*msg) + crt->inputdatalength;	/* total size of msg */

	/* message header, cprb and f&r */
	msg->hdr = static_type6_hdr;
	msg->hdr.ToCardLen1 = (size -  sizeof(msg->hdr) + 3) & -4;
	msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);

	msg->cprb = static_cprb;
	msg->cprb.usage_domain[0] = AP_QID_QUEUE(zdev->ap_dev->qid);
	msg->cprb.req_parml = msg->cprb.rpl_parml =
		cpu_to_le16(size - sizeof(msg->hdr) - sizeof(msg->cprb));

	msg->fr = static_pkd_function_and_rules;

	/* AP message length is rounded up to a multiple of 4. */
	ap_msg->length = (size + 3) & -4;
	return 0;
}

/**
 * Copy results from a type 86 reply message back to user space.
 *
 * @zdev: crypto device pointer
 * @reply: reply AP message.
 * @data: pointer to user output data
 * @length: size of user output data
 *
 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
 */
struct type86_reply {
	struct type86_hdr hdr;
	struct type86_fmt2_ext fmt2;
	struct CPRB cprb;
	unsigned char pad[4];	/* 4 byte function code/rules block ? */
	unsigned short length;
	char text[0];
} __attribute__((packed));

static int convert_type86(struct zcrypt_device *zdev,
			  struct ap_message *reply,
			  char __user *outputdata,
			  unsigned int outputdatalength)
{
	/* Fixed pseudo-random pad bytes used to re-pad deciphered output. */
	static unsigned char static_pad[] = {
		0x00,0x02,
		0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
		0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
		0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
		0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
		0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
		0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
		0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
		0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
		0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
		0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
		0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
		0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
		0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
		0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
		0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
		0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
		0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
		0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
		0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
		0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
		0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
		0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
		0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
		0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
		0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
		0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
		0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
		0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
		0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
		0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
		0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
		0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
	};
	struct type86_reply *msg = reply->message;
	unsigned short service_rc, service_rs;
	unsigned int reply_len, pad_len;
	char *data;

	service_rc = le16_to_cpu(msg->cprb.ccp_rtcode);
	if (unlikely(service_rc != 0)) {
		service_rs = le16_to_cpu(msg->cprb.ccp_rscode);
		if (service_rc == 8 && service_rs == 66)
			return -EINVAL;
		if (service_rc == 8 && service_rs == 65)
			return -EINVAL;
		/* 770/783: modulus too large for this card; clamp and retry. */
		if (service_rc == 8 && service_rs == 770) {
			zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
			return -EAGAIN;
		}
		if (service_rc == 8 && service_rs == 783) {
			zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD;
			return -EAGAIN;
		}
		if (service_rc == 8 && service_rs == 72)
			return -EINVAL;
		zdev->online = 0;
		pr_err("Cryptographic device %x failed and was set offline\n",
		       zdev->ap_dev->qid);
		ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
			       zdev->ap_dev->qid, zdev->online,
			       msg->hdr.reply_code);
		return -EAGAIN;	/* repeat the request on a different device. */
	}
	data = msg->text;
	reply_len = le16_to_cpu(msg->length) - 2;
	if (reply_len > outputdatalength)
		return -EINVAL;
	/*
	 * For all encipher requests, the length of the ciphertext (reply_len)
	 * will always equal the modulus length. For MEX decipher requests
	 * the output needs to get padded. Minimum pad size is 10.
	 *
	 * Currently, the cases where padding will be added is for:
	 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
	 *   ZERO-PAD and CRT is only supported for PKD requests)
	 * - PCICC, always
	 */
	pad_len = outputdatalength - reply_len;
	if (pad_len > 0) {
		if (pad_len < 10)
			return -EINVAL;
		/* 'restore' padding left in the PCICC/PCIXCC card. */
		if (copy_to_user(outputdata, static_pad, pad_len - 1))
			return -EFAULT;
		/* The 0x00 separator terminates the restored padding. */
		if (put_user(0, outputdata + pad_len - 1))
			return -EFAULT;
	}
	/* Copy the crypto response to user space. */
	if (copy_to_user(outputdata + pad_len, data, reply_len))
		return -EFAULT;
	return 0;
}

static int convert_response(struct zcrypt_device *zdev,
			    struct ap_message *reply,
			    char __user *outputdata,
			    unsigned int outputdatalength)
{
	struct type86_reply *msg = reply->message;

	/* Response type byte is the second byte in the response. */
	switch (msg->hdr.type) {
	case TYPE82_RSP_CODE:
	case TYPE88_RSP_CODE:
		return convert_error(zdev, reply);
	case TYPE86_RSP_CODE:
		if (msg->hdr.reply_code)
			return convert_error(zdev, reply);
		if (msg->cprb.cprb_ver_id == 0x01)
			return convert_type86(zdev, reply,
					      outputdata, outputdatalength);
		/* no break, incorrect cprb version is an unknown response */
	default: /* Unknown response type, this should NEVER EVER happen */
		zdev->online = 0;
		pr_err("Cryptographic device %x failed and was set offline\n",
		       zdev->ap_dev->qid);
		ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
			       zdev->ap_dev->qid, zdev->online);
		return -EAGAIN;	/* repeat the request on a different device. */
	}
}

/**
 * This function is called from the AP bus code after a crypto request
 * "msg" has finished with the reply message "reply".
 * It is called from tasklet context.
 * @ap_dev: pointer to the AP device
 * @msg: pointer to the AP message
 * @reply: pointer to the AP reply message
 */
static void zcrypt_pcicc_receive(struct ap_device *ap_dev,
				 struct ap_message *msg,
				 struct ap_message *reply)
{
	static struct error_hdr error_reply = {
		.type = TYPE82_RSP_CODE,
		.reply_code = REP82_ERROR_MACHINE_FAILURE,
	};
	struct type86_reply *t86r;
	int length;

	/* Copy the reply message to the request message buffer. */
	if (IS_ERR(reply)) {
		/* Transport failure: synthesize a machine failure reply. */
		memcpy(msg->message, &error_reply, sizeof(error_reply));
		goto out;
	}
	t86r = reply->message;
	if (t86r->hdr.type == TYPE86_RSP_CODE &&
	    t86r->cprb.cprb_ver_id == 0x01) {
		/* Copy header plus text, capped at the response buffer size. */
		length = sizeof(struct type86_reply) + t86r->length - 2;
		length = min(PCICC_MAX_RESPONSE_SIZE, length);
		memcpy(msg->message, reply->message, length);
	} else
		/* Error reply: the error header is all we need. */
		memcpy(msg->message, reply->message, sizeof error_reply);
out:
	complete((struct completion *) msg->private);
}

/* Per-request sequence counter used to build unique psmids. */
static atomic_t zcrypt_step = ATOMIC_INIT(0);

/**
 * The request distributor calls this function if it picked the PCICC
 * device to handle a modexpo request.
 * @zdev: pointer to zcrypt_device structure that identifies the
 *	  PCICC device to the request distributor
 * @mex: pointer to the modexpo request buffer
 */
static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev,
				 struct ica_rsa_modexpo *mex)
{
	struct ap_message ap_msg;
	struct completion work;
	int rc;

	ap_init_message(&ap_msg);
	/* The type6 builders rely on a zeroed page-sized buffer. */
	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
	if (!ap_msg.message)
		return -ENOMEM;
	ap_msg.receive = zcrypt_pcicc_receive;
	ap_msg.length = PAGE_SIZE;
	/* psmid = requesting pid + a per-request sequence number. */
	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
		atomic_inc_return(&zcrypt_step);
	ap_msg.private = &work;
	rc = ICAMEX_msg_to_type6MEX_msg(zdev, &ap_msg, mex);
	if (rc)
		goto out_free;
	init_completion(&work);
	ap_queue_message(zdev->ap_dev, &ap_msg);
	rc = wait_for_completion_interruptible(&work);
	if (rc == 0)
		rc = convert_response(zdev, &ap_msg, mex->outputdata,
				      mex->outputdatalength);
	else
		/* Signal pending. */
		ap_cancel_message(zdev->ap_dev, &ap_msg);
out_free:
	free_page((unsigned long) ap_msg.message);
	return rc;
}

/**
 * The request distributor calls this function if it picked the PCICC
 * device to handle a modexpo_crt request.
 * @zdev: pointer to zcrypt_device structure that identifies the
 *	  PCICC device to the request distributor
 * @crt: pointer to the modexpoc_crt request buffer
 */
static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev,
				     struct ica_rsa_modexpo_crt *crt)
{
	struct ap_message ap_msg;
	struct completion work;
	int rc;

	ap_init_message(&ap_msg);
	/* The type6 builders rely on a zeroed page-sized buffer. */
	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
	if (!ap_msg.message)
		return -ENOMEM;
	ap_msg.receive = zcrypt_pcicc_receive;
	ap_msg.length = PAGE_SIZE;
	/* psmid = requesting pid + a per-request sequence number. */
	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
		atomic_inc_return(&zcrypt_step);
	ap_msg.private = &work;
	rc = ICACRT_msg_to_type6CRT_msg(zdev, &ap_msg, crt);
	if (rc)
		goto out_free;
	init_completion(&work);
	ap_queue_message(zdev->ap_dev, &ap_msg);
	rc = wait_for_completion_interruptible(&work);
	if (rc == 0)
		rc = convert_response(zdev, &ap_msg, crt->outputdata,
				      crt->outputdatalength);
	else
		/* Signal pending. */
		ap_cancel_message(zdev->ap_dev, &ap_msg);
out_free:
	free_page((unsigned long) ap_msg.message);
	return rc;
}

/**
 * The crypto operations for a PCICC card.
 */
static struct zcrypt_ops zcrypt_pcicc_ops = {
	.rsa_modexpo = zcrypt_pcicc_modexpo,
	.rsa_modexpo_crt = zcrypt_pcicc_modexpo_crt,
};

/**
 * Probe function for PCICC cards. It always accepts the AP device
 * since the bus_match already checked the hardware type.
 * @ap_dev: pointer to the AP device.
574 - */ 575 - static int zcrypt_pcicc_probe(struct ap_device *ap_dev) 576 - { 577 - struct zcrypt_device *zdev; 578 - int rc; 579 - 580 - zdev = zcrypt_device_alloc(PCICC_MAX_RESPONSE_SIZE); 581 - if (!zdev) 582 - return -ENOMEM; 583 - zdev->ap_dev = ap_dev; 584 - zdev->ops = &zcrypt_pcicc_ops; 585 - zdev->online = 1; 586 - zdev->user_space_type = ZCRYPT_PCICC; 587 - zdev->type_string = "PCICC"; 588 - zdev->min_mod_size = PCICC_MIN_MOD_SIZE; 589 - zdev->max_mod_size = PCICC_MAX_MOD_SIZE; 590 - zdev->speed_rating = PCICC_SPEED_RATING; 591 - zdev->max_exp_bit_length = PCICC_MAX_MOD_SIZE; 592 - ap_dev->reply = &zdev->reply; 593 - ap_dev->private = zdev; 594 - rc = zcrypt_device_register(zdev); 595 - if (rc) 596 - goto out_free; 597 - return 0; 598 - 599 - out_free: 600 - ap_dev->private = NULL; 601 - zcrypt_device_free(zdev); 602 - return rc; 603 - } 604 - 605 - /** 606 - * This is called to remove the extended PCICC driver information 607 - * if an AP device is removed. 608 - */ 609 - static void zcrypt_pcicc_remove(struct ap_device *ap_dev) 610 - { 611 - struct zcrypt_device *zdev = ap_dev->private; 612 - 613 - zcrypt_device_unregister(zdev); 614 - } 615 - 616 - int __init zcrypt_pcicc_init(void) 617 - { 618 - return ap_driver_register(&zcrypt_pcicc_driver, THIS_MODULE, "pcicc"); 619 - } 620 - 621 - void zcrypt_pcicc_exit(void) 622 - { 623 - ap_driver_unregister(&zcrypt_pcicc_driver); 624 - } 625 - 626 - module_init(zcrypt_pcicc_init); 627 - module_exit(zcrypt_pcicc_exit);
-174
drivers/s390/crypto/zcrypt_pcicc.h
··· 1 - /* 2 - * zcrypt 2.1.0 3 - * 4 - * Copyright IBM Corp. 2001, 2006 5 - * Author(s): Robert Burroughs 6 - * Eric Rossman (edrossma@us.ibm.com) 7 - * 8 - * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 9 - * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 10 - * 11 - * This program is free software; you can redistribute it and/or modify 12 - * it under the terms of the GNU General Public License as published by 13 - * the Free Software Foundation; either version 2, or (at your option) 14 - * any later version. 15 - * 16 - * This program is distributed in the hope that it will be useful, 17 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 - * GNU General Public License for more details. 20 - * 21 - * You should have received a copy of the GNU General Public License 22 - * along with this program; if not, write to the Free Software 23 - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 24 - */ 25 - 26 - #ifndef _ZCRYPT_PCICC_H_ 27 - #define _ZCRYPT_PCICC_H_ 28 - 29 - /** 30 - * The type 6 message family is associated with PCICC or PCIXCC cards. 31 - * 32 - * It contains a message header followed by a CPRB, both of which 33 - * are described below. 34 - * 35 - * Note that all reserved fields must be zeroes. 
36 - */ 37 - struct type6_hdr { 38 - unsigned char reserved1; /* 0x00 */ 39 - unsigned char type; /* 0x06 */ 40 - unsigned char reserved2[2]; /* 0x0000 */ 41 - unsigned char right[4]; /* 0x00000000 */ 42 - unsigned char reserved3[2]; /* 0x0000 */ 43 - unsigned char reserved4[2]; /* 0x0000 */ 44 - unsigned char apfs[4]; /* 0x00000000 */ 45 - unsigned int offset1; /* 0x00000058 (offset to CPRB) */ 46 - unsigned int offset2; /* 0x00000000 */ 47 - unsigned int offset3; /* 0x00000000 */ 48 - unsigned int offset4; /* 0x00000000 */ 49 - unsigned char agent_id[16]; /* PCICC: */ 50 - /* 0x0100 */ 51 - /* 0x4343412d4150504c202020 */ 52 - /* 0x010101 */ 53 - /* PCIXCC: */ 54 - /* 0x4341000000000000 */ 55 - /* 0x0000000000000000 */ 56 - unsigned char rqid[2]; /* rqid. internal to 603 */ 57 - unsigned char reserved5[2]; /* 0x0000 */ 58 - unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */ 59 - unsigned char reserved6[2]; /* 0x0000 */ 60 - unsigned int ToCardLen1; /* (request CPRB len + 3) & -4 */ 61 - unsigned int ToCardLen2; /* db len 0x00000000 for PKD */ 62 - unsigned int ToCardLen3; /* 0x00000000 */ 63 - unsigned int ToCardLen4; /* 0x00000000 */ 64 - unsigned int FromCardLen1; /* response buffer length */ 65 - unsigned int FromCardLen2; /* db len 0x00000000 for PKD */ 66 - unsigned int FromCardLen3; /* 0x00000000 */ 67 - unsigned int FromCardLen4; /* 0x00000000 */ 68 - } __attribute__((packed)); 69 - 70 - /** 71 - * CPRB 72 - * Note that all shorts, ints and longs are little-endian. 73 - * All pointer fields are 32-bits long, and mean nothing 74 - * 75 - * A request CPRB is followed by a request_parameter_block. 76 - * 77 - * The request (or reply) parameter block is organized thus: 78 - * function code 79 - * VUD block 80 - * key block 81 - */ 82 - struct CPRB { 83 - unsigned short cprb_len; /* CPRB length */ 84 - unsigned char cprb_ver_id; /* CPRB version id. */ 85 - unsigned char pad_000; /* Alignment pad byte. 
*/ 86 - unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */ 87 - unsigned char srpi_verb; /* SRPI verb type */ 88 - unsigned char flags; /* flags */ 89 - unsigned char func_id[2]; /* function id */ 90 - unsigned char checkpoint_flag; /* */ 91 - unsigned char resv2; /* reserved */ 92 - unsigned short req_parml; /* request parameter buffer */ 93 - /* length 16-bit little endian */ 94 - unsigned char req_parmp[4]; /* request parameter buffer * 95 - * pointer (means nothing: the * 96 - * parameter buffer follows * 97 - * the CPRB). */ 98 - unsigned char req_datal[4]; /* request data buffer */ 99 - /* length ULELONG */ 100 - unsigned char req_datap[4]; /* request data buffer */ 101 - /* pointer */ 102 - unsigned short rpl_parml; /* reply parameter buffer */ 103 - /* length 16-bit little endian */ 104 - unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */ 105 - unsigned char rpl_parmp[4]; /* reply parameter buffer * 106 - * pointer (means nothing: the * 107 - * parameter buffer follows * 108 - * the CPRB). 
*/ 109 - unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */ 110 - unsigned char rpl_datap[4]; /* reply data buffer */ 111 - /* pointer */ 112 - unsigned short ccp_rscode; /* server reason code ULESHORT */ 113 - unsigned short ccp_rtcode; /* server return code ULESHORT */ 114 - unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/ 115 - unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */ 116 - unsigned char repd_datal[4]; /* replied data length ULELONG */ 117 - unsigned char req_pc[2]; /* PC identifier */ 118 - unsigned char res_origin[8]; /* resource origin */ 119 - unsigned char mac_value[8]; /* Mac Value */ 120 - unsigned char logon_id[8]; /* Logon Identifier */ 121 - unsigned char usage_domain[2]; /* cdx */ 122 - unsigned char resv3[18]; /* reserved for requestor */ 123 - unsigned short svr_namel; /* server name length ULESHORT */ 124 - unsigned char svr_name[8]; /* server name */ 125 - } __attribute__((packed)); 126 - 127 - /** 128 - * The type 86 message family is associated with PCICC and PCIXCC cards. 129 - * 130 - * It contains a message header followed by a CPRB. The CPRB is 131 - * the same as the request CPRB, which is described above. 132 - * 133 - * If format is 1, an error condition exists and no data beyond 134 - * the 8-byte message header is of interest. 135 - * 136 - * The non-error message is shown below. 137 - * 138 - * Note that all reserved fields must be zeroes. 
139 - */ 140 - struct type86_hdr { 141 - unsigned char reserved1; /* 0x00 */ 142 - unsigned char type; /* 0x86 */ 143 - unsigned char format; /* 0x01 (error) or 0x02 (ok) */ 144 - unsigned char reserved2; /* 0x00 */ 145 - unsigned char reply_code; /* reply code (see above) */ 146 - unsigned char reserved3[3]; /* 0x000000 */ 147 - } __attribute__((packed)); 148 - 149 - #define TYPE86_RSP_CODE 0x86 150 - #define TYPE86_FMT2 0x02 151 - 152 - struct type86_fmt2_ext { 153 - unsigned char reserved[4]; /* 0x00000000 */ 154 - unsigned char apfs[4]; /* final status */ 155 - unsigned int count1; /* length of CPRB + parameters */ 156 - unsigned int offset1; /* offset to CPRB */ 157 - unsigned int count2; /* 0x00000000 */ 158 - unsigned int offset2; /* db offset 0x00000000 for PKD */ 159 - unsigned int count3; /* 0x00000000 */ 160 - unsigned int offset3; /* 0x00000000 */ 161 - unsigned int count4; /* 0x00000000 */ 162 - unsigned int offset4; /* 0x00000000 */ 163 - } __attribute__((packed)); 164 - 165 - struct function_and_rules_block { 166 - unsigned char function_code[2]; 167 - unsigned short ulen; 168 - unsigned char only_rule[8]; 169 - } __attribute__((packed)); 170 - 171 - int zcrypt_pcicc_init(void); 172 - void zcrypt_pcicc_exit(void); 173 - 174 - #endif /* _ZCRYPT_PCICC_H_ */
+12 -3
drivers/s390/virtio/virtio_ccw.c
··· 28 28 #include <linux/io.h> 29 29 #include <linux/kvm_para.h> 30 30 #include <linux/notifier.h> 31 + #include <asm/diag.h> 31 32 #include <asm/setup.h> 32 33 #include <asm/irq.h> 33 34 #include <asm/cio.h> ··· 367 366 kfree(thinint_area); 368 367 } 369 368 370 - static inline long do_kvm_notify(struct subchannel_id schid, 371 - unsigned long queue_index, 372 - long cookie) 369 + static inline long __do_kvm_notify(struct subchannel_id schid, 370 + unsigned long queue_index, 371 + long cookie) 373 372 { 374 373 register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY; 375 374 register struct subchannel_id __schid asm("2") = schid; ··· 382 381 "d"(__cookie) 383 382 : "memory", "cc"); 384 383 return __rc; 384 + } 385 + 386 + static inline long do_kvm_notify(struct subchannel_id schid, 387 + unsigned long queue_index, 388 + long cookie) 389 + { 390 + diag_stat_inc(DIAG_STAT_X500); 391 + return __do_kvm_notify(schid, queue_index, cookie); 385 392 } 386 393 387 394 static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
+4
drivers/watchdog/diag288_wdt.c
··· 29 29 #include <linux/watchdog.h> 30 30 #include <linux/suspend.h> 31 31 #include <asm/ebcdic.h> 32 + #include <asm/diag.h> 32 33 #include <linux/io.h> 33 34 #include <linux/uaccess.h> 34 35 ··· 95 94 static int __diag288_vm(unsigned int func, unsigned int timeout, 96 95 char *cmd, size_t len) 97 96 { 97 + diag_stat_inc(DIAG_STAT_X288); 98 98 return __diag288(func, timeout, virt_to_phys(cmd), len); 99 99 } 100 100 101 101 static int __diag288_lpar(unsigned int func, unsigned int timeout, 102 102 unsigned long action) 103 103 { 104 + diag_stat_inc(DIAG_STAT_X288); 104 105 return __diag288(func, timeout, action, 0); 105 106 } 106 107 ··· 144 141 { 145 142 int ret; 146 143 144 + diag_stat_inc(DIAG_STAT_X288); 147 145 ret = __diag288(WDT_FUNC_CANCEL, 0, 0, 0); 148 146 return ret; 149 147 }
+2 -2
fs/proc/task_mmu.c
··· 754 754 755 755 if (pte_present(ptent)) { 756 756 ptent = pte_wrprotect(ptent); 757 - ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY); 757 + ptent = pte_clear_soft_dirty(ptent); 758 758 } else if (is_swap_pte(ptent)) { 759 759 ptent = pte_swp_clear_soft_dirty(ptent); 760 760 } ··· 768 768 pmd_t pmd = *pmdp; 769 769 770 770 pmd = pmd_wrprotect(pmd); 771 - pmd = pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY); 771 + pmd = pmd_clear_soft_dirty(pmd); 772 772 773 773 if (vma->vm_flags & VM_SOFTDIRTY) 774 774 vma->vm_flags &= ~VM_SOFTDIRTY;
+10
include/asm-generic/pgtable.h
··· 505 505 return pmd; 506 506 } 507 507 508 + static inline pte_t pte_clear_soft_dirty(pte_t pte) 509 + { 510 + return pte; 511 + } 512 + 513 + static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) 514 + { 515 + return pmd; 516 + } 517 + 508 518 static inline pte_t pte_swp_mksoft_dirty(pte_t pte) 509 519 { 510 520 return pte;
+1 -1
include/linux/compiler.h
··· 56 56 #include <linux/compiler-gcc.h> 57 57 #endif 58 58 59 - #ifdef CC_USING_HOTPATCH 59 + #if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__) 60 60 #define notrace __attribute__((hotpatch(0,0))) 61 61 #else 62 62 #define notrace __attribute__((no_instrument_function))