Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Martin Schwidefsky:
"Over 95% of the changes in this pull request are related to the zcrypt
driver. There are five improvements for zcrypt: the ID for the CEX6
cards is added, workload balancing and multi-domain support are
introduced, the debug logs are overhauled and a set of tracepoints is
added.

Then there are several patches in regard to inline assemblies. One
compile fix and several missing memory clobbers. As far as we can tell
the omitted memory clobbers have not caused any breakage.

A small change to the PCI arch code, the machine can tell us how big
the function measurement blocks are. The PCI function measurement will
be disabled for a device if the queried length is larger than the
allocated size for these blocks.

And two more patches to correct five printk messages.

That is it for s390 in regard to the 4.10 merge window. Happy holidays"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (23 commits)
s390/pci: query fmb length
s390/zcrypt: add missing memory clobber to ap_qci inline assembly
s390/extmem: add missing memory clobber to dcss_set_subcodes
s390/nmi: fix inline assembly constraints
s390/lib: add missing memory barriers to string inline assemblies
s390/cpumf: fix qsi inline assembly
s390/setup: reword printk messages
s390/dasd: fix typos in DASD error messages
s390: fix compile error with memmove_early() inline assembly
s390/zcrypt: tracepoint definitions for zcrypt device driver.
s390/zcrypt: Rework debug feature invocations.
s390/zcrypt: Improved invalid domain response handling.
s390/zcrypt: Fix ap_max_domain_id for older machine types
s390/zcrypt: Correct function bits for CEX2x and CEX3x cards.
s390/zcrypt: Fixed attrition of AP adapters and domains
s390/zcrypt: Introduce new zcrypt device status API
s390/zcrypt: add multi domain support
s390/zcrypt: Introduce workload balancing
s390/zcrypt: get rid of ap_poll_requests
s390/zcrypt: header for the AP inline assmblies
...

+3938 -2408
+3 -7
arch/s390/include/asm/cpu_mf.h
··· 213 213 /* Query sampling information */ 214 214 static inline int qsi(struct hws_qsi_info_block *info) 215 215 { 216 - int cc; 217 - cc = 1; 216 + int cc = 1; 218 217 219 218 asm volatile( 220 - "0: .insn s,0xb2860000,0(%1)\n" 219 + "0: .insn s,0xb2860000,%1\n" 221 220 "1: lhi %0,0\n" 222 221 "2:\n" 223 222 EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) 224 - : "=d" (cc), "+a" (info) 225 - : "m" (*info) 226 - : "cc", "memory"); 227 - 223 + : "+d" (cc), "+Q" (*info)); 228 224 return cc ? -EINVAL : 0; 229 225 } 230 226
+1
arch/s390/include/asm/pci.h
··· 133 133 /* Function measurement block */ 134 134 struct zpci_fmb *fmb; 135 135 u16 fmb_update; /* update interval */ 136 + u16 fmb_length; 136 137 /* software counters */ 137 138 atomic64_t allocated_pages; 138 139 atomic64_t mapped_pages;
+2 -1
arch/s390/include/asm/pci_clp.h
··· 87 87 u16 pchid; 88 88 u32 bar[PCI_BAR_COUNT]; 89 89 u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */ 90 - u32 : 24; 90 + u32 : 16; 91 + u8 fmb_len; 91 92 u8 pft; /* pci function type */ 92 93 u64 sdma; /* start dma as */ 93 94 u64 edma; /* end dma as */
+4 -4
arch/s390/include/asm/string.h
··· 62 62 " jl 1f\n" 63 63 " la %0,0\n" 64 64 "1:" 65 - : "+a" (ret), "+&a" (s) : "d" (r0) : "cc"); 65 + : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory"); 66 66 return (void *) ret; 67 67 } 68 68 ··· 74 74 asm volatile( 75 75 "0: srst %0,%1\n" 76 76 " jo 0b\n" 77 - : "+a" (ret), "+&a" (s) : "d" (r0) : "cc"); 77 + : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory"); 78 78 return (void *) ret; 79 79 } 80 80 ··· 115 115 asm volatile( 116 116 "0: srst %0,%1\n" 117 117 " jo 0b" 118 - : "+d" (r0), "+a" (tmp) : : "cc"); 118 + : "+d" (r0), "+a" (tmp) : : "cc", "memory"); 119 119 return r0 - (unsigned long) s; 120 120 } 121 121 ··· 128 128 asm volatile( 129 129 "0: srst %0,%1\n" 130 130 " jo 0b" 131 - : "+a" (end), "+a" (tmp) : "d" (r0) : "cc"); 131 + : "+a" (end), "+a" (tmp) : "d" (r0) : "cc", "memory"); 132 132 return end - s; 133 133 } 134 134 #else /* IN_ARCH_STRING_C */
+122
arch/s390/include/asm/trace/zcrypt.h
··· 1 + /* 2 + * Tracepoint definitions for the s390 zcrypt device driver 3 + * 4 + * Copyright IBM Corp. 2016 5 + * Author(s): Harald Freudenberger <freude@de.ibm.com> 6 + * 7 + * Currently there are two tracepoint events defined here. 8 + * An s390_zcrypt_req request event occurs as soon as the request is 9 + * recognized by the zcrypt ioctl function. This event may act as some kind 10 + * of request-processing-starts-now indication. 11 + * As late as possible within the zcrypt ioctl function there occurs the 12 + * s390_zcrypt_rep event which may act as the point in time where the 13 + * request has been processed by the kernel and the result is about to be 14 + * transferred back to userspace. 15 + * The glue which binds together request and reply event is the ptr 16 + * parameter, which is the local buffer address where the request from 17 + * userspace has been stored by the ioctl function. 18 + * 19 + * The main purpose of this zcrypt tracepoint api is to get some data for 20 + * performance measurements together with information about on which card 21 + * and queue the request has been processed. It is not an ffdc interface as 22 + * there is already code in the zcrypt device driver to serve the s390 23 + * debug feature interface. 
24 + */ 25 + 26 + #undef TRACE_SYSTEM 27 + #define TRACE_SYSTEM s390 28 + 29 + #if !defined(_TRACE_S390_ZCRYPT_H) || defined(TRACE_HEADER_MULTI_READ) 30 + #define _TRACE_S390_ZCRYPT_H 31 + 32 + #include <linux/tracepoint.h> 33 + 34 + #define TP_ICARSAMODEXPO 0x0001 35 + #define TP_ICARSACRT 0x0002 36 + #define TB_ZSECSENDCPRB 0x0003 37 + #define TP_ZSENDEP11CPRB 0x0004 38 + #define TP_HWRNGCPRB 0x0005 39 + 40 + #define show_zcrypt_tp_type(type) \ 41 + __print_symbolic(type, \ 42 + { TP_ICARSAMODEXPO, "ICARSAMODEXPO" }, \ 43 + { TP_ICARSACRT, "ICARSACRT" }, \ 44 + { TB_ZSECSENDCPRB, "ZSECSENDCPRB" }, \ 45 + { TP_ZSENDEP11CPRB, "ZSENDEP11CPRB" }, \ 46 + { TP_HWRNGCPRB, "HWRNGCPRB" }) 47 + 48 + /** 49 + * trace_s390_zcrypt_req - zcrypt request tracepoint function 50 + * @ptr: Address of the local buffer where the request from userspace 51 + * is stored. Can be used as a unique id to relate together 52 + * request and reply. 53 + * @type: One of the TP_ defines above. 54 + * 55 + * Called when a request from userspace is recognised within the ioctl 56 + * function of the zcrypt device driver and may act as an entry 57 + * timestamp. 58 + */ 59 + TRACE_EVENT(s390_zcrypt_req, 60 + TP_PROTO(void *ptr, u32 type), 61 + TP_ARGS(ptr, type), 62 + TP_STRUCT__entry( 63 + __field(void *, ptr) 64 + __field(u32, type)), 65 + TP_fast_assign( 66 + __entry->ptr = ptr; 67 + __entry->type = type;), 68 + TP_printk("ptr=%p type=%s", 69 + __entry->ptr, 70 + show_zcrypt_tp_type(__entry->type)) 71 + ); 72 + 73 + /** 74 + * trace_s390_zcrypt_rep - zcrypt reply tracepoint function 75 + * @ptr: Address of the local buffer where the request from userspace 76 + * is stored. Can be used as a unique id to match together 77 + * request and reply. 78 + * @fc: Function code. 79 + * @rc: The bare returncode as returned by the device driver ioctl 80 + * function. 81 + * @dev: The adapter nr where this request was actually processed. 82 + * @dom: Domain id of the device where this request was processed. 
83 + * 84 + * Called upon recognising the reply from the crypto adapter. This 85 + * message may act as the exit timestamp for the request but also 86 + * carries some info about on which adapter the request was processed 87 + * and the returncode from the device driver. 88 + */ 89 + TRACE_EVENT(s390_zcrypt_rep, 90 + TP_PROTO(void *ptr, u32 fc, u32 rc, u16 dev, u16 dom), 91 + TP_ARGS(ptr, fc, rc, dev, dom), 92 + TP_STRUCT__entry( 93 + __field(void *, ptr) 94 + __field(u32, fc) 95 + __field(u32, rc) 96 + __field(u16, device) 97 + __field(u16, domain)), 98 + TP_fast_assign( 99 + __entry->ptr = ptr; 100 + __entry->fc = fc; 101 + __entry->rc = rc; 102 + __entry->device = dev; 103 + __entry->domain = dom;), 104 + TP_printk("ptr=%p fc=0x%04x rc=%d dev=0x%02hx domain=0x%04hx", 105 + __entry->ptr, 106 + (unsigned int) __entry->fc, 107 + (int) __entry->rc, 108 + (unsigned short) __entry->device, 109 + (unsigned short) __entry->domain) 110 + ); 111 + 112 + #endif /* _TRACE_S390_ZCRYPT_H */ 113 + 114 + /* This part must be outside protection */ 115 + 116 + #undef TRACE_INCLUDE_PATH 117 + #undef TRACE_INCLUDE_FILE 118 + 119 + #define TRACE_INCLUDE_PATH asm/trace 120 + #define TRACE_INCLUDE_FILE zcrypt 121 + 122 + #include <trace/define_trace.h>
+37
arch/s390/include/uapi/asm/zcrypt.h
··· 215 215 uint64_t resp; 216 216 } __attribute__((packed)); 217 217 218 + /** 219 + * struct zcrypt_device_status 220 + * @hwtype: raw hardware type 221 + * @qid: 6 bit device index, 8 bit domain 222 + * @functions: AP device function bit field 'abcdef' 223 + * a, b, c = reserved 224 + * d = CCA coprocessor 225 + * e = Accelerator 226 + * f = EP11 coprocessor 227 + * @online online status 228 + * @reserved reserved 229 + */ 230 + struct zcrypt_device_status { 231 + unsigned int hwtype:8; 232 + unsigned int qid:14; 233 + unsigned int online:1; 234 + unsigned int functions:6; 235 + unsigned int reserved:3; 236 + }; 237 + 238 + #define MAX_ZDEV_CARDIDS 64 239 + #define MAX_ZDEV_DOMAINS 256 240 + 241 + /** 242 + * Maximum number of zcrypt devices 243 + */ 244 + #define MAX_ZDEV_ENTRIES (MAX_ZDEV_CARDIDS * MAX_ZDEV_DOMAINS) 245 + 246 + /** 247 + * zcrypt_device_matrix 248 + * Device matrix of all zcrypt devices 249 + */ 250 + struct zcrypt_device_matrix { 251 + struct zcrypt_device_status device[MAX_ZDEV_ENTRIES]; 252 + }; 253 + 218 254 #define AUTOSELECT ((unsigned int)0xFFFFFFFF) 219 255 220 256 #define ZCRYPT_IOCTL_MAGIC 'z' ··· 357 321 #define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) 358 322 #define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) 359 323 #define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0) 324 + #define ZDEVICESTATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0) 360 325 361 326 /* New status calls */ 362 327 #define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int)
+1 -1
arch/s390/kernel/early.c
··· 417 417 " brctg %[n],0b\n" 418 418 "1:\n" 419 419 : [addr] "=&d" (addr), 420 - [psw_pgm_addr] "=&Q" (S390_lowcore.program_new_psw.addr), 420 + [psw_pgm_addr] "=Q" (S390_lowcore.program_new_psw.addr), 421 421 [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n) 422 422 : [incr] "d" (incr) 423 423 : "cc", "memory");
+10 -9
arch/s390/kernel/nmi.c
··· 102 102 { 103 103 int kill_task; 104 104 u64 zero; 105 - void *fpt_save_area, *fpt_creg_save_area; 105 + void *fpt_save_area; 106 106 107 107 kill_task = 0; 108 108 zero = 0; ··· 130 130 kill_task = 1; 131 131 } 132 132 fpt_save_area = &S390_lowcore.floating_pt_save_area; 133 - fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; 134 133 if (!mci.fc) { 135 134 /* 136 135 * Floating point control register can't be restored. ··· 141 142 */ 142 143 if (S390_lowcore.fpu_flags & KERNEL_FPC) 143 144 s390_handle_damage(); 144 - asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); 145 + asm volatile("lfpc %0" : : "Q" (zero)); 145 146 if (!test_cpu_flag(CIF_FPU)) 146 147 kill_task = 1; 147 - } else 148 - asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); 148 + } else { 149 + asm volatile("lfpc %0" 150 + : : "Q" (S390_lowcore.fpt_creg_save_area)); 151 + } 149 152 150 153 if (!MACHINE_HAS_VX) { 151 154 /* Validate floating point registers */ ··· 168 167 " ld 13,104(%0)\n" 169 168 " ld 14,112(%0)\n" 170 169 " ld 15,120(%0)\n" 171 - : : "a" (fpt_save_area)); 170 + : : "a" (fpt_save_area) : "memory"); 172 171 } else { 173 172 /* Validate vector registers */ 174 173 union ctlreg0 cr0; ··· 218 217 } else { 219 218 asm volatile( 220 219 " lctlg 0,15,0(%0)" 221 - : : "a" (&S390_lowcore.cregs_save_area)); 220 + : : "a" (&S390_lowcore.cregs_save_area) : "memory"); 222 221 } 223 222 /* 224 223 * We don't even try to validate the TOD register, since we simply ··· 235 234 : : : "0", "cc"); 236 235 else 237 236 asm volatile( 238 - " l 0,0(%0)\n" 237 + " l 0,%0\n" 239 238 " sckpf" 240 - : : "a" (&S390_lowcore.tod_progreg_save_area) 239 + : : "Q" (S390_lowcore.tod_progreg_save_area) 241 240 : "0", "cc"); 242 241 /* Validate clock comparator register */ 243 242 set_clock_comparator(S390_lowcore.clock_comparator);
+2 -2
arch/s390/kernel/setup.c
··· 485 485 max_pfn = max_low_pfn = PFN_DOWN(memory_end); 486 486 memblock_remove(memory_end, ULONG_MAX); 487 487 488 - pr_notice("Max memory size: %luMB\n", memory_end >> 20); 488 + pr_notice("The maximum memory size is %luMB\n", memory_end >> 20); 489 489 } 490 490 491 491 static void __init setup_vmcoreinfo(void) ··· 650 650 #ifdef CONFIG_BLK_DEV_INITRD 651 651 if (INITRD_START && INITRD_SIZE && 652 652 !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) { 653 - pr_err("initrd does not fit memory.\n"); 653 + pr_err("The initial RAM disk does not fit into the memory\n"); 654 654 memblock_free(INITRD_START, INITRD_SIZE); 655 655 initrd_start = initrd_end = 0; 656 656 }
+6 -6
arch/s390/lib/string.c
··· 20 20 21 21 asm volatile ("0: srst %0,%1\n" 22 22 " jo 0b" 23 - : "+d" (r0), "+a" (s) : : "cc" ); 23 + : "+d" (r0), "+a" (s) : : "cc", "memory"); 24 24 return (char *) r0; 25 25 } 26 26 ··· 31 31 32 32 asm volatile ("0: srst %0,%1\n" 33 33 " jo 0b" 34 - : "+d" (p), "+a" (s) : "d" (r0) : "cc" ); 34 + : "+d" (p), "+a" (s) : "d" (r0) : "cc", "memory"); 35 35 return (char *) p; 36 36 } 37 37 ··· 213 213 " sr %0,%1\n" 214 214 "1:" 215 215 : "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct) 216 - : : "cc" ); 216 + : : "cc", "memory"); 217 217 return ret; 218 218 } 219 219 EXPORT_SYMBOL(strcmp); ··· 250 250 " ipm %0\n" 251 251 " srl %0,28" 252 252 : "=&d" (cc), "+a" (r2), "+a" (r3), 253 - "+a" (r4), "+a" (r5) : : "cc"); 253 + "+a" (r4), "+a" (r5) : : "cc", "memory"); 254 254 return cc; 255 255 } 256 256 ··· 298 298 " jl 1f\n" 299 299 " la %0,0\n" 300 300 "1:" 301 - : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); 301 + : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory"); 302 302 return (void *) ret; 303 303 } 304 304 EXPORT_SYMBOL(memchr); ··· 336 336 337 337 asm volatile ("0: srst %0,%1\n" 338 338 " jo 0b\n" 339 - : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); 339 + : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory"); 340 340 return (void *) ret; 341 341 } 342 342 EXPORT_SYMBOL(memscan);
+1 -1
arch/s390/mm/extmem.c
··· 122 122 "1: la %2,3\n" 123 123 "2:\n" 124 124 EX_TABLE(0b, 1b) 125 - : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); 125 + : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc", "memory"); 126 126 127 127 kfree(name); 128 128 /* Diag x'64' new subcodes are supported, set to new subcodes */
+1 -1
arch/s390/pci/pci.c
··· 180 180 { 181 181 struct mod_pci_args args = { 0, 0, 0, 0 }; 182 182 183 - if (zdev->fmb) 183 + if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length) 184 184 return -EINVAL; 185 185 186 186 zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
+1
arch/s390/pci/pci_clp.c
··· 148 148 zdev->pft = response->pft; 149 149 zdev->vfn = response->vfn; 150 150 zdev->uid = response->uid; 151 + zdev->fmb_length = sizeof(u32) * response->fmb_len; 151 152 152 153 memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip)); 153 154 if (response->util_str_avail) {
+3 -3
drivers/s390/block/dasd_3990_erp.c
··· 674 674 break; 675 675 case 0x0D: 676 676 dev_warn(&device->cdev->dev, 677 - "FORMAT 4 - No syn byte in count " 677 + "FORMAT 4 - No sync byte in count " 678 678 "address area; offset active\n"); 679 679 break; 680 680 case 0x0E: ··· 684 684 break; 685 685 case 0x0F: 686 686 dev_warn(&device->cdev->dev, 687 - "FORMAT 4 - No syn byte in data area; " 687 + "FORMAT 4 - No sync byte in data area; " 688 688 "offset active\n"); 689 689 break; 690 690 default: ··· 999 999 break; 1000 1000 default: 1001 1001 dev_warn(&device->cdev->dev, 1002 - "FORMAT D - Reserved\n"); 1002 + "FORMAT F - Reserved\n"); 1003 1003 } 1004 1004 break; 1005 1005
+7 -6
drivers/s390/crypto/Makefile
··· 2 2 # S/390 crypto devices 3 3 # 4 4 5 - ap-objs := ap_bus.o 6 - # zcrypt_api depends on ap 7 - obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o 8 - # msgtype* depend on zcrypt_api 9 - obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o 10 - # adapter drivers depend on ap, zcrypt_api and msgtype* 5 + ap-objs := ap_bus.o ap_card.o ap_queue.o 6 + obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o 7 + # zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o 8 + zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o 9 + zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o 10 + obj-$(CONFIG_ZCRYPT) += zcrypt.o 11 + # adapter drivers depend on ap.o and zcrypt.o 11 12 obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
+191
drivers/s390/crypto/ap_asm.h
··· 1 + /* 2 + * Copyright IBM Corp. 2016 3 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 4 + * 5 + * Adjunct processor bus inline assemblies. 6 + */ 7 + 8 + #ifndef _AP_ASM_H_ 9 + #define _AP_ASM_H_ 10 + 11 + #include <asm/isc.h> 12 + 13 + /** 14 + * ap_intructions_available() - Test if AP instructions are available. 15 + * 16 + * Returns 0 if the AP instructions are installed. 17 + */ 18 + static inline int ap_instructions_available(void) 19 + { 20 + register unsigned long reg0 asm ("0") = AP_MKQID(0, 0); 21 + register unsigned long reg1 asm ("1") = -ENODEV; 22 + register unsigned long reg2 asm ("2") = 0UL; 23 + 24 + asm volatile( 25 + " .long 0xb2af0000\n" /* PQAP(TAPQ) */ 26 + "0: la %1,0\n" 27 + "1:\n" 28 + EX_TABLE(0b, 1b) 29 + : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc"); 30 + return reg1; 31 + } 32 + 33 + /** 34 + * ap_tapq(): Test adjunct processor queue. 35 + * @qid: The AP queue number 36 + * @info: Pointer to queue descriptor 37 + * 38 + * Returns AP queue status structure. 39 + */ 40 + static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info) 41 + { 42 + register unsigned long reg0 asm ("0") = qid; 43 + register struct ap_queue_status reg1 asm ("1"); 44 + register unsigned long reg2 asm ("2") = 0UL; 45 + 46 + asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */ 47 + : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); 48 + if (info) 49 + *info = reg2; 50 + return reg1; 51 + } 52 + 53 + /** 54 + * ap_pqap_rapq(): Reset adjunct processor queue. 55 + * @qid: The AP queue number 56 + * 57 + * Returns AP queue status structure. 
58 + */ 59 + static inline struct ap_queue_status ap_rapq(ap_qid_t qid) 60 + { 61 + register unsigned long reg0 asm ("0") = qid | 0x01000000UL; 62 + register struct ap_queue_status reg1 asm ("1"); 63 + register unsigned long reg2 asm ("2") = 0UL; 64 + 65 + asm volatile( 66 + ".long 0xb2af0000" /* PQAP(RAPQ) */ 67 + : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); 68 + return reg1; 69 + } 70 + 71 + /** 72 + * ap_aqic(): Enable interruption for a specific AP. 73 + * @qid: The AP queue number 74 + * @ind: The notification indicator byte 75 + * 76 + * Returns AP queue status. 77 + */ 78 + static inline struct ap_queue_status ap_aqic(ap_qid_t qid, void *ind) 79 + { 80 + register unsigned long reg0 asm ("0") = qid | (3UL << 24); 81 + register unsigned long reg1_in asm ("1") = (8UL << 44) | AP_ISC; 82 + register struct ap_queue_status reg1_out asm ("1"); 83 + register void *reg2 asm ("2") = ind; 84 + 85 + asm volatile( 86 + ".long 0xb2af0000" /* PQAP(AQIC) */ 87 + : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2) 88 + : 89 + : "cc"); 90 + return reg1_out; 91 + } 92 + 93 + /** 94 + * ap_qci(): Get AP configuration data 95 + * 96 + * Returns 0 on success, or -EOPNOTSUPP. 97 + */ 98 + static inline int ap_qci(void *config) 99 + { 100 + register unsigned long reg0 asm ("0") = 0x04000000UL; 101 + register unsigned long reg1 asm ("1") = -EINVAL; 102 + register void *reg2 asm ("2") = (void *) config; 103 + 104 + asm volatile( 105 + ".long 0xb2af0000\n" /* PQAP(QCI) */ 106 + "0: la %1,0\n" 107 + "1:\n" 108 + EX_TABLE(0b, 1b) 109 + : "+d" (reg0), "+d" (reg1), "+d" (reg2) 110 + : 111 + : "cc", "memory"); 112 + 113 + return reg1; 114 + } 115 + 116 + /** 117 + * ap_nqap(): Send message to adjunct processor queue. 118 + * @qid: The AP queue number 119 + * @psmid: The program supplied message identifier 120 + * @msg: The message text 121 + * @length: The message length 122 + * 123 + * Returns AP queue status structure. 
124 + * Condition code 1 on NQAP can't happen because the L bit is 1. 125 + * Condition code 2 on NQAP also means the send is incomplete, 126 + * because a segment boundary was reached. The NQAP is repeated. 127 + */ 128 + static inline struct ap_queue_status ap_nqap(ap_qid_t qid, 129 + unsigned long long psmid, 130 + void *msg, size_t length) 131 + { 132 + struct msgblock { char _[length]; }; 133 + register unsigned long reg0 asm ("0") = qid | 0x40000000UL; 134 + register struct ap_queue_status reg1 asm ("1"); 135 + register unsigned long reg2 asm ("2") = (unsigned long) msg; 136 + register unsigned long reg3 asm ("3") = (unsigned long) length; 137 + register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32); 138 + register unsigned long reg5 asm ("5") = psmid & 0xffffffff; 139 + 140 + asm volatile ( 141 + "0: .long 0xb2ad0042\n" /* NQAP */ 142 + " brc 2,0b" 143 + : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3) 144 + : "d" (reg4), "d" (reg5), "m" (*(struct msgblock *) msg) 145 + : "cc"); 146 + return reg1; 147 + } 148 + 149 + /** 150 + * ap_dqap(): Receive message from adjunct processor queue. 151 + * @qid: The AP queue number 152 + * @psmid: Pointer to program supplied message identifier 153 + * @msg: The message text 154 + * @length: The message length 155 + * 156 + * Returns AP queue status structure. 157 + * Condition code 1 on DQAP means the receive has taken place 158 + * but only partially. The response is incomplete, hence the 159 + * DQAP is repeated. 160 + * Condition code 2 on DQAP also means the receive is incomplete, 161 + * this time because a segment boundary was reached. Again, the 162 + * DQAP is repeated. 163 + * Note that gpr2 is used by the DQAP instruction to keep track of 164 + * any 'residual' length, in case the instruction gets interrupted. 165 + * Hence it gets zeroed before the instruction. 
166 + */ 167 + static inline struct ap_queue_status ap_dqap(ap_qid_t qid, 168 + unsigned long long *psmid, 169 + void *msg, size_t length) 170 + { 171 + struct msgblock { char _[length]; }; 172 + register unsigned long reg0 asm("0") = qid | 0x80000000UL; 173 + register struct ap_queue_status reg1 asm ("1"); 174 + register unsigned long reg2 asm("2") = 0UL; 175 + register unsigned long reg4 asm("4") = (unsigned long) msg; 176 + register unsigned long reg5 asm("5") = (unsigned long) length; 177 + register unsigned long reg6 asm("6") = 0UL; 178 + register unsigned long reg7 asm("7") = 0UL; 179 + 180 + 181 + asm volatile( 182 + "0: .long 0xb2ae0064\n" /* DQAP */ 183 + " brc 6,0b\n" 184 + : "+d" (reg0), "=d" (reg1), "+d" (reg2), 185 + "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7), 186 + "=m" (*(struct msgblock *) msg) : : "cc"); 187 + *psmid = (((unsigned long long) reg6) << 32) + reg7; 188 + return reg1; 189 + } 190 + 191 + #endif /* _AP_ASM_H_ */
+360 -973
drivers/s390/crypto/ap_bus.c
··· 46 46 #include <linux/ktime.h> 47 47 #include <asm/facility.h> 48 48 #include <linux/crypto.h> 49 + #include <linux/mod_devicetable.h> 50 + #include <linux/debugfs.h> 49 51 50 52 #include "ap_bus.h" 53 + #include "ap_asm.h" 54 + #include "ap_debug.h" 51 55 52 56 /* 53 57 * Module description. ··· 66 62 * Module parameter 67 63 */ 68 64 int ap_domain_index = -1; /* Adjunct Processor Domain Index */ 65 + static DEFINE_SPINLOCK(ap_domain_lock); 69 66 module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP); 70 67 MODULE_PARM_DESC(domain, "domain index for ap devices"); 71 68 EXPORT_SYMBOL(ap_domain_index); ··· 75 70 module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP); 76 71 MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); 77 72 78 - static struct device *ap_root_device = NULL; 73 + static struct device *ap_root_device; 74 + 75 + DEFINE_SPINLOCK(ap_list_lock); 76 + LIST_HEAD(ap_card_list); 77 + 79 78 static struct ap_config_info *ap_configuration; 80 - static DEFINE_SPINLOCK(ap_device_list_lock); 81 - static LIST_HEAD(ap_device_list); 82 79 static bool initialised; 80 + 81 + /* 82 + * AP bus related debug feature things. 83 + */ 84 + static struct dentry *ap_dbf_root; 85 + debug_info_t *ap_dbf_info; 83 86 84 87 /* 85 88 * Workqueue timer for bus rescan. ··· 102 89 */ 103 90 static void ap_tasklet_fn(unsigned long); 104 91 static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0); 105 - static atomic_t ap_poll_requests = ATOMIC_INIT(0); 106 92 static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); 107 93 static struct task_struct *ap_poll_kthread = NULL; 108 94 static DEFINE_MUTEX(ap_poll_thread_mutex); ··· 141 129 } 142 130 143 131 /** 144 - * ap_intructions_available() - Test if AP instructions are available. 132 + * ap_airq_ptr() - Get the address of the adapter interrupt indicator 145 133 * 146 - * Returns 0 if the AP instructions are installed. 
134 + * Returns the address of the local-summary-indicator of the adapter 135 + * interrupt handler for AP, or NULL if adapter interrupts are not 136 + * available. 147 137 */ 148 - static inline int ap_instructions_available(void) 138 + void *ap_airq_ptr(void) 149 139 { 150 - register unsigned long reg0 asm ("0") = AP_MKQID(0,0); 151 - register unsigned long reg1 asm ("1") = -ENODEV; 152 - register unsigned long reg2 asm ("2") = 0UL; 153 - 154 - asm volatile( 155 - " .long 0xb2af0000\n" /* PQAP(TAPQ) */ 156 - "0: la %1,0\n" 157 - "1:\n" 158 - EX_TABLE(0b, 1b) 159 - : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" ); 160 - return reg1; 140 + if (ap_using_interrupts()) 141 + return ap_airq.lsi_ptr; 142 + return NULL; 161 143 } 162 144 163 145 /** ··· 175 169 return test_facility(12); 176 170 } 177 171 178 - static inline struct ap_queue_status 179 - __pqap_tapq(ap_qid_t qid, unsigned long *info) 180 - { 181 - register unsigned long reg0 asm ("0") = qid; 182 - register struct ap_queue_status reg1 asm ("1"); 183 - register unsigned long reg2 asm ("2") = 0UL; 184 - 185 - asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */ 186 - : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); 187 - *info = reg2; 188 - return reg1; 189 - } 190 - 191 172 /** 192 173 * ap_test_queue(): Test adjunct processor queue. 193 174 * @qid: The AP queue number ··· 185 192 static inline struct ap_queue_status 186 193 ap_test_queue(ap_qid_t qid, unsigned long *info) 187 194 { 188 - struct ap_queue_status aqs; 189 - unsigned long _info; 190 - 191 195 if (test_facility(15)) 192 196 qid |= 1UL << 23; /* set APFT T bit*/ 193 - aqs = __pqap_tapq(qid, &_info); 194 - if (info) 195 - *info = _info; 196 - return aqs; 197 - } 198 - 199 - /** 200 - * ap_reset_queue(): Reset adjunct processor queue. 201 - * @qid: The AP queue number 202 - * 203 - * Returns AP queue status structure. 
204 - */ 205 - static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid) 206 - { 207 - register unsigned long reg0 asm ("0") = qid | 0x01000000UL; 208 - register struct ap_queue_status reg1 asm ("1"); 209 - register unsigned long reg2 asm ("2") = 0UL; 210 - 211 - asm volatile( 212 - ".long 0xb2af0000" /* PQAP(RAPQ) */ 213 - : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); 214 - return reg1; 215 - } 216 - 217 - /** 218 - * ap_queue_interruption_control(): Enable interruption for a specific AP. 219 - * @qid: The AP queue number 220 - * @ind: The notification indicator byte 221 - * 222 - * Returns AP queue status. 223 - */ 224 - static inline struct ap_queue_status 225 - ap_queue_interruption_control(ap_qid_t qid, void *ind) 226 - { 227 - register unsigned long reg0 asm ("0") = qid | 0x03000000UL; 228 - register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC; 229 - register struct ap_queue_status reg1_out asm ("1"); 230 - register void *reg2 asm ("2") = ind; 231 - asm volatile( 232 - ".long 0xb2af0000" /* PQAP(AQIC) */ 233 - : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2) 234 - : 235 - : "cc" ); 236 - return reg1_out; 237 - } 238 - 239 - /** 240 - * ap_query_configuration(): Get AP configuration data 241 - * 242 - * Returns 0 on success, or -EOPNOTSUPP. 
243 - */ 244 - static inline int __ap_query_configuration(void) 245 - { 246 - register unsigned long reg0 asm ("0") = 0x04000000UL; 247 - register unsigned long reg1 asm ("1") = -EINVAL; 248 - register void *reg2 asm ("2") = (void *) ap_configuration; 249 - 250 - asm volatile( 251 - ".long 0xb2af0000\n" /* PQAP(QCI) */ 252 - "0: la %1,0\n" 253 - "1:\n" 254 - EX_TABLE(0b, 1b) 255 - : "+d" (reg0), "+d" (reg1), "+d" (reg2) 256 - : 257 - : "cc"); 258 - 259 - return reg1; 197 + return ap_tapq(qid, info); 260 198 } 261 199 262 200 static inline int ap_query_configuration(void) 263 201 { 264 202 if (!ap_configuration) 265 203 return -EOPNOTSUPP; 266 - return __ap_query_configuration(); 204 + return ap_qci(ap_configuration); 267 205 } 268 206 269 207 /** ··· 255 331 } 256 332 257 333 /** 258 - * ap_queue_enable_interruption(): Enable interruption on an AP. 259 - * @qid: The AP queue number 260 - * @ind: the notification indicator byte 261 - * 262 - * Enables interruption on AP queue via ap_queue_interruption_control(). Based 263 - * on the return value it waits a while and tests the AP queue if interrupts 264 - * have been switched on using ap_test_queue(). 
265 - */ 266 - static int ap_queue_enable_interruption(struct ap_device *ap_dev, void *ind) 267 - { 268 - struct ap_queue_status status; 269 - 270 - status = ap_queue_interruption_control(ap_dev->qid, ind); 271 - switch (status.response_code) { 272 - case AP_RESPONSE_NORMAL: 273 - case AP_RESPONSE_OTHERWISE_CHANGED: 274 - return 0; 275 - case AP_RESPONSE_Q_NOT_AVAIL: 276 - case AP_RESPONSE_DECONFIGURED: 277 - case AP_RESPONSE_CHECKSTOPPED: 278 - case AP_RESPONSE_INVALID_ADDRESS: 279 - pr_err("Registering adapter interrupts for AP %d failed\n", 280 - AP_QID_DEVICE(ap_dev->qid)); 281 - return -EOPNOTSUPP; 282 - case AP_RESPONSE_RESET_IN_PROGRESS: 283 - case AP_RESPONSE_BUSY: 284 - default: 285 - return -EBUSY; 286 - } 287 - } 288 - 289 - static inline struct ap_queue_status 290 - __nqap(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) 291 - { 292 - typedef struct { char _[length]; } msgblock; 293 - register unsigned long reg0 asm ("0") = qid | 0x40000000UL; 294 - register struct ap_queue_status reg1 asm ("1"); 295 - register unsigned long reg2 asm ("2") = (unsigned long) msg; 296 - register unsigned long reg3 asm ("3") = (unsigned long) length; 297 - register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32); 298 - register unsigned long reg5 asm ("5") = psmid & 0xffffffff; 299 - 300 - asm volatile ( 301 - "0: .long 0xb2ad0042\n" /* NQAP */ 302 - " brc 2,0b" 303 - : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3) 304 - : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg) 305 - : "cc"); 306 - return reg1; 307 - } 308 - 309 - /** 310 - * __ap_send(): Send message to adjunct processor queue. 311 - * @qid: The AP queue number 312 - * @psmid: The program supplied message identifier 313 - * @msg: The message text 314 - * @length: The message length 315 - * @special: Special Bit 316 - * 317 - * Returns AP queue status structure. 318 - * Condition code 1 on NQAP can't happen because the L bit is 1. 
319 - * Condition code 2 on NQAP also means the send is incomplete, 320 - * because a segment boundary was reached. The NQAP is repeated. 321 - */ 322 - static inline struct ap_queue_status 323 - __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length, 324 - unsigned int special) 325 - { 326 - if (special == 1) 327 - qid |= 0x400000UL; 328 - return __nqap(qid, psmid, msg, length); 329 - } 330 - 331 - int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) 332 - { 333 - struct ap_queue_status status; 334 - 335 - status = __ap_send(qid, psmid, msg, length, 0); 336 - switch (status.response_code) { 337 - case AP_RESPONSE_NORMAL: 338 - return 0; 339 - case AP_RESPONSE_Q_FULL: 340 - case AP_RESPONSE_RESET_IN_PROGRESS: 341 - return -EBUSY; 342 - case AP_RESPONSE_REQ_FAC_NOT_INST: 343 - return -EINVAL; 344 - default: /* Device is gone. */ 345 - return -ENODEV; 346 - } 347 - } 348 - EXPORT_SYMBOL(ap_send); 349 - 350 - /** 351 - * __ap_recv(): Receive message from adjunct processor queue. 352 - * @qid: The AP queue number 353 - * @psmid: Pointer to program supplied message identifier 354 - * @msg: The message text 355 - * @length: The message length 356 - * 357 - * Returns AP queue status structure. 358 - * Condition code 1 on DQAP means the receive has taken place 359 - * but only partially. The response is incomplete, hence the 360 - * DQAP is repeated. 361 - * Condition code 2 on DQAP also means the receive is incomplete, 362 - * this time because a segment boundary was reached. Again, the 363 - * DQAP is repeated. 364 - * Note that gpr2 is used by the DQAP instruction to keep track of 365 - * any 'residual' length, in case the instruction gets interrupted. 366 - * Hence it gets zeroed before the instruction. 
367 - */ 368 - static inline struct ap_queue_status 369 - __ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) 370 - { 371 - typedef struct { char _[length]; } msgblock; 372 - register unsigned long reg0 asm("0") = qid | 0x80000000UL; 373 - register struct ap_queue_status reg1 asm ("1"); 374 - register unsigned long reg2 asm("2") = 0UL; 375 - register unsigned long reg4 asm("4") = (unsigned long) msg; 376 - register unsigned long reg5 asm("5") = (unsigned long) length; 377 - register unsigned long reg6 asm("6") = 0UL; 378 - register unsigned long reg7 asm("7") = 0UL; 379 - 380 - 381 - asm volatile( 382 - "0: .long 0xb2ae0064\n" /* DQAP */ 383 - " brc 6,0b\n" 384 - : "+d" (reg0), "=d" (reg1), "+d" (reg2), 385 - "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7), 386 - "=m" (*(msgblock *) msg) : : "cc" ); 387 - *psmid = (((unsigned long long) reg6) << 32) + reg7; 388 - return reg1; 389 - } 390 - 391 - int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) 392 - { 393 - struct ap_queue_status status; 394 - 395 - if (msg == NULL) 396 - return -EINVAL; 397 - status = __ap_recv(qid, psmid, msg, length); 398 - switch (status.response_code) { 399 - case AP_RESPONSE_NORMAL: 400 - return 0; 401 - case AP_RESPONSE_NO_PENDING_REPLY: 402 - if (status.queue_empty) 403 - return -ENOENT; 404 - return -EBUSY; 405 - case AP_RESPONSE_RESET_IN_PROGRESS: 406 - return -EBUSY; 407 - default: 408 - return -ENODEV; 409 - } 410 - } 411 - EXPORT_SYMBOL(ap_recv); 412 - 413 - /** 414 334 * ap_query_queue(): Check if an AP queue is available. 
415 335 * @qid: The AP queue number 416 336 * @queue_depth: Pointer to queue depth value ··· 268 500 unsigned long info; 269 501 int nd; 270 502 271 - if (!ap_test_config_card_id(AP_QID_DEVICE(qid))) 503 + if (!ap_test_config_card_id(AP_QID_CARD(qid))) 272 504 return -ENODEV; 273 505 274 506 status = ap_test_queue(qid, &info); ··· 279 511 *facilities = (unsigned int)(info >> 32); 280 512 /* Update maximum domain id */ 281 513 nd = (info >> 16) & 0xff; 514 + /* if N bit is available, z13 and newer */ 282 515 if ((info & (1UL << 57)) && nd > 0) 283 516 ap_max_domain_id = nd; 517 + else /* older machine types */ 518 + ap_max_domain_id = 15; 519 + switch (*device_type) { 520 + /* For CEX2 and CEX3 the available functions 521 + * are not refrected by the facilities bits. 522 + * Instead it is coded into the type. So here 523 + * modify the function bits based on the type. 524 + */ 525 + case AP_DEVICE_TYPE_CEX2A: 526 + case AP_DEVICE_TYPE_CEX3A: 527 + *facilities |= 0x08000000; 528 + break; 529 + case AP_DEVICE_TYPE_CEX2C: 530 + case AP_DEVICE_TYPE_CEX3C: 531 + *facilities |= 0x10000000; 532 + break; 533 + default: 534 + break; 535 + } 284 536 return 0; 285 537 case AP_RESPONSE_Q_NOT_AVAIL: 286 538 case AP_RESPONSE_DECONFIGURED: ··· 316 528 } 317 529 } 318 530 319 - /* State machine definitions and helpers */ 320 - 321 - static void ap_sm_wait(enum ap_wait wait) 531 + void ap_wait(enum ap_wait wait) 322 532 { 323 533 ktime_t hr_time; 324 534 ··· 345 559 } 346 560 } 347 561 348 - static enum ap_wait ap_sm_nop(struct ap_device *ap_dev) 349 - { 350 - return AP_WAIT_NONE; 351 - } 352 - 353 - /** 354 - * ap_sm_recv(): Receive pending reply messages from an AP device but do 355 - * not change the state of the device. 
356 - * @ap_dev: pointer to the AP device 357 - * 358 - * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 359 - */ 360 - static struct ap_queue_status ap_sm_recv(struct ap_device *ap_dev) 361 - { 362 - struct ap_queue_status status; 363 - struct ap_message *ap_msg; 364 - 365 - status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid, 366 - ap_dev->reply->message, ap_dev->reply->length); 367 - switch (status.response_code) { 368 - case AP_RESPONSE_NORMAL: 369 - atomic_dec(&ap_poll_requests); 370 - ap_dev->queue_count--; 371 - if (ap_dev->queue_count > 0) 372 - mod_timer(&ap_dev->timeout, 373 - jiffies + ap_dev->drv->request_timeout); 374 - list_for_each_entry(ap_msg, &ap_dev->pendingq, list) { 375 - if (ap_msg->psmid != ap_dev->reply->psmid) 376 - continue; 377 - list_del_init(&ap_msg->list); 378 - ap_dev->pendingq_count--; 379 - ap_msg->receive(ap_dev, ap_msg, ap_dev->reply); 380 - break; 381 - } 382 - case AP_RESPONSE_NO_PENDING_REPLY: 383 - if (!status.queue_empty || ap_dev->queue_count <= 0) 384 - break; 385 - /* The card shouldn't forget requests but who knows. */ 386 - atomic_sub(ap_dev->queue_count, &ap_poll_requests); 387 - ap_dev->queue_count = 0; 388 - list_splice_init(&ap_dev->pendingq, &ap_dev->requestq); 389 - ap_dev->requestq_count += ap_dev->pendingq_count; 390 - ap_dev->pendingq_count = 0; 391 - break; 392 - default: 393 - break; 394 - } 395 - return status; 396 - } 397 - 398 - /** 399 - * ap_sm_read(): Receive pending reply messages from an AP device. 
400 - * @ap_dev: pointer to the AP device 401 - * 402 - * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 403 - */ 404 - static enum ap_wait ap_sm_read(struct ap_device *ap_dev) 405 - { 406 - struct ap_queue_status status; 407 - 408 - if (!ap_dev->reply) 409 - return AP_WAIT_NONE; 410 - status = ap_sm_recv(ap_dev); 411 - switch (status.response_code) { 412 - case AP_RESPONSE_NORMAL: 413 - if (ap_dev->queue_count > 0) { 414 - ap_dev->state = AP_STATE_WORKING; 415 - return AP_WAIT_AGAIN; 416 - } 417 - ap_dev->state = AP_STATE_IDLE; 418 - return AP_WAIT_NONE; 419 - case AP_RESPONSE_NO_PENDING_REPLY: 420 - if (ap_dev->queue_count > 0) 421 - return AP_WAIT_INTERRUPT; 422 - ap_dev->state = AP_STATE_IDLE; 423 - return AP_WAIT_NONE; 424 - default: 425 - ap_dev->state = AP_STATE_BORKED; 426 - return AP_WAIT_NONE; 427 - } 428 - } 429 - 430 - /** 431 - * ap_sm_suspend_read(): Receive pending reply messages from an AP device 432 - * without changing the device state in between. In suspend mode we don't 433 - * allow sending new requests, therefore just fetch pending replies. 434 - * @ap_dev: pointer to the AP device 435 - * 436 - * Returns AP_WAIT_NONE or AP_WAIT_AGAIN 437 - */ 438 - static enum ap_wait ap_sm_suspend_read(struct ap_device *ap_dev) 439 - { 440 - struct ap_queue_status status; 441 - 442 - if (!ap_dev->reply) 443 - return AP_WAIT_NONE; 444 - status = ap_sm_recv(ap_dev); 445 - switch (status.response_code) { 446 - case AP_RESPONSE_NORMAL: 447 - if (ap_dev->queue_count > 0) 448 - return AP_WAIT_AGAIN; 449 - /* fall through */ 450 - default: 451 - return AP_WAIT_NONE; 452 - } 453 - } 454 - 455 - /** 456 - * ap_sm_write(): Send messages from the request queue to an AP device. 
457 - * @ap_dev: pointer to the AP device 458 - * 459 - * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 460 - */ 461 - static enum ap_wait ap_sm_write(struct ap_device *ap_dev) 462 - { 463 - struct ap_queue_status status; 464 - struct ap_message *ap_msg; 465 - 466 - if (ap_dev->requestq_count <= 0) 467 - return AP_WAIT_NONE; 468 - /* Start the next request on the queue. */ 469 - ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list); 470 - status = __ap_send(ap_dev->qid, ap_msg->psmid, 471 - ap_msg->message, ap_msg->length, ap_msg->special); 472 - switch (status.response_code) { 473 - case AP_RESPONSE_NORMAL: 474 - atomic_inc(&ap_poll_requests); 475 - ap_dev->queue_count++; 476 - if (ap_dev->queue_count == 1) 477 - mod_timer(&ap_dev->timeout, 478 - jiffies + ap_dev->drv->request_timeout); 479 - list_move_tail(&ap_msg->list, &ap_dev->pendingq); 480 - ap_dev->requestq_count--; 481 - ap_dev->pendingq_count++; 482 - if (ap_dev->queue_count < ap_dev->queue_depth) { 483 - ap_dev->state = AP_STATE_WORKING; 484 - return AP_WAIT_AGAIN; 485 - } 486 - /* fall through */ 487 - case AP_RESPONSE_Q_FULL: 488 - ap_dev->state = AP_STATE_QUEUE_FULL; 489 - return AP_WAIT_INTERRUPT; 490 - case AP_RESPONSE_RESET_IN_PROGRESS: 491 - ap_dev->state = AP_STATE_RESET_WAIT; 492 - return AP_WAIT_TIMEOUT; 493 - case AP_RESPONSE_MESSAGE_TOO_BIG: 494 - case AP_RESPONSE_REQ_FAC_NOT_INST: 495 - list_del_init(&ap_msg->list); 496 - ap_dev->requestq_count--; 497 - ap_msg->rc = -EINVAL; 498 - ap_msg->receive(ap_dev, ap_msg, NULL); 499 - return AP_WAIT_AGAIN; 500 - default: 501 - ap_dev->state = AP_STATE_BORKED; 502 - return AP_WAIT_NONE; 503 - } 504 - } 505 - 506 - /** 507 - * ap_sm_read_write(): Send and receive messages to/from an AP device. 
508 - * @ap_dev: pointer to the AP device 509 - * 510 - * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 511 - */ 512 - static enum ap_wait ap_sm_read_write(struct ap_device *ap_dev) 513 - { 514 - return min(ap_sm_read(ap_dev), ap_sm_write(ap_dev)); 515 - } 516 - 517 - /** 518 - * ap_sm_reset(): Reset an AP queue. 519 - * @qid: The AP queue number 520 - * 521 - * Submit the Reset command to an AP queue. 522 - */ 523 - static enum ap_wait ap_sm_reset(struct ap_device *ap_dev) 524 - { 525 - struct ap_queue_status status; 526 - 527 - status = ap_reset_queue(ap_dev->qid); 528 - switch (status.response_code) { 529 - case AP_RESPONSE_NORMAL: 530 - case AP_RESPONSE_RESET_IN_PROGRESS: 531 - ap_dev->state = AP_STATE_RESET_WAIT; 532 - ap_dev->interrupt = AP_INTR_DISABLED; 533 - return AP_WAIT_TIMEOUT; 534 - case AP_RESPONSE_BUSY: 535 - return AP_WAIT_TIMEOUT; 536 - case AP_RESPONSE_Q_NOT_AVAIL: 537 - case AP_RESPONSE_DECONFIGURED: 538 - case AP_RESPONSE_CHECKSTOPPED: 539 - default: 540 - ap_dev->state = AP_STATE_BORKED; 541 - return AP_WAIT_NONE; 542 - } 543 - } 544 - 545 - /** 546 - * ap_sm_reset_wait(): Test queue for completion of the reset operation 547 - * @ap_dev: pointer to the AP device 548 - * 549 - * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0. 550 - */ 551 - static enum ap_wait ap_sm_reset_wait(struct ap_device *ap_dev) 552 - { 553 - struct ap_queue_status status; 554 - unsigned long info; 555 - 556 - if (ap_dev->queue_count > 0 && ap_dev->reply) 557 - /* Try to read a completed message and get the status */ 558 - status = ap_sm_recv(ap_dev); 559 - else 560 - /* Get the status with TAPQ */ 561 - status = ap_test_queue(ap_dev->qid, &info); 562 - 563 - switch (status.response_code) { 564 - case AP_RESPONSE_NORMAL: 565 - if (ap_using_interrupts() && 566 - ap_queue_enable_interruption(ap_dev, 567 - ap_airq.lsi_ptr) == 0) 568 - ap_dev->state = AP_STATE_SETIRQ_WAIT; 569 - else 570 - ap_dev->state = (ap_dev->queue_count > 0) ? 
571 - AP_STATE_WORKING : AP_STATE_IDLE; 572 - return AP_WAIT_AGAIN; 573 - case AP_RESPONSE_BUSY: 574 - case AP_RESPONSE_RESET_IN_PROGRESS: 575 - return AP_WAIT_TIMEOUT; 576 - case AP_RESPONSE_Q_NOT_AVAIL: 577 - case AP_RESPONSE_DECONFIGURED: 578 - case AP_RESPONSE_CHECKSTOPPED: 579 - default: 580 - ap_dev->state = AP_STATE_BORKED; 581 - return AP_WAIT_NONE; 582 - } 583 - } 584 - 585 - /** 586 - * ap_sm_setirq_wait(): Test queue for completion of the irq enablement 587 - * @ap_dev: pointer to the AP device 588 - * 589 - * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0. 590 - */ 591 - static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev) 592 - { 593 - struct ap_queue_status status; 594 - unsigned long info; 595 - 596 - if (ap_dev->queue_count > 0 && ap_dev->reply) 597 - /* Try to read a completed message and get the status */ 598 - status = ap_sm_recv(ap_dev); 599 - else 600 - /* Get the status with TAPQ */ 601 - status = ap_test_queue(ap_dev->qid, &info); 602 - 603 - if (status.int_enabled == 1) { 604 - /* Irqs are now enabled */ 605 - ap_dev->interrupt = AP_INTR_ENABLED; 606 - ap_dev->state = (ap_dev->queue_count > 0) ? 
607 - AP_STATE_WORKING : AP_STATE_IDLE; 608 - } 609 - 610 - switch (status.response_code) { 611 - case AP_RESPONSE_NORMAL: 612 - if (ap_dev->queue_count > 0) 613 - return AP_WAIT_AGAIN; 614 - /* fallthrough */ 615 - case AP_RESPONSE_NO_PENDING_REPLY: 616 - return AP_WAIT_TIMEOUT; 617 - default: 618 - ap_dev->state = AP_STATE_BORKED; 619 - return AP_WAIT_NONE; 620 - } 621 - } 622 - 623 - /* 624 - * AP state machine jump table 625 - */ 626 - static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = { 627 - [AP_STATE_RESET_START] = { 628 - [AP_EVENT_POLL] = ap_sm_reset, 629 - [AP_EVENT_TIMEOUT] = ap_sm_nop, 630 - }, 631 - [AP_STATE_RESET_WAIT] = { 632 - [AP_EVENT_POLL] = ap_sm_reset_wait, 633 - [AP_EVENT_TIMEOUT] = ap_sm_nop, 634 - }, 635 - [AP_STATE_SETIRQ_WAIT] = { 636 - [AP_EVENT_POLL] = ap_sm_setirq_wait, 637 - [AP_EVENT_TIMEOUT] = ap_sm_nop, 638 - }, 639 - [AP_STATE_IDLE] = { 640 - [AP_EVENT_POLL] = ap_sm_write, 641 - [AP_EVENT_TIMEOUT] = ap_sm_nop, 642 - }, 643 - [AP_STATE_WORKING] = { 644 - [AP_EVENT_POLL] = ap_sm_read_write, 645 - [AP_EVENT_TIMEOUT] = ap_sm_reset, 646 - }, 647 - [AP_STATE_QUEUE_FULL] = { 648 - [AP_EVENT_POLL] = ap_sm_read, 649 - [AP_EVENT_TIMEOUT] = ap_sm_reset, 650 - }, 651 - [AP_STATE_SUSPEND_WAIT] = { 652 - [AP_EVENT_POLL] = ap_sm_suspend_read, 653 - [AP_EVENT_TIMEOUT] = ap_sm_nop, 654 - }, 655 - [AP_STATE_BORKED] = { 656 - [AP_EVENT_POLL] = ap_sm_nop, 657 - [AP_EVENT_TIMEOUT] = ap_sm_nop, 658 - }, 659 - }; 660 - 661 - static inline enum ap_wait ap_sm_event(struct ap_device *ap_dev, 662 - enum ap_event event) 663 - { 664 - return ap_jumptable[ap_dev->state][event](ap_dev); 665 - } 666 - 667 - static inline enum ap_wait ap_sm_event_loop(struct ap_device *ap_dev, 668 - enum ap_event event) 669 - { 670 - enum ap_wait wait; 671 - 672 - while ((wait = ap_sm_event(ap_dev, event)) == AP_WAIT_AGAIN) 673 - ; 674 - return wait; 675 - } 676 - 677 562 /** 678 563 * ap_request_timeout(): Handling of request timeouts 679 564 * @data: Holds the AP 
device. 680 565 * 681 566 * Handles request timeouts. 682 567 */ 683 - static void ap_request_timeout(unsigned long data) 568 + void ap_request_timeout(unsigned long data) 684 569 { 685 - struct ap_device *ap_dev = (struct ap_device *) data; 570 + struct ap_queue *aq = (struct ap_queue *) data; 686 571 687 572 if (ap_suspend_flag) 688 573 return; 689 - spin_lock_bh(&ap_dev->lock); 690 - ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_TIMEOUT)); 691 - spin_unlock_bh(&ap_dev->lock); 574 + spin_lock_bh(&aq->lock); 575 + ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT)); 576 + spin_unlock_bh(&aq->lock); 692 577 } 693 578 694 579 /** ··· 394 937 */ 395 938 static void ap_tasklet_fn(unsigned long dummy) 396 939 { 397 - struct ap_device *ap_dev; 940 + struct ap_card *ac; 941 + struct ap_queue *aq; 398 942 enum ap_wait wait = AP_WAIT_NONE; 399 943 400 944 /* Reset the indicator if interrupts are used. Thus new interrupts can ··· 405 947 if (ap_using_interrupts()) 406 948 xchg(ap_airq.lsi_ptr, 0); 407 949 408 - spin_lock(&ap_device_list_lock); 409 - list_for_each_entry(ap_dev, &ap_device_list, list) { 410 - spin_lock_bh(&ap_dev->lock); 411 - wait = min(wait, ap_sm_event_loop(ap_dev, AP_EVENT_POLL)); 412 - spin_unlock_bh(&ap_dev->lock); 950 + spin_lock_bh(&ap_list_lock); 951 + for_each_ap_card(ac) { 952 + for_each_ap_queue(aq, ac) { 953 + spin_lock_bh(&aq->lock); 954 + wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL)); 955 + spin_unlock_bh(&aq->lock); 956 + } 413 957 } 414 - spin_unlock(&ap_device_list_lock); 415 - ap_sm_wait(wait); 958 + spin_unlock_bh(&ap_list_lock); 959 + 960 + ap_wait(wait); 961 + } 962 + 963 + static int ap_pending_requests(void) 964 + { 965 + struct ap_card *ac; 966 + struct ap_queue *aq; 967 + 968 + spin_lock_bh(&ap_list_lock); 969 + for_each_ap_card(ac) { 970 + for_each_ap_queue(aq, ac) { 971 + if (aq->queue_count == 0) 972 + continue; 973 + spin_unlock_bh(&ap_list_lock); 974 + return 1; 975 + } 976 + } 977 + spin_unlock_bh(&ap_list_lock); 978 + return 0; 416 
979 } 417 980 418 981 /** ··· 455 976 while (!kthread_should_stop()) { 456 977 add_wait_queue(&ap_poll_wait, &wait); 457 978 set_current_state(TASK_INTERRUPTIBLE); 458 - if (ap_suspend_flag || 459 - atomic_read(&ap_poll_requests) <= 0) { 979 + if (ap_suspend_flag || !ap_pending_requests()) { 460 980 schedule(); 461 981 try_to_freeze(); 462 982 } ··· 467 989 continue; 468 990 } 469 991 ap_tasklet_fn(0); 470 - } while (!kthread_should_stop()); 992 + } 993 + 471 994 return 0; 472 995 } 473 996 ··· 497 1018 mutex_unlock(&ap_poll_thread_mutex); 498 1019 } 499 1020 500 - /** 501 - * ap_queue_message(): Queue a request to an AP device. 502 - * @ap_dev: The AP device to queue the message to 503 - * @ap_msg: The message that is to be added 504 - */ 505 - void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 506 - { 507 - /* For asynchronous message handling a valid receive-callback 508 - * is required. */ 509 - BUG_ON(!ap_msg->receive); 510 - 511 - spin_lock_bh(&ap_dev->lock); 512 - /* Queue the message. */ 513 - list_add_tail(&ap_msg->list, &ap_dev->requestq); 514 - ap_dev->requestq_count++; 515 - ap_dev->total_request_count++; 516 - /* Send/receive as many request from the queue as possible. */ 517 - ap_sm_wait(ap_sm_event_loop(ap_dev, AP_EVENT_POLL)); 518 - spin_unlock_bh(&ap_dev->lock); 519 - } 520 - EXPORT_SYMBOL(ap_queue_message); 521 - 522 - /** 523 - * ap_cancel_message(): Cancel a crypto request. 524 - * @ap_dev: The AP device that has the message queued 525 - * @ap_msg: The message that is to be removed 526 - * 527 - * Cancel a crypto request. This is done by removing the request 528 - * from the device pending or request queue. Note that the 529 - * request stays on the AP queue. When it finishes the message 530 - * reply will be discarded because the psmid can't be found. 
531 - */ 532 - void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 533 - { 534 - struct ap_message *tmp; 535 - 536 - spin_lock_bh(&ap_dev->lock); 537 - if (!list_empty(&ap_msg->list)) { 538 - list_for_each_entry(tmp, &ap_dev->pendingq, list) 539 - if (tmp->psmid == ap_msg->psmid) { 540 - ap_dev->pendingq_count--; 541 - goto found; 542 - } 543 - ap_dev->requestq_count--; 544 - found: 545 - list_del_init(&ap_msg->list); 546 - } 547 - spin_unlock_bh(&ap_dev->lock); 548 - } 549 - EXPORT_SYMBOL(ap_cancel_message); 550 - 551 - /* 552 - * AP device related attributes. 553 - */ 554 - static ssize_t ap_hwtype_show(struct device *dev, 555 - struct device_attribute *attr, char *buf) 556 - { 557 - struct ap_device *ap_dev = to_ap_dev(dev); 558 - return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type); 559 - } 560 - 561 - static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); 562 - 563 - static ssize_t ap_raw_hwtype_show(struct device *dev, 564 - struct device_attribute *attr, char *buf) 565 - { 566 - struct ap_device *ap_dev = to_ap_dev(dev); 567 - 568 - return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->raw_hwtype); 569 - } 570 - 571 - static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL); 572 - 573 - static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, 574 - char *buf) 575 - { 576 - struct ap_device *ap_dev = to_ap_dev(dev); 577 - return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth); 578 - } 579 - 580 - static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); 581 - static ssize_t ap_request_count_show(struct device *dev, 582 - struct device_attribute *attr, 583 - char *buf) 584 - { 585 - struct ap_device *ap_dev = to_ap_dev(dev); 586 - int rc; 587 - 588 - spin_lock_bh(&ap_dev->lock); 589 - rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count); 590 - spin_unlock_bh(&ap_dev->lock); 591 - return rc; 592 - } 593 - 594 - static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); 595 - 596 
- static ssize_t ap_requestq_count_show(struct device *dev, 597 - struct device_attribute *attr, char *buf) 598 - { 599 - struct ap_device *ap_dev = to_ap_dev(dev); 600 - int rc; 601 - 602 - spin_lock_bh(&ap_dev->lock); 603 - rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count); 604 - spin_unlock_bh(&ap_dev->lock); 605 - return rc; 606 - } 607 - 608 - static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL); 609 - 610 - static ssize_t ap_pendingq_count_show(struct device *dev, 611 - struct device_attribute *attr, char *buf) 612 - { 613 - struct ap_device *ap_dev = to_ap_dev(dev); 614 - int rc; 615 - 616 - spin_lock_bh(&ap_dev->lock); 617 - rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count); 618 - spin_unlock_bh(&ap_dev->lock); 619 - return rc; 620 - } 621 - 622 - static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL); 623 - 624 - static ssize_t ap_reset_show(struct device *dev, 625 - struct device_attribute *attr, char *buf) 626 - { 627 - struct ap_device *ap_dev = to_ap_dev(dev); 628 - int rc = 0; 629 - 630 - spin_lock_bh(&ap_dev->lock); 631 - switch (ap_dev->state) { 632 - case AP_STATE_RESET_START: 633 - case AP_STATE_RESET_WAIT: 634 - rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n"); 635 - break; 636 - case AP_STATE_WORKING: 637 - case AP_STATE_QUEUE_FULL: 638 - rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n"); 639 - break; 640 - default: 641 - rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n"); 642 - } 643 - spin_unlock_bh(&ap_dev->lock); 644 - return rc; 645 - } 646 - 647 - static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL); 648 - 649 - static ssize_t ap_interrupt_show(struct device *dev, 650 - struct device_attribute *attr, char *buf) 651 - { 652 - struct ap_device *ap_dev = to_ap_dev(dev); 653 - int rc = 0; 654 - 655 - spin_lock_bh(&ap_dev->lock); 656 - if (ap_dev->state == AP_STATE_SETIRQ_WAIT) 657 - rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n"); 658 - else if 
(ap_dev->interrupt == AP_INTR_ENABLED) 659 - rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n"); 660 - else 661 - rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n"); 662 - spin_unlock_bh(&ap_dev->lock); 663 - return rc; 664 - } 665 - 666 - static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL); 667 - 668 - static ssize_t ap_modalias_show(struct device *dev, 669 - struct device_attribute *attr, char *buf) 670 - { 671 - return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type); 672 - } 673 - 674 - static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL); 675 - 676 - static ssize_t ap_functions_show(struct device *dev, 677 - struct device_attribute *attr, char *buf) 678 - { 679 - struct ap_device *ap_dev = to_ap_dev(dev); 680 - return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions); 681 - } 682 - 683 - static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL); 684 - 685 - static struct attribute *ap_dev_attrs[] = { 686 - &dev_attr_hwtype.attr, 687 - &dev_attr_raw_hwtype.attr, 688 - &dev_attr_depth.attr, 689 - &dev_attr_request_count.attr, 690 - &dev_attr_requestq_count.attr, 691 - &dev_attr_pendingq_count.attr, 692 - &dev_attr_reset.attr, 693 - &dev_attr_interrupt.attr, 694 - &dev_attr_modalias.attr, 695 - &dev_attr_ap_functions.attr, 696 - NULL 697 - }; 698 - static struct attribute_group ap_dev_attr_group = { 699 - .attrs = ap_dev_attrs 700 - }; 1021 + #define is_card_dev(x) ((x)->parent == ap_root_device) 1022 + #define is_queue_dev(x) ((x)->parent != ap_root_device) 701 1023 702 1024 /** 703 1025 * ap_bus_match() ··· 509 1229 */ 510 1230 static int ap_bus_match(struct device *dev, struct device_driver *drv) 511 1231 { 512 - struct ap_device *ap_dev = to_ap_dev(dev); 513 1232 struct ap_driver *ap_drv = to_ap_drv(drv); 514 1233 struct ap_device_id *id; 515 1234 ··· 517 1238 * supported types of the device_driver. 
518 1239 */ 519 1240 for (id = ap_drv->ids; id->match_flags; id++) { 520 - if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) && 521 - (id->dev_type != ap_dev->device_type)) 522 - continue; 523 - return 1; 1241 + if (is_card_dev(dev) && 1242 + id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE && 1243 + id->dev_type == to_ap_dev(dev)->device_type) 1244 + return 1; 1245 + if (is_queue_dev(dev) && 1246 + id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE && 1247 + id->dev_type == to_ap_dev(dev)->device_type) 1248 + return 1; 524 1249 } 525 1250 return 0; 526 1251 } ··· 560 1277 { 561 1278 struct ap_device *ap_dev = to_ap_dev(dev); 562 1279 563 - /* Poll on the device until all requests are finished. */ 564 - spin_lock_bh(&ap_dev->lock); 565 - ap_dev->state = AP_STATE_SUSPEND_WAIT; 566 - while (ap_sm_event(ap_dev, AP_EVENT_POLL) != AP_WAIT_NONE) 567 - ; 568 - ap_dev->state = AP_STATE_BORKED; 569 - spin_unlock_bh(&ap_dev->lock); 1280 + if (ap_dev->drv && ap_dev->drv->suspend) 1281 + ap_dev->drv->suspend(ap_dev); 1282 + return 0; 1283 + } 1284 + 1285 + static int ap_dev_resume(struct device *dev) 1286 + { 1287 + struct ap_device *ap_dev = to_ap_dev(dev); 1288 + 1289 + if (ap_dev->drv && ap_dev->drv->resume) 1290 + ap_dev->drv->resume(ap_dev); 570 1291 return 0; 571 1292 } 572 1293 573 1294 static void ap_bus_suspend(void) 574 1295 { 1296 + AP_DBF(DBF_DEBUG, "ap_bus_suspend running\n"); 1297 + 575 1298 ap_suspend_flag = 1; 576 1299 /* 577 1300 * Disable scanning for devices, thus we do not want to scan ··· 587 1298 tasklet_disable(&ap_tasklet); 588 1299 } 589 1300 590 - static int __ap_devices_unregister(struct device *dev, void *dummy) 1301 + static int __ap_card_devices_unregister(struct device *dev, void *dummy) 591 1302 { 592 - device_unregister(dev); 1303 + if (is_card_dev(dev)) 1304 + device_unregister(dev); 1305 + return 0; 1306 + } 1307 + 1308 + static int __ap_queue_devices_unregister(struct device *dev, void *dummy) 1309 + { 1310 + if (is_queue_dev(dev)) 1311 + 
device_unregister(dev); 1312 + return 0; 1313 + } 1314 + 1315 + static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data) 1316 + { 1317 + if (is_queue_dev(dev) && 1318 + AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long) data) 1319 + device_unregister(dev); 593 1320 return 0; 594 1321 } 595 1322 ··· 613 1308 { 614 1309 int rc; 615 1310 616 - /* Unconditionally remove all AP devices */ 617 - bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister); 1311 + AP_DBF(DBF_DEBUG, "ap_bus_resume running\n"); 1312 + 1313 + /* remove all queue devices */ 1314 + bus_for_each_dev(&ap_bus_type, NULL, NULL, 1315 + __ap_queue_devices_unregister); 1316 + /* remove all card devices */ 1317 + bus_for_each_dev(&ap_bus_type, NULL, NULL, 1318 + __ap_card_devices_unregister); 1319 + 618 1320 /* Reset thin interrupt setting */ 619 1321 if (ap_interrupts_available() && !ap_using_interrupts()) { 620 1322 rc = register_adapter_interrupt(&ap_airq); ··· 663 1351 .notifier_call = ap_power_event, 664 1352 }; 665 1353 666 - static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, NULL); 1354 + static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, ap_dev_resume); 667 1355 668 1356 static struct bus_type ap_bus_type = { 669 1357 .name = "ap", ··· 671 1359 .uevent = &ap_uevent, 672 1360 .pm = &ap_bus_pm_ops, 673 1361 }; 674 - 675 - void ap_device_init_reply(struct ap_device *ap_dev, 676 - struct ap_message *reply) 677 - { 678 - ap_dev->reply = reply; 679 - 680 - spin_lock_bh(&ap_dev->lock); 681 - ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL)); 682 - spin_unlock_bh(&ap_dev->lock); 683 - } 684 - EXPORT_SYMBOL(ap_device_init_reply); 685 1362 686 1363 static int ap_device_probe(struct device *dev) 687 1364 { ··· 685 1384 return rc; 686 1385 } 687 1386 688 - /** 689 - * __ap_flush_queue(): Flush requests. 690 - * @ap_dev: Pointer to the AP device 691 - * 692 - * Flush all requests from the request/pending queue of an AP device. 
693 - */ 694 - static void __ap_flush_queue(struct ap_device *ap_dev) 695 - { 696 - struct ap_message *ap_msg, *next; 697 - 698 - list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) { 699 - list_del_init(&ap_msg->list); 700 - ap_dev->pendingq_count--; 701 - ap_msg->rc = -EAGAIN; 702 - ap_msg->receive(ap_dev, ap_msg, NULL); 703 - } 704 - list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) { 705 - list_del_init(&ap_msg->list); 706 - ap_dev->requestq_count--; 707 - ap_msg->rc = -EAGAIN; 708 - ap_msg->receive(ap_dev, ap_msg, NULL); 709 - } 710 - } 711 - 712 - void ap_flush_queue(struct ap_device *ap_dev) 713 - { 714 - spin_lock_bh(&ap_dev->lock); 715 - __ap_flush_queue(ap_dev); 716 - spin_unlock_bh(&ap_dev->lock); 717 - } 718 - EXPORT_SYMBOL(ap_flush_queue); 719 - 720 1387 static int ap_device_remove(struct device *dev) 721 1388 { 722 1389 struct ap_device *ap_dev = to_ap_dev(dev); 723 1390 struct ap_driver *ap_drv = ap_dev->drv; 724 1391 725 - ap_flush_queue(ap_dev); 726 - del_timer_sync(&ap_dev->timeout); 727 - spin_lock_bh(&ap_device_list_lock); 728 - list_del_init(&ap_dev->list); 729 - spin_unlock_bh(&ap_device_list_lock); 1392 + spin_lock_bh(&ap_list_lock); 1393 + if (is_card_dev(dev)) 1394 + list_del_init(&to_ap_card(dev)->list); 1395 + else 1396 + list_del_init(&to_ap_queue(dev)->list); 1397 + spin_unlock_bh(&ap_list_lock); 730 1398 if (ap_drv->remove) 731 1399 ap_drv->remove(ap_dev); 732 - spin_lock_bh(&ap_dev->lock); 733 - atomic_sub(ap_dev->queue_count, &ap_poll_requests); 734 - spin_unlock_bh(&ap_dev->lock); 735 1400 return 0; 736 - } 737 - 738 - static void ap_device_release(struct device *dev) 739 - { 740 - kfree(to_ap_dev(dev)); 741 1401 } 742 1402 743 1403 int ap_driver_register(struct ap_driver *ap_drv, struct module *owner, ··· 743 1481 return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index); 744 1482 } 745 1483 746 - static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL); 1484 + static ssize_t ap_domain_store(struct bus_type 
*bus, 1485 + const char *buf, size_t count) 1486 + { 1487 + int domain; 1488 + 1489 + if (sscanf(buf, "%i\n", &domain) != 1 || 1490 + domain < 0 || domain > ap_max_domain_id) 1491 + return -EINVAL; 1492 + spin_lock_bh(&ap_domain_lock); 1493 + ap_domain_index = domain; 1494 + spin_unlock_bh(&ap_domain_lock); 1495 + 1496 + AP_DBF(DBF_DEBUG, "store new default domain=%d\n", domain); 1497 + 1498 + return count; 1499 + } 1500 + 1501 + static BUS_ATTR(ap_domain, 0644, ap_domain_show, ap_domain_store); 747 1502 748 1503 static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf) 749 1504 { 750 1505 if (!ap_configuration) /* QCI not supported */ 751 1506 return snprintf(buf, PAGE_SIZE, "not supported\n"); 752 - if (!test_facility(76)) 753 - /* format 0 - 16 bit domain field */ 754 - return snprintf(buf, PAGE_SIZE, "%08x%08x\n", 755 - ap_configuration->adm[0], 756 - ap_configuration->adm[1]); 757 - /* format 1 - 256 bit domain field */ 1507 + 758 1508 return snprintf(buf, PAGE_SIZE, 759 1509 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 760 1510 ap_configuration->adm[0], ap_configuration->adm[1], ··· 777 1503 778 1504 static BUS_ATTR(ap_control_domain_mask, 0444, 779 1505 ap_control_domain_mask_show, NULL); 1506 + 1507 + static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf) 1508 + { 1509 + if (!ap_configuration) /* QCI not supported */ 1510 + return snprintf(buf, PAGE_SIZE, "not supported\n"); 1511 + 1512 + return snprintf(buf, PAGE_SIZE, 1513 + "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 1514 + ap_configuration->aqm[0], ap_configuration->aqm[1], 1515 + ap_configuration->aqm[2], ap_configuration->aqm[3], 1516 + ap_configuration->aqm[4], ap_configuration->aqm[5], 1517 + ap_configuration->aqm[6], ap_configuration->aqm[7]); 1518 + } 1519 + 1520 + static BUS_ATTR(ap_usage_domain_mask, 0444, 1521 + ap_usage_domain_mask_show, NULL); 780 1522 781 1523 static ssize_t ap_config_time_show(struct bus_type *bus, char *buf) 782 1524 { ··· 889 1599 static struct 
bus_attribute *const ap_bus_attrs[] = { 890 1600 &bus_attr_ap_domain, 891 1601 &bus_attr_ap_control_domain_mask, 1602 + &bus_attr_ap_usage_domain_mask, 892 1603 &bus_attr_config_time, 893 1604 &bus_attr_poll_thread, 894 1605 &bus_attr_ap_interrupts, ··· 914 1623 * the "domain=" parameter or the domain with the maximum number 915 1624 * of devices. 916 1625 */ 917 - if (ap_domain_index >= 0) 1626 + spin_lock_bh(&ap_domain_lock); 1627 + if (ap_domain_index >= 0) { 918 1628 /* Domain has already been selected. */ 1629 + spin_unlock_bh(&ap_domain_lock); 919 1630 return 0; 1631 + } 920 1632 best_domain = -1; 921 1633 max_count = 0; 922 1634 for (i = 0; i < AP_DOMAINS; i++) { ··· 941 1647 } 942 1648 if (best_domain >= 0){ 943 1649 ap_domain_index = best_domain; 1650 + spin_unlock_bh(&ap_domain_lock); 944 1651 return 0; 945 1652 } 1653 + spin_unlock_bh(&ap_domain_lock); 946 1654 return -ENODEV; 947 1655 } 948 1656 949 - /** 950 - * __ap_scan_bus(): Scan the AP bus. 951 - * @dev: Pointer to device 952 - * @data: Pointer to data 953 - * 954 - * Scan the AP bus for new devices. 
1657 + /* 1658 + * helper function to be used with bus_find_dev 1659 + * matches for the card device with the given id 955 1660 */ 956 - static int __ap_scan_bus(struct device *dev, void *data) 1661 + static int __match_card_device_with_id(struct device *dev, void *data) 957 1662 { 958 - return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data; 1663 + return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long) data; 959 1664 } 960 1665 1666 + /* helper function to be used with bus_find_dev 1667 + * matches for the queue device with a given qid 1668 + */ 1669 + static int __match_queue_device_with_qid(struct device *dev, void *data) 1670 + { 1671 + return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data; 1672 + } 1673 + 1674 + /** 1675 + * ap_scan_bus(): Scan the AP bus for new devices 1676 + * Runs periodically, workqueue timer (ap_config_time) 1677 + */ 961 1678 static void ap_scan_bus(struct work_struct *unused) 962 1679 { 963 - struct ap_device *ap_dev; 1680 + struct ap_queue *aq; 1681 + struct ap_card *ac; 964 1682 struct device *dev; 965 1683 ap_qid_t qid; 966 - int queue_depth = 0, device_type = 0; 967 - unsigned int device_functions = 0; 968 - int rc, i, borked; 1684 + int depth = 0, type = 0; 1685 + unsigned int functions = 0; 1686 + int rc, id, dom, borked, domains; 1687 + 1688 + AP_DBF(DBF_DEBUG, "ap_scan_bus running\n"); 969 1689 970 1690 ap_query_configuration(); 971 1691 if (ap_select_domain() != 0) 972 1692 goto out; 973 1693 974 - for (i = 0; i < AP_DEVICES; i++) { 975 - qid = AP_MKQID(i, ap_domain_index); 1694 + for (id = 0; id < AP_DEVICES; id++) { 1695 + /* check if device is registered */ 976 1696 dev = bus_find_device(&ap_bus_type, NULL, 977 - (void *)(unsigned long)qid, 978 - __ap_scan_bus); 979 - rc = ap_query_queue(qid, &queue_depth, &device_type, 980 - &device_functions); 981 - if (dev) { 982 - ap_dev = to_ap_dev(dev); 983 - spin_lock_bh(&ap_dev->lock); 984 - if (rc == -ENODEV) 985 - ap_dev->state = AP_STATE_BORKED; 986 - 
borked = ap_dev->state == AP_STATE_BORKED; 987 - spin_unlock_bh(&ap_dev->lock); 988 - if (borked) /* Remove broken device */ 1697 + (void *)(long) id, 1698 + __match_card_device_with_id); 1699 + ac = dev ? to_ap_card(dev) : NULL; 1700 + if (!ap_test_config_card_id(id)) { 1701 + if (dev) { 1702 + /* Card device has been removed from 1703 + * configuration, remove the belonging 1704 + * queue devices. 1705 + */ 1706 + bus_for_each_dev(&ap_bus_type, NULL, 1707 + (void *)(long) id, 1708 + __ap_queue_devices_with_id_unregister); 1709 + /* now remove the card device */ 989 1710 device_unregister(dev); 990 - put_device(dev); 991 - if (!borked) 1711 + put_device(dev); 1712 + } 1713 + continue; 1714 + } 1715 + /* According to the configuration there should be a card 1716 + * device, so check if there is at least one valid queue 1717 + * and maybe create queue devices and the card device. 1718 + */ 1719 + domains = 0; 1720 + for (dom = 0; dom < AP_DOMAINS; dom++) { 1721 + qid = AP_MKQID(id, dom); 1722 + dev = bus_find_device(&ap_bus_type, NULL, 1723 + (void *)(long) qid, 1724 + __match_queue_device_with_qid); 1725 + aq = dev ? to_ap_queue(dev) : NULL; 1726 + if (!ap_test_config_domain(dom)) { 1727 + if (dev) { 1728 + /* Queue device exists but has been 1729 + * removed from configuration. 
1730 + */ 1731 + device_unregister(dev); 1732 + put_device(dev); 1733 + } 992 1734 continue; 1735 + } 1736 + rc = ap_query_queue(qid, &depth, &type, &functions); 1737 + if (dev) { 1738 + spin_lock_bh(&aq->lock); 1739 + if (rc == -ENODEV || 1740 + /* adapter reconfiguration */ 1741 + (ac && ac->functions != functions)) 1742 + aq->state = AP_STATE_BORKED; 1743 + borked = aq->state == AP_STATE_BORKED; 1744 + spin_unlock_bh(&aq->lock); 1745 + if (borked) /* Remove broken device */ 1746 + device_unregister(dev); 1747 + put_device(dev); 1748 + if (!borked) { 1749 + domains++; 1750 + continue; 1751 + } 1752 + } 1753 + if (rc) 1754 + continue; 1755 + /* new queue device needed */ 1756 + if (!ac) { 1757 + /* but first create the card device */ 1758 + ac = ap_card_create(id, depth, 1759 + type, functions); 1760 + if (!ac) 1761 + continue; 1762 + ac->ap_dev.device.bus = &ap_bus_type; 1763 + ac->ap_dev.device.parent = ap_root_device; 1764 + dev_set_name(&ac->ap_dev.device, 1765 + "card%02x", id); 1766 + /* Register card with AP bus */ 1767 + rc = device_register(&ac->ap_dev.device); 1768 + if (rc) { 1769 + put_device(&ac->ap_dev.device); 1770 + ac = NULL; 1771 + break; 1772 + } 1773 + /* get it and thus adjust reference counter */ 1774 + get_device(&ac->ap_dev.device); 1775 + /* Add card device to card list */ 1776 + spin_lock_bh(&ap_list_lock); 1777 + list_add(&ac->list, &ap_card_list); 1778 + spin_unlock_bh(&ap_list_lock); 1779 + } 1780 + /* now create the new queue device */ 1781 + aq = ap_queue_create(qid, type); 1782 + if (!aq) 1783 + continue; 1784 + aq->card = ac; 1785 + aq->ap_dev.device.bus = &ap_bus_type; 1786 + aq->ap_dev.device.parent = &ac->ap_dev.device; 1787 + dev_set_name(&aq->ap_dev.device, 1788 + "%02x.%04x", id, dom); 1789 + /* Add queue device to card queue list */ 1790 + spin_lock_bh(&ap_list_lock); 1791 + list_add(&aq->list, &ac->queues); 1792 + spin_unlock_bh(&ap_list_lock); 1793 + /* Start with a device reset */ 1794 + spin_lock_bh(&aq->lock); 1795 + 
ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); 1796 + spin_unlock_bh(&aq->lock); 1797 + /* Register device */ 1798 + rc = device_register(&aq->ap_dev.device); 1799 + if (rc) { 1800 + spin_lock_bh(&ap_list_lock); 1801 + list_del_init(&aq->list); 1802 + spin_unlock_bh(&ap_list_lock); 1803 + put_device(&aq->ap_dev.device); 1804 + continue; 1805 + } 1806 + domains++; 1807 + } /* end domain loop */ 1808 + if (ac) { 1809 + /* remove card dev if there are no queue devices */ 1810 + if (!domains) 1811 + device_unregister(&ac->ap_dev.device); 1812 + put_device(&ac->ap_dev.device); 993 1813 } 994 - if (rc) 995 - continue; 996 - ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL); 997 - if (!ap_dev) 998 - break; 999 - ap_dev->qid = qid; 1000 - ap_dev->state = AP_STATE_RESET_START; 1001 - ap_dev->interrupt = AP_INTR_DISABLED; 1002 - ap_dev->queue_depth = queue_depth; 1003 - ap_dev->raw_hwtype = device_type; 1004 - ap_dev->device_type = device_type; 1005 - ap_dev->functions = device_functions; 1006 - spin_lock_init(&ap_dev->lock); 1007 - INIT_LIST_HEAD(&ap_dev->pendingq); 1008 - INIT_LIST_HEAD(&ap_dev->requestq); 1009 - INIT_LIST_HEAD(&ap_dev->list); 1010 - setup_timer(&ap_dev->timeout, ap_request_timeout, 1011 - (unsigned long) ap_dev); 1012 - 1013 - ap_dev->device.bus = &ap_bus_type; 1014 - ap_dev->device.parent = ap_root_device; 1015 - rc = dev_set_name(&ap_dev->device, "card%02x", 1016 - AP_QID_DEVICE(ap_dev->qid)); 1017 - if (rc) { 1018 - kfree(ap_dev); 1019 - continue; 1020 - } 1021 - /* Add to list of devices */ 1022 - spin_lock_bh(&ap_device_list_lock); 1023 - list_add(&ap_dev->list, &ap_device_list); 1024 - spin_unlock_bh(&ap_device_list_lock); 1025 - /* Start with a device reset */ 1026 - spin_lock_bh(&ap_dev->lock); 1027 - ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL)); 1028 - spin_unlock_bh(&ap_dev->lock); 1029 - /* Register device */ 1030 - ap_dev->device.release = ap_device_release; 1031 - rc = device_register(&ap_dev->device); 1032 - if (rc) { 1033 - 
spin_lock_bh(&ap_dev->lock); 1034 - list_del_init(&ap_dev->list); 1035 - spin_unlock_bh(&ap_dev->lock); 1036 - put_device(&ap_dev->device); 1037 - continue; 1038 - } 1039 - /* Add device attributes. */ 1040 - rc = sysfs_create_group(&ap_dev->device.kobj, 1041 - &ap_dev_attr_group); 1042 - if (rc) { 1043 - device_unregister(&ap_dev->device); 1044 - continue; 1045 - } 1046 - } 1814 + } /* end device loop */ 1047 1815 out: 1048 1816 mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); 1049 1817 } ··· 1124 1768 if (ap_domain_index == -1 || !ap_test_config_domain(ap_domain_index)) 1125 1769 return; 1126 1770 for (i = 0; i < AP_DEVICES; i++) 1127 - ap_reset_queue(AP_MKQID(i, ap_domain_index)); 1771 + ap_rapq(AP_MKQID(i, ap_domain_index)); 1128 1772 } 1129 1773 1130 1774 static void ap_reset_all(void) ··· 1137 1781 for (j = 0; j < AP_DEVICES; j++) { 1138 1782 if (!ap_test_config_card_id(j)) 1139 1783 continue; 1140 - ap_reset_queue(AP_MKQID(j, i)); 1784 + ap_rapq(AP_MKQID(j, i)); 1141 1785 } 1142 1786 } 1143 1787 } ··· 1145 1789 static struct reset_call ap_reset_call = { 1146 1790 .fn = ap_reset_all, 1147 1791 }; 1792 + 1793 + int __init ap_debug_init(void) 1794 + { 1795 + ap_dbf_root = debugfs_create_dir("ap", NULL); 1796 + ap_dbf_info = debug_register("ap", 1, 1, 1797 + DBF_MAX_SPRINTF_ARGS * sizeof(long)); 1798 + debug_register_view(ap_dbf_info, &debug_sprintf_view); 1799 + debug_set_level(ap_dbf_info, DBF_ERR); 1800 + 1801 + return 0; 1802 + } 1803 + 1804 + void ap_debug_exit(void) 1805 + { 1806 + debugfs_remove(ap_dbf_root); 1807 + debug_unregister(ap_dbf_info); 1808 + } 1148 1809 1149 1810 /** 1150 1811 * ap_module_init(): The module initialization code. 
··· 1172 1799 { 1173 1800 int max_domain_id; 1174 1801 int rc, i; 1802 + 1803 + rc = ap_debug_init(); 1804 + if (rc) 1805 + return rc; 1175 1806 1176 1807 if (ap_instructions_available() != 0) { 1177 1808 pr_warn("The hardware system does not support AP instructions\n"); ··· 1286 1909 del_timer_sync(&ap_config_timer); 1287 1910 hrtimer_cancel(&ap_poll_timer); 1288 1911 tasklet_kill(&ap_tasklet); 1289 - bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister); 1912 + 1913 + /* first remove queue devices */ 1914 + bus_for_each_dev(&ap_bus_type, NULL, NULL, 1915 + __ap_queue_devices_unregister); 1916 + /* now remove the card devices */ 1917 + bus_for_each_dev(&ap_bus_type, NULL, NULL, 1918 + __ap_card_devices_unregister); 1919 + 1920 + /* remove bus attributes */ 1290 1921 for (i = 0; ap_bus_attrs[i]; i++) 1291 1922 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); 1292 1923 unregister_pm_notifier(&ap_power_notifier); ··· 1304 1919 unregister_reset_call(&ap_reset_call); 1305 1920 if (ap_using_interrupts()) 1306 1921 unregister_adapter_interrupt(&ap_airq); 1922 + 1923 + ap_debug_exit(); 1307 1924 } 1308 1925 1309 1926 module_init(ap_module_init);
+74 -40
drivers/s390/crypto/ap_bus.h
··· 27 27 #define _AP_BUS_H_ 28 28 29 29 #include <linux/device.h> 30 - #include <linux/mod_devicetable.h> 31 30 #include <linux/types.h> 32 31 33 32 #define AP_DEVICES 64 /* Number of AP devices. */ ··· 37 38 38 39 extern int ap_domain_index; 39 40 41 + extern spinlock_t ap_list_lock; 42 + extern struct list_head ap_card_list; 43 + 40 44 /** 41 45 * The ap_qid_t identifier of an ap queue. It contains a 42 - * 6 bit device index and a 4 bit queue index (domain). 46 + * 6 bit card index and a 4 bit queue index (domain). 43 47 */ 44 48 typedef unsigned int ap_qid_t; 45 49 46 - #define AP_MKQID(_device, _queue) (((_device) & 63) << 8 | ((_queue) & 255)) 47 - #define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63) 50 + #define AP_MKQID(_card, _queue) (((_card) & 63) << 8 | ((_queue) & 255)) 51 + #define AP_QID_CARD(_qid) (((_qid) >> 8) & 63) 48 52 #define AP_QID_QUEUE(_qid) ((_qid) & 255) 49 53 50 54 /** ··· 57 55 * @queue_full: Is 1 if the queue is full 58 56 * @pad: A 4 bit pad 59 57 * @int_enabled: Shows if interrupts are enabled for the AP 60 - * @response_conde: Holds the 8 bit response code 58 + * @response_code: Holds the 8 bit response code 61 59 * @pad2: A 16 bit pad 62 60 * 63 61 * The ap queue status word is returned by all three AP functions ··· 107 105 #define AP_DEVICE_TYPE_CEX3C 9 108 106 #define AP_DEVICE_TYPE_CEX4 10 109 107 #define AP_DEVICE_TYPE_CEX5 11 108 + #define AP_DEVICE_TYPE_CEX6 12 110 109 111 110 /* 112 111 * Known function facilities ··· 169 166 170 167 int (*probe)(struct ap_device *); 171 168 void (*remove)(struct ap_device *); 172 - int request_timeout; /* request timeout in jiffies */ 169 + void (*suspend)(struct ap_device *); 170 + void (*resume)(struct ap_device *); 173 171 }; 174 172 175 173 #define to_ap_drv(x) container_of((x), struct ap_driver, driver) ··· 178 174 int ap_driver_register(struct ap_driver *, struct module *, char *); 179 175 void ap_driver_unregister(struct ap_driver *); 180 176 181 - typedef enum ap_wait 
(ap_func_t)(struct ap_device *ap_dev); 182 - 183 177 struct ap_device { 184 178 struct device device; 185 179 struct ap_driver *drv; /* Pointer to AP device driver. */ 186 - spinlock_t lock; /* Per device lock. */ 187 - struct list_head list; /* private list of all AP devices. */ 188 - 189 - enum ap_state state; /* State of the AP device. */ 190 - 191 - ap_qid_t qid; /* AP queue id. */ 192 - int queue_depth; /* AP queue depth.*/ 193 180 int device_type; /* AP device type. */ 194 - int raw_hwtype; /* AP raw hardware type. */ 195 - unsigned int functions; /* AP device function bitfield. */ 196 - struct timer_list timeout; /* Timer for request timeouts. */ 197 - 198 - int interrupt; /* indicate if interrupts are enabled */ 199 - int queue_count; /* # messages currently on AP queue. */ 200 - 201 - struct list_head pendingq; /* List of message sent to AP queue. */ 202 - int pendingq_count; /* # requests on pendingq list. */ 203 - struct list_head requestq; /* List of message yet to be sent. */ 204 - int requestq_count; /* # requests on requestq list. */ 205 - int total_request_count; /* # requests ever for this AP device. */ 206 - 207 - struct ap_message *reply; /* Per device reply message. */ 208 - 209 - void *private; /* ap driver private pointer. */ 210 181 }; 211 182 212 183 #define to_ap_dev(x) container_of((x), struct ap_device, device) 184 + 185 + struct ap_card { 186 + struct ap_device ap_dev; 187 + struct list_head list; /* Private list of AP cards. */ 188 + struct list_head queues; /* List of assoc. AP queues */ 189 + void *private; /* ap driver private pointer. */ 190 + int raw_hwtype; /* AP raw hardware type. */ 191 + unsigned int functions; /* AP device function bitfield. */ 192 + int queue_depth; /* AP queue depth.*/ 193 + int id; /* AP card number. 
*/ 194 + atomic_t total_request_count; /* # requests ever for this AP device.*/ 195 + }; 196 + 197 + #define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device) 198 + 199 + struct ap_queue { 200 + struct ap_device ap_dev; 201 + struct list_head list; /* Private list of AP queues. */ 202 + struct ap_card *card; /* Ptr to assoc. AP card. */ 203 + spinlock_t lock; /* Per device lock. */ 204 + void *private; /* ap driver private pointer. */ 205 + ap_qid_t qid; /* AP queue id. */ 206 + int interrupt; /* indicate if interrupts are enabled */ 207 + int queue_count; /* # messages currently on AP queue. */ 208 + enum ap_state state; /* State of the AP device. */ 209 + int pendingq_count; /* # requests on pendingq list. */ 210 + int requestq_count; /* # requests on requestq list. */ 211 + int total_request_count; /* # requests ever for this AP device.*/ 212 + int request_timeout; /* Request timout in jiffies. */ 213 + struct timer_list timeout; /* Timer for request timeouts. */ 214 + struct list_head pendingq; /* List of message sent to AP queue. */ 215 + struct list_head requestq; /* List of message yet to be sent. */ 216 + struct ap_message *reply; /* Per device reply message. */ 217 + }; 218 + 219 + #define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device) 220 + 221 + typedef enum ap_wait (ap_func_t)(struct ap_queue *queue); 213 222 214 223 struct ap_message { 215 224 struct list_head list; /* Request queueing. */ ··· 234 217 void *private; /* ap driver private pointer. */ 235 218 unsigned int special:1; /* Used for special commands. 
*/ 236 219 /* receive is called from tasklet context */ 237 - void (*receive)(struct ap_device *, struct ap_message *, 220 + void (*receive)(struct ap_queue *, struct ap_message *, 238 221 struct ap_message *); 239 222 }; 240 223 ··· 248 231 unsigned int adm[8]; /* AP domain mask */ 249 232 unsigned char reserved4[16]; 250 233 } __packed; 251 - 252 - #define AP_DEVICE(dt) \ 253 - .dev_type=(dt), \ 254 - .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE, 255 234 256 235 /** 257 236 * ap_init_message() - Initialize ap_message. ··· 263 250 ap_msg->receive = NULL; 264 251 } 265 252 253 + #define for_each_ap_card(_ac) \ 254 + list_for_each_entry(_ac, &ap_card_list, list) 255 + 256 + #define for_each_ap_queue(_aq, _ac) \ 257 + list_for_each_entry(_aq, &(_ac)->queues, list) 258 + 266 259 /* 267 260 * Note: don't use ap_send/ap_recv after using ap_queue_message 268 261 * for the first time. Otherwise the ap message queue will get ··· 277 258 int ap_send(ap_qid_t, unsigned long long, void *, size_t); 278 259 int ap_recv(ap_qid_t, unsigned long long *, void *, size_t); 279 260 280 - void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg); 281 - void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg); 282 - void ap_flush_queue(struct ap_device *ap_dev); 261 + enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event); 262 + enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event); 263 + 264 + void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg); 265 + void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg); 266 + void ap_flush_queue(struct ap_queue *aq); 267 + 268 + void *ap_airq_ptr(void); 269 + void ap_wait(enum ap_wait wait); 270 + void ap_request_timeout(unsigned long data); 283 271 void ap_bus_force_rescan(void); 284 - void ap_device_init_reply(struct ap_device *ap_dev, struct ap_message *ap_msg); 272 + 273 + void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg); 
274 + struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); 275 + void ap_queue_remove(struct ap_queue *aq); 276 + void ap_queue_suspend(struct ap_device *ap_dev); 277 + void ap_queue_resume(struct ap_device *ap_dev); 278 + 279 + struct ap_card *ap_card_create(int id, int queue_depth, int device_type, 280 + unsigned int device_functions); 285 281 286 282 int ap_module_init(void); 287 283 void ap_module_exit(void);
+170
drivers/s390/crypto/ap_card.c
··· 1 + /* 2 + * Copyright IBM Corp. 2016 3 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 4 + * 5 + * Adjunct processor bus, card related code. 6 + */ 7 + 8 + #define KMSG_COMPONENT "ap" 9 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + 11 + #include <linux/init.h> 12 + #include <linux/slab.h> 13 + #include <asm/facility.h> 14 + 15 + #include "ap_bus.h" 16 + #include "ap_asm.h" 17 + 18 + /* 19 + * AP card related attributes. 20 + */ 21 + static ssize_t ap_hwtype_show(struct device *dev, 22 + struct device_attribute *attr, char *buf) 23 + { 24 + struct ap_card *ac = to_ap_card(dev); 25 + 26 + return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type); 27 + } 28 + 29 + static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); 30 + 31 + static ssize_t ap_raw_hwtype_show(struct device *dev, 32 + struct device_attribute *attr, char *buf) 33 + { 34 + struct ap_card *ac = to_ap_card(dev); 35 + 36 + return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype); 37 + } 38 + 39 + static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL); 40 + 41 + static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, 42 + char *buf) 43 + { 44 + struct ap_card *ac = to_ap_card(dev); 45 + 46 + return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth); 47 + } 48 + 49 + static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); 50 + 51 + static ssize_t ap_functions_show(struct device *dev, 52 + struct device_attribute *attr, char *buf) 53 + { 54 + struct ap_card *ac = to_ap_card(dev); 55 + 56 + return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions); 57 + } 58 + 59 + static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL); 60 + 61 + static ssize_t ap_request_count_show(struct device *dev, 62 + struct device_attribute *attr, 63 + char *buf) 64 + { 65 + struct ap_card *ac = to_ap_card(dev); 66 + unsigned int req_cnt; 67 + 68 + req_cnt = 0; 69 + spin_lock_bh(&ap_list_lock); 70 + req_cnt = atomic_read(&ac->total_request_count); 71 + 
spin_unlock_bh(&ap_list_lock); 72 + return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt); 73 + } 74 + 75 + static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); 76 + 77 + static ssize_t ap_requestq_count_show(struct device *dev, 78 + struct device_attribute *attr, char *buf) 79 + { 80 + struct ap_card *ac = to_ap_card(dev); 81 + struct ap_queue *aq; 82 + unsigned int reqq_cnt; 83 + 84 + reqq_cnt = 0; 85 + spin_lock_bh(&ap_list_lock); 86 + for_each_ap_queue(aq, ac) 87 + reqq_cnt += aq->requestq_count; 88 + spin_unlock_bh(&ap_list_lock); 89 + return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt); 90 + } 91 + 92 + static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL); 93 + 94 + static ssize_t ap_pendingq_count_show(struct device *dev, 95 + struct device_attribute *attr, char *buf) 96 + { 97 + struct ap_card *ac = to_ap_card(dev); 98 + struct ap_queue *aq; 99 + unsigned int penq_cnt; 100 + 101 + penq_cnt = 0; 102 + spin_lock_bh(&ap_list_lock); 103 + for_each_ap_queue(aq, ac) 104 + penq_cnt += aq->pendingq_count; 105 + spin_unlock_bh(&ap_list_lock); 106 + return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt); 107 + } 108 + 109 + static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL); 110 + 111 + static ssize_t ap_modalias_show(struct device *dev, 112 + struct device_attribute *attr, char *buf) 113 + { 114 + return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type); 115 + } 116 + 117 + static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL); 118 + 119 + static struct attribute *ap_card_dev_attrs[] = { 120 + &dev_attr_hwtype.attr, 121 + &dev_attr_raw_hwtype.attr, 122 + &dev_attr_depth.attr, 123 + &dev_attr_ap_functions.attr, 124 + &dev_attr_request_count.attr, 125 + &dev_attr_requestq_count.attr, 126 + &dev_attr_pendingq_count.attr, 127 + &dev_attr_modalias.attr, 128 + NULL 129 + }; 130 + 131 + static struct attribute_group ap_card_dev_attr_group = { 132 + .attrs = ap_card_dev_attrs 133 + }; 134 + 135 + static const struct 
attribute_group *ap_card_dev_attr_groups[] = { 136 + &ap_card_dev_attr_group, 137 + NULL 138 + }; 139 + 140 + struct device_type ap_card_type = { 141 + .name = "ap_card", 142 + .groups = ap_card_dev_attr_groups, 143 + }; 144 + 145 + static void ap_card_device_release(struct device *dev) 146 + { 147 + kfree(to_ap_card(dev)); 148 + } 149 + 150 + struct ap_card *ap_card_create(int id, int queue_depth, int device_type, 151 + unsigned int functions) 152 + { 153 + struct ap_card *ac; 154 + 155 + ac = kzalloc(sizeof(*ac), GFP_KERNEL); 156 + if (!ac) 157 + return NULL; 158 + INIT_LIST_HEAD(&ac->queues); 159 + ac->ap_dev.device.release = ap_card_device_release; 160 + ac->ap_dev.device.type = &ap_card_type; 161 + ac->ap_dev.device_type = device_type; 162 + /* CEX6 toleration: map to CEX5 */ 163 + if (device_type == AP_DEVICE_TYPE_CEX6) 164 + ac->ap_dev.device_type = AP_DEVICE_TYPE_CEX5; 165 + ac->raw_hwtype = device_type; 166 + ac->queue_depth = queue_depth; 167 + ac->functions = functions; 168 + ac->id = id; 169 + return ac; 170 + }
+28
drivers/s390/crypto/ap_debug.h
··· 1 + /* 2 + * Copyright IBM Corp. 2016 3 + * Author(s): Harald Freudenberger <freude@de.ibm.com> 4 + */ 5 + #ifndef AP_DEBUG_H 6 + #define AP_DEBUG_H 7 + 8 + #include <asm/debug.h> 9 + 10 + #define DBF_ERR 3 /* error conditions */ 11 + #define DBF_WARN 4 /* warning conditions */ 12 + #define DBF_INFO 5 /* informational */ 13 + #define DBF_DEBUG 6 /* for debugging only */ 14 + 15 + #define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO) 16 + #define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO) 17 + 18 + #define DBF_MAX_SPRINTF_ARGS 5 19 + 20 + #define AP_DBF(...) \ 21 + debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__) 22 + 23 + extern debug_info_t *ap_dbf_info; 24 + 25 + int ap_debug_init(void); 26 + void ap_debug_exit(void); 27 + 28 + #endif /* AP_DEBUG_H */
+701
drivers/s390/crypto/ap_queue.c
··· 1 + /* 2 + * Copyright IBM Corp. 2016 3 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 4 + * 5 + * Adjunct processor bus, queue related code. 6 + */ 7 + 8 + #define KMSG_COMPONENT "ap" 9 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + 11 + #include <linux/init.h> 12 + #include <linux/slab.h> 13 + #include <asm/facility.h> 14 + 15 + #include "ap_bus.h" 16 + #include "ap_asm.h" 17 + 18 + /** 19 + * ap_queue_enable_interruption(): Enable interruption on an AP queue. 20 + * @qid: The AP queue number 21 + * @ind: the notification indicator byte 22 + * 23 + * Enables interruption on AP queue via ap_aqic(). Based on the return 24 + * value it waits a while and tests the AP queue if interrupts 25 + * have been switched on using ap_test_queue(). 26 + */ 27 + static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind) 28 + { 29 + struct ap_queue_status status; 30 + 31 + status = ap_aqic(aq->qid, ind); 32 + switch (status.response_code) { 33 + case AP_RESPONSE_NORMAL: 34 + case AP_RESPONSE_OTHERWISE_CHANGED: 35 + return 0; 36 + case AP_RESPONSE_Q_NOT_AVAIL: 37 + case AP_RESPONSE_DECONFIGURED: 38 + case AP_RESPONSE_CHECKSTOPPED: 39 + case AP_RESPONSE_INVALID_ADDRESS: 40 + pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n", 41 + AP_QID_CARD(aq->qid), 42 + AP_QID_QUEUE(aq->qid)); 43 + return -EOPNOTSUPP; 44 + case AP_RESPONSE_RESET_IN_PROGRESS: 45 + case AP_RESPONSE_BUSY: 46 + default: 47 + return -EBUSY; 48 + } 49 + } 50 + 51 + /** 52 + * __ap_send(): Send message to adjunct processor queue. 53 + * @qid: The AP queue number 54 + * @psmid: The program supplied message identifier 55 + * @msg: The message text 56 + * @length: The message length 57 + * @special: Special Bit 58 + * 59 + * Returns AP queue status structure. 60 + * Condition code 1 on NQAP can't happen because the L bit is 1. 61 + * Condition code 2 on NQAP also means the send is incomplete, 62 + * because a segment boundary was reached. The NQAP is repeated. 
63 + */ 64 + static inline struct ap_queue_status 65 + __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length, 66 + unsigned int special) 67 + { 68 + if (special == 1) 69 + qid |= 0x400000UL; 70 + return ap_nqap(qid, psmid, msg, length); 71 + } 72 + 73 + int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) 74 + { 75 + struct ap_queue_status status; 76 + 77 + status = __ap_send(qid, psmid, msg, length, 0); 78 + switch (status.response_code) { 79 + case AP_RESPONSE_NORMAL: 80 + return 0; 81 + case AP_RESPONSE_Q_FULL: 82 + case AP_RESPONSE_RESET_IN_PROGRESS: 83 + return -EBUSY; 84 + case AP_RESPONSE_REQ_FAC_NOT_INST: 85 + return -EINVAL; 86 + default: /* Device is gone. */ 87 + return -ENODEV; 88 + } 89 + } 90 + EXPORT_SYMBOL(ap_send); 91 + 92 + int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) 93 + { 94 + struct ap_queue_status status; 95 + 96 + if (msg == NULL) 97 + return -EINVAL; 98 + status = ap_dqap(qid, psmid, msg, length); 99 + switch (status.response_code) { 100 + case AP_RESPONSE_NORMAL: 101 + return 0; 102 + case AP_RESPONSE_NO_PENDING_REPLY: 103 + if (status.queue_empty) 104 + return -ENOENT; 105 + return -EBUSY; 106 + case AP_RESPONSE_RESET_IN_PROGRESS: 107 + return -EBUSY; 108 + default: 109 + return -ENODEV; 110 + } 111 + } 112 + EXPORT_SYMBOL(ap_recv); 113 + 114 + /* State machine definitions and helpers */ 115 + 116 + static enum ap_wait ap_sm_nop(struct ap_queue *aq) 117 + { 118 + return AP_WAIT_NONE; 119 + } 120 + 121 + /** 122 + * ap_sm_recv(): Receive pending reply messages from an AP queue but do 123 + * not change the state of the device. 
124 + * @aq: pointer to the AP queue 125 + * 126 + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 127 + */ 128 + static struct ap_queue_status ap_sm_recv(struct ap_queue *aq) 129 + { 130 + struct ap_queue_status status; 131 + struct ap_message *ap_msg; 132 + 133 + status = ap_dqap(aq->qid, &aq->reply->psmid, 134 + aq->reply->message, aq->reply->length); 135 + switch (status.response_code) { 136 + case AP_RESPONSE_NORMAL: 137 + aq->queue_count--; 138 + if (aq->queue_count > 0) 139 + mod_timer(&aq->timeout, 140 + jiffies + aq->request_timeout); 141 + list_for_each_entry(ap_msg, &aq->pendingq, list) { 142 + if (ap_msg->psmid != aq->reply->psmid) 143 + continue; 144 + list_del_init(&ap_msg->list); 145 + aq->pendingq_count--; 146 + ap_msg->receive(aq, ap_msg, aq->reply); 147 + break; 148 + } 149 + case AP_RESPONSE_NO_PENDING_REPLY: 150 + if (!status.queue_empty || aq->queue_count <= 0) 151 + break; 152 + /* The card shouldn't forget requests but who knows. */ 153 + aq->queue_count = 0; 154 + list_splice_init(&aq->pendingq, &aq->requestq); 155 + aq->requestq_count += aq->pendingq_count; 156 + aq->pendingq_count = 0; 157 + break; 158 + default: 159 + break; 160 + } 161 + return status; 162 + } 163 + 164 + /** 165 + * ap_sm_read(): Receive pending reply messages from an AP queue. 
166 + * @aq: pointer to the AP queue 167 + * 168 + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 169 + */ 170 + static enum ap_wait ap_sm_read(struct ap_queue *aq) 171 + { 172 + struct ap_queue_status status; 173 + 174 + if (!aq->reply) 175 + return AP_WAIT_NONE; 176 + status = ap_sm_recv(aq); 177 + switch (status.response_code) { 178 + case AP_RESPONSE_NORMAL: 179 + if (aq->queue_count > 0) { 180 + aq->state = AP_STATE_WORKING; 181 + return AP_WAIT_AGAIN; 182 + } 183 + aq->state = AP_STATE_IDLE; 184 + return AP_WAIT_NONE; 185 + case AP_RESPONSE_NO_PENDING_REPLY: 186 + if (aq->queue_count > 0) 187 + return AP_WAIT_INTERRUPT; 188 + aq->state = AP_STATE_IDLE; 189 + return AP_WAIT_NONE; 190 + default: 191 + aq->state = AP_STATE_BORKED; 192 + return AP_WAIT_NONE; 193 + } 194 + } 195 + 196 + /** 197 + * ap_sm_suspend_read(): Receive pending reply messages from an AP queue 198 + * without changing the device state in between. In suspend mode we don't 199 + * allow sending new requests, therefore just fetch pending replies. 200 + * @aq: pointer to the AP queue 201 + * 202 + * Returns AP_WAIT_NONE or AP_WAIT_AGAIN 203 + */ 204 + static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq) 205 + { 206 + struct ap_queue_status status; 207 + 208 + if (!aq->reply) 209 + return AP_WAIT_NONE; 210 + status = ap_sm_recv(aq); 211 + switch (status.response_code) { 212 + case AP_RESPONSE_NORMAL: 213 + if (aq->queue_count > 0) 214 + return AP_WAIT_AGAIN; 215 + /* fall through */ 216 + default: 217 + return AP_WAIT_NONE; 218 + } 219 + } 220 + 221 + /** 222 + * ap_sm_write(): Send messages from the request queue to an AP queue. 
223 + * @aq: pointer to the AP queue 224 + * 225 + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 226 + */ 227 + static enum ap_wait ap_sm_write(struct ap_queue *aq) 228 + { 229 + struct ap_queue_status status; 230 + struct ap_message *ap_msg; 231 + 232 + if (aq->requestq_count <= 0) 233 + return AP_WAIT_NONE; 234 + /* Start the next request on the queue. */ 235 + ap_msg = list_entry(aq->requestq.next, struct ap_message, list); 236 + status = __ap_send(aq->qid, ap_msg->psmid, 237 + ap_msg->message, ap_msg->length, ap_msg->special); 238 + switch (status.response_code) { 239 + case AP_RESPONSE_NORMAL: 240 + aq->queue_count++; 241 + if (aq->queue_count == 1) 242 + mod_timer(&aq->timeout, jiffies + aq->request_timeout); 243 + list_move_tail(&ap_msg->list, &aq->pendingq); 244 + aq->requestq_count--; 245 + aq->pendingq_count++; 246 + if (aq->queue_count < aq->card->queue_depth) { 247 + aq->state = AP_STATE_WORKING; 248 + return AP_WAIT_AGAIN; 249 + } 250 + /* fall through */ 251 + case AP_RESPONSE_Q_FULL: 252 + aq->state = AP_STATE_QUEUE_FULL; 253 + return AP_WAIT_INTERRUPT; 254 + case AP_RESPONSE_RESET_IN_PROGRESS: 255 + aq->state = AP_STATE_RESET_WAIT; 256 + return AP_WAIT_TIMEOUT; 257 + case AP_RESPONSE_MESSAGE_TOO_BIG: 258 + case AP_RESPONSE_REQ_FAC_NOT_INST: 259 + list_del_init(&ap_msg->list); 260 + aq->requestq_count--; 261 + ap_msg->rc = -EINVAL; 262 + ap_msg->receive(aq, ap_msg, NULL); 263 + return AP_WAIT_AGAIN; 264 + default: 265 + aq->state = AP_STATE_BORKED; 266 + return AP_WAIT_NONE; 267 + } 268 + } 269 + 270 + /** 271 + * ap_sm_read_write(): Send and receive messages to/from an AP queue. 272 + * @aq: pointer to the AP queue 273 + * 274 + * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT 275 + */ 276 + static enum ap_wait ap_sm_read_write(struct ap_queue *aq) 277 + { 278 + return min(ap_sm_read(aq), ap_sm_write(aq)); 279 + } 280 + 281 + /** 282 + * ap_sm_reset(): Reset an AP queue. 
283 + * @qid: The AP queue number 284 + * 285 + * Submit the Reset command to an AP queue. 286 + */ 287 + static enum ap_wait ap_sm_reset(struct ap_queue *aq) 288 + { 289 + struct ap_queue_status status; 290 + 291 + status = ap_rapq(aq->qid); 292 + switch (status.response_code) { 293 + case AP_RESPONSE_NORMAL: 294 + case AP_RESPONSE_RESET_IN_PROGRESS: 295 + aq->state = AP_STATE_RESET_WAIT; 296 + aq->interrupt = AP_INTR_DISABLED; 297 + return AP_WAIT_TIMEOUT; 298 + case AP_RESPONSE_BUSY: 299 + return AP_WAIT_TIMEOUT; 300 + case AP_RESPONSE_Q_NOT_AVAIL: 301 + case AP_RESPONSE_DECONFIGURED: 302 + case AP_RESPONSE_CHECKSTOPPED: 303 + default: 304 + aq->state = AP_STATE_BORKED; 305 + return AP_WAIT_NONE; 306 + } 307 + } 308 + 309 + /** 310 + * ap_sm_reset_wait(): Test queue for completion of the reset operation 311 + * @aq: pointer to the AP queue 312 + * 313 + * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0. 314 + */ 315 + static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq) 316 + { 317 + struct ap_queue_status status; 318 + void *lsi_ptr; 319 + 320 + if (aq->queue_count > 0 && aq->reply) 321 + /* Try to read a completed message and get the status */ 322 + status = ap_sm_recv(aq); 323 + else 324 + /* Get the status with TAPQ */ 325 + status = ap_tapq(aq->qid, NULL); 326 + 327 + switch (status.response_code) { 328 + case AP_RESPONSE_NORMAL: 329 + lsi_ptr = ap_airq_ptr(); 330 + if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0) 331 + aq->state = AP_STATE_SETIRQ_WAIT; 332 + else 333 + aq->state = (aq->queue_count > 0) ? 
334 + AP_STATE_WORKING : AP_STATE_IDLE; 335 + return AP_WAIT_AGAIN; 336 + case AP_RESPONSE_BUSY: 337 + case AP_RESPONSE_RESET_IN_PROGRESS: 338 + return AP_WAIT_TIMEOUT; 339 + case AP_RESPONSE_Q_NOT_AVAIL: 340 + case AP_RESPONSE_DECONFIGURED: 341 + case AP_RESPONSE_CHECKSTOPPED: 342 + default: 343 + aq->state = AP_STATE_BORKED; 344 + return AP_WAIT_NONE; 345 + } 346 + } 347 + 348 + /** 349 + * ap_sm_setirq_wait(): Test queue for completion of the irq enablement 350 + * @aq: pointer to the AP queue 351 + * 352 + * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0. 353 + */ 354 + static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq) 355 + { 356 + struct ap_queue_status status; 357 + 358 + if (aq->queue_count > 0 && aq->reply) 359 + /* Try to read a completed message and get the status */ 360 + status = ap_sm_recv(aq); 361 + else 362 + /* Get the status with TAPQ */ 363 + status = ap_tapq(aq->qid, NULL); 364 + 365 + if (status.int_enabled == 1) { 366 + /* Irqs are now enabled */ 367 + aq->interrupt = AP_INTR_ENABLED; 368 + aq->state = (aq->queue_count > 0) ? 
369 + AP_STATE_WORKING : AP_STATE_IDLE; 370 + } 371 + 372 + switch (status.response_code) { 373 + case AP_RESPONSE_NORMAL: 374 + if (aq->queue_count > 0) 375 + return AP_WAIT_AGAIN; 376 + /* fallthrough */ 377 + case AP_RESPONSE_NO_PENDING_REPLY: 378 + return AP_WAIT_TIMEOUT; 379 + default: 380 + aq->state = AP_STATE_BORKED; 381 + return AP_WAIT_NONE; 382 + } 383 + } 384 + 385 + /* 386 + * AP state machine jump table 387 + */ 388 + static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = { 389 + [AP_STATE_RESET_START] = { 390 + [AP_EVENT_POLL] = ap_sm_reset, 391 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 392 + }, 393 + [AP_STATE_RESET_WAIT] = { 394 + [AP_EVENT_POLL] = ap_sm_reset_wait, 395 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 396 + }, 397 + [AP_STATE_SETIRQ_WAIT] = { 398 + [AP_EVENT_POLL] = ap_sm_setirq_wait, 399 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 400 + }, 401 + [AP_STATE_IDLE] = { 402 + [AP_EVENT_POLL] = ap_sm_write, 403 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 404 + }, 405 + [AP_STATE_WORKING] = { 406 + [AP_EVENT_POLL] = ap_sm_read_write, 407 + [AP_EVENT_TIMEOUT] = ap_sm_reset, 408 + }, 409 + [AP_STATE_QUEUE_FULL] = { 410 + [AP_EVENT_POLL] = ap_sm_read, 411 + [AP_EVENT_TIMEOUT] = ap_sm_reset, 412 + }, 413 + [AP_STATE_SUSPEND_WAIT] = { 414 + [AP_EVENT_POLL] = ap_sm_suspend_read, 415 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 416 + }, 417 + [AP_STATE_BORKED] = { 418 + [AP_EVENT_POLL] = ap_sm_nop, 419 + [AP_EVENT_TIMEOUT] = ap_sm_nop, 420 + }, 421 + }; 422 + 423 + enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event) 424 + { 425 + return ap_jumptable[aq->state][event](aq); 426 + } 427 + 428 + enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event) 429 + { 430 + enum ap_wait wait; 431 + 432 + while ((wait = ap_sm_event(aq, event)) == AP_WAIT_AGAIN) 433 + ; 434 + return wait; 435 + } 436 + 437 + /* 438 + * Power management for queue devices 439 + */ 440 + void ap_queue_suspend(struct ap_device *ap_dev) 441 + { 442 + struct ap_queue *aq = 
to_ap_queue(&ap_dev->device); 443 + 444 + /* Poll on the device until all requests are finished. */ 445 + spin_lock_bh(&aq->lock); 446 + aq->state = AP_STATE_SUSPEND_WAIT; 447 + while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE) 448 + ; 449 + aq->state = AP_STATE_BORKED; 450 + spin_unlock_bh(&aq->lock); 451 + } 452 + EXPORT_SYMBOL(ap_queue_suspend); 453 + 454 + void ap_queue_resume(struct ap_device *ap_dev) 455 + { 456 + } 457 + EXPORT_SYMBOL(ap_queue_resume); 458 + 459 + /* 460 + * AP queue related attributes. 461 + */ 462 + static ssize_t ap_request_count_show(struct device *dev, 463 + struct device_attribute *attr, 464 + char *buf) 465 + { 466 + struct ap_queue *aq = to_ap_queue(dev); 467 + unsigned int req_cnt; 468 + 469 + spin_lock_bh(&aq->lock); 470 + req_cnt = aq->total_request_count; 471 + spin_unlock_bh(&aq->lock); 472 + return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt); 473 + } 474 + 475 + static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); 476 + 477 + static ssize_t ap_requestq_count_show(struct device *dev, 478 + struct device_attribute *attr, char *buf) 479 + { 480 + struct ap_queue *aq = to_ap_queue(dev); 481 + unsigned int reqq_cnt = 0; 482 + 483 + spin_lock_bh(&aq->lock); 484 + reqq_cnt = aq->requestq_count; 485 + spin_unlock_bh(&aq->lock); 486 + return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt); 487 + } 488 + 489 + static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL); 490 + 491 + static ssize_t ap_pendingq_count_show(struct device *dev, 492 + struct device_attribute *attr, char *buf) 493 + { 494 + struct ap_queue *aq = to_ap_queue(dev); 495 + unsigned int penq_cnt = 0; 496 + 497 + spin_lock_bh(&aq->lock); 498 + penq_cnt = aq->pendingq_count; 499 + spin_unlock_bh(&aq->lock); 500 + return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt); 501 + } 502 + 503 + static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL); 504 + 505 + static ssize_t ap_reset_show(struct device *dev, 506 + struct device_attribute 
*attr, char *buf) 507 + { 508 + struct ap_queue *aq = to_ap_queue(dev); 509 + int rc = 0; 510 + 511 + spin_lock_bh(&aq->lock); 512 + switch (aq->state) { 513 + case AP_STATE_RESET_START: 514 + case AP_STATE_RESET_WAIT: 515 + rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n"); 516 + break; 517 + case AP_STATE_WORKING: 518 + case AP_STATE_QUEUE_FULL: 519 + rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n"); 520 + break; 521 + default: 522 + rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n"); 523 + } 524 + spin_unlock_bh(&aq->lock); 525 + return rc; 526 + } 527 + 528 + static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL); 529 + 530 + static ssize_t ap_interrupt_show(struct device *dev, 531 + struct device_attribute *attr, char *buf) 532 + { 533 + struct ap_queue *aq = to_ap_queue(dev); 534 + int rc = 0; 535 + 536 + spin_lock_bh(&aq->lock); 537 + if (aq->state == AP_STATE_SETIRQ_WAIT) 538 + rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n"); 539 + else if (aq->interrupt == AP_INTR_ENABLED) 540 + rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n"); 541 + else 542 + rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n"); 543 + spin_unlock_bh(&aq->lock); 544 + return rc; 545 + } 546 + 547 + static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL); 548 + 549 + static struct attribute *ap_queue_dev_attrs[] = { 550 + &dev_attr_request_count.attr, 551 + &dev_attr_requestq_count.attr, 552 + &dev_attr_pendingq_count.attr, 553 + &dev_attr_reset.attr, 554 + &dev_attr_interrupt.attr, 555 + NULL 556 + }; 557 + 558 + static struct attribute_group ap_queue_dev_attr_group = { 559 + .attrs = ap_queue_dev_attrs 560 + }; 561 + 562 + static const struct attribute_group *ap_queue_dev_attr_groups[] = { 563 + &ap_queue_dev_attr_group, 564 + NULL 565 + }; 566 + 567 + struct device_type ap_queue_type = { 568 + .name = "ap_queue", 569 + .groups = ap_queue_dev_attr_groups, 570 + }; 571 + 572 + static void ap_queue_device_release(struct device *dev) 573 + { 574 + 
kfree(to_ap_queue(dev)); 575 + } 576 + 577 + struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) 578 + { 579 + struct ap_queue *aq; 580 + 581 + aq = kzalloc(sizeof(*aq), GFP_KERNEL); 582 + if (!aq) 583 + return NULL; 584 + aq->ap_dev.device.release = ap_queue_device_release; 585 + aq->ap_dev.device.type = &ap_queue_type; 586 + aq->ap_dev.device_type = device_type; 587 + /* CEX6 toleration: map to CEX5 */ 588 + if (device_type == AP_DEVICE_TYPE_CEX6) 589 + aq->ap_dev.device_type = AP_DEVICE_TYPE_CEX5; 590 + aq->qid = qid; 591 + aq->state = AP_STATE_RESET_START; 592 + aq->interrupt = AP_INTR_DISABLED; 593 + spin_lock_init(&aq->lock); 594 + INIT_LIST_HEAD(&aq->pendingq); 595 + INIT_LIST_HEAD(&aq->requestq); 596 + setup_timer(&aq->timeout, ap_request_timeout, (unsigned long) aq); 597 + 598 + return aq; 599 + } 600 + 601 + void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply) 602 + { 603 + aq->reply = reply; 604 + 605 + spin_lock_bh(&aq->lock); 606 + ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); 607 + spin_unlock_bh(&aq->lock); 608 + } 609 + EXPORT_SYMBOL(ap_queue_init_reply); 610 + 611 + /** 612 + * ap_queue_message(): Queue a request to an AP device. 613 + * @aq: The AP device to queue the message to 614 + * @ap_msg: The message that is to be added 615 + */ 616 + void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg) 617 + { 618 + /* For asynchronous message handling a valid receive-callback 619 + * is required. 620 + */ 621 + BUG_ON(!ap_msg->receive); 622 + 623 + spin_lock_bh(&aq->lock); 624 + /* Queue the message. */ 625 + list_add_tail(&ap_msg->list, &aq->requestq); 626 + aq->requestq_count++; 627 + aq->total_request_count++; 628 + atomic_inc(&aq->card->total_request_count); 629 + /* Send/receive as many request from the queue as possible. 
*/ 630 + ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL)); 631 + spin_unlock_bh(&aq->lock); 632 + } 633 + EXPORT_SYMBOL(ap_queue_message); 634 + 635 + /** 636 + * ap_cancel_message(): Cancel a crypto request. 637 + * @aq: The AP device that has the message queued 638 + * @ap_msg: The message that is to be removed 639 + * 640 + * Cancel a crypto request. This is done by removing the request 641 + * from the device pending or request queue. Note that the 642 + * request stays on the AP queue. When it finishes the message 643 + * reply will be discarded because the psmid can't be found. 644 + */ 645 + void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg) 646 + { 647 + struct ap_message *tmp; 648 + 649 + spin_lock_bh(&aq->lock); 650 + if (!list_empty(&ap_msg->list)) { 651 + list_for_each_entry(tmp, &aq->pendingq, list) 652 + if (tmp->psmid == ap_msg->psmid) { 653 + aq->pendingq_count--; 654 + goto found; 655 + } 656 + aq->requestq_count--; 657 + found: 658 + list_del_init(&ap_msg->list); 659 + } 660 + spin_unlock_bh(&aq->lock); 661 + } 662 + EXPORT_SYMBOL(ap_cancel_message); 663 + 664 + /** 665 + * __ap_flush_queue(): Flush requests. 666 + * @aq: Pointer to the AP queue 667 + * 668 + * Flush all requests from the request/pending queue of an AP device. 
669 + */ 670 + static void __ap_flush_queue(struct ap_queue *aq) 671 + { 672 + struct ap_message *ap_msg, *next; 673 + 674 + list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) { 675 + list_del_init(&ap_msg->list); 676 + aq->pendingq_count--; 677 + ap_msg->rc = -EAGAIN; 678 + ap_msg->receive(aq, ap_msg, NULL); 679 + } 680 + list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) { 681 + list_del_init(&ap_msg->list); 682 + aq->requestq_count--; 683 + ap_msg->rc = -EAGAIN; 684 + ap_msg->receive(aq, ap_msg, NULL); 685 + } 686 + } 687 + 688 + void ap_flush_queue(struct ap_queue *aq) 689 + { 690 + spin_lock_bh(&aq->lock); 691 + __ap_flush_queue(aq); 692 + spin_unlock_bh(&aq->lock); 693 + } 694 + EXPORT_SYMBOL(ap_flush_queue); 695 + 696 + void ap_queue_remove(struct ap_queue *aq) 697 + { 698 + ap_flush_queue(aq); 699 + del_timer_sync(&aq->timeout); 700 + } 701 + EXPORT_SYMBOL(ap_queue_remove);
+571 -564
drivers/s390/crypto/zcrypt_api.c
··· 41 41 #include <linux/debugfs.h> 42 42 #include <asm/debug.h> 43 43 44 - #include "zcrypt_debug.h" 44 + #define CREATE_TRACE_POINTS 45 + #include <asm/trace/zcrypt.h> 46 + 45 47 #include "zcrypt_api.h" 48 + #include "zcrypt_debug.h" 46 49 47 50 #include "zcrypt_msgtype6.h" 51 + #include "zcrypt_msgtype50.h" 48 52 49 53 /* 50 54 * Module description. ··· 58 54 "Copyright IBM Corp. 2001, 2012"); 59 55 MODULE_LICENSE("GPL"); 60 56 57 + /* 58 + * zcrypt tracepoint functions 59 + */ 60 + EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req); 61 + EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep); 62 + 61 63 static int zcrypt_hwrng_seed = 1; 62 64 module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP); 63 65 MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on)."); 64 66 65 - static DEFINE_SPINLOCK(zcrypt_device_lock); 66 - static LIST_HEAD(zcrypt_device_list); 67 - static int zcrypt_device_count = 0; 67 + DEFINE_SPINLOCK(zcrypt_list_lock); 68 + LIST_HEAD(zcrypt_card_list); 69 + int zcrypt_device_count; 70 + 68 71 static atomic_t zcrypt_open_count = ATOMIC_INIT(0); 69 72 static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0); 70 73 71 74 atomic_t zcrypt_rescan_req = ATOMIC_INIT(0); 72 75 EXPORT_SYMBOL(zcrypt_rescan_req); 73 76 74 - static int zcrypt_rng_device_add(void); 75 - static void zcrypt_rng_device_remove(void); 76 - 77 - static DEFINE_SPINLOCK(zcrypt_ops_list_lock); 78 77 static LIST_HEAD(zcrypt_ops_list); 79 78 80 - static debug_info_t *zcrypt_dbf_common; 81 - static debug_info_t *zcrypt_dbf_devices; 82 - static struct dentry *debugfs_root; 83 - 84 - /* 85 - * Device attributes common for all crypto devices. 
86 - */ 87 - static ssize_t zcrypt_type_show(struct device *dev, 88 - struct device_attribute *attr, char *buf) 89 - { 90 - struct zcrypt_device *zdev = to_ap_dev(dev)->private; 91 - return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string); 92 - } 93 - 94 - static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL); 95 - 96 - static ssize_t zcrypt_online_show(struct device *dev, 97 - struct device_attribute *attr, char *buf) 98 - { 99 - struct zcrypt_device *zdev = to_ap_dev(dev)->private; 100 - return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online); 101 - } 102 - 103 - static ssize_t zcrypt_online_store(struct device *dev, 104 - struct device_attribute *attr, 105 - const char *buf, size_t count) 106 - { 107 - struct zcrypt_device *zdev = to_ap_dev(dev)->private; 108 - int online; 109 - 110 - if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) 111 - return -EINVAL; 112 - zdev->online = online; 113 - ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dman", zdev->ap_dev->qid, 114 - zdev->online); 115 - if (!online) 116 - ap_flush_queue(zdev->ap_dev); 117 - return count; 118 - } 119 - 120 - static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store); 121 - 122 - static struct attribute * zcrypt_device_attrs[] = { 123 - &dev_attr_type.attr, 124 - &dev_attr_online.attr, 125 - NULL, 126 - }; 127 - 128 - static struct attribute_group zcrypt_device_attr_group = { 129 - .attrs = zcrypt_device_attrs, 130 - }; 79 + /* Zcrypt related debug feature stuff. */ 80 + static struct dentry *zcrypt_dbf_root; 81 + debug_info_t *zcrypt_dbf_info; 131 82 132 83 /** 133 84 * Process a rescan of the transport layer. 
··· 95 136 atomic_set(&zcrypt_rescan_req, 0); 96 137 atomic_inc(&zcrypt_rescan_count); 97 138 ap_bus_force_rescan(); 98 - ZCRYPT_DBF_COMMON(DBF_INFO, "rescan%07d", 99 - atomic_inc_return(&zcrypt_rescan_count)); 139 + ZCRYPT_DBF(DBF_INFO, "rescan count=%07d", 140 + atomic_inc_return(&zcrypt_rescan_count)); 100 141 return 1; 101 142 } 102 143 return 0; 103 144 } 104 145 105 - /** 106 - * __zcrypt_increase_preference(): Increase preference of a crypto device. 107 - * @zdev: Pointer the crypto device 108 - * 109 - * Move the device towards the head of the device list. 110 - * Need to be called while holding the zcrypt device list lock. 111 - * Note: cards with speed_rating of 0 are kept at the end of the list. 112 - */ 113 - static void __zcrypt_increase_preference(struct zcrypt_device *zdev) 114 - { 115 - struct zcrypt_device *tmp; 116 - struct list_head *l; 117 - 118 - if (zdev->speed_rating == 0) 119 - return; 120 - for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) { 121 - tmp = list_entry(l, struct zcrypt_device, list); 122 - if ((tmp->request_count + 1) * tmp->speed_rating <= 123 - (zdev->request_count + 1) * zdev->speed_rating && 124 - tmp->speed_rating != 0) 125 - break; 126 - } 127 - if (l == zdev->list.prev) 128 - return; 129 - /* Move zdev behind l */ 130 - list_move(&zdev->list, l); 131 - } 132 - 133 - /** 134 - * __zcrypt_decrease_preference(): Decrease preference of a crypto device. 135 - * @zdev: Pointer to a crypto device. 136 - * 137 - * Move the device towards the tail of the device list. 138 - * Need to be called while holding the zcrypt device list lock. 139 - * Note: cards with speed_rating of 0 are kept at the end of the list. 
140 - */ 141 - static void __zcrypt_decrease_preference(struct zcrypt_device *zdev) 142 - { 143 - struct zcrypt_device *tmp; 144 - struct list_head *l; 145 - 146 - if (zdev->speed_rating == 0) 147 - return; 148 - for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) { 149 - tmp = list_entry(l, struct zcrypt_device, list); 150 - if ((tmp->request_count + 1) * tmp->speed_rating > 151 - (zdev->request_count + 1) * zdev->speed_rating || 152 - tmp->speed_rating == 0) 153 - break; 154 - } 155 - if (l == zdev->list.next) 156 - return; 157 - /* Move zdev before l */ 158 - list_move_tail(&zdev->list, l); 159 - } 160 - 161 - static void zcrypt_device_release(struct kref *kref) 162 - { 163 - struct zcrypt_device *zdev = 164 - container_of(kref, struct zcrypt_device, refcount); 165 - zcrypt_device_free(zdev); 166 - } 167 - 168 - void zcrypt_device_get(struct zcrypt_device *zdev) 169 - { 170 - kref_get(&zdev->refcount); 171 - } 172 - EXPORT_SYMBOL(zcrypt_device_get); 173 - 174 - int zcrypt_device_put(struct zcrypt_device *zdev) 175 - { 176 - return kref_put(&zdev->refcount, zcrypt_device_release); 177 - } 178 - EXPORT_SYMBOL(zcrypt_device_put); 179 - 180 - struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size) 181 - { 182 - struct zcrypt_device *zdev; 183 - 184 - zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL); 185 - if (!zdev) 186 - return NULL; 187 - zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL); 188 - if (!zdev->reply.message) 189 - goto out_free; 190 - zdev->reply.length = max_response_size; 191 - spin_lock_init(&zdev->lock); 192 - INIT_LIST_HEAD(&zdev->list); 193 - zdev->dbf_area = zcrypt_dbf_devices; 194 - return zdev; 195 - 196 - out_free: 197 - kfree(zdev); 198 - return NULL; 199 - } 200 - EXPORT_SYMBOL(zcrypt_device_alloc); 201 - 202 - void zcrypt_device_free(struct zcrypt_device *zdev) 203 - { 204 - kfree(zdev->reply.message); 205 - kfree(zdev); 206 - } 207 - EXPORT_SYMBOL(zcrypt_device_free); 208 - 209 - /** 210 - * 
zcrypt_device_register() - Register a crypto device. 211 - * @zdev: Pointer to a crypto device 212 - * 213 - * Register a crypto device. Returns 0 if successful. 214 - */ 215 - int zcrypt_device_register(struct zcrypt_device *zdev) 216 - { 217 - int rc; 218 - 219 - if (!zdev->ops) 220 - return -ENODEV; 221 - rc = sysfs_create_group(&zdev->ap_dev->device.kobj, 222 - &zcrypt_device_attr_group); 223 - if (rc) 224 - goto out; 225 - get_device(&zdev->ap_dev->device); 226 - kref_init(&zdev->refcount); 227 - spin_lock_bh(&zcrypt_device_lock); 228 - zdev->online = 1; /* New devices are online by default. */ 229 - ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dreg", zdev->ap_dev->qid, 230 - zdev->online); 231 - list_add_tail(&zdev->list, &zcrypt_device_list); 232 - __zcrypt_increase_preference(zdev); 233 - zcrypt_device_count++; 234 - spin_unlock_bh(&zcrypt_device_lock); 235 - if (zdev->ops->rng) { 236 - rc = zcrypt_rng_device_add(); 237 - if (rc) 238 - goto out_unregister; 239 - } 240 - return 0; 241 - 242 - out_unregister: 243 - spin_lock_bh(&zcrypt_device_lock); 244 - zcrypt_device_count--; 245 - list_del_init(&zdev->list); 246 - spin_unlock_bh(&zcrypt_device_lock); 247 - sysfs_remove_group(&zdev->ap_dev->device.kobj, 248 - &zcrypt_device_attr_group); 249 - put_device(&zdev->ap_dev->device); 250 - zcrypt_device_put(zdev); 251 - out: 252 - return rc; 253 - } 254 - EXPORT_SYMBOL(zcrypt_device_register); 255 - 256 - /** 257 - * zcrypt_device_unregister(): Unregister a crypto device. 258 - * @zdev: Pointer to crypto device 259 - * 260 - * Unregister a crypto device. 
261 - */ 262 - void zcrypt_device_unregister(struct zcrypt_device *zdev) 263 - { 264 - if (zdev->ops->rng) 265 - zcrypt_rng_device_remove(); 266 - spin_lock_bh(&zcrypt_device_lock); 267 - zcrypt_device_count--; 268 - list_del_init(&zdev->list); 269 - spin_unlock_bh(&zcrypt_device_lock); 270 - sysfs_remove_group(&zdev->ap_dev->device.kobj, 271 - &zcrypt_device_attr_group); 272 - put_device(&zdev->ap_dev->device); 273 - zcrypt_device_put(zdev); 274 - } 275 - EXPORT_SYMBOL(zcrypt_device_unregister); 276 - 277 146 void zcrypt_msgtype_register(struct zcrypt_ops *zops) 278 147 { 279 - spin_lock_bh(&zcrypt_ops_list_lock); 280 148 list_add_tail(&zops->list, &zcrypt_ops_list); 281 - spin_unlock_bh(&zcrypt_ops_list_lock); 282 149 } 283 - EXPORT_SYMBOL(zcrypt_msgtype_register); 284 150 285 151 void zcrypt_msgtype_unregister(struct zcrypt_ops *zops) 286 152 { 287 - spin_lock_bh(&zcrypt_ops_list_lock); 288 153 list_del_init(&zops->list); 289 - spin_unlock_bh(&zcrypt_ops_list_lock); 290 154 } 291 - EXPORT_SYMBOL(zcrypt_msgtype_unregister); 292 155 293 - static inline 294 - struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant) 156 + struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant) 295 157 { 296 158 struct zcrypt_ops *zops; 297 - int found = 0; 298 159 299 - spin_lock_bh(&zcrypt_ops_list_lock); 300 - list_for_each_entry(zops, &zcrypt_ops_list, list) { 160 + list_for_each_entry(zops, &zcrypt_ops_list, list) 301 161 if ((zops->variant == variant) && 302 - (!strncmp(zops->name, name, sizeof(zops->name)))) { 303 - found = 1; 304 - break; 305 - } 306 - } 307 - if (!found || !try_module_get(zops->owner)) 308 - zops = NULL; 309 - 310 - spin_unlock_bh(&zcrypt_ops_list_lock); 311 - 312 - return zops; 162 + (!strncmp(zops->name, name, sizeof(zops->name)))) 163 + return zops; 164 + return NULL; 313 165 } 314 - 315 - struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant) 316 - { 317 - struct zcrypt_ops *zops = NULL; 318 - 319 - zops = 
__ops_lookup(name, variant); 320 - if (!zops) { 321 - request_module("%s", name); 322 - zops = __ops_lookup(name, variant); 323 - } 324 - return zops; 325 - } 326 - EXPORT_SYMBOL(zcrypt_msgtype_request); 327 - 328 - void zcrypt_msgtype_release(struct zcrypt_ops *zops) 329 - { 330 - if (zops) 331 - module_put(zops->owner); 332 - } 333 - EXPORT_SYMBOL(zcrypt_msgtype_release); 166 + EXPORT_SYMBOL(zcrypt_msgtype); 334 167 335 168 /** 336 169 * zcrypt_read (): Not supported beyond zcrypt 1.3.1. ··· 168 417 return 0; 169 418 } 170 419 420 + static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, 421 + struct zcrypt_queue *zq, 422 + unsigned int weight) 423 + { 424 + if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner)) 425 + return NULL; 426 + zcrypt_queue_get(zq); 427 + get_device(&zq->queue->ap_dev.device); 428 + atomic_add(weight, &zc->load); 429 + atomic_add(weight, &zq->load); 430 + zq->request_count++; 431 + return zq; 432 + } 433 + 434 + static inline void zcrypt_drop_queue(struct zcrypt_card *zc, 435 + struct zcrypt_queue *zq, 436 + unsigned int weight) 437 + { 438 + struct module *mod = zq->queue->ap_dev.drv->driver.owner; 439 + 440 + zq->request_count--; 441 + atomic_sub(weight, &zc->load); 442 + atomic_sub(weight, &zq->load); 443 + put_device(&zq->queue->ap_dev.device); 444 + zcrypt_queue_put(zq); 445 + module_put(mod); 446 + } 447 + 448 + static inline bool zcrypt_card_compare(struct zcrypt_card *zc, 449 + struct zcrypt_card *pref_zc, 450 + unsigned weight, unsigned pref_weight) 451 + { 452 + if (!pref_zc) 453 + return 0; 454 + weight += atomic_read(&zc->load); 455 + pref_weight += atomic_read(&pref_zc->load); 456 + if (weight == pref_weight) 457 + return atomic_read(&zc->card->total_request_count) > 458 + atomic_read(&pref_zc->card->total_request_count); 459 + return weight > pref_weight; 460 + } 461 + 462 + static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq, 463 + struct zcrypt_queue *pref_zq, 464 + unsigned 
weight, unsigned pref_weight) 465 + { 466 + if (!pref_zq) 467 + return 0; 468 + weight += atomic_read(&zq->load); 469 + pref_weight += atomic_read(&pref_zq->load); 470 + if (weight == pref_weight) 471 + return &zq->queue->total_request_count > 472 + &pref_zq->queue->total_request_count; 473 + return weight > pref_weight; 474 + } 475 + 171 476 /* 172 477 * zcrypt ioctls. 173 478 */ 174 479 static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) 175 480 { 176 - struct zcrypt_device *zdev; 177 - int rc; 481 + struct zcrypt_card *zc, *pref_zc; 482 + struct zcrypt_queue *zq, *pref_zq; 483 + unsigned int weight, pref_weight; 484 + unsigned int func_code; 485 + int qid = 0, rc = -ENODEV; 178 486 179 - if (mex->outputdatalength < mex->inputdatalength) 180 - return -EINVAL; 487 + trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); 488 + 489 + if (mex->outputdatalength < mex->inputdatalength) { 490 + rc = -EINVAL; 491 + goto out; 492 + } 493 + 181 494 /* 182 495 * As long as outputdatalength is big enough, we can set the 183 496 * outputdatalength equal to the inputdatalength, since that is the ··· 249 434 */ 250 435 mex->outputdatalength = mex->inputdatalength; 251 436 252 - spin_lock_bh(&zcrypt_device_lock); 253 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 254 - if (!zdev->online || 255 - !zdev->ops->rsa_modexpo || 256 - zdev->min_mod_size > mex->inputdatalength || 257 - zdev->max_mod_size < mex->inputdatalength) 437 + rc = get_rsa_modex_fc(mex, &func_code); 438 + if (rc) 439 + goto out; 440 + 441 + pref_zc = NULL; 442 + pref_zq = NULL; 443 + spin_lock(&zcrypt_list_lock); 444 + for_each_zcrypt_card(zc) { 445 + /* Check for online accelarator and CCA cards */ 446 + if (!zc->online || !(zc->card->functions & 0x18000000)) 258 447 continue; 259 - zcrypt_device_get(zdev); 260 - get_device(&zdev->ap_dev->device); 261 - zdev->request_count++; 262 - __zcrypt_decrease_preference(zdev); 263 - if (try_module_get(zdev->ap_dev->drv->driver.owner)) { 264 - 
spin_unlock_bh(&zcrypt_device_lock); 265 - rc = zdev->ops->rsa_modexpo(zdev, mex); 266 - spin_lock_bh(&zcrypt_device_lock); 267 - module_put(zdev->ap_dev->drv->driver.owner); 448 + /* Check for size limits */ 449 + if (zc->min_mod_size > mex->inputdatalength || 450 + zc->max_mod_size < mex->inputdatalength) 451 + continue; 452 + /* get weight index of the card device */ 453 + weight = zc->speed_rating[func_code]; 454 + if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 455 + continue; 456 + for_each_zcrypt_queue(zq, zc) { 457 + /* check if device is online and eligible */ 458 + if (!zq->online || !zq->ops->rsa_modexpo) 459 + continue; 460 + if (zcrypt_queue_compare(zq, pref_zq, 461 + weight, pref_weight)) 462 + continue; 463 + pref_zc = zc; 464 + pref_zq = zq; 465 + pref_weight = weight; 268 466 } 269 - else 270 - rc = -EAGAIN; 271 - zdev->request_count--; 272 - __zcrypt_increase_preference(zdev); 273 - put_device(&zdev->ap_dev->device); 274 - zcrypt_device_put(zdev); 275 - spin_unlock_bh(&zcrypt_device_lock); 276 - return rc; 277 467 } 278 - spin_unlock_bh(&zcrypt_device_lock); 279 - return -ENODEV; 468 + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 469 + spin_unlock(&zcrypt_list_lock); 470 + 471 + if (!pref_zq) { 472 + rc = -ENODEV; 473 + goto out; 474 + } 475 + 476 + qid = pref_zq->queue->qid; 477 + rc = pref_zq->ops->rsa_modexpo(pref_zq, mex); 478 + 479 + spin_lock(&zcrypt_list_lock); 480 + zcrypt_drop_queue(pref_zc, pref_zq, weight); 481 + spin_unlock(&zcrypt_list_lock); 482 + 483 + out: 484 + trace_s390_zcrypt_rep(mex, func_code, rc, 485 + AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 486 + return rc; 280 487 } 281 488 282 489 static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) 283 490 { 284 - struct zcrypt_device *zdev; 285 - unsigned long long z1, z2, z3; 286 - int rc, copied; 491 + struct zcrypt_card *zc, *pref_zc; 492 + struct zcrypt_queue *zq, *pref_zq; 493 + unsigned int weight, pref_weight; 494 + unsigned int func_code; 495 + int qid 
= 0, rc = -ENODEV; 287 496 288 - if (crt->outputdatalength < crt->inputdatalength) 289 - return -EINVAL; 497 + trace_s390_zcrypt_req(crt, TP_ICARSACRT); 498 + 499 + if (crt->outputdatalength < crt->inputdatalength) { 500 + rc = -EINVAL; 501 + goto out; 502 + } 503 + 290 504 /* 291 505 * As long as outputdatalength is big enough, we can set the 292 506 * outputdatalength equal to the inputdatalength, since that is the ··· 323 479 */ 324 480 crt->outputdatalength = crt->inputdatalength; 325 481 326 - copied = 0; 327 - restart: 328 - spin_lock_bh(&zcrypt_device_lock); 329 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 330 - if (!zdev->online || 331 - !zdev->ops->rsa_modexpo_crt || 332 - zdev->min_mod_size > crt->inputdatalength || 333 - zdev->max_mod_size < crt->inputdatalength) 482 + rc = get_rsa_crt_fc(crt, &func_code); 483 + if (rc) 484 + goto out; 485 + 486 + pref_zc = NULL; 487 + pref_zq = NULL; 488 + spin_lock(&zcrypt_list_lock); 489 + for_each_zcrypt_card(zc) { 490 + /* Check for online accelarator and CCA cards */ 491 + if (!zc->online || !(zc->card->functions & 0x18000000)) 334 492 continue; 335 - if (zdev->short_crt && crt->inputdatalength > 240) { 336 - /* 337 - * Check inputdata for leading zeros for cards 338 - * that can't handle np_prime, bp_key, or 339 - * u_mult_inv > 128 bytes. 340 - */ 341 - if (copied == 0) { 342 - unsigned int len; 343 - spin_unlock_bh(&zcrypt_device_lock); 344 - /* len is max 256 / 2 - 120 = 8 345 - * For bigger device just assume len of leading 346 - * 0s is 8 as stated in the requirements for 347 - * ica_rsa_modexpo_crt struct in zcrypt.h. 
348 - */ 349 - if (crt->inputdatalength <= 256) 350 - len = crt->inputdatalength / 2 - 120; 351 - else 352 - len = 8; 353 - if (len > sizeof(z1)) 354 - return -EFAULT; 355 - z1 = z2 = z3 = 0; 356 - if (copy_from_user(&z1, crt->np_prime, len) || 357 - copy_from_user(&z2, crt->bp_key, len) || 358 - copy_from_user(&z3, crt->u_mult_inv, len)) 359 - return -EFAULT; 360 - z1 = z2 = z3 = 0; 361 - copied = 1; 362 - /* 363 - * We have to restart device lookup - 364 - * the device list may have changed by now. 365 - */ 366 - goto restart; 367 - } 368 - if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL) 369 - /* The device can't handle this request. */ 493 + /* Check for size limits */ 494 + if (zc->min_mod_size > crt->inputdatalength || 495 + zc->max_mod_size < crt->inputdatalength) 496 + continue; 497 + /* get weight index of the card device */ 498 + weight = zc->speed_rating[func_code]; 499 + if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 500 + continue; 501 + for_each_zcrypt_queue(zq, zc) { 502 + /* check if device is online and eligible */ 503 + if (!zq->online || !zq->ops->rsa_modexpo_crt) 370 504 continue; 505 + if (zcrypt_queue_compare(zq, pref_zq, 506 + weight, pref_weight)) 507 + continue; 508 + pref_zc = zc; 509 + pref_zq = zq; 510 + pref_weight = weight; 371 511 } 372 - zcrypt_device_get(zdev); 373 - get_device(&zdev->ap_dev->device); 374 - zdev->request_count++; 375 - __zcrypt_decrease_preference(zdev); 376 - if (try_module_get(zdev->ap_dev->drv->driver.owner)) { 377 - spin_unlock_bh(&zcrypt_device_lock); 378 - rc = zdev->ops->rsa_modexpo_crt(zdev, crt); 379 - spin_lock_bh(&zcrypt_device_lock); 380 - module_put(zdev->ap_dev->drv->driver.owner); 381 - } 382 - else 383 - rc = -EAGAIN; 384 - zdev->request_count--; 385 - __zcrypt_increase_preference(zdev); 386 - put_device(&zdev->ap_dev->device); 387 - zcrypt_device_put(zdev); 388 - spin_unlock_bh(&zcrypt_device_lock); 389 - return rc; 390 512 } 391 - spin_unlock_bh(&zcrypt_device_lock); 392 - return -ENODEV; 
513 + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 514 + spin_unlock(&zcrypt_list_lock); 515 + 516 + if (!pref_zq) { 517 + rc = -ENODEV; 518 + goto out; 519 + } 520 + 521 + qid = pref_zq->queue->qid; 522 + rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt); 523 + 524 + spin_lock(&zcrypt_list_lock); 525 + zcrypt_drop_queue(pref_zc, pref_zq, weight); 526 + spin_unlock(&zcrypt_list_lock); 527 + 528 + out: 529 + trace_s390_zcrypt_rep(crt, func_code, rc, 530 + AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 531 + return rc; 393 532 } 394 533 395 534 static long zcrypt_send_cprb(struct ica_xcRB *xcRB) 396 535 { 397 - struct zcrypt_device *zdev; 398 - int rc; 536 + struct zcrypt_card *zc, *pref_zc; 537 + struct zcrypt_queue *zq, *pref_zq; 538 + struct ap_message ap_msg; 539 + unsigned int weight, pref_weight; 540 + unsigned int func_code; 541 + unsigned short *domain; 542 + int qid = 0, rc = -ENODEV; 399 543 400 - spin_lock_bh(&zcrypt_device_lock); 401 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 402 - if (!zdev->online || !zdev->ops->send_cprb || 403 - (zdev->ops->variant == MSGTYPE06_VARIANT_EP11) || 404 - (xcRB->user_defined != AUTOSELECT && 405 - AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined)) 544 + trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); 545 + 546 + rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain); 547 + if (rc) 548 + goto out; 549 + 550 + pref_zc = NULL; 551 + pref_zq = NULL; 552 + spin_lock(&zcrypt_list_lock); 553 + for_each_zcrypt_card(zc) { 554 + /* Check for online CCA cards */ 555 + if (!zc->online || !(zc->card->functions & 0x10000000)) 406 556 continue; 407 - zcrypt_device_get(zdev); 408 - get_device(&zdev->ap_dev->device); 409 - zdev->request_count++; 410 - __zcrypt_decrease_preference(zdev); 411 - if (try_module_get(zdev->ap_dev->drv->driver.owner)) { 412 - spin_unlock_bh(&zcrypt_device_lock); 413 - rc = zdev->ops->send_cprb(zdev, xcRB); 414 - spin_lock_bh(&zcrypt_device_lock); 415 - module_put(zdev->ap_dev->drv->driver.owner); 
557 + /* Check for user selected CCA card */ 558 + if (xcRB->user_defined != AUTOSELECT && 559 + xcRB->user_defined != zc->card->id) 560 + continue; 561 + /* get weight index of the card device */ 562 + weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY]; 563 + if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 564 + continue; 565 + for_each_zcrypt_queue(zq, zc) { 566 + /* check if device is online and eligible */ 567 + if (!zq->online || 568 + !zq->ops->send_cprb || 569 + ((*domain != (unsigned short) AUTOSELECT) && 570 + (*domain != AP_QID_QUEUE(zq->queue->qid)))) 571 + continue; 572 + if (zcrypt_queue_compare(zq, pref_zq, 573 + weight, pref_weight)) 574 + continue; 575 + pref_zc = zc; 576 + pref_zq = zq; 577 + pref_weight = weight; 416 578 } 417 - else 418 - rc = -EAGAIN; 419 - zdev->request_count--; 420 - __zcrypt_increase_preference(zdev); 421 - put_device(&zdev->ap_dev->device); 422 - zcrypt_device_put(zdev); 423 - spin_unlock_bh(&zcrypt_device_lock); 424 - return rc; 425 579 } 426 - spin_unlock_bh(&zcrypt_device_lock); 427 - return -ENODEV; 580 + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 581 + spin_unlock(&zcrypt_list_lock); 582 + 583 + if (!pref_zq) { 584 + rc = -ENODEV; 585 + goto out; 586 + } 587 + 588 + /* in case of auto select, provide the correct domain */ 589 + qid = pref_zq->queue->qid; 590 + if (*domain == (unsigned short) AUTOSELECT) 591 + *domain = AP_QID_QUEUE(qid); 592 + 593 + rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); 594 + 595 + spin_lock(&zcrypt_list_lock); 596 + zcrypt_drop_queue(pref_zc, pref_zq, weight); 597 + spin_unlock(&zcrypt_list_lock); 598 + 599 + out: 600 + trace_s390_zcrypt_rep(xcRB, func_code, rc, 601 + AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 602 + return rc; 428 603 } 429 604 430 - struct ep11_target_dev_list { 431 - unsigned short targets_num; 432 - struct ep11_target_dev *targets; 433 - }; 434 - 435 - static bool is_desired_ep11dev(unsigned int dev_qid, 436 - struct ep11_target_dev_list 
dev_list) 605 + static bool is_desired_ep11_card(unsigned int dev_id, 606 + unsigned short target_num, 607 + struct ep11_target_dev *targets) 437 608 { 438 - int n; 439 - 440 - for (n = 0; n < dev_list.targets_num; n++, dev_list.targets++) { 441 - if ((AP_QID_DEVICE(dev_qid) == dev_list.targets->ap_id) && 442 - (AP_QID_QUEUE(dev_qid) == dev_list.targets->dom_id)) { 609 + while (target_num-- > 0) { 610 + if (dev_id == targets->ap_id) 443 611 return true; 444 - } 612 + targets++; 613 + } 614 + return false; 615 + } 616 + 617 + static bool is_desired_ep11_queue(unsigned int dev_qid, 618 + unsigned short target_num, 619 + struct ep11_target_dev *targets) 620 + { 621 + while (target_num-- > 0) { 622 + if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid) 623 + return true; 624 + targets++; 445 625 } 446 626 return false; 447 627 } 448 628 449 629 static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) 450 630 { 451 - struct zcrypt_device *zdev; 452 - bool autoselect = false; 453 - int rc; 454 - struct ep11_target_dev_list ep11_dev_list = { 455 - .targets_num = 0x00, 456 - .targets = NULL, 457 - }; 631 + struct zcrypt_card *zc, *pref_zc; 632 + struct zcrypt_queue *zq, *pref_zq; 633 + struct ep11_target_dev *targets; 634 + unsigned short target_num; 635 + unsigned int weight, pref_weight; 636 + unsigned int func_code; 637 + struct ap_message ap_msg; 638 + int qid = 0, rc = -ENODEV; 458 639 459 - ep11_dev_list.targets_num = (unsigned short) xcrb->targets_num; 640 + trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); 641 + 642 + target_num = (unsigned short) xcrb->targets_num; 460 643 461 644 /* empty list indicates autoselect (all available targets) */ 462 - if (ep11_dev_list.targets_num == 0) 463 - autoselect = true; 464 - else { 465 - ep11_dev_list.targets = kcalloc((unsigned short) 466 - xcrb->targets_num, 467 - sizeof(struct ep11_target_dev), 468 - GFP_KERNEL); 469 - if (!ep11_dev_list.targets) 470 - return -ENOMEM; 645 + targets = NULL; 646 + if (target_num != 0) { 
647 + struct ep11_target_dev __user *uptr; 471 648 472 - if (copy_from_user(ep11_dev_list.targets, 473 - (struct ep11_target_dev __force __user *) 474 - xcrb->targets, xcrb->targets_num * 475 - sizeof(struct ep11_target_dev))) 476 - return -EFAULT; 649 + targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); 650 + if (!targets) { 651 + rc = -ENOMEM; 652 + goto out; 653 + } 654 + 655 + uptr = (struct ep11_target_dev __force __user *) xcrb->targets; 656 + if (copy_from_user(targets, uptr, 657 + target_num * sizeof(*targets))) { 658 + rc = -EFAULT; 659 + goto out; 660 + } 477 661 } 478 662 479 - spin_lock_bh(&zcrypt_device_lock); 480 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 481 - /* check if device is eligible */ 482 - if (!zdev->online || 483 - zdev->ops->variant != MSGTYPE06_VARIANT_EP11) 484 - continue; 663 + rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code); 664 + if (rc) 665 + goto out_free; 485 666 486 - /* check if device is selected as valid target */ 487 - if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) && 488 - !autoselect) 667 + pref_zc = NULL; 668 + pref_zq = NULL; 669 + spin_lock(&zcrypt_list_lock); 670 + for_each_zcrypt_card(zc) { 671 + /* Check for online EP11 cards */ 672 + if (!zc->online || !(zc->card->functions & 0x04000000)) 489 673 continue; 490 - 491 - zcrypt_device_get(zdev); 492 - get_device(&zdev->ap_dev->device); 493 - zdev->request_count++; 494 - __zcrypt_decrease_preference(zdev); 495 - if (try_module_get(zdev->ap_dev->drv->driver.owner)) { 496 - spin_unlock_bh(&zcrypt_device_lock); 497 - rc = zdev->ops->send_ep11_cprb(zdev, xcrb); 498 - spin_lock_bh(&zcrypt_device_lock); 499 - module_put(zdev->ap_dev->drv->driver.owner); 500 - } else { 501 - rc = -EAGAIN; 502 - } 503 - zdev->request_count--; 504 - __zcrypt_increase_preference(zdev); 505 - put_device(&zdev->ap_dev->device); 506 - zcrypt_device_put(zdev); 507 - spin_unlock_bh(&zcrypt_device_lock); 508 - return rc; 674 + /* Check for user selected EP11 card */ 
675 + if (targets && 676 + !is_desired_ep11_card(zc->card->id, target_num, targets)) 677 + continue; 678 + /* get weight index of the card device */ 679 + weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY]; 680 + if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 681 + continue; 682 + for_each_zcrypt_queue(zq, zc) { 683 + /* check if device is online and eligible */ 684 + if (!zq->online || 685 + !zq->ops->send_ep11_cprb || 686 + (targets && 687 + !is_desired_ep11_queue(zq->queue->qid, 688 + target_num, targets))) 689 + continue; 690 + if (zcrypt_queue_compare(zq, pref_zq, 691 + weight, pref_weight)) 692 + continue; 693 + pref_zc = zc; 694 + pref_zq = zq; 695 + pref_weight = weight; 696 + } 509 697 } 510 - spin_unlock_bh(&zcrypt_device_lock); 511 - return -ENODEV; 698 + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 699 + spin_unlock(&zcrypt_list_lock); 700 + 701 + if (!pref_zq) { 702 + rc = -ENODEV; 703 + goto out_free; 704 + } 705 + 706 + qid = pref_zq->queue->qid; 707 + rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg); 708 + 709 + spin_lock(&zcrypt_list_lock); 710 + zcrypt_drop_queue(pref_zc, pref_zq, weight); 711 + spin_unlock(&zcrypt_list_lock); 712 + 713 + out_free: 714 + kfree(targets); 715 + out: 716 + trace_s390_zcrypt_rep(xcrb, func_code, rc, 717 + AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 718 + return rc; 512 719 } 513 720 514 721 static long zcrypt_rng(char *buffer) 515 722 { 516 - struct zcrypt_device *zdev; 517 - int rc; 723 + struct zcrypt_card *zc, *pref_zc; 724 + struct zcrypt_queue *zq, *pref_zq; 725 + unsigned int weight, pref_weight; 726 + unsigned int func_code; 727 + struct ap_message ap_msg; 728 + unsigned int domain; 729 + int qid = 0, rc = -ENODEV; 518 730 519 - spin_lock_bh(&zcrypt_device_lock); 520 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 521 - if (!zdev->online || !zdev->ops->rng) 731 + trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); 732 + 733 + rc = get_rng_fc(&ap_msg, &func_code, &domain); 
734 + if (rc) 735 + goto out; 736 + 737 + pref_zc = NULL; 738 + pref_zq = NULL; 739 + spin_lock(&zcrypt_list_lock); 740 + for_each_zcrypt_card(zc) { 741 + /* Check for online CCA cards */ 742 + if (!zc->online || !(zc->card->functions & 0x10000000)) 522 743 continue; 523 - zcrypt_device_get(zdev); 524 - get_device(&zdev->ap_dev->device); 525 - zdev->request_count++; 526 - __zcrypt_decrease_preference(zdev); 527 - if (try_module_get(zdev->ap_dev->drv->driver.owner)) { 528 - spin_unlock_bh(&zcrypt_device_lock); 529 - rc = zdev->ops->rng(zdev, buffer); 530 - spin_lock_bh(&zcrypt_device_lock); 531 - module_put(zdev->ap_dev->drv->driver.owner); 532 - } else 533 - rc = -EAGAIN; 534 - zdev->request_count--; 535 - __zcrypt_increase_preference(zdev); 536 - put_device(&zdev->ap_dev->device); 537 - zcrypt_device_put(zdev); 538 - spin_unlock_bh(&zcrypt_device_lock); 539 - return rc; 744 + /* get weight index of the card device */ 745 + weight = zc->speed_rating[func_code]; 746 + if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 747 + continue; 748 + for_each_zcrypt_queue(zq, zc) { 749 + /* check if device is online and eligible */ 750 + if (!zq->online || !zq->ops->rng) 751 + continue; 752 + if (zcrypt_queue_compare(zq, pref_zq, 753 + weight, pref_weight)) 754 + continue; 755 + pref_zc = zc; 756 + pref_zq = zq; 757 + pref_weight = weight; 758 + } 540 759 } 541 - spin_unlock_bh(&zcrypt_device_lock); 542 - return -ENODEV; 760 + pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 761 + spin_unlock(&zcrypt_list_lock); 762 + 763 + if (!pref_zq) 764 + return -ENODEV; 765 + 766 + qid = pref_zq->queue->qid; 767 + rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); 768 + 769 + spin_lock(&zcrypt_list_lock); 770 + zcrypt_drop_queue(pref_zc, pref_zq, weight); 771 + spin_unlock(&zcrypt_list_lock); 772 + 773 + out: 774 + trace_s390_zcrypt_rep(buffer, func_code, rc, 775 + AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 776 + return rc; 543 777 } 778 + 779 + static void 
zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix) 780 + { 781 + struct zcrypt_card *zc; 782 + struct zcrypt_queue *zq; 783 + struct zcrypt_device_status *stat; 784 + 785 + memset(matrix, 0, sizeof(*matrix)); 786 + spin_lock(&zcrypt_list_lock); 787 + for_each_zcrypt_card(zc) { 788 + for_each_zcrypt_queue(zq, zc) { 789 + stat = matrix->device; 790 + stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS; 791 + stat += AP_QID_QUEUE(zq->queue->qid); 792 + stat->hwtype = zc->card->ap_dev.device_type; 793 + stat->functions = zc->card->functions >> 26; 794 + stat->qid = zq->queue->qid; 795 + stat->online = zq->online ? 0x01 : 0x00; 796 + } 797 + } 798 + spin_unlock(&zcrypt_list_lock); 799 + } 800 + EXPORT_SYMBOL(zcrypt_device_status_mask); 544 801 545 802 static void zcrypt_status_mask(char status[AP_DEVICES]) 546 803 { 547 - struct zcrypt_device *zdev; 804 + struct zcrypt_card *zc; 805 + struct zcrypt_queue *zq; 548 806 549 807 memset(status, 0, sizeof(char) * AP_DEVICES); 550 - spin_lock_bh(&zcrypt_device_lock); 551 - list_for_each_entry(zdev, &zcrypt_device_list, list) 552 - status[AP_QID_DEVICE(zdev->ap_dev->qid)] = 553 - zdev->online ? zdev->user_space_type : 0x0d; 554 - spin_unlock_bh(&zcrypt_device_lock); 808 + spin_lock(&zcrypt_list_lock); 809 + for_each_zcrypt_card(zc) { 810 + for_each_zcrypt_queue(zq, zc) { 811 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 812 + continue; 813 + status[AP_QID_CARD(zq->queue->qid)] = 814 + zc->online ? 
zc->user_space_type : 0x0d; 815 + } 816 + } 817 + spin_unlock(&zcrypt_list_lock); 555 818 } 556 819 557 820 static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES]) 558 821 { 559 - struct zcrypt_device *zdev; 822 + struct zcrypt_card *zc; 823 + struct zcrypt_queue *zq; 560 824 561 825 memset(qdepth, 0, sizeof(char) * AP_DEVICES); 562 - spin_lock_bh(&zcrypt_device_lock); 563 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 564 - spin_lock(&zdev->ap_dev->lock); 565 - qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] = 566 - zdev->ap_dev->pendingq_count + 567 - zdev->ap_dev->requestq_count; 568 - spin_unlock(&zdev->ap_dev->lock); 826 + spin_lock(&zcrypt_list_lock); 827 + for_each_zcrypt_card(zc) { 828 + for_each_zcrypt_queue(zq, zc) { 829 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 830 + continue; 831 + spin_lock(&zq->queue->lock); 832 + qdepth[AP_QID_CARD(zq->queue->qid)] = 833 + zq->queue->pendingq_count + 834 + zq->queue->requestq_count; 835 + spin_unlock(&zq->queue->lock); 836 + } 569 837 } 570 - spin_unlock_bh(&zcrypt_device_lock); 838 + spin_unlock(&zcrypt_list_lock); 571 839 } 572 840 573 841 static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES]) 574 842 { 575 - struct zcrypt_device *zdev; 843 + struct zcrypt_card *zc; 844 + struct zcrypt_queue *zq; 576 845 577 846 memset(reqcnt, 0, sizeof(int) * AP_DEVICES); 578 - spin_lock_bh(&zcrypt_device_lock); 579 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 580 - spin_lock(&zdev->ap_dev->lock); 581 - reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] = 582 - zdev->ap_dev->total_request_count; 583 - spin_unlock(&zdev->ap_dev->lock); 847 + spin_lock(&zcrypt_list_lock); 848 + for_each_zcrypt_card(zc) { 849 + for_each_zcrypt_queue(zq, zc) { 850 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 851 + continue; 852 + spin_lock(&zq->queue->lock); 853 + reqcnt[AP_QID_CARD(zq->queue->qid)] = 854 + zq->queue->total_request_count; 855 + spin_unlock(&zq->queue->lock); 856 + } 584 857 } 585 - 
spin_unlock_bh(&zcrypt_device_lock); 858 + spin_unlock(&zcrypt_list_lock); 586 859 } 587 860 588 861 static int zcrypt_pendingq_count(void) 589 862 { 590 - struct zcrypt_device *zdev; 591 - int pendingq_count = 0; 863 + struct zcrypt_card *zc; 864 + struct zcrypt_queue *zq; 865 + int pendingq_count; 592 866 593 - spin_lock_bh(&zcrypt_device_lock); 594 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 595 - spin_lock(&zdev->ap_dev->lock); 596 - pendingq_count += zdev->ap_dev->pendingq_count; 597 - spin_unlock(&zdev->ap_dev->lock); 867 + pendingq_count = 0; 868 + spin_lock(&zcrypt_list_lock); 869 + for_each_zcrypt_card(zc) { 870 + for_each_zcrypt_queue(zq, zc) { 871 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 872 + continue; 873 + spin_lock(&zq->queue->lock); 874 + pendingq_count += zq->queue->pendingq_count; 875 + spin_unlock(&zq->queue->lock); 876 + } 598 877 } 599 - spin_unlock_bh(&zcrypt_device_lock); 878 + spin_unlock(&zcrypt_list_lock); 600 879 return pendingq_count; 601 880 } 602 881 603 882 static int zcrypt_requestq_count(void) 604 883 { 605 - struct zcrypt_device *zdev; 606 - int requestq_count = 0; 884 + struct zcrypt_card *zc; 885 + struct zcrypt_queue *zq; 886 + int requestq_count; 607 887 608 - spin_lock_bh(&zcrypt_device_lock); 609 - list_for_each_entry(zdev, &zcrypt_device_list, list) { 610 - spin_lock(&zdev->ap_dev->lock); 611 - requestq_count += zdev->ap_dev->requestq_count; 612 - spin_unlock(&zdev->ap_dev->lock); 888 + requestq_count = 0; 889 + spin_lock(&zcrypt_list_lock); 890 + for_each_zcrypt_card(zc) { 891 + for_each_zcrypt_queue(zq, zc) { 892 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 893 + continue; 894 + spin_lock(&zq->queue->lock); 895 + requestq_count += zq->queue->requestq_count; 896 + spin_unlock(&zq->queue->lock); 897 + } 613 898 } 614 - spin_unlock_bh(&zcrypt_device_lock); 899 + spin_unlock(&zcrypt_list_lock); 615 900 return requestq_count; 616 901 } 617 902 618 903 static int zcrypt_count_type(int type) 
619 904 { 620 - struct zcrypt_device *zdev; 621 - int device_count = 0; 905 + struct zcrypt_card *zc; 906 + struct zcrypt_queue *zq; 907 + int device_count; 622 908 623 - spin_lock_bh(&zcrypt_device_lock); 624 - list_for_each_entry(zdev, &zcrypt_device_list, list) 625 - if (zdev->user_space_type == type) 909 + device_count = 0; 910 + spin_lock(&zcrypt_list_lock); 911 + for_each_zcrypt_card(zc) { 912 + if (zc->card->id != type) 913 + continue; 914 + for_each_zcrypt_queue(zq, zc) { 915 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 916 + continue; 626 917 device_count++; 627 - spin_unlock_bh(&zcrypt_device_lock); 918 + } 919 + } 920 + spin_unlock(&zcrypt_list_lock); 628 921 return device_count; 629 922 } 630 923 ··· 867 886 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) 868 887 return -EFAULT; 869 888 return rc; 889 + } 890 + case ZDEVICESTATUS: { 891 + struct zcrypt_device_matrix *device_status; 892 + 893 + device_status = kzalloc(sizeof(struct zcrypt_device_matrix), 894 + GFP_KERNEL); 895 + if (!device_status) 896 + return -ENOMEM; 897 + 898 + zcrypt_device_status_mask(device_status); 899 + 900 + if (copy_to_user((char __user *) arg, device_status, 901 + sizeof(struct zcrypt_device_matrix))) { 902 + kfree(device_status); 903 + return -EFAULT; 904 + } 905 + 906 + kfree(device_status); 907 + return 0; 870 908 } 871 909 case Z90STAT_STATUS_MASK: { 872 910 char status[AP_DEVICES]; ··· 1249 1249 1250 1250 static void zcrypt_disable_card(int index) 1251 1251 { 1252 - struct zcrypt_device *zdev; 1252 + struct zcrypt_card *zc; 1253 + struct zcrypt_queue *zq; 1253 1254 1254 - spin_lock_bh(&zcrypt_device_lock); 1255 - list_for_each_entry(zdev, &zcrypt_device_list, list) 1256 - if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { 1257 - zdev->online = 0; 1258 - ap_flush_queue(zdev->ap_dev); 1259 - break; 1255 + spin_lock(&zcrypt_list_lock); 1256 + for_each_zcrypt_card(zc) { 1257 + for_each_zcrypt_queue(zq, zc) { 1258 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 
1259 + continue; 1260 + zq->online = 0; 1261 + ap_flush_queue(zq->queue); 1260 1262 } 1261 - spin_unlock_bh(&zcrypt_device_lock); 1263 + } 1264 + spin_unlock(&zcrypt_list_lock); 1262 1265 } 1263 1266 1264 1267 static void zcrypt_enable_card(int index) 1265 1268 { 1266 - struct zcrypt_device *zdev; 1269 + struct zcrypt_card *zc; 1270 + struct zcrypt_queue *zq; 1267 1271 1268 - spin_lock_bh(&zcrypt_device_lock); 1269 - list_for_each_entry(zdev, &zcrypt_device_list, list) 1270 - if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { 1271 - zdev->online = 1; 1272 - break; 1272 + spin_lock(&zcrypt_list_lock); 1273 + for_each_zcrypt_card(zc) { 1274 + for_each_zcrypt_queue(zq, zc) { 1275 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 1276 + continue; 1277 + zq->online = 1; 1278 + ap_flush_queue(zq->queue); 1273 1279 } 1274 - spin_unlock_bh(&zcrypt_device_lock); 1280 + } 1281 + spin_unlock(&zcrypt_list_lock); 1275 1282 } 1276 1283 1277 1284 static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer, ··· 1376 1369 .quality = 990, 1377 1370 }; 1378 1371 1379 - static int zcrypt_rng_device_add(void) 1372 + int zcrypt_rng_device_add(void) 1380 1373 { 1381 1374 int rc = 0; 1382 1375 ··· 1406 1399 return rc; 1407 1400 } 1408 1401 1409 - static void zcrypt_rng_device_remove(void) 1402 + void zcrypt_rng_device_remove(void) 1410 1403 { 1411 1404 mutex_lock(&zcrypt_rng_mutex); 1412 1405 zcrypt_rng_device_count--; ··· 1419 1412 1420 1413 int __init zcrypt_debug_init(void) 1421 1414 { 1422 - debugfs_root = debugfs_create_dir("zcrypt", NULL); 1423 - 1424 - zcrypt_dbf_common = debug_register("zcrypt_common", 1, 1, 16); 1425 - debug_register_view(zcrypt_dbf_common, &debug_hex_ascii_view); 1426 - debug_set_level(zcrypt_dbf_common, DBF_ERR); 1427 - 1428 - zcrypt_dbf_devices = debug_register("zcrypt_devices", 1, 1, 16); 1429 - debug_register_view(zcrypt_dbf_devices, &debug_hex_ascii_view); 1430 - debug_set_level(zcrypt_dbf_devices, DBF_ERR); 1415 + zcrypt_dbf_root = 
debugfs_create_dir("zcrypt", NULL); 1416 + zcrypt_dbf_info = debug_register("zcrypt", 1, 1, 1417 + DBF_MAX_SPRINTF_ARGS * sizeof(long)); 1418 + debug_register_view(zcrypt_dbf_info, &debug_sprintf_view); 1419 + debug_set_level(zcrypt_dbf_info, DBF_ERR); 1431 1420 1432 1421 return 0; 1433 1422 } 1434 1423 1435 1424 void zcrypt_debug_exit(void) 1436 1425 { 1437 - debugfs_remove(debugfs_root); 1438 - debug_unregister(zcrypt_dbf_common); 1439 - debug_unregister(zcrypt_dbf_devices); 1426 + debugfs_remove(zcrypt_dbf_root); 1427 + debug_unregister(zcrypt_dbf_info); 1440 1428 } 1441 1429 1442 1430 /** ··· 1455 1453 goto out; 1456 1454 1457 1455 /* Set up the proc file system */ 1458 - zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops); 1456 + zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, 1457 + &zcrypt_proc_fops); 1459 1458 if (!zcrypt_entry) { 1460 1459 rc = -ENOMEM; 1461 1460 goto out_misc; 1462 1461 } 1463 1462 1463 + zcrypt_msgtype6_init(); 1464 + zcrypt_msgtype50_init(); 1464 1465 return 0; 1465 1466 1466 1467 out_misc: ··· 1477 1472 * 1478 1473 * The module termination code. 1479 1474 */ 1480 - void zcrypt_api_exit(void) 1475 + void __exit zcrypt_api_exit(void) 1481 1476 { 1482 1477 remove_proc_entry("driver/z90crypt", NULL); 1483 1478 misc_deregister(&zcrypt_misc_device); 1479 + zcrypt_msgtype6_exit(); 1480 + zcrypt_msgtype50_exit(); 1484 1481 zcrypt_debug_exit(); 1485 1482 } 1486 1483
+76 -23
drivers/s390/crypto/zcrypt_api.h
··· 84 84 */ 85 85 #define ZCRYPT_RNG_BUFFER_SIZE 4096 86 86 87 - struct zcrypt_device; 87 + /* 88 + * Identifier for Crypto Request Performance Index 89 + */ 90 + enum crypto_ops { 91 + MEX_1K, 92 + MEX_2K, 93 + MEX_4K, 94 + CRT_1K, 95 + CRT_2K, 96 + CRT_4K, 97 + HWRNG, 98 + SECKEY, 99 + NUM_OPS 100 + }; 101 + 102 + struct zcrypt_queue; 88 103 89 104 struct zcrypt_ops { 90 - long (*rsa_modexpo)(struct zcrypt_device *, struct ica_rsa_modexpo *); 91 - long (*rsa_modexpo_crt)(struct zcrypt_device *, 105 + long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *); 106 + long (*rsa_modexpo_crt)(struct zcrypt_queue *, 92 107 struct ica_rsa_modexpo_crt *); 93 - long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *); 94 - long (*send_ep11_cprb)(struct zcrypt_device *, struct ep11_urb *); 95 - long (*rng)(struct zcrypt_device *, char *); 108 + long (*send_cprb)(struct zcrypt_queue *, struct ica_xcRB *, 109 + struct ap_message *); 110 + long (*send_ep11_cprb)(struct zcrypt_queue *, struct ep11_urb *, 111 + struct ap_message *); 112 + long (*rng)(struct zcrypt_queue *, char *, struct ap_message *); 96 113 struct list_head list; /* zcrypt ops list. */ 97 114 struct module *owner; 98 115 int variant; 99 116 char name[128]; 100 117 }; 101 118 102 - struct zcrypt_device { 119 + struct zcrypt_card { 103 120 struct list_head list; /* Device list. */ 104 - spinlock_t lock; /* Per device lock. */ 121 + struct list_head zqueues; /* List of zcrypt queues */ 105 122 struct kref refcount; /* device refcounting */ 106 - struct ap_device *ap_dev; /* The "real" ap device. */ 107 - struct zcrypt_ops *ops; /* Crypto operations. */ 123 + struct ap_card *card; /* The "real" ap card device. */ 108 124 int online; /* User online/offline */ 109 125 110 126 int user_space_type; /* User space device id. */ 111 127 char *type_string; /* User space device name. */ 112 128 int min_mod_size; /* Min number of bits. */ 113 129 int max_mod_size; /* Max number of bits. 
*/ 114 - int short_crt; /* Card has crt length restriction. */ 115 - int speed_rating; /* Speed of the crypto device. */ 130 + int max_exp_bit_length; 131 + int speed_rating[NUM_OPS]; /* Speed idx of crypto ops. */ 132 + atomic_t load; /* Utilization of the crypto device */ 133 + 134 + int request_count; /* # current requests. */ 135 + }; 136 + 137 + struct zcrypt_queue { 138 + struct list_head list; /* Device list. */ 139 + struct kref refcount; /* device refcounting */ 140 + struct zcrypt_card *zcard; 141 + struct zcrypt_ops *ops; /* Crypto operations. */ 142 + struct ap_queue *queue; /* The "real" ap queue device. */ 143 + int online; /* User online/offline */ 144 + 145 + atomic_t load; /* Utilization of the crypto device */ 116 146 117 147 int request_count; /* # current requests. */ 118 148 119 149 struct ap_message reply; /* Per-device reply structure. */ 120 - int max_exp_bit_length; 121 - 122 - debug_info_t *dbf_area; /* debugging */ 123 150 }; 124 151 125 152 /* transport layer rescanning */ 126 153 extern atomic_t zcrypt_rescan_req; 127 154 128 - struct zcrypt_device *zcrypt_device_alloc(size_t); 129 - void zcrypt_device_free(struct zcrypt_device *); 130 - void zcrypt_device_get(struct zcrypt_device *); 131 - int zcrypt_device_put(struct zcrypt_device *); 132 - int zcrypt_device_register(struct zcrypt_device *); 133 - void zcrypt_device_unregister(struct zcrypt_device *); 155 + extern spinlock_t zcrypt_list_lock; 156 + extern int zcrypt_device_count; 157 + extern struct list_head zcrypt_card_list; 158 + 159 + #define for_each_zcrypt_card(_zc) \ 160 + list_for_each_entry(_zc, &zcrypt_card_list, list) 161 + 162 + #define for_each_zcrypt_queue(_zq, _zc) \ 163 + list_for_each_entry(_zq, &(_zc)->zqueues, list) 164 + 165 + struct zcrypt_card *zcrypt_card_alloc(void); 166 + void zcrypt_card_free(struct zcrypt_card *); 167 + void zcrypt_card_get(struct zcrypt_card *); 168 + int zcrypt_card_put(struct zcrypt_card *); 169 + int zcrypt_card_register(struct 
zcrypt_card *); 170 + void zcrypt_card_unregister(struct zcrypt_card *); 171 + struct zcrypt_card *zcrypt_card_get_best(unsigned int *, 172 + unsigned int, unsigned int); 173 + void zcrypt_card_put_best(struct zcrypt_card *, unsigned int); 174 + 175 + struct zcrypt_queue *zcrypt_queue_alloc(size_t); 176 + void zcrypt_queue_free(struct zcrypt_queue *); 177 + void zcrypt_queue_get(struct zcrypt_queue *); 178 + int zcrypt_queue_put(struct zcrypt_queue *); 179 + int zcrypt_queue_register(struct zcrypt_queue *); 180 + void zcrypt_queue_unregister(struct zcrypt_queue *); 181 + void zcrypt_queue_force_online(struct zcrypt_queue *, int); 182 + struct zcrypt_queue *zcrypt_queue_get_best(unsigned int, unsigned int); 183 + void zcrypt_queue_put_best(struct zcrypt_queue *, unsigned int); 184 + 185 + int zcrypt_rng_device_add(void); 186 + void zcrypt_rng_device_remove(void); 187 + 134 188 void zcrypt_msgtype_register(struct zcrypt_ops *); 135 189 void zcrypt_msgtype_unregister(struct zcrypt_ops *); 136 - struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *, int); 137 - void zcrypt_msgtype_release(struct zcrypt_ops *); 190 + struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int); 138 191 int zcrypt_api_init(void); 139 192 void zcrypt_api_exit(void); 140 193
+187
drivers/s390/crypto/zcrypt_card.c
··· 1 + /* 2 + * zcrypt 2.1.0 3 + * 4 + * Copyright IBM Corp. 2001, 2012 5 + * Author(s): Robert Burroughs 6 + * Eric Rossman (edrossma@us.ibm.com) 7 + * Cornelia Huck <cornelia.huck@de.ibm.com> 8 + * 9 + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 10 + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 11 + * Ralph Wuerthner <rwuerthn@de.ibm.com> 12 + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 13 + * 14 + * This program is free software; you can redistribute it and/or modify 15 + * it under the terms of the GNU General Public License as published by 16 + * the Free Software Foundation; either version 2, or (at your option) 17 + * any later version. 18 + * 19 + * This program is distributed in the hope that it will be useful, 20 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 + * GNU General Public License for more details. 23 + */ 24 + 25 + #include <linux/module.h> 26 + #include <linux/init.h> 27 + #include <linux/interrupt.h> 28 + #include <linux/miscdevice.h> 29 + #include <linux/fs.h> 30 + #include <linux/proc_fs.h> 31 + #include <linux/seq_file.h> 32 + #include <linux/compat.h> 33 + #include <linux/slab.h> 34 + #include <linux/atomic.h> 35 + #include <linux/uaccess.h> 36 + #include <linux/hw_random.h> 37 + #include <linux/debugfs.h> 38 + #include <asm/debug.h> 39 + 40 + #include "zcrypt_debug.h" 41 + #include "zcrypt_api.h" 42 + 43 + #include "zcrypt_msgtype6.h" 44 + #include "zcrypt_msgtype50.h" 45 + 46 + /* 47 + * Device attributes common for all crypto card devices. 
48 + */ 49 + 50 + static ssize_t zcrypt_card_type_show(struct device *dev, 51 + struct device_attribute *attr, char *buf) 52 + { 53 + struct zcrypt_card *zc = to_ap_card(dev)->private; 54 + 55 + return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string); 56 + } 57 + 58 + static DEVICE_ATTR(type, 0444, zcrypt_card_type_show, NULL); 59 + 60 + static ssize_t zcrypt_card_online_show(struct device *dev, 61 + struct device_attribute *attr, 62 + char *buf) 63 + { 64 + struct zcrypt_card *zc = to_ap_card(dev)->private; 65 + 66 + return snprintf(buf, PAGE_SIZE, "%d\n", zc->online); 67 + } 68 + 69 + static ssize_t zcrypt_card_online_store(struct device *dev, 70 + struct device_attribute *attr, 71 + const char *buf, size_t count) 72 + { 73 + struct zcrypt_card *zc = to_ap_card(dev)->private; 74 + struct zcrypt_queue *zq; 75 + int online, id; 76 + 77 + if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) 78 + return -EINVAL; 79 + 80 + zc->online = online; 81 + id = zc->card->id; 82 + 83 + ZCRYPT_DBF(DBF_INFO, "card=%02x online=%d\n", id, online); 84 + 85 + spin_lock(&zcrypt_list_lock); 86 + list_for_each_entry(zq, &zc->zqueues, list) 87 + zcrypt_queue_force_online(zq, online); 88 + spin_unlock(&zcrypt_list_lock); 89 + return count; 90 + } 91 + 92 + static DEVICE_ATTR(online, 0644, zcrypt_card_online_show, 93 + zcrypt_card_online_store); 94 + 95 + static struct attribute *zcrypt_card_attrs[] = { 96 + &dev_attr_type.attr, 97 + &dev_attr_online.attr, 98 + NULL, 99 + }; 100 + 101 + static struct attribute_group zcrypt_card_attr_group = { 102 + .attrs = zcrypt_card_attrs, 103 + }; 104 + 105 + struct zcrypt_card *zcrypt_card_alloc(void) 106 + { 107 + struct zcrypt_card *zc; 108 + 109 + zc = kzalloc(sizeof(struct zcrypt_card), GFP_KERNEL); 110 + if (!zc) 111 + return NULL; 112 + INIT_LIST_HEAD(&zc->list); 113 + INIT_LIST_HEAD(&zc->zqueues); 114 + kref_init(&zc->refcount); 115 + return zc; 116 + } 117 + EXPORT_SYMBOL(zcrypt_card_alloc); 118 + 119 + void 
zcrypt_card_free(struct zcrypt_card *zc) 120 + { 121 + kfree(zc); 122 + } 123 + EXPORT_SYMBOL(zcrypt_card_free); 124 + 125 + static void zcrypt_card_release(struct kref *kref) 126 + { 127 + struct zcrypt_card *zdev = 128 + container_of(kref, struct zcrypt_card, refcount); 129 + zcrypt_card_free(zdev); 130 + } 131 + 132 + void zcrypt_card_get(struct zcrypt_card *zc) 133 + { 134 + kref_get(&zc->refcount); 135 + } 136 + EXPORT_SYMBOL(zcrypt_card_get); 137 + 138 + int zcrypt_card_put(struct zcrypt_card *zc) 139 + { 140 + return kref_put(&zc->refcount, zcrypt_card_release); 141 + } 142 + EXPORT_SYMBOL(zcrypt_card_put); 143 + 144 + /** 145 + * zcrypt_card_register() - Register a crypto card device. 146 + * @zc: Pointer to a crypto card device 147 + * 148 + * Register a crypto card device. Returns 0 if successful. 149 + */ 150 + int zcrypt_card_register(struct zcrypt_card *zc) 151 + { 152 + int rc; 153 + 154 + rc = sysfs_create_group(&zc->card->ap_dev.device.kobj, 155 + &zcrypt_card_attr_group); 156 + if (rc) 157 + return rc; 158 + 159 + spin_lock(&zcrypt_list_lock); 160 + list_add_tail(&zc->list, &zcrypt_card_list); 161 + spin_unlock(&zcrypt_list_lock); 162 + 163 + zc->online = 1; 164 + 165 + ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id); 166 + 167 + return rc; 168 + } 169 + EXPORT_SYMBOL(zcrypt_card_register); 170 + 171 + /** 172 + * zcrypt_card_unregister(): Unregister a crypto card device. 173 + * @zc: Pointer to crypto card device 174 + * 175 + * Unregister a crypto card device. 176 + */ 177 + void zcrypt_card_unregister(struct zcrypt_card *zc) 178 + { 179 + ZCRYPT_DBF(DBF_INFO, "card=%02x unregister\n", zc->card->id); 180 + 181 + spin_lock(&zcrypt_list_lock); 182 + list_del_init(&zc->list); 183 + spin_unlock(&zcrypt_list_lock); 184 + sysfs_remove_group(&zc->card->ap_dev.device.kobj, 185 + &zcrypt_card_attr_group); 186 + } 187 + EXPORT_SYMBOL(zcrypt_card_unregister);
+159 -73
drivers/s390/crypto/zcrypt_cex2a.c
··· 31 31 #include <linux/err.h> 32 32 #include <linux/atomic.h> 33 33 #include <asm/uaccess.h> 34 + #include <linux/mod_devicetable.h> 34 35 35 36 #include "ap_bus.h" 36 37 #include "zcrypt_api.h" ··· 44 43 #define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE 45 44 #define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */ 46 45 47 - #define CEX2A_SPEED_RATING 970 48 - #define CEX3A_SPEED_RATING 900 /* Fixme: Needs finetuning */ 49 - 50 46 #define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */ 51 47 #define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */ 52 48 ··· 55 57 #define CEX2A_CLEANUP_TIME (15*HZ) 56 58 #define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME 57 59 58 - static struct ap_device_id zcrypt_cex2a_ids[] = { 59 - { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) }, 60 - { AP_DEVICE(AP_DEVICE_TYPE_CEX3A) }, 61 - { /* end of list */ }, 62 - }; 63 - 64 - MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids); 65 60 MODULE_AUTHOR("IBM Corporation"); 66 61 MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \ 67 62 "Copyright IBM Corp. 
2001, 2012"); 68 63 MODULE_LICENSE("GPL"); 69 64 70 - static int zcrypt_cex2a_probe(struct ap_device *ap_dev); 71 - static void zcrypt_cex2a_remove(struct ap_device *ap_dev); 72 - 73 - static struct ap_driver zcrypt_cex2a_driver = { 74 - .probe = zcrypt_cex2a_probe, 75 - .remove = zcrypt_cex2a_remove, 76 - .ids = zcrypt_cex2a_ids, 77 - .request_timeout = CEX2A_CLEANUP_TIME, 65 + static struct ap_device_id zcrypt_cex2a_card_ids[] = { 66 + { .dev_type = AP_DEVICE_TYPE_CEX2A, 67 + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 68 + { .dev_type = AP_DEVICE_TYPE_CEX3A, 69 + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 70 + { /* end of list */ }, 78 71 }; 79 72 73 + MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_card_ids); 74 + 75 + static struct ap_device_id zcrypt_cex2a_queue_ids[] = { 76 + { .dev_type = AP_DEVICE_TYPE_CEX2A, 77 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 78 + { .dev_type = AP_DEVICE_TYPE_CEX3A, 79 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 80 + { /* end of list */ }, 81 + }; 82 + 83 + MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids); 84 + 80 85 /** 81 - * Probe function for CEX2A cards. It always accepts the AP device 82 - * since the bus_match already checked the hardware type. 86 + * Probe function for CEX2A card devices. It always accepts the AP device 87 + * since the bus_match already checked the card type. 83 88 * @ap_dev: pointer to the AP device. 
84 89 */ 85 - static int zcrypt_cex2a_probe(struct ap_device *ap_dev) 90 + static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev) 86 91 { 87 - struct zcrypt_device *zdev = NULL; 92 + /* 93 + * Normalized speed ratings per crypto adapter 94 + * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY 95 + */ 96 + static const int CEX2A_SPEED_IDX[] = { 97 + 800, 1000, 2000, 900, 1200, 2400, 0, 0}; 98 + static const int CEX3A_SPEED_IDX[] = { 99 + 400, 500, 1000, 450, 550, 1200, 0, 0}; 100 + 101 + struct ap_card *ac = to_ap_card(&ap_dev->device); 102 + struct zcrypt_card *zc; 88 103 int rc = 0; 89 104 90 - switch (ap_dev->device_type) { 91 - case AP_DEVICE_TYPE_CEX2A: 92 - zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE); 93 - if (!zdev) 94 - return -ENOMEM; 95 - zdev->user_space_type = ZCRYPT_CEX2A; 96 - zdev->type_string = "CEX2A"; 97 - zdev->min_mod_size = CEX2A_MIN_MOD_SIZE; 98 - zdev->max_mod_size = CEX2A_MAX_MOD_SIZE; 99 - zdev->short_crt = 1; 100 - zdev->speed_rating = CEX2A_SPEED_RATING; 101 - zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; 102 - break; 103 - case AP_DEVICE_TYPE_CEX3A: 104 - zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE); 105 - if (!zdev) 106 - return -ENOMEM; 107 - zdev->user_space_type = ZCRYPT_CEX3A; 108 - zdev->type_string = "CEX3A"; 109 - zdev->min_mod_size = CEX2A_MIN_MOD_SIZE; 110 - zdev->max_mod_size = CEX2A_MAX_MOD_SIZE; 111 - zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; 112 - if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) && 113 - ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) { 114 - zdev->max_mod_size = CEX3A_MAX_MOD_SIZE; 115 - zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE; 105 + zc = zcrypt_card_alloc(); 106 + if (!zc) 107 + return -ENOMEM; 108 + zc->card = ac; 109 + ac->private = zc; 110 + 111 + if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) { 112 + zc->min_mod_size = CEX2A_MIN_MOD_SIZE; 113 + zc->max_mod_size = CEX2A_MAX_MOD_SIZE; 114 + memcpy(zc->speed_rating, CEX2A_SPEED_IDX, 115 + 
sizeof(CEX2A_SPEED_IDX)); 116 + zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; 117 + zc->type_string = "CEX2A"; 118 + zc->user_space_type = ZCRYPT_CEX2A; 119 + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX3A) { 120 + zc->min_mod_size = CEX2A_MIN_MOD_SIZE; 121 + zc->max_mod_size = CEX2A_MAX_MOD_SIZE; 122 + zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE; 123 + if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) && 124 + ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) { 125 + zc->max_mod_size = CEX3A_MAX_MOD_SIZE; 126 + zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE; 116 127 } 117 - zdev->short_crt = 1; 118 - zdev->speed_rating = CEX3A_SPEED_RATING; 119 - break; 120 - } 121 - if (!zdev) 128 + memcpy(zc->speed_rating, CEX3A_SPEED_IDX, 129 + sizeof(CEX3A_SPEED_IDX)); 130 + zc->type_string = "CEX3A"; 131 + zc->user_space_type = ZCRYPT_CEX3A; 132 + } else { 133 + zcrypt_card_free(zc); 122 134 return -ENODEV; 123 - zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME, 124 - MSGTYPE50_VARIANT_DEFAULT); 125 - zdev->ap_dev = ap_dev; 126 - zdev->online = 1; 127 - ap_device_init_reply(ap_dev, &zdev->reply); 128 - ap_dev->private = zdev; 129 - rc = zcrypt_device_register(zdev); 130 - if (rc) { 131 - ap_dev->private = NULL; 132 - zcrypt_msgtype_release(zdev->ops); 133 - zcrypt_device_free(zdev); 134 135 } 136 + zc->online = 1; 137 + 138 + rc = zcrypt_card_register(zc); 139 + if (rc) { 140 + ac->private = NULL; 141 + zcrypt_card_free(zc); 142 + } 143 + 135 144 return rc; 136 145 } 137 146 138 147 /** 139 - * This is called to remove the extended CEX2A driver information 140 - * if an AP device is removed. 148 + * This is called to remove the CEX2A card driver information 149 + * if an AP card device is removed. 
141 150 */ 142 - static void zcrypt_cex2a_remove(struct ap_device *ap_dev) 151 + static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev) 143 152 { 144 - struct zcrypt_device *zdev = ap_dev->private; 145 - struct zcrypt_ops *zops = zdev->ops; 153 + struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private; 146 154 147 - zcrypt_device_unregister(zdev); 148 - zcrypt_msgtype_release(zops); 155 + if (zc) 156 + zcrypt_card_unregister(zc); 149 157 } 158 + 159 + static struct ap_driver zcrypt_cex2a_card_driver = { 160 + .probe = zcrypt_cex2a_card_probe, 161 + .remove = zcrypt_cex2a_card_remove, 162 + .ids = zcrypt_cex2a_card_ids, 163 + }; 164 + 165 + /** 166 + * Probe function for CEX2A queue devices. It always accepts the AP device 167 + * since the bus_match already checked the queue type. 168 + * @ap_dev: pointer to the AP device. 169 + */ 170 + static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev) 171 + { 172 + struct ap_queue *aq = to_ap_queue(&ap_dev->device); 173 + struct zcrypt_queue *zq = NULL; 174 + int rc; 175 + 176 + switch (ap_dev->device_type) { 177 + case AP_DEVICE_TYPE_CEX2A: 178 + zq = zcrypt_queue_alloc(CEX2A_MAX_RESPONSE_SIZE); 179 + if (!zq) 180 + return -ENOMEM; 181 + break; 182 + case AP_DEVICE_TYPE_CEX3A: 183 + zq = zcrypt_queue_alloc(CEX3A_MAX_RESPONSE_SIZE); 184 + if (!zq) 185 + return -ENOMEM; 186 + break; 187 + } 188 + if (!zq) 189 + return -ENODEV; 190 + zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT); 191 + zq->queue = aq; 192 + zq->online = 1; 193 + atomic_set(&zq->load, 0); 194 + ap_queue_init_reply(aq, &zq->reply); 195 + aq->request_timeout = CEX2A_CLEANUP_TIME, 196 + aq->private = zq; 197 + rc = zcrypt_queue_register(zq); 198 + if (rc) { 199 + aq->private = NULL; 200 + zcrypt_queue_free(zq); 201 + } 202 + 203 + return rc; 204 + } 205 + 206 + /** 207 + * This is called to remove the CEX2A queue driver information 208 + * if an AP queue device is removed. 
209 + */ 210 + static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev) 211 + { 212 + struct ap_queue *aq = to_ap_queue(&ap_dev->device); 213 + struct zcrypt_queue *zq = aq->private; 214 + 215 + ap_queue_remove(aq); 216 + if (zq) 217 + zcrypt_queue_unregister(zq); 218 + } 219 + 220 + static struct ap_driver zcrypt_cex2a_queue_driver = { 221 + .probe = zcrypt_cex2a_queue_probe, 222 + .remove = zcrypt_cex2a_queue_remove, 223 + .suspend = ap_queue_suspend, 224 + .resume = ap_queue_resume, 225 + .ids = zcrypt_cex2a_queue_ids, 226 + }; 150 227 151 228 int __init zcrypt_cex2a_init(void) 152 229 { 153 - return ap_driver_register(&zcrypt_cex2a_driver, THIS_MODULE, "cex2a"); 230 + int rc; 231 + 232 + rc = ap_driver_register(&zcrypt_cex2a_card_driver, 233 + THIS_MODULE, "cex2acard"); 234 + if (rc) 235 + return rc; 236 + 237 + rc = ap_driver_register(&zcrypt_cex2a_queue_driver, 238 + THIS_MODULE, "cex2aqueue"); 239 + if (rc) 240 + ap_driver_unregister(&zcrypt_cex2a_card_driver); 241 + 242 + return rc; 154 243 } 155 244 156 245 void __exit zcrypt_cex2a_exit(void) 157 246 { 158 - ap_driver_unregister(&zcrypt_cex2a_driver); 247 + ap_driver_unregister(&zcrypt_cex2a_queue_driver); 248 + ap_driver_unregister(&zcrypt_cex2a_card_driver); 159 249 } 160 250 161 251 module_init(zcrypt_cex2a_init);
+209 -116
drivers/s390/crypto/zcrypt_cex4.c
··· 9 9 #include <linux/err.h> 10 10 #include <linux/atomic.h> 11 11 #include <linux/uaccess.h> 12 + #include <linux/mod_devicetable.h> 12 13 13 14 #include "ap_bus.h" 14 15 #include "zcrypt_api.h" ··· 25 24 #define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */ 26 25 #define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */ 27 26 28 - #define CEX4A_SPEED_RATING 900 /* TODO new card, new speed rating */ 29 - #define CEX4C_SPEED_RATING 6500 /* TODO new card, new speed rating */ 30 - #define CEX4P_SPEED_RATING 7000 /* TODO new card, new speed rating */ 31 - #define CEX5A_SPEED_RATING 450 /* TODO new card, new speed rating */ 32 - #define CEX5C_SPEED_RATING 3250 /* TODO new card, new speed rating */ 33 - #define CEX5P_SPEED_RATING 3500 /* TODO new card, new speed rating */ 34 - 35 27 #define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE 36 28 #define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE 37 29 ··· 35 41 */ 36 42 #define CEX4_CLEANUP_TIME (900*HZ) 37 43 38 - static struct ap_device_id zcrypt_cex4_ids[] = { 39 - { AP_DEVICE(AP_DEVICE_TYPE_CEX4) }, 40 - { AP_DEVICE(AP_DEVICE_TYPE_CEX5) }, 41 - { /* end of list */ }, 42 - }; 43 - 44 - MODULE_DEVICE_TABLE(ap, zcrypt_cex4_ids); 45 44 MODULE_AUTHOR("IBM Corporation"); 46 45 MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \ 47 46 "Copyright IBM Corp. 
2012"); 48 47 MODULE_LICENSE("GPL"); 49 48 50 - static int zcrypt_cex4_probe(struct ap_device *ap_dev); 51 - static void zcrypt_cex4_remove(struct ap_device *ap_dev); 52 - 53 - static struct ap_driver zcrypt_cex4_driver = { 54 - .probe = zcrypt_cex4_probe, 55 - .remove = zcrypt_cex4_remove, 56 - .ids = zcrypt_cex4_ids, 57 - .request_timeout = CEX4_CLEANUP_TIME, 49 + static struct ap_device_id zcrypt_cex4_card_ids[] = { 50 + { .dev_type = AP_DEVICE_TYPE_CEX4, 51 + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 52 + { .dev_type = AP_DEVICE_TYPE_CEX5, 53 + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 54 + { /* end of list */ }, 58 55 }; 59 56 57 + MODULE_DEVICE_TABLE(ap, zcrypt_cex4_card_ids); 58 + 59 + static struct ap_device_id zcrypt_cex4_queue_ids[] = { 60 + { .dev_type = AP_DEVICE_TYPE_CEX4, 61 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 62 + { .dev_type = AP_DEVICE_TYPE_CEX5, 63 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 64 + { /* end of list */ }, 65 + }; 66 + 67 + MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids); 68 + 60 69 /** 61 - * Probe function for CEX4 cards. It always accepts the AP device 70 + * Probe function for CEX4 card device. It always accepts the AP device 62 71 * since the bus_match already checked the hardware type. 63 72 * @ap_dev: pointer to the AP device. 
64 73 */ 65 - static int zcrypt_cex4_probe(struct ap_device *ap_dev) 74 + static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) 66 75 { 67 - struct zcrypt_device *zdev = NULL; 76 + /* 77 + * Normalized speed ratings per crypto adapter 78 + * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY 79 + */ 80 + static const int CEX4A_SPEED_IDX[] = { 81 + 5, 6, 59, 20, 115, 581, 0, 0}; 82 + static const int CEX5A_SPEED_IDX[] = { 83 + 3, 3, 6, 8, 32, 218, 0, 0}; 84 + static const int CEX4C_SPEED_IDX[] = { 85 + 24, 25, 82, 41, 138, 1111, 79, 8}; 86 + static const int CEX5C_SPEED_IDX[] = { 87 + 10, 14, 23, 17, 45, 242, 63, 4}; 88 + static const int CEX4P_SPEED_IDX[] = { 89 + 142, 198, 1852, 203, 331, 1563, 0, 8}; 90 + static const int CEX5P_SPEED_IDX[] = { 91 + 49, 67, 131, 52, 85, 287, 0, 4}; 92 + 93 + struct ap_card *ac = to_ap_card(&ap_dev->device); 94 + struct zcrypt_card *zc; 68 95 int rc = 0; 69 96 70 - switch (ap_dev->device_type) { 71 - case AP_DEVICE_TYPE_CEX4: 72 - case AP_DEVICE_TYPE_CEX5: 73 - if (ap_test_bit(&ap_dev->functions, AP_FUNC_ACCEL)) { 74 - zdev = zcrypt_device_alloc(CEX4A_MAX_MESSAGE_SIZE); 75 - if (!zdev) 76 - return -ENOMEM; 77 - if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) { 78 - zdev->type_string = "CEX4A"; 79 - zdev->speed_rating = CEX4A_SPEED_RATING; 80 - } else { 81 - zdev->type_string = "CEX5A"; 82 - zdev->speed_rating = CEX5A_SPEED_RATING; 83 - } 84 - zdev->user_space_type = ZCRYPT_CEX3A; 85 - zdev->min_mod_size = CEX4A_MIN_MOD_SIZE; 86 - if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) && 87 - ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) { 88 - zdev->max_mod_size = 89 - CEX4A_MAX_MOD_SIZE_4K; 90 - zdev->max_exp_bit_length = 91 - CEX4A_MAX_MOD_SIZE_4K; 92 - } else { 93 - zdev->max_mod_size = 94 - CEX4A_MAX_MOD_SIZE_2K; 95 - zdev->max_exp_bit_length = 96 - CEX4A_MAX_MOD_SIZE_2K; 97 - } 98 - zdev->short_crt = 1; 99 - zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME, 100 - MSGTYPE50_VARIANT_DEFAULT); 101 - } else if 
(ap_test_bit(&ap_dev->functions, AP_FUNC_COPRO)) { 102 - zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE); 103 - if (!zdev) 104 - return -ENOMEM; 105 - if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) { 106 - zdev->type_string = "CEX4C"; 107 - zdev->speed_rating = CEX4C_SPEED_RATING; 108 - } else { 109 - zdev->type_string = "CEX5C"; 110 - zdev->speed_rating = CEX5C_SPEED_RATING; 111 - } 112 - zdev->user_space_type = ZCRYPT_CEX3C; 113 - zdev->min_mod_size = CEX4C_MIN_MOD_SIZE; 114 - zdev->max_mod_size = CEX4C_MAX_MOD_SIZE; 115 - zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; 116 - zdev->short_crt = 0; 117 - zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, 118 - MSGTYPE06_VARIANT_DEFAULT); 119 - } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_EP11)) { 120 - zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE); 121 - if (!zdev) 122 - return -ENOMEM; 123 - if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) { 124 - zdev->type_string = "CEX4P"; 125 - zdev->speed_rating = CEX4P_SPEED_RATING; 126 - } else { 127 - zdev->type_string = "CEX5P"; 128 - zdev->speed_rating = CEX5P_SPEED_RATING; 129 - } 130 - zdev->user_space_type = ZCRYPT_CEX4; 131 - zdev->min_mod_size = CEX4C_MIN_MOD_SIZE; 132 - zdev->max_mod_size = CEX4C_MAX_MOD_SIZE; 133 - zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; 134 - zdev->short_crt = 0; 135 - zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, 136 - MSGTYPE06_VARIANT_EP11); 97 + zc = zcrypt_card_alloc(); 98 + if (!zc) 99 + return -ENOMEM; 100 + zc->card = ac; 101 + ac->private = zc; 102 + if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) { 103 + if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { 104 + zc->type_string = "CEX4A"; 105 + zc->user_space_type = ZCRYPT_CEX4; 106 + memcpy(zc->speed_rating, CEX4A_SPEED_IDX, 107 + sizeof(CEX4A_SPEED_IDX)); 108 + } else { 109 + zc->type_string = "CEX5A"; 110 + zc->user_space_type = ZCRYPT_CEX5; 111 + memcpy(zc->speed_rating, CEX5A_SPEED_IDX, 112 + sizeof(CEX5A_SPEED_IDX)); 137 113 } 138 - break; 139 - } 140 - if 
(!zdev) 114 + zc->min_mod_size = CEX4A_MIN_MOD_SIZE; 115 + if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) && 116 + ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) { 117 + zc->max_mod_size = CEX4A_MAX_MOD_SIZE_4K; 118 + zc->max_exp_bit_length = 119 + CEX4A_MAX_MOD_SIZE_4K; 120 + } else { 121 + zc->max_mod_size = CEX4A_MAX_MOD_SIZE_2K; 122 + zc->max_exp_bit_length = 123 + CEX4A_MAX_MOD_SIZE_2K; 124 + } 125 + } else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) { 126 + if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { 127 + zc->type_string = "CEX4C"; 128 + /* wrong user space type, must be CEX4 129 + * just keep it for cca compatibility 130 + */ 131 + zc->user_space_type = ZCRYPT_CEX3C; 132 + memcpy(zc->speed_rating, CEX4C_SPEED_IDX, 133 + sizeof(CEX4C_SPEED_IDX)); 134 + } else { 135 + zc->type_string = "CEX5C"; 136 + /* wrong user space type, must be CEX5 137 + * just keep it for cca compatibility 138 + */ 139 + zc->user_space_type = ZCRYPT_CEX3C; 140 + memcpy(zc->speed_rating, CEX5C_SPEED_IDX, 141 + sizeof(CEX5C_SPEED_IDX)); 142 + } 143 + zc->min_mod_size = CEX4C_MIN_MOD_SIZE; 144 + zc->max_mod_size = CEX4C_MAX_MOD_SIZE; 145 + zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; 146 + } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) { 147 + if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) { 148 + zc->type_string = "CEX4P"; 149 + zc->user_space_type = ZCRYPT_CEX4; 150 + memcpy(zc->speed_rating, CEX4P_SPEED_IDX, 151 + sizeof(CEX4P_SPEED_IDX)); 152 + } else { 153 + zc->type_string = "CEX5P"; 154 + zc->user_space_type = ZCRYPT_CEX5; 155 + memcpy(zc->speed_rating, CEX5P_SPEED_IDX, 156 + sizeof(CEX5P_SPEED_IDX)); 157 + } 158 + zc->min_mod_size = CEX4C_MIN_MOD_SIZE; 159 + zc->max_mod_size = CEX4C_MAX_MOD_SIZE; 160 + zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE; 161 + } else { 162 + zcrypt_card_free(zc); 141 163 return -ENODEV; 142 - zdev->ap_dev = ap_dev; 143 - zdev->online = 1; 144 - ap_device_init_reply(ap_dev, &zdev->reply); 145 - ap_dev->private = zdev; 146 - rc = 
zcrypt_device_register(zdev); 147 - if (rc) { 148 - zcrypt_msgtype_release(zdev->ops); 149 - ap_dev->private = NULL; 150 - zcrypt_device_free(zdev); 151 164 } 165 + zc->online = 1; 166 + 167 + rc = zcrypt_card_register(zc); 168 + if (rc) { 169 + ac->private = NULL; 170 + zcrypt_card_free(zc); 171 + } 172 + 152 173 return rc; 153 174 } 154 175 155 176 /** 156 - * This is called to remove the extended CEX4 driver information 157 - * if an AP device is removed. 177 + * This is called to remove the CEX4 card driver information 178 + * if an AP card device is removed. 158 179 */ 159 - static void zcrypt_cex4_remove(struct ap_device *ap_dev) 180 + static void zcrypt_cex4_card_remove(struct ap_device *ap_dev) 160 181 { 161 - struct zcrypt_device *zdev = ap_dev->private; 162 - struct zcrypt_ops *zops; 182 + struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private; 163 183 164 - if (zdev) { 165 - zops = zdev->ops; 166 - zcrypt_device_unregister(zdev); 167 - zcrypt_msgtype_release(zops); 168 - } 184 + if (zc) 185 + zcrypt_card_unregister(zc); 169 186 } 187 + 188 + static struct ap_driver zcrypt_cex4_card_driver = { 189 + .probe = zcrypt_cex4_card_probe, 190 + .remove = zcrypt_cex4_card_remove, 191 + .ids = zcrypt_cex4_card_ids, 192 + }; 193 + 194 + /** 195 + * Probe function for CEX4 queue device. It always accepts the AP device 196 + * since the bus_match already checked the hardware type. 197 + * @ap_dev: pointer to the AP device. 
198 + */ 199 + static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev) 200 + { 201 + struct ap_queue *aq = to_ap_queue(&ap_dev->device); 202 + struct zcrypt_queue *zq; 203 + int rc; 204 + 205 + if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) { 206 + zq = zcrypt_queue_alloc(CEX4A_MAX_MESSAGE_SIZE); 207 + if (!zq) 208 + return -ENOMEM; 209 + zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, 210 + MSGTYPE50_VARIANT_DEFAULT); 211 + } else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) { 212 + zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE); 213 + if (!zq) 214 + return -ENOMEM; 215 + zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, 216 + MSGTYPE06_VARIANT_DEFAULT); 217 + } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) { 218 + zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE); 219 + if (!zq) 220 + return -ENOMEM; 221 + zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, 222 + MSGTYPE06_VARIANT_EP11); 223 + } else { 224 + return -ENODEV; 225 + } 226 + zq->queue = aq; 227 + zq->online = 1; 228 + atomic_set(&zq->load, 0); 229 + ap_queue_init_reply(aq, &zq->reply); 230 + aq->request_timeout = CEX4_CLEANUP_TIME, 231 + aq->private = zq; 232 + rc = zcrypt_queue_register(zq); 233 + if (rc) { 234 + aq->private = NULL; 235 + zcrypt_queue_free(zq); 236 + } 237 + 238 + return rc; 239 + } 240 + 241 + /** 242 + * This is called to remove the CEX4 queue driver information 243 + * if an AP queue device is removed. 
244 + */ 245 + static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev) 246 + { 247 + struct ap_queue *aq = to_ap_queue(&ap_dev->device); 248 + struct zcrypt_queue *zq = aq->private; 249 + 250 + ap_queue_remove(aq); 251 + if (zq) 252 + zcrypt_queue_unregister(zq); 253 + } 254 + 255 + static struct ap_driver zcrypt_cex4_queue_driver = { 256 + .probe = zcrypt_cex4_queue_probe, 257 + .remove = zcrypt_cex4_queue_remove, 258 + .suspend = ap_queue_suspend, 259 + .resume = ap_queue_resume, 260 + .ids = zcrypt_cex4_queue_ids, 261 + }; 170 262 171 263 int __init zcrypt_cex4_init(void) 172 264 { 173 - return ap_driver_register(&zcrypt_cex4_driver, THIS_MODULE, "cex4"); 265 + int rc; 266 + 267 + rc = ap_driver_register(&zcrypt_cex4_card_driver, 268 + THIS_MODULE, "cex4card"); 269 + if (rc) 270 + return rc; 271 + 272 + rc = ap_driver_register(&zcrypt_cex4_queue_driver, 273 + THIS_MODULE, "cex4queue"); 274 + if (rc) 275 + ap_driver_unregister(&zcrypt_cex4_card_driver); 276 + 277 + return rc; 174 278 } 175 279 176 280 void __exit zcrypt_cex4_exit(void) 177 281 { 178 - ap_driver_unregister(&zcrypt_cex4_driver); 282 + ap_driver_unregister(&zcrypt_cex4_queue_driver); 283 + ap_driver_unregister(&zcrypt_cex4_card_driver); 179 284 } 180 285 181 286 module_init(zcrypt_cex4_init);
+11 -35
drivers/s390/crypto/zcrypt_debug.h
··· 1 1 /* 2 - * Copyright IBM Corp. 2012 2 + * Copyright IBM Corp. 2016 3 3 * Author(s): Holger Dengler (hd@linux.vnet.ibm.com) 4 + * Harald Freudenberger <freude@de.ibm.com> 4 5 */ 5 6 #ifndef ZCRYPT_DEBUG_H 6 7 #define ZCRYPT_DEBUG_H 7 8 8 9 #include <asm/debug.h> 9 - #include "zcrypt_api.h" 10 10 11 - /* that gives us 15 characters in the text event views */ 12 - #define ZCRYPT_DBF_LEN 16 11 + #define DBF_ERR 3 /* error conditions */ 12 + #define DBF_WARN 4 /* warning conditions */ 13 + #define DBF_INFO 5 /* informational */ 14 + #define DBF_DEBUG 6 /* for debugging only */ 13 15 14 - #define DBF_ERR 3 /* error conditions */ 15 - #define DBF_WARN 4 /* warning conditions */ 16 - #define DBF_INFO 6 /* informational */ 17 - 16 + #define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO) 18 17 #define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO) 19 18 20 - #define ZCRYPT_DBF_COMMON(level, text...) \ 21 - do { \ 22 - if (debug_level_enabled(zcrypt_dbf_common, level)) { \ 23 - char debug_buffer[ZCRYPT_DBF_LEN]; \ 24 - snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \ 25 - debug_text_event(zcrypt_dbf_common, level, \ 26 - debug_buffer); \ 27 - } \ 28 - } while (0) 19 + #define DBF_MAX_SPRINTF_ARGS 5 29 20 30 - #define ZCRYPT_DBF_DEVICES(level, text...) \ 31 - do { \ 32 - if (debug_level_enabled(zcrypt_dbf_devices, level)) { \ 33 - char debug_buffer[ZCRYPT_DBF_LEN]; \ 34 - snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \ 35 - debug_text_event(zcrypt_dbf_devices, level, \ 36 - debug_buffer); \ 37 - } \ 38 - } while (0) 21 + #define ZCRYPT_DBF(...) \ 22 + debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__) 39 23 40 - #define ZCRYPT_DBF_DEV(level, device, text...) 
\ 41 - do { \ 42 - if (debug_level_enabled(device->dbf_area, level)) { \ 43 - char debug_buffer[ZCRYPT_DBF_LEN]; \ 44 - snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \ 45 - debug_text_event(device->dbf_area, level, \ 46 - debug_buffer); \ 47 - } \ 48 - } while (0) 24 + extern debug_info_t *zcrypt_dbf_info; 49 25 50 26 int zcrypt_debug_init(void); 51 27 void zcrypt_debug_exit(void);
+57 -48
drivers/s390/crypto/zcrypt_error.h
··· 55 55 #define TYPE82_RSP_CODE 0x82 56 56 #define TYPE88_RSP_CODE 0x88 57 57 58 - #define REP82_ERROR_MACHINE_FAILURE 0x10 59 - #define REP82_ERROR_PREEMPT_FAILURE 0x12 60 - #define REP82_ERROR_CHECKPT_FAILURE 0x14 61 - #define REP82_ERROR_MESSAGE_TYPE 0x20 62 - #define REP82_ERROR_INVALID_COMM_CD 0x21 /* Type 84 */ 63 - #define REP82_ERROR_INVALID_MSG_LEN 0x23 64 - #define REP82_ERROR_RESERVD_FIELD 0x24 /* was 0x50 */ 65 - #define REP82_ERROR_FORMAT_FIELD 0x29 66 - #define REP82_ERROR_INVALID_COMMAND 0x30 67 - #define REP82_ERROR_MALFORMED_MSG 0x40 68 - #define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */ 69 - #define REP82_ERROR_WORD_ALIGNMENT 0x60 70 - #define REP82_ERROR_MESSAGE_LENGTH 0x80 71 - #define REP82_ERROR_OPERAND_INVALID 0x82 72 - #define REP82_ERROR_OPERAND_SIZE 0x84 73 - #define REP82_ERROR_EVEN_MOD_IN_OPND 0x85 74 - #define REP82_ERROR_RESERVED_FIELD 0x88 75 - #define REP82_ERROR_TRANSPORT_FAIL 0x90 76 - #define REP82_ERROR_PACKET_TRUNCATED 0xA0 77 - #define REP82_ERROR_ZERO_BUFFER_LEN 0xB0 58 + #define REP82_ERROR_MACHINE_FAILURE 0x10 59 + #define REP82_ERROR_PREEMPT_FAILURE 0x12 60 + #define REP82_ERROR_CHECKPT_FAILURE 0x14 61 + #define REP82_ERROR_MESSAGE_TYPE 0x20 62 + #define REP82_ERROR_INVALID_COMM_CD 0x21 /* Type 84 */ 63 + #define REP82_ERROR_INVALID_MSG_LEN 0x23 64 + #define REP82_ERROR_RESERVD_FIELD 0x24 /* was 0x50 */ 65 + #define REP82_ERROR_FORMAT_FIELD 0x29 66 + #define REP82_ERROR_INVALID_COMMAND 0x30 67 + #define REP82_ERROR_MALFORMED_MSG 0x40 68 + #define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42 69 + #define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */ 70 + #define REP82_ERROR_WORD_ALIGNMENT 0x60 71 + #define REP82_ERROR_MESSAGE_LENGTH 0x80 72 + #define REP82_ERROR_OPERAND_INVALID 0x82 73 + #define REP82_ERROR_OPERAND_SIZE 0x84 74 + #define REP82_ERROR_EVEN_MOD_IN_OPND 0x85 75 + #define REP82_ERROR_RESERVED_FIELD 0x88 76 + #define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A 77 + #define REP82_ERROR_TRANSPORT_FAIL 0x90 78 + 
#define REP82_ERROR_PACKET_TRUNCATED 0xA0 79 + #define REP82_ERROR_ZERO_BUFFER_LEN 0xB0 78 80 79 - #define REP88_ERROR_MODULE_FAILURE 0x10 81 + #define REP88_ERROR_MODULE_FAILURE 0x10 80 82 81 - #define REP88_ERROR_MESSAGE_TYPE 0x20 82 - #define REP88_ERROR_MESSAGE_MALFORMD 0x22 83 - #define REP88_ERROR_MESSAGE_LENGTH 0x23 84 - #define REP88_ERROR_RESERVED_FIELD 0x24 85 - #define REP88_ERROR_KEY_TYPE 0x34 86 - #define REP88_ERROR_INVALID_KEY 0x82 /* CEX2A */ 87 - #define REP88_ERROR_OPERAND 0x84 /* CEX2A */ 88 - #define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */ 83 + #define REP88_ERROR_MESSAGE_TYPE 0x20 84 + #define REP88_ERROR_MESSAGE_MALFORMD 0x22 85 + #define REP88_ERROR_MESSAGE_LENGTH 0x23 86 + #define REP88_ERROR_RESERVED_FIELD 0x24 87 + #define REP88_ERROR_KEY_TYPE 0x34 88 + #define REP88_ERROR_INVALID_KEY 0x82 /* CEX2A */ 89 + #define REP88_ERROR_OPERAND 0x84 /* CEX2A */ 90 + #define REP88_ERROR_OPERAND_EVEN_MOD 0x85 /* CEX2A */ 89 91 90 - static inline int convert_error(struct zcrypt_device *zdev, 92 + static inline int convert_error(struct zcrypt_queue *zq, 91 93 struct ap_message *reply) 92 94 { 93 95 struct error_hdr *ehdr = reply->message; 96 + int card = AP_QID_CARD(zq->queue->qid); 97 + int queue = AP_QID_QUEUE(zq->queue->qid); 94 98 95 99 switch (ehdr->reply_code) { 96 100 case REP82_ERROR_OPERAND_INVALID: 97 101 case REP82_ERROR_OPERAND_SIZE: 98 102 case REP82_ERROR_EVEN_MOD_IN_OPND: 99 103 case REP88_ERROR_MESSAGE_MALFORMD: 104 + case REP82_ERROR_INVALID_DOMAIN_PRECHECK: 105 + case REP82_ERROR_INVALID_DOMAIN_PENDING: 100 106 // REP88_ERROR_INVALID_KEY // '82' CEX2A 101 107 // REP88_ERROR_OPERAND // '84' CEX2A 102 108 // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A 103 109 /* Invalid input data. 
*/ 110 + ZCRYPT_DBF(DBF_WARN, 111 + "device=%02x.%04x reply=0x%02x => rc=EINVAL\n", 112 + card, queue, ehdr->reply_code); 104 113 return -EINVAL; 105 114 case REP82_ERROR_MESSAGE_TYPE: 106 115 // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A ··· 119 110 * and then repeat the request. 120 111 */ 121 112 atomic_set(&zcrypt_rescan_req, 1); 122 - zdev->online = 0; 123 - pr_err("Cryptographic device %x failed and was set offline\n", 124 - AP_QID_DEVICE(zdev->ap_dev->qid)); 125 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 126 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, 127 - ehdr->reply_code); 113 + zq->online = 0; 114 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 115 + card, queue); 116 + ZCRYPT_DBF(DBF_ERR, 117 + "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n", 118 + card, queue, ehdr->reply_code); 128 119 return -EAGAIN; 129 120 case REP82_ERROR_TRANSPORT_FAIL: 130 121 case REP82_ERROR_MACHINE_FAILURE: 131 122 // REP88_ERROR_MODULE_FAILURE // '10' CEX2A 132 123 /* If a card fails disable it and repeat the request. 
*/ 133 124 atomic_set(&zcrypt_rescan_req, 1); 134 - zdev->online = 0; 135 - pr_err("Cryptographic device %x failed and was set offline\n", 136 - AP_QID_DEVICE(zdev->ap_dev->qid)); 137 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 138 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, 139 - ehdr->reply_code); 125 + zq->online = 0; 126 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 127 + card, queue); 128 + ZCRYPT_DBF(DBF_ERR, 129 + "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n", 130 + card, queue, ehdr->reply_code); 140 131 return -EAGAIN; 141 132 default: 142 - zdev->online = 0; 143 - pr_err("Cryptographic device %x failed and was set offline\n", 144 - AP_QID_DEVICE(zdev->ap_dev->qid)); 145 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 146 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, 147 - ehdr->reply_code); 133 + zq->online = 0; 134 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 135 + card, queue); 136 + ZCRYPT_DBF(DBF_ERR, 137 + "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n", 138 + card, queue, ehdr->reply_code); 148 139 return -EAGAIN; /* repeat the request on a different device. */ 149 140 } 150 141 }
+84 -51
drivers/s390/crypto/zcrypt_msgtype50.c
··· 53 53 "Copyright IBM Corp. 2001, 2012"); 54 54 MODULE_LICENSE("GPL"); 55 55 56 - static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *, 57 - struct ap_message *); 58 - 59 56 /** 60 57 * The type 50 message family is associated with a CEX2A card. 61 58 * ··· 170 173 unsigned char reserved3[8]; 171 174 } __packed; 172 175 176 + unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode) 177 + { 178 + 179 + if (!mex->inputdatalength) 180 + return -EINVAL; 181 + 182 + if (mex->inputdatalength <= 128) /* 1024 bit */ 183 + *fcode = MEX_1K; 184 + else if (mex->inputdatalength <= 256) /* 2048 bit */ 185 + *fcode = MEX_2K; 186 + else /* 4096 bit */ 187 + *fcode = MEX_4K; 188 + 189 + return 0; 190 + } 191 + 192 + unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode) 193 + { 194 + 195 + if (!crt->inputdatalength) 196 + return -EINVAL; 197 + 198 + if (crt->inputdatalength <= 128) /* 1024 bit */ 199 + *fcode = CRT_1K; 200 + else if (crt->inputdatalength <= 256) /* 2048 bit */ 201 + *fcode = CRT_2K; 202 + else /* 4096 bit */ 203 + *fcode = CRT_4K; 204 + 205 + return 0; 206 + } 207 + 173 208 /** 174 209 * Convert a ICAMEX message to a type50 MEX message. 175 210 * 176 - * @zdev: crypto device pointer 177 - * @zreq: crypto request pointer 211 + * @zq: crypto queue pointer 212 + * @ap_msg: crypto request pointer 178 213 * @mex: pointer to user input data 179 214 * 180 215 * Returns 0 on success or -EFAULT. 181 216 */ 182 - static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev, 217 + static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq, 183 218 struct ap_message *ap_msg, 184 219 struct ica_rsa_modexpo *mex) 185 220 { ··· 263 234 /** 264 235 * Convert a ICACRT message to a type50 CRT message. 
265 236 * 266 - * @zdev: crypto device pointer 267 - * @zreq: crypto request pointer 237 + * @zq: crypto queue pointer 238 + * @ap_msg: crypto request pointer 268 239 * @crt: pointer to user input data 269 240 * 270 241 * Returns 0 on success or -EFAULT. 271 242 */ 272 - static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev, 243 + static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq, 273 244 struct ap_message *ap_msg, 274 245 struct ica_rsa_modexpo_crt *crt) 275 246 { ··· 312 283 u = crb2->u + sizeof(crb2->u) - short_len; 313 284 inp = crb2->message + sizeof(crb2->message) - mod_len; 314 285 } else if ((mod_len <= 512) && /* up to 4096 bit key size */ 315 - (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)) { /* >= CEX3A */ 286 + (zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) { 316 287 struct type50_crb3_msg *crb3 = ap_msg->message; 317 288 memset(crb3, 0, sizeof(*crb3)); 318 289 ap_msg->length = sizeof(*crb3); ··· 346 317 /** 347 318 * Copy results from a type 80 reply message back to user space. 348 319 * 349 - * @zdev: crypto device pointer 320 + * @zq: crypto device pointer 350 321 * @reply: reply AP message. 351 322 * @data: pointer to user output data 352 323 * @length: size of user output data 353 324 * 354 325 * Returns 0 on success or -EFAULT. 355 326 */ 356 - static int convert_type80(struct zcrypt_device *zdev, 327 + static int convert_type80(struct zcrypt_queue *zq, 357 328 struct ap_message *reply, 358 329 char __user *outputdata, 359 330 unsigned int outputdatalength) ··· 363 334 364 335 if (t80h->len < sizeof(*t80h) + outputdatalength) { 365 336 /* The result is too short, the CEX2A card may not do that.. 
*/ 366 - zdev->online = 0; 367 - pr_err("Cryptographic device %x failed and was set offline\n", 368 - AP_QID_DEVICE(zdev->ap_dev->qid)); 369 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 370 - AP_QID_DEVICE(zdev->ap_dev->qid), 371 - zdev->online, t80h->code); 372 - 337 + zq->online = 0; 338 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 339 + AP_QID_CARD(zq->queue->qid), 340 + AP_QID_QUEUE(zq->queue->qid)); 341 + ZCRYPT_DBF(DBF_ERR, 342 + "device=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n", 343 + AP_QID_CARD(zq->queue->qid), 344 + AP_QID_QUEUE(zq->queue->qid), 345 + t80h->code); 373 346 return -EAGAIN; /* repeat the request on a different device. */ 374 347 } 375 - if (zdev->user_space_type == ZCRYPT_CEX2A) 348 + if (zq->zcard->user_space_type == ZCRYPT_CEX2A) 376 349 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE); 377 350 else 378 351 BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE); ··· 384 353 return 0; 385 354 } 386 355 387 - static int convert_response(struct zcrypt_device *zdev, 356 + static int convert_response(struct zcrypt_queue *zq, 388 357 struct ap_message *reply, 389 358 char __user *outputdata, 390 359 unsigned int outputdatalength) 391 360 { 392 361 /* Response type byte is the second byte in the response. 
*/ 393 - switch (((unsigned char *) reply->message)[1]) { 362 + unsigned char rtype = ((unsigned char *) reply->message)[1]; 363 + 364 + switch (rtype) { 394 365 case TYPE82_RSP_CODE: 395 366 case TYPE88_RSP_CODE: 396 - return convert_error(zdev, reply); 367 + return convert_error(zq, reply); 397 368 case TYPE80_RSP_CODE: 398 - return convert_type80(zdev, reply, 369 + return convert_type80(zq, reply, 399 370 outputdata, outputdatalength); 400 371 default: /* Unknown response type, this should NEVER EVER happen */ 401 - zdev->online = 0; 402 - pr_err("Cryptographic device %x failed and was set offline\n", 403 - AP_QID_DEVICE(zdev->ap_dev->qid)); 404 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 405 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); 372 + zq->online = 0; 373 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 374 + AP_QID_CARD(zq->queue->qid), 375 + AP_QID_QUEUE(zq->queue->qid)); 376 + ZCRYPT_DBF(DBF_ERR, 377 + "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", 378 + AP_QID_CARD(zq->queue->qid), 379 + AP_QID_QUEUE(zq->queue->qid), 380 + (unsigned int) rtype); 406 381 return -EAGAIN; /* repeat the request on a different device. */ 407 382 } 408 383 } ··· 417 380 * This function is called from the AP bus code after a crypto request 418 381 * "msg" has finished with the reply message "reply". 419 382 * It is called from tasklet context. 
420 - * @ap_dev: pointer to the AP device 383 + * @aq: pointer to the AP device 421 384 * @msg: pointer to the AP message 422 385 * @reply: pointer to the AP reply message 423 386 */ 424 - static void zcrypt_cex2a_receive(struct ap_device *ap_dev, 387 + static void zcrypt_cex2a_receive(struct ap_queue *aq, 425 388 struct ap_message *msg, 426 389 struct ap_message *reply) 427 390 { ··· 437 400 goto out; /* ap_msg->rc indicates the error */ 438 401 t80h = reply->message; 439 402 if (t80h->type == TYPE80_RSP_CODE) { 440 - if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A) 403 + if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) 441 404 length = min_t(int, 442 405 CEX2A_MAX_RESPONSE_SIZE, t80h->len); 443 406 else ··· 455 418 /** 456 419 * The request distributor calls this function if it picked the CEX2A 457 420 * device to handle a modexpo request. 458 - * @zdev: pointer to zcrypt_device structure that identifies the 421 + * @zq: pointer to zcrypt_queue structure that identifies the 459 422 * CEX2A device to the request distributor 460 423 * @mex: pointer to the modexpo request buffer 461 424 */ 462 - static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev, 425 + static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq, 463 426 struct ica_rsa_modexpo *mex) 464 427 { 465 428 struct ap_message ap_msg; ··· 467 430 int rc; 468 431 469 432 ap_init_message(&ap_msg); 470 - if (zdev->user_space_type == ZCRYPT_CEX2A) 433 + if (zq->zcard->user_space_type == ZCRYPT_CEX2A) 471 434 ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, 472 435 GFP_KERNEL); 473 436 else ··· 479 442 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 480 443 atomic_inc_return(&zcrypt_step); 481 444 ap_msg.private = &work; 482 - rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex); 445 + rc = ICAMEX_msg_to_type50MEX_msg(zq, &ap_msg, mex); 483 446 if (rc) 484 447 goto out_free; 485 448 init_completion(&work); 486 - ap_queue_message(zdev->ap_dev, &ap_msg); 449 + ap_queue_message(zq->queue, 
&ap_msg); 487 450 rc = wait_for_completion_interruptible(&work); 488 451 if (rc == 0) { 489 452 rc = ap_msg.rc; 490 453 if (rc == 0) 491 - rc = convert_response(zdev, &ap_msg, mex->outputdata, 454 + rc = convert_response(zq, &ap_msg, mex->outputdata, 492 455 mex->outputdatalength); 493 456 } else 494 457 /* Signal pending. */ 495 - ap_cancel_message(zdev->ap_dev, &ap_msg); 458 + ap_cancel_message(zq->queue, &ap_msg); 496 459 out_free: 497 460 kfree(ap_msg.message); 498 461 return rc; ··· 501 464 /** 502 465 * The request distributor calls this function if it picked the CEX2A 503 466 * device to handle a modexpo_crt request. 504 - * @zdev: pointer to zcrypt_device structure that identifies the 467 + * @zq: pointer to zcrypt_queue structure that identifies the 505 468 * CEX2A device to the request distributor 506 469 * @crt: pointer to the modexpoc_crt request buffer 507 470 */ 508 - static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev, 471 + static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq, 509 472 struct ica_rsa_modexpo_crt *crt) 510 473 { 511 474 struct ap_message ap_msg; ··· 513 476 int rc; 514 477 515 478 ap_init_message(&ap_msg); 516 - if (zdev->user_space_type == ZCRYPT_CEX2A) 479 + if (zq->zcard->user_space_type == ZCRYPT_CEX2A) 517 480 ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE, 518 481 GFP_KERNEL); 519 482 else ··· 525 488 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 526 489 atomic_inc_return(&zcrypt_step); 527 490 ap_msg.private = &work; 528 - rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt); 491 + rc = ICACRT_msg_to_type50CRT_msg(zq, &ap_msg, crt); 529 492 if (rc) 530 493 goto out_free; 531 494 init_completion(&work); 532 - ap_queue_message(zdev->ap_dev, &ap_msg); 495 + ap_queue_message(zq->queue, &ap_msg); 533 496 rc = wait_for_completion_interruptible(&work); 534 497 if (rc == 0) { 535 498 rc = ap_msg.rc; 536 499 if (rc == 0) 537 - rc = convert_response(zdev, &ap_msg, crt->outputdata, 500 + rc = 
convert_response(zq, &ap_msg, crt->outputdata, 538 501 crt->outputdatalength); 539 502 } else 540 503 /* Signal pending. */ 541 - ap_cancel_message(zdev->ap_dev, &ap_msg); 504 + ap_cancel_message(zq->queue, &ap_msg); 542 505 out_free: 543 506 kfree(ap_msg.message); 544 507 return rc; ··· 555 518 .variant = MSGTYPE50_VARIANT_DEFAULT, 556 519 }; 557 520 558 - int __init zcrypt_msgtype50_init(void) 521 + void __init zcrypt_msgtype50_init(void) 559 522 { 560 523 zcrypt_msgtype_register(&zcrypt_msgtype50_ops); 561 - return 0; 562 524 } 563 525 564 526 void __exit zcrypt_msgtype50_exit(void) 565 527 { 566 528 zcrypt_msgtype_unregister(&zcrypt_msgtype50_ops); 567 529 } 568 - 569 - module_init(zcrypt_msgtype50_init); 570 - module_exit(zcrypt_msgtype50_exit);
+4 -1
drivers/s390/crypto/zcrypt_msgtype50.h
··· 35 35 36 36 #define MSGTYPE_ADJUSTMENT 0x08 /*type04 extension (not needed in type50)*/ 37 37 38 - int zcrypt_msgtype50_init(void); 38 + unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *, int *); 39 + unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *, int *); 40 + 41 + void zcrypt_msgtype50_init(void); 39 42 void zcrypt_msgtype50_exit(void); 40 43 41 44 #endif /* _ZCRYPT_MSGTYPE50_H_ */
+436 -224
drivers/s390/crypto/zcrypt_msgtype6.c
··· 60 60 "Copyright IBM Corp. 2001, 2012"); 61 61 MODULE_LICENSE("GPL"); 62 62 63 - static void zcrypt_msgtype6_receive(struct ap_device *, struct ap_message *, 64 - struct ap_message *); 65 - 66 63 /** 67 64 * CPRB 68 65 * Note that all shorts, ints and longs are little-endian. ··· 146 149 .func_id = {0x54, 0x32}, 147 150 }; 148 151 152 + int speed_idx_cca(int req_type) 153 + { 154 + switch (req_type) { 155 + case 0x4142: 156 + case 0x4149: 157 + case 0x414D: 158 + case 0x4341: 159 + case 0x4344: 160 + case 0x4354: 161 + case 0x4358: 162 + case 0x444B: 163 + case 0x4558: 164 + case 0x4643: 165 + case 0x4651: 166 + case 0x4C47: 167 + case 0x4C4B: 168 + case 0x4C51: 169 + case 0x4F48: 170 + case 0x504F: 171 + case 0x5053: 172 + case 0x5058: 173 + case 0x5343: 174 + case 0x5344: 175 + case 0x5345: 176 + case 0x5350: 177 + return LOW; 178 + case 0x414B: 179 + case 0x4345: 180 + case 0x4349: 181 + case 0x434D: 182 + case 0x4847: 183 + case 0x4849: 184 + case 0x484D: 185 + case 0x4850: 186 + case 0x4851: 187 + case 0x4954: 188 + case 0x4958: 189 + case 0x4B43: 190 + case 0x4B44: 191 + case 0x4B45: 192 + case 0x4B47: 193 + case 0x4B48: 194 + case 0x4B49: 195 + case 0x4B4E: 196 + case 0x4B50: 197 + case 0x4B52: 198 + case 0x4B54: 199 + case 0x4B58: 200 + case 0x4D50: 201 + case 0x4D53: 202 + case 0x4D56: 203 + case 0x4D58: 204 + case 0x5044: 205 + case 0x5045: 206 + case 0x5046: 207 + case 0x5047: 208 + case 0x5049: 209 + case 0x504B: 210 + case 0x504D: 211 + case 0x5254: 212 + case 0x5347: 213 + case 0x5349: 214 + case 0x534B: 215 + case 0x534D: 216 + case 0x5356: 217 + case 0x5358: 218 + case 0x5443: 219 + case 0x544B: 220 + case 0x5647: 221 + return HIGH; 222 + default: 223 + return MEDIUM; 224 + } 225 + } 226 + 227 + int speed_idx_ep11(int req_type) 228 + { 229 + switch (req_type) { 230 + case 1: 231 + case 2: 232 + case 36: 233 + case 37: 234 + case 38: 235 + case 39: 236 + case 40: 237 + return LOW; 238 + case 17: 239 + case 18: 240 + case 19: 241 + case 20: 242 + 
case 21: 243 + case 22: 244 + case 26: 245 + case 30: 246 + case 31: 247 + case 32: 248 + case 33: 249 + case 34: 250 + case 35: 251 + return HIGH; 252 + default: 253 + return MEDIUM; 254 + } 255 + } 256 + 257 + 149 258 /** 150 259 * Convert a ICAMEX message to a type6 MEX message. 151 260 * 152 - * @zdev: crypto device pointer 261 + * @zq: crypto device pointer 153 262 * @ap_msg: pointer to AP message 154 263 * @mex: pointer to user input data 155 264 * 156 265 * Returns 0 on success or -EFAULT. 157 266 */ 158 - static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev, 267 + static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq, 159 268 struct ap_message *ap_msg, 160 269 struct ica_rsa_modexpo *mex) 161 270 { ··· 275 172 .function_code = {'P', 'K'}, 276 173 .ulen = 10, 277 174 .only_rule = {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '} 278 - }; 279 - static struct function_and_rules_block static_pke_fnr_MCL2 = { 280 - .function_code = {'P', 'K'}, 281 - .ulen = 10, 282 - .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'} 283 175 }; 284 176 struct { 285 177 struct type6_hdr hdr; ··· 302 204 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 303 205 304 206 msg->cprbx = static_cprbx; 305 - msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); 207 + msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); 306 208 msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1; 307 209 308 - msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? 309 - static_pke_fnr_MCL2 : static_pke_fnr; 210 + msg->fr = static_pke_fnr; 310 211 311 212 msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx); 312 213 ··· 316 219 /** 317 220 * Convert a ICACRT message to a type6 CRT message. 318 221 * 319 - * @zdev: crypto device pointer 222 + * @zq: crypto device pointer 320 223 * @ap_msg: pointer to AP message 321 224 * @crt: pointer to user input data 322 225 * 323 226 * Returns 0 on success or -EFAULT. 
324 227 */ 325 - static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev, 228 + static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq, 326 229 struct ap_message *ap_msg, 327 230 struct ica_rsa_modexpo_crt *crt) 328 231 { ··· 338 241 .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'} 339 242 }; 340 243 341 - static struct function_and_rules_block static_pkd_fnr_MCL2 = { 342 - .function_code = {'P', 'D'}, 343 - .ulen = 10, 344 - .only_rule = {'P', 'K', 'C', 'S', '-', '1', '.', '2'} 345 - }; 346 244 struct { 347 245 struct type6_hdr hdr; 348 246 struct CPRBX cprbx; ··· 364 272 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 365 273 366 274 msg->cprbx = static_cprbx; 367 - msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); 275 + msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); 368 276 msg->cprbx.req_parml = msg->cprbx.rpl_msgbl = 369 277 size - sizeof(msg->hdr) - sizeof(msg->cprbx); 370 278 371 - msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? 372 - static_pkd_fnr_MCL2 : static_pkd_fnr; 279 + msg->fr = static_pkd_fnr; 373 280 374 281 ap_msg->length = size; 375 282 return 0; ··· 377 286 /** 378 287 * Convert a XCRB message to a type6 CPRB message. 
379 288 * 380 - * @zdev: crypto device pointer 289 + * @zq: crypto device pointer 381 290 * @ap_msg: pointer to AP message 382 291 * @xcRB: pointer to user input data 383 292 * ··· 388 297 struct type86_fmt2_ext fmt2; 389 298 } __packed; 390 299 391 - static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, 392 - struct ap_message *ap_msg, 393 - struct ica_xcRB *xcRB) 300 + static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg, 301 + struct ica_xcRB *xcRB, 302 + unsigned int *fcode, 303 + unsigned short **dom) 394 304 { 395 305 static struct type6_hdr static_type6_hdrX = { 396 306 .type = 0x06, ··· 471 379 memcpy(msg->hdr.function_code, function_code, 472 380 sizeof(msg->hdr.function_code)); 473 381 382 + *fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1]; 383 + *dom = (unsigned short *)&msg->cprbx.domain; 384 + 474 385 if (memcmp(function_code, "US", 2) == 0) 475 386 ap_msg->special = 1; 476 387 else ··· 484 389 copy_from_user(req_data, xcRB->request_data_address, 485 390 xcRB->request_data_length)) 486 391 return -EFAULT; 392 + 487 393 return 0; 488 394 } 489 395 490 - static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev, 491 - struct ap_message *ap_msg, 492 - struct ep11_urb *xcRB) 396 + static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg, 397 + struct ep11_urb *xcRB, 398 + unsigned int *fcode) 493 399 { 494 400 unsigned int lfmt; 495 - 496 401 static struct type6_hdr static_type6_ep11_hdr = { 497 402 .type = 0x06, 498 403 .rqid = {0x00, 0x01}, ··· 516 421 unsigned char dom_tag; /* fixed value 0x4 */ 517 422 unsigned char dom_len; /* fixed value 0x4 */ 518 423 unsigned int dom_val; /* domain id */ 519 - } __packed * payload_hdr; 424 + } __packed * payload_hdr = NULL; 520 425 521 426 if (CEIL4(xcRB->req_len) < xcRB->req_len) 522 427 return -EINVAL; /* overflow after alignment*/ ··· 545 450 return -EFAULT; 546 451 } 547 452 548 - /* 549 - The target domain field within the cprb body/payload 
block will be 550 - replaced by the usage domain for non-management commands only. 551 - Therefore we check the first bit of the 'flags' parameter for 552 - management command indication. 553 - 0 - non management command 554 - 1 - management command 555 - */ 556 - if (!((msg->cprbx.flags & 0x80) == 0x80)) { 557 - msg->cprbx.target_id = (unsigned int) 558 - AP_QID_QUEUE(zdev->ap_dev->qid); 559 - 560 - if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/ 561 - switch (msg->pld_lenfmt & 0x03) { 562 - case 1: 563 - lfmt = 2; 564 - break; 565 - case 2: 566 - lfmt = 3; 567 - break; 568 - default: 569 - return -EINVAL; 570 - } 571 - } else { 572 - lfmt = 1; /* length format #1 */ 573 - } 574 - payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); 575 - payload_hdr->dom_val = (unsigned int) 576 - AP_QID_QUEUE(zdev->ap_dev->qid); 453 + if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/ 454 + switch (msg->pld_lenfmt & 0x03) { 455 + case 1: 456 + lfmt = 2; 457 + break; 458 + case 2: 459 + lfmt = 3; 460 + break; 461 + default: 462 + return -EINVAL; 463 + } 464 + } else { 465 + lfmt = 1; /* length format #1 */ 577 466 } 467 + payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); 468 + *fcode = payload_hdr->func_val & 0xFFFF; 469 + 578 470 return 0; 579 471 } 580 472 581 473 /** 582 474 * Copy results from a type 86 ICA reply message back to user space. 583 475 * 584 - * @zdev: crypto device pointer 476 + * @zq: crypto device pointer 585 477 * @reply: reply AP message. 
586 478 * @data: pointer to user output data 587 479 * @length: size of user output data ··· 590 508 struct ep11_cprb cprbx; 591 509 } __packed; 592 510 593 - static int convert_type86_ica(struct zcrypt_device *zdev, 511 + static int convert_type86_ica(struct zcrypt_queue *zq, 594 512 struct ap_message *reply, 595 513 char __user *outputdata, 596 514 unsigned int outputdatalength) ··· 638 556 service_rc = msg->cprbx.ccp_rtcode; 639 557 if (unlikely(service_rc != 0)) { 640 558 service_rs = msg->cprbx.ccp_rscode; 641 - if (service_rc == 8 && service_rs == 66) 559 + if ((service_rc == 8 && service_rs == 66) || 560 + (service_rc == 8 && service_rs == 65) || 561 + (service_rc == 8 && service_rs == 72) || 562 + (service_rc == 8 && service_rs == 770) || 563 + (service_rc == 12 && service_rs == 769)) { 564 + ZCRYPT_DBF(DBF_DEBUG, 565 + "device=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n", 566 + AP_QID_CARD(zq->queue->qid), 567 + AP_QID_QUEUE(zq->queue->qid), 568 + (int) service_rc, (int) service_rs); 642 569 return -EINVAL; 643 - if (service_rc == 8 && service_rs == 65) 644 - return -EINVAL; 645 - if (service_rc == 8 && service_rs == 770) 646 - return -EINVAL; 570 + } 647 571 if (service_rc == 8 && service_rs == 783) { 648 - zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; 572 + zq->zcard->min_mod_size = 573 + PCIXCC_MIN_MOD_SIZE_OLD; 574 + ZCRYPT_DBF(DBF_DEBUG, 575 + "device=%02x.%04x rc/rs=%d/%d => rc=EAGAIN\n", 576 + AP_QID_CARD(zq->queue->qid), 577 + AP_QID_QUEUE(zq->queue->qid), 578 + (int) service_rc, (int) service_rs); 649 579 return -EAGAIN; 650 580 } 651 - if (service_rc == 12 && service_rs == 769) 652 - return -EINVAL; 653 - if (service_rc == 8 && service_rs == 72) 654 - return -EINVAL; 655 - zdev->online = 0; 656 - pr_err("Cryptographic device %x failed and was set offline\n", 657 - AP_QID_DEVICE(zdev->ap_dev->qid)); 658 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 659 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, 660 - msg->hdr.reply_code); 581 + zq->online = 0; 
582 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 583 + AP_QID_CARD(zq->queue->qid), 584 + AP_QID_QUEUE(zq->queue->qid)); 585 + ZCRYPT_DBF(DBF_ERR, 586 + "device=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n", 587 + AP_QID_CARD(zq->queue->qid), 588 + AP_QID_QUEUE(zq->queue->qid), 589 + (int) service_rc, (int) service_rs); 661 590 return -EAGAIN; /* repeat the request on a different device. */ 662 591 } 663 592 data = msg->text; ··· 704 611 /** 705 612 * Copy results from a type 86 XCRB reply message back to user space. 706 613 * 707 - * @zdev: crypto device pointer 614 + * @zq: crypto device pointer 708 615 * @reply: reply AP message. 709 616 * @xcRB: pointer to XCRB 710 617 * 711 618 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. 712 619 */ 713 - static int convert_type86_xcrb(struct zcrypt_device *zdev, 620 + static int convert_type86_xcrb(struct zcrypt_queue *zq, 714 621 struct ap_message *reply, 715 622 struct ica_xcRB *xcRB) 716 623 { ··· 735 642 /** 736 643 * Copy results from a type 86 EP11 XCRB reply message back to user space. 737 644 * 738 - * @zdev: crypto device pointer 645 + * @zq: crypto device pointer 739 646 * @reply: reply AP message. 740 647 * @xcRB: pointer to EP11 user request block 741 648 * 742 649 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. 
743 650 */ 744 - static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev, 651 + static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq, 745 652 struct ap_message *reply, 746 653 struct ep11_urb *xcRB) 747 654 { ··· 759 666 return 0; 760 667 } 761 668 762 - static int convert_type86_rng(struct zcrypt_device *zdev, 669 + static int convert_type86_rng(struct zcrypt_queue *zq, 763 670 struct ap_message *reply, 764 671 char *buffer) 765 672 { ··· 776 683 return msg->fmt2.count2; 777 684 } 778 685 779 - static int convert_response_ica(struct zcrypt_device *zdev, 686 + static int convert_response_ica(struct zcrypt_queue *zq, 780 687 struct ap_message *reply, 781 688 char __user *outputdata, 782 689 unsigned int outputdatalength) 783 690 { 784 691 struct type86x_reply *msg = reply->message; 785 692 786 - /* Response type byte is the second byte in the response. */ 787 - switch (((unsigned char *) reply->message)[1]) { 693 + switch (msg->hdr.type) { 788 694 case TYPE82_RSP_CODE: 789 695 case TYPE88_RSP_CODE: 790 - return convert_error(zdev, reply); 696 + return convert_error(zq, reply); 791 697 case TYPE86_RSP_CODE: 792 698 if (msg->cprbx.ccp_rtcode && 793 699 (msg->cprbx.ccp_rscode == 0x14f) && 794 700 (outputdatalength > 256)) { 795 - if (zdev->max_exp_bit_length <= 17) { 796 - zdev->max_exp_bit_length = 17; 701 + if (zq->zcard->max_exp_bit_length <= 17) { 702 + zq->zcard->max_exp_bit_length = 17; 797 703 return -EAGAIN; 798 704 } else 799 705 return -EINVAL; 800 706 } 801 707 if (msg->hdr.reply_code) 802 - return convert_error(zdev, reply); 708 + return convert_error(zq, reply); 803 709 if (msg->cprbx.cprb_ver_id == 0x02) 804 - return convert_type86_ica(zdev, reply, 710 + return convert_type86_ica(zq, reply, 805 711 outputdata, outputdatalength); 806 712 /* Fall through, no break, incorrect cprb version is an unknown 807 713 * response */ 808 714 default: /* Unknown response type, this should NEVER EVER happen */ 809 - zdev->online = 0; 810 - 
pr_err("Cryptographic device %x failed and was set offline\n", 811 - AP_QID_DEVICE(zdev->ap_dev->qid)); 812 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 813 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); 715 + zq->online = 0; 716 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 717 + AP_QID_CARD(zq->queue->qid), 718 + AP_QID_QUEUE(zq->queue->qid)); 719 + ZCRYPT_DBF(DBF_ERR, 720 + "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", 721 + AP_QID_CARD(zq->queue->qid), 722 + AP_QID_QUEUE(zq->queue->qid), 723 + (int) msg->hdr.type); 814 724 return -EAGAIN; /* repeat the request on a different device. */ 815 725 } 816 726 } 817 727 818 - static int convert_response_xcrb(struct zcrypt_device *zdev, 728 + static int convert_response_xcrb(struct zcrypt_queue *zq, 819 729 struct ap_message *reply, 820 730 struct ica_xcRB *xcRB) 821 731 { 822 732 struct type86x_reply *msg = reply->message; 823 733 824 - /* Response type byte is the second byte in the response. */ 825 - switch (((unsigned char *) reply->message)[1]) { 734 + switch (msg->hdr.type) { 826 735 case TYPE82_RSP_CODE: 827 736 case TYPE88_RSP_CODE: 828 737 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ 829 - return convert_error(zdev, reply); 738 + return convert_error(zq, reply); 830 739 case TYPE86_RSP_CODE: 831 740 if (msg->hdr.reply_code) { 832 741 memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32)); 833 - return convert_error(zdev, reply); 742 + return convert_error(zq, reply); 834 743 } 835 744 if (msg->cprbx.cprb_ver_id == 0x02) 836 - return convert_type86_xcrb(zdev, reply, xcRB); 745 + return convert_type86_xcrb(zq, reply, xcRB); 837 746 /* Fall through, no break, incorrect cprb version is an unknown 838 747 * response */ 839 748 default: /* Unknown response type, this should NEVER EVER happen */ 840 749 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ 841 - zdev->online = 0; 842 - pr_err("Cryptographic device %x failed and was set offline\n", 843 - 
AP_QID_DEVICE(zdev->ap_dev->qid)); 844 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 845 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); 750 + zq->online = 0; 751 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 752 + AP_QID_CARD(zq->queue->qid), 753 + AP_QID_QUEUE(zq->queue->qid)); 754 + ZCRYPT_DBF(DBF_ERR, 755 + "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", 756 + AP_QID_CARD(zq->queue->qid), 757 + AP_QID_QUEUE(zq->queue->qid), 758 + (int) msg->hdr.type); 846 759 return -EAGAIN; /* repeat the request on a different device. */ 847 760 } 848 761 } 849 762 850 - static int convert_response_ep11_xcrb(struct zcrypt_device *zdev, 763 + static int convert_response_ep11_xcrb(struct zcrypt_queue *zq, 851 764 struct ap_message *reply, struct ep11_urb *xcRB) 852 765 { 853 766 struct type86_ep11_reply *msg = reply->message; 854 767 855 - /* Response type byte is the second byte in the response. */ 856 - switch (((unsigned char *)reply->message)[1]) { 768 + switch (msg->hdr.type) { 857 769 case TYPE82_RSP_CODE: 858 770 case TYPE87_RSP_CODE: 859 - return convert_error(zdev, reply); 771 + return convert_error(zq, reply); 860 772 case TYPE86_RSP_CODE: 861 773 if (msg->hdr.reply_code) 862 - return convert_error(zdev, reply); 774 + return convert_error(zq, reply); 863 775 if (msg->cprbx.cprb_ver_id == 0x04) 864 - return convert_type86_ep11_xcrb(zdev, reply, xcRB); 776 + return convert_type86_ep11_xcrb(zq, reply, xcRB); 865 777 /* Fall through, no break, incorrect cprb version is an unknown resp.*/ 866 778 default: /* Unknown response type, this should NEVER EVER happen */ 867 - zdev->online = 0; 868 - pr_err("Cryptographic device %x failed and was set offline\n", 869 - AP_QID_DEVICE(zdev->ap_dev->qid)); 870 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 871 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); 779 + zq->online = 0; 780 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 781 + AP_QID_CARD(zq->queue->qid), 
782 + AP_QID_QUEUE(zq->queue->qid)); 783 + ZCRYPT_DBF(DBF_ERR, 784 + "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", 785 + AP_QID_CARD(zq->queue->qid), 786 + AP_QID_QUEUE(zq->queue->qid), 787 + (int) msg->hdr.type); 872 788 return -EAGAIN; /* repeat the request on a different device. */ 873 789 } 874 790 } 875 791 876 - static int convert_response_rng(struct zcrypt_device *zdev, 792 + static int convert_response_rng(struct zcrypt_queue *zq, 877 793 struct ap_message *reply, 878 794 char *data) 879 795 { ··· 896 794 if (msg->hdr.reply_code) 897 795 return -EINVAL; 898 796 if (msg->cprbx.cprb_ver_id == 0x02) 899 - return convert_type86_rng(zdev, reply, data); 797 + return convert_type86_rng(zq, reply, data); 900 798 /* Fall through, no break, incorrect cprb version is an unknown 901 799 * response */ 902 800 default: /* Unknown response type, this should NEVER EVER happen */ 903 - zdev->online = 0; 904 - pr_err("Cryptographic device %x failed and was set offline\n", 905 - AP_QID_DEVICE(zdev->ap_dev->qid)); 906 - ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", 907 - AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); 801 + zq->online = 0; 802 + pr_err("Cryptographic device %02x.%04x failed and was set offline\n", 803 + AP_QID_CARD(zq->queue->qid), 804 + AP_QID_QUEUE(zq->queue->qid)); 805 + ZCRYPT_DBF(DBF_ERR, 806 + "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n", 807 + AP_QID_CARD(zq->queue->qid), 808 + AP_QID_QUEUE(zq->queue->qid), 809 + (int) msg->hdr.type); 908 810 return -EAGAIN; /* repeat the request on a different device. */ 909 811 } 910 812 } ··· 917 811 * This function is called from the AP bus code after a crypto request 918 812 * "msg" has finished with the reply message "reply". 919 813 * It is called from tasklet context. 
920 - * @ap_dev: pointer to the AP device 814 + * @aq: pointer to the AP queue 921 815 * @msg: pointer to the AP message 922 816 * @reply: pointer to the AP reply message 923 817 */ 924 - static void zcrypt_msgtype6_receive(struct ap_device *ap_dev, 818 + static void zcrypt_msgtype6_receive(struct ap_queue *aq, 925 819 struct ap_message *msg, 926 820 struct ap_message *reply) 927 821 { ··· 966 860 * This function is called from the AP bus code after a crypto request 967 861 * "msg" has finished with the reply message "reply". 968 862 * It is called from tasklet context. 969 - * @ap_dev: pointer to the AP device 863 + * @aq: pointer to the AP queue 970 864 * @msg: pointer to the AP message 971 865 * @reply: pointer to the AP reply message 972 866 */ 973 - static void zcrypt_msgtype6_receive_ep11(struct ap_device *ap_dev, 867 + static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq, 974 868 struct ap_message *msg, 975 869 struct ap_message *reply) 976 870 { ··· 1010 904 /** 1011 905 * The request distributor calls this function if it picked the PCIXCC/CEX2C 1012 906 * device to handle a modexpo request. 
1013 - * @zdev: pointer to zcrypt_device structure that identifies the 907 + * @zq: pointer to zcrypt_queue structure that identifies the 1014 908 * PCIXCC/CEX2C device to the request distributor 1015 909 * @mex: pointer to the modexpo request buffer 1016 910 */ 1017 - static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev, 911 + static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, 1018 912 struct ica_rsa_modexpo *mex) 1019 913 { 1020 914 struct ap_message ap_msg; ··· 1031 925 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 1032 926 atomic_inc_return(&zcrypt_step); 1033 927 ap_msg.private = &resp_type; 1034 - rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex); 928 + rc = ICAMEX_msg_to_type6MEX_msgX(zq, &ap_msg, mex); 1035 929 if (rc) 1036 930 goto out_free; 1037 931 init_completion(&resp_type.work); 1038 - ap_queue_message(zdev->ap_dev, &ap_msg); 932 + ap_queue_message(zq->queue, &ap_msg); 1039 933 rc = wait_for_completion_interruptible(&resp_type.work); 1040 934 if (rc == 0) { 1041 935 rc = ap_msg.rc; 1042 936 if (rc == 0) 1043 - rc = convert_response_ica(zdev, &ap_msg, 937 + rc = convert_response_ica(zq, &ap_msg, 1044 938 mex->outputdata, 1045 939 mex->outputdatalength); 1046 940 } else 1047 941 /* Signal pending. */ 1048 - ap_cancel_message(zdev->ap_dev, &ap_msg); 942 + ap_cancel_message(zq->queue, &ap_msg); 1049 943 out_free: 1050 944 free_page((unsigned long) ap_msg.message); 1051 945 return rc; ··· 1054 948 /** 1055 949 * The request distributor calls this function if it picked the PCIXCC/CEX2C 1056 950 * device to handle a modexpo_crt request. 
1057 - * @zdev: pointer to zcrypt_device structure that identifies the 951 + * @zq: pointer to zcrypt_queue structure that identifies the 1058 952 * PCIXCC/CEX2C device to the request distributor 1059 953 * @crt: pointer to the modexpoc_crt request buffer 1060 954 */ 1061 - static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev, 955 + static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, 1062 956 struct ica_rsa_modexpo_crt *crt) 1063 957 { 1064 958 struct ap_message ap_msg; ··· 1075 969 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 1076 970 atomic_inc_return(&zcrypt_step); 1077 971 ap_msg.private = &resp_type; 1078 - rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt); 972 + rc = ICACRT_msg_to_type6CRT_msgX(zq, &ap_msg, crt); 1079 973 if (rc) 1080 974 goto out_free; 1081 975 init_completion(&resp_type.work); 1082 - ap_queue_message(zdev->ap_dev, &ap_msg); 976 + ap_queue_message(zq->queue, &ap_msg); 1083 977 rc = wait_for_completion_interruptible(&resp_type.work); 1084 978 if (rc == 0) { 1085 979 rc = ap_msg.rc; 1086 980 if (rc == 0) 1087 - rc = convert_response_ica(zdev, &ap_msg, 981 + rc = convert_response_ica(zq, &ap_msg, 1088 982 crt->outputdata, 1089 983 crt->outputdatalength); 1090 - } else 984 + } else { 1091 985 /* Signal pending. 
*/ 1092 - ap_cancel_message(zdev->ap_dev, &ap_msg); 986 + ap_cancel_message(zq->queue, &ap_msg); 987 + } 1093 988 out_free: 1094 989 free_page((unsigned long) ap_msg.message); 990 + return rc; 991 + } 992 + 993 + unsigned int get_cprb_fc(struct ica_xcRB *xcRB, 994 + struct ap_message *ap_msg, 995 + unsigned int *func_code, unsigned short **dom) 996 + { 997 + struct response_type resp_type = { 998 + .type = PCIXCC_RESPONSE_TYPE_XCRB, 999 + }; 1000 + int rc; 1001 + 1002 + ap_init_message(ap_msg); 1003 + ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); 1004 + if (!ap_msg->message) 1005 + return -ENOMEM; 1006 + ap_msg->receive = zcrypt_msgtype6_receive; 1007 + ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 1008 + atomic_inc_return(&zcrypt_step); 1009 + ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL); 1010 + if (!ap_msg->private) { 1011 + kzfree(ap_msg->message); 1012 + return -ENOMEM; 1013 + } 1014 + memcpy(ap_msg->private, &resp_type, sizeof(resp_type)); 1015 + rc = XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom); 1016 + if (rc) { 1017 + kzfree(ap_msg->message); 1018 + kzfree(ap_msg->private); 1019 + } 1095 1020 return rc; 1096 1021 } 1097 1022 1098 1023 /** 1099 1024 * The request distributor calls this function if it picked the PCIXCC/CEX2C 1100 1025 * device to handle a send_cprb request. 
1101 - * @zdev: pointer to zcrypt_device structure that identifies the 1026 + * @zq: pointer to zcrypt_queue structure that identifies the 1102 1027 * PCIXCC/CEX2C device to the request distributor 1103 1028 * @xcRB: pointer to the send_cprb request buffer 1104 1029 */ 1105 - static long zcrypt_msgtype6_send_cprb(struct zcrypt_device *zdev, 1106 - struct ica_xcRB *xcRB) 1030 + static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq, 1031 + struct ica_xcRB *xcRB, 1032 + struct ap_message *ap_msg) 1107 1033 { 1108 - struct ap_message ap_msg; 1034 + int rc; 1035 + struct response_type *rtype = (struct response_type *)(ap_msg->private); 1036 + 1037 + init_completion(&rtype->work); 1038 + ap_queue_message(zq->queue, ap_msg); 1039 + rc = wait_for_completion_interruptible(&rtype->work); 1040 + if (rc == 0) { 1041 + rc = ap_msg->rc; 1042 + if (rc == 0) 1043 + rc = convert_response_xcrb(zq, ap_msg, xcRB); 1044 + } else 1045 + /* Signal pending. */ 1046 + ap_cancel_message(zq->queue, ap_msg); 1047 + 1048 + kzfree(ap_msg->message); 1049 + kzfree(ap_msg->private); 1050 + return rc; 1051 + } 1052 + 1053 + unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb, 1054 + struct ap_message *ap_msg, 1055 + unsigned int *func_code) 1056 + { 1109 1057 struct response_type resp_type = { 1110 - .type = PCIXCC_RESPONSE_TYPE_XCRB, 1058 + .type = PCIXCC_RESPONSE_TYPE_EP11, 1111 1059 }; 1112 1060 int rc; 1113 1061 1114 - ap_init_message(&ap_msg); 1115 - ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); 1116 - if (!ap_msg.message) 1062 + ap_init_message(ap_msg); 1063 + ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); 1064 + if (!ap_msg->message) 1117 1065 return -ENOMEM; 1118 - ap_msg.receive = zcrypt_msgtype6_receive; 1119 - ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 1066 + ap_msg->receive = zcrypt_msgtype6_receive_ep11; 1067 + ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 1120 1068 atomic_inc_return(&zcrypt_step); 1121 - 
ap_msg.private = &resp_type; 1122 - rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB); 1123 - if (rc) 1124 - goto out_free; 1125 - init_completion(&resp_type.work); 1126 - ap_queue_message(zdev->ap_dev, &ap_msg); 1127 - rc = wait_for_completion_interruptible(&resp_type.work); 1128 - if (rc == 0) { 1129 - rc = ap_msg.rc; 1130 - if (rc == 0) 1131 - rc = convert_response_xcrb(zdev, &ap_msg, xcRB); 1132 - } else 1133 - /* Signal pending. */ 1134 - ap_cancel_message(zdev->ap_dev, &ap_msg); 1135 - out_free: 1136 - kzfree(ap_msg.message); 1069 + ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL); 1070 + if (!ap_msg->private) { 1071 + kzfree(ap_msg->message); 1072 + return -ENOMEM; 1073 + } 1074 + memcpy(ap_msg->private, &resp_type, sizeof(resp_type)); 1075 + rc = xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code); 1076 + if (rc) { 1077 + kzfree(ap_msg->message); 1078 + kzfree(ap_msg->private); 1079 + } 1137 1080 return rc; 1138 1081 } 1139 1082 1140 1083 /** 1141 1084 * The request distributor calls this function if it picked the CEX4P 1142 1085 * device to handle a send_ep11_cprb request. 
1143 - * @zdev: pointer to zcrypt_device structure that identifies the 1086 + * @zq: pointer to zcrypt_queue structure that identifies the 1144 1087 * CEX4P device to the request distributor 1145 1088 * @xcRB: pointer to the ep11 user request block 1146 1089 */ 1147 - static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_device *zdev, 1148 - struct ep11_urb *xcrb) 1090 + static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq, 1091 + struct ep11_urb *xcrb, 1092 + struct ap_message *ap_msg) 1149 1093 { 1150 - struct ap_message ap_msg; 1151 - struct response_type resp_type = { 1152 - .type = PCIXCC_RESPONSE_TYPE_EP11, 1153 - }; 1154 1094 int rc; 1095 + unsigned int lfmt; 1096 + struct response_type *rtype = (struct response_type *)(ap_msg->private); 1097 + struct { 1098 + struct type6_hdr hdr; 1099 + struct ep11_cprb cprbx; 1100 + unsigned char pld_tag; /* fixed value 0x30 */ 1101 + unsigned char pld_lenfmt; /* payload length format */ 1102 + } __packed * msg = ap_msg->message; 1103 + struct pld_hdr { 1104 + unsigned char func_tag; /* fixed value 0x4 */ 1105 + unsigned char func_len; /* fixed value 0x4 */ 1106 + unsigned int func_val; /* function ID */ 1107 + unsigned char dom_tag; /* fixed value 0x4 */ 1108 + unsigned char dom_len; /* fixed value 0x4 */ 1109 + unsigned int dom_val; /* domain id */ 1110 + } __packed * payload_hdr = NULL; 1155 1111 1156 - ap_init_message(&ap_msg); 1157 - ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); 1158 - if (!ap_msg.message) 1159 - return -ENOMEM; 1160 - ap_msg.receive = zcrypt_msgtype6_receive_ep11; 1161 - ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 1162 - atomic_inc_return(&zcrypt_step); 1163 - ap_msg.private = &resp_type; 1164 - rc = xcrb_msg_to_type6_ep11cprb_msgx(zdev, &ap_msg, xcrb); 1165 - if (rc) 1166 - goto out_free; 1167 - init_completion(&resp_type.work); 1168 - ap_queue_message(zdev->ap_dev, &ap_msg); 1169 - rc = wait_for_completion_interruptible(&resp_type.work); 1112 + 
1113 + /** 1114 + * The target domain field within the cprb body/payload block will be 1115 + * replaced by the usage domain for non-management commands only. 1116 + * Therefore we check the first bit of the 'flags' parameter for 1117 + * management command indication. 1118 + * 0 - non management command 1119 + * 1 - management command 1120 + */ 1121 + if (!((msg->cprbx.flags & 0x80) == 0x80)) { 1122 + msg->cprbx.target_id = (unsigned int) 1123 + AP_QID_QUEUE(zq->queue->qid); 1124 + 1125 + if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/ 1126 + switch (msg->pld_lenfmt & 0x03) { 1127 + case 1: 1128 + lfmt = 2; 1129 + break; 1130 + case 2: 1131 + lfmt = 3; 1132 + break; 1133 + default: 1134 + return -EINVAL; 1135 + } 1136 + } else { 1137 + lfmt = 1; /* length format #1 */ 1138 + } 1139 + payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); 1140 + payload_hdr->dom_val = (unsigned int) 1141 + AP_QID_QUEUE(zq->queue->qid); 1142 + } 1143 + 1144 + init_completion(&rtype->work); 1145 + ap_queue_message(zq->queue, ap_msg); 1146 + rc = wait_for_completion_interruptible(&rtype->work); 1170 1147 if (rc == 0) { 1171 - rc = ap_msg.rc; 1148 + rc = ap_msg->rc; 1172 1149 if (rc == 0) 1173 - rc = convert_response_ep11_xcrb(zdev, &ap_msg, xcrb); 1150 + rc = convert_response_ep11_xcrb(zq, ap_msg, xcrb); 1174 1151 } else 1175 1152 /* Signal pending. 
*/ 1176 - ap_cancel_message(zdev->ap_dev, &ap_msg); 1153 + ap_cancel_message(zq->queue, ap_msg); 1177 1154 1178 - out_free: 1179 - kzfree(ap_msg.message); 1155 + kzfree(ap_msg->message); 1156 + kzfree(ap_msg->private); 1180 1157 return rc; 1158 + } 1159 + 1160 + unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code, 1161 + unsigned int *domain) 1162 + { 1163 + struct response_type resp_type = { 1164 + .type = PCIXCC_RESPONSE_TYPE_XCRB, 1165 + }; 1166 + 1167 + ap_init_message(ap_msg); 1168 + ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); 1169 + if (!ap_msg->message) 1170 + return -ENOMEM; 1171 + ap_msg->receive = zcrypt_msgtype6_receive; 1172 + ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 1173 + atomic_inc_return(&zcrypt_step); 1174 + ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL); 1175 + if (!ap_msg->private) { 1176 + kzfree(ap_msg->message); 1177 + return -ENOMEM; 1178 + } 1179 + memcpy(ap_msg->private, &resp_type, sizeof(resp_type)); 1180 + 1181 + rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain); 1182 + 1183 + *func_code = HWRNG; 1184 + return 0; 1181 1185 } 1182 1186 1183 1187 /** 1184 1188 * The request distributor calls this function if it picked the PCIXCC/CEX2C 1185 1189 * device to generate random data. 
1186 - * @zdev: pointer to zcrypt_device structure that identifies the 1190 + * @zq: pointer to zcrypt_queue structure that identifies the 1187 1191 * PCIXCC/CEX2C device to the request distributor 1188 1192 * @buffer: pointer to a memory page to return random data 1189 1193 */ 1190 - 1191 - static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev, 1192 - char *buffer) 1194 + static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq, 1195 + char *buffer, struct ap_message *ap_msg) 1193 1196 { 1194 - struct ap_message ap_msg; 1195 - struct response_type resp_type = { 1196 - .type = PCIXCC_RESPONSE_TYPE_XCRB, 1197 - }; 1197 + struct { 1198 + struct type6_hdr hdr; 1199 + struct CPRBX cprbx; 1200 + char function_code[2]; 1201 + short int rule_length; 1202 + char rule[8]; 1203 + short int verb_length; 1204 + short int key_length; 1205 + } __packed * msg = ap_msg->message; 1206 + struct response_type *rtype = (struct response_type *)(ap_msg->private); 1198 1207 int rc; 1199 1208 1200 - ap_init_message(&ap_msg); 1201 - ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL); 1202 - if (!ap_msg.message) 1203 - return -ENOMEM; 1204 - ap_msg.receive = zcrypt_msgtype6_receive; 1205 - ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 1206 - atomic_inc_return(&zcrypt_step); 1207 - ap_msg.private = &resp_type; 1208 - rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE); 1209 - init_completion(&resp_type.work); 1210 - ap_queue_message(zdev->ap_dev, &ap_msg); 1211 - rc = wait_for_completion_interruptible(&resp_type.work); 1209 + msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); 1210 + 1211 + init_completion(&rtype->work); 1212 + ap_queue_message(zq->queue, ap_msg); 1213 + rc = wait_for_completion_interruptible(&rtype->work); 1212 1214 if (rc == 0) { 1213 - rc = ap_msg.rc; 1215 + rc = ap_msg->rc; 1214 1216 if (rc == 0) 1215 - rc = convert_response_rng(zdev, &ap_msg, buffer); 1217 + rc = convert_response_rng(zq, ap_msg, buffer); 1216 1218 } else 1217 
1219 /* Signal pending. */ 1218 - ap_cancel_message(zdev->ap_dev, &ap_msg); 1219 - kfree(ap_msg.message); 1220 + ap_cancel_message(zq->queue, ap_msg); 1221 + 1222 + kzfree(ap_msg->message); 1223 + kzfree(ap_msg->private); 1220 1224 return rc; 1221 1225 } 1222 1226 ··· 1361 1145 .send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb, 1362 1146 }; 1363 1147 1364 - int __init zcrypt_msgtype6_init(void) 1148 + void __init zcrypt_msgtype6_init(void) 1365 1149 { 1366 1150 zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops); 1367 1151 zcrypt_msgtype_register(&zcrypt_msgtype6_ops); 1368 1152 zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops); 1369 - return 0; 1370 1153 } 1371 1154 1372 1155 void __exit zcrypt_msgtype6_exit(void) ··· 1374 1159 zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops); 1375 1160 zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops); 1376 1161 } 1377 - 1378 - module_init(zcrypt_msgtype6_init); 1379 - module_exit(zcrypt_msgtype6_exit);
+18 -5
drivers/s390/crypto/zcrypt_msgtype6.h
··· 116 116 unsigned int offset4; /* 0x00000000 */ 117 117 } __packed; 118 118 119 + unsigned int get_cprb_fc(struct ica_xcRB *, struct ap_message *, 120 + unsigned int *, unsigned short **); 121 + unsigned int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *, 122 + unsigned int *); 123 + unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *); 124 + 125 + #define LOW 10 126 + #define MEDIUM 100 127 + #define HIGH 500 128 + 129 + int speed_idx_cca(int); 130 + int speed_idx_ep11(int); 131 + 119 132 /** 120 133 * Prepare a type6 CPRB message for random number generation 121 134 * 122 135 * @ap_dev: AP device pointer 123 136 * @ap_msg: pointer to AP message 124 137 */ 125 - static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev, 126 - struct ap_message *ap_msg, 127 - unsigned random_number_length) 138 + static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg, 139 + unsigned int random_number_length, 140 + unsigned int *domain) 128 141 { 129 142 struct { 130 143 struct type6_hdr hdr; ··· 169 156 msg->hdr.FromCardLen2 = random_number_length, 170 157 msg->cprbx = local_cprbx; 171 158 msg->cprbx.rpl_datal = random_number_length, 172 - msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid); 173 159 memcpy(msg->function_code, msg->hdr.function_code, 0x02); 174 160 msg->rule_length = 0x0a; 175 161 memcpy(msg->rule, "RANDOM ", 8); 176 162 msg->verb_length = 0x02; 177 163 msg->key_length = 0x02; 178 164 ap_msg->length = sizeof(*msg); 165 + *domain = (unsigned short)msg->cprbx.domain; 179 166 } 180 167 181 - int zcrypt_msgtype6_init(void); 168 + void zcrypt_msgtype6_init(void); 182 169 void zcrypt_msgtype6_exit(void); 183 170 184 171 #endif /* _ZCRYPT_MSGTYPE6_H_ */
+173 -213
drivers/s390/crypto/zcrypt_pcixcc.c
··· 32 32 #include <linux/slab.h> 33 33 #include <linux/atomic.h> 34 34 #include <asm/uaccess.h> 35 + #include <linux/mod_devicetable.h> 35 36 36 37 #include "ap_bus.h" 37 38 #include "zcrypt_api.h" ··· 46 45 #define PCIXCC_MAX_MOD_SIZE 256 /* 2048 bits */ 47 46 #define CEX3C_MIN_MOD_SIZE PCIXCC_MIN_MOD_SIZE 48 47 #define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */ 49 - 50 - #define PCIXCC_MCL2_SPEED_RATING 7870 51 - #define PCIXCC_MCL3_SPEED_RATING 7870 52 - #define CEX2C_SPEED_RATING 7000 53 - #define CEX3C_SPEED_RATING 6500 54 48 55 49 #define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c /* max size type6 v2 crt message */ 56 50 #define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ ··· 63 67 #define PCIXCC_RESPONSE_TYPE_ICA 0 64 68 #define PCIXCC_RESPONSE_TYPE_XCRB 1 65 69 66 - static struct ap_device_id zcrypt_pcixcc_ids[] = { 67 - { AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) }, 68 - { AP_DEVICE(AP_DEVICE_TYPE_CEX2C) }, 69 - { AP_DEVICE(AP_DEVICE_TYPE_CEX3C) }, 70 - { /* end of list */ }, 71 - }; 72 - 73 - MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids); 74 70 MODULE_AUTHOR("IBM Corporation"); 75 71 MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \ 76 72 "Copyright IBM Corp. 
2001, 2012"); 77 73 MODULE_LICENSE("GPL"); 78 74 79 - static int zcrypt_pcixcc_probe(struct ap_device *ap_dev); 80 - static void zcrypt_pcixcc_remove(struct ap_device *ap_dev); 81 - 82 - static struct ap_driver zcrypt_pcixcc_driver = { 83 - .probe = zcrypt_pcixcc_probe, 84 - .remove = zcrypt_pcixcc_remove, 85 - .ids = zcrypt_pcixcc_ids, 86 - .request_timeout = PCIXCC_CLEANUP_TIME, 75 + static struct ap_device_id zcrypt_pcixcc_card_ids[] = { 76 + { .dev_type = AP_DEVICE_TYPE_PCIXCC, 77 + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 78 + { .dev_type = AP_DEVICE_TYPE_CEX2C, 79 + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 80 + { .dev_type = AP_DEVICE_TYPE_CEX3C, 81 + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, 82 + { /* end of list */ }, 87 83 }; 88 84 89 - /** 90 - * Micro-code detection function. Its sends a message to a pcixcc card 91 - * to find out the microcode level. 92 - * @ap_dev: pointer to the AP device. 93 - */ 94 - static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev) 95 - { 96 - static unsigned char msg[] = { 97 - 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00, 98 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 99 - 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00, 100 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 101 - 0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00, 102 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 103 - 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00, 104 - 0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00, 105 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 106 - 0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00, 107 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 108 - 0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32, 109 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8, 110 - 0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24, 111 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 112 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 113 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 114 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 115 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 116 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 117 - 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 118 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 119 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 120 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 121 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 122 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 123 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 124 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 125 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 126 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 127 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 128 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 129 - 0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00, 130 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 131 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 132 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 133 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 134 - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 135 - 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A, 136 - 0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20, 137 - 0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05, 138 - 0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D, 139 - 0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55, 140 - 0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD, 141 - 0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA, 142 - 0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22, 143 - 0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB, 144 - 0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54, 145 - 0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00, 146 - 0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00, 147 - 0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40, 148 - 0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C, 149 - 0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF, 150 - 0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9, 151 - 0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63, 152 - 0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5, 153 - 0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A, 154 - 0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01, 155 - 0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28, 156 - 0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91, 157 - 0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5, 158 - 0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C, 159 - 
0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98, 160 - 0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96, 161 - 0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19, 162 - 0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47, 163 - 0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36, 164 - 0xF1,0x3D,0x93,0x53 165 - }; 166 - unsigned long long psmid; 167 - struct CPRBX *cprbx; 168 - char *reply; 169 - int rc, i; 85 + MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_card_ids); 170 86 171 - reply = (void *) get_zeroed_page(GFP_KERNEL); 172 - if (!reply) 173 - return -ENOMEM; 87 + static struct ap_device_id zcrypt_pcixcc_queue_ids[] = { 88 + { .dev_type = AP_DEVICE_TYPE_PCIXCC, 89 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 90 + { .dev_type = AP_DEVICE_TYPE_CEX2C, 91 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 92 + { .dev_type = AP_DEVICE_TYPE_CEX3C, 93 + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, 94 + { /* end of list */ }, 95 + }; 174 96 175 - rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, sizeof(msg)); 176 - if (rc) 177 - goto out_free; 178 - 179 - /* Wait for the test message to complete. */ 180 - for (i = 0; i < 6; i++) { 181 - msleep(300); 182 - rc = ap_recv(ap_dev->qid, &psmid, reply, 4096); 183 - if (rc == 0 && psmid == 0x0102030405060708ULL) 184 - break; 185 - } 186 - 187 - if (i >= 6) { 188 - /* Got no answer. */ 189 - rc = -ENODEV; 190 - goto out_free; 191 - } 192 - 193 - cprbx = (struct CPRBX *) (reply + 48); 194 - if (cprbx->ccp_rtcode == 8 && cprbx->ccp_rscode == 33) 195 - rc = ZCRYPT_PCIXCC_MCL2; 196 - else 197 - rc = ZCRYPT_PCIXCC_MCL3; 198 - out_free: 199 - free_page((unsigned long) reply); 200 - return rc; 201 - } 97 + MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_queue_ids); 202 98 203 99 /** 204 100 * Large random number detection function. Its sends a message to a pcixcc ··· 99 211 * 100 212 * Returns 1 if large random numbers are supported, 0 if not and < 0 on error. 
101 213 */ 102 - static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev) 214 + static int zcrypt_pcixcc_rng_supported(struct ap_queue *aq) 103 215 { 104 216 struct ap_message ap_msg; 105 217 unsigned long long psmid; 218 + unsigned int domain; 106 219 struct { 107 220 struct type86_hdr hdr; 108 221 struct type86_fmt2_ext fmt2; 109 222 struct CPRBX cprbx; 110 223 } __attribute__((packed)) *reply; 224 + struct { 225 + struct type6_hdr hdr; 226 + struct CPRBX cprbx; 227 + char function_code[2]; 228 + short int rule_length; 229 + char rule[8]; 230 + short int verb_length; 231 + short int key_length; 232 + } __packed * msg; 111 233 int rc, i; 112 234 113 235 ap_init_message(&ap_msg); ··· 125 227 if (!ap_msg.message) 126 228 return -ENOMEM; 127 229 128 - rng_type6CPRB_msgX(ap_dev, &ap_msg, 4); 129 - rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message, 230 + rng_type6CPRB_msgX(&ap_msg, 4, &domain); 231 + 232 + msg = ap_msg.message; 233 + msg->cprbx.domain = AP_QID_QUEUE(aq->qid); 234 + 235 + rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.message, 130 236 ap_msg.length); 131 237 if (rc) 132 238 goto out_free; ··· 138 236 /* Wait for the test message to complete. */ 139 237 for (i = 0; i < 2 * HZ; i++) { 140 238 msleep(1000 / HZ); 141 - rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096); 239 + rc = ap_recv(aq->qid, &psmid, ap_msg.message, 4096); 142 240 if (rc == 0 && psmid == 0x0102030405060708ULL) 143 241 break; 144 242 } ··· 160 258 } 161 259 162 260 /** 163 - * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device 164 - * since the bus_match already checked the hardware type. The PCIXCC 165 - * cards come in two flavours: micro code level 2 and micro code level 3. 166 - * This is checked by sending a test message to the device. 167 - * @ap_dev: pointer to the AP device. 261 + * Probe function for PCIXCC/CEX2C card devices. It always accepts the 262 + * AP device since the bus_match already checked the hardware type. 
 * PCIXCC cards come in two flavours: micro code level 2 and micro code
 * level 3. NOTE(review): the old test-message micro-code-level check was
 * removed with the card/queue split; only CEX2C and CEX3C types are
 * accepted here, everything else is rejected with -ENODEV.
 * @ap_dev: pointer to the AP card device.
 */
static int zcrypt_pcixcc_card_probe(struct ap_device *ap_dev)
{
	/*
	 * Normalized speed ratings per crypto adapter
	 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
	 */
	static const int CEX2C_SPEED_IDX[] = {
		1000, 1400, 2400, 1100, 1500, 2600, 100, 12};
	static const int CEX3C_SPEED_IDX[] = {
		500,  700, 1400,  550,  800, 1500,  80, 10};

	struct ap_card *ac = to_ap_card(&ap_dev->device);
	struct zcrypt_card *zc;
	int rc = 0;

	zc = zcrypt_card_alloc();
	if (!zc)
		return -ENOMEM;
	/* Cross-link AP card and zcrypt card before filling in the type data. */
	zc->card = ac;
	ac->private = zc;
	switch (ac->ap_dev.device_type) {
	case AP_DEVICE_TYPE_CEX2C:
		zc->user_space_type = ZCRYPT_CEX2C;
		zc->type_string = "CEX2C";
		memcpy(zc->speed_rating, CEX2C_SPEED_IDX,
		       sizeof(CEX2C_SPEED_IDX));
		zc->min_mod_size = PCIXCC_MIN_MOD_SIZE;
		zc->max_mod_size = PCIXCC_MAX_MOD_SIZE;
		zc->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
		break;
	case AP_DEVICE_TYPE_CEX3C:
		zc->user_space_type = ZCRYPT_CEX3C;
		zc->type_string = "CEX3C";
		memcpy(zc->speed_rating, CEX3C_SPEED_IDX,
		       sizeof(CEX3C_SPEED_IDX));
		zc->min_mod_size = CEX3C_MIN_MOD_SIZE;
		zc->max_mod_size = CEX3C_MAX_MOD_SIZE;
		zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
		break;
	default:
		/* Unknown hardware type: undo the allocation, refuse the device. */
		zcrypt_card_free(zc);
		return -ENODEV;
	}
	zc->online = 1;

	rc = zcrypt_card_register(zc);
	if (rc) {
		/* Registration failed: unlink from the AP card and free. */
		ac->private = NULL;
		zcrypt_card_free(zc);
	}

	return rc;
}

/**
 * This is called to remove the PCIXCC/CEX2C card driver information
 * if an AP card device is removed.
 */
static void zcrypt_pcixcc_card_remove(struct ap_device *ap_dev)
{
	struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;

	/* zc may be NULL if the probe never completed registration. */
	if (zc)
		zcrypt_card_unregister(zc);
}

static struct ap_driver zcrypt_pcixcc_card_driver = {
	.probe = zcrypt_pcixcc_card_probe,
	.remove = zcrypt_pcixcc_card_remove,
	.ids = zcrypt_pcixcc_card_ids,
};

/**
 * Probe function for PCIXCC/CEX2C queue devices. It always accepts the
 * AP device since the bus_match already checked the hardware type.
 * @ap_dev: pointer to the AP queue device.
345 + */ 346 + static int zcrypt_pcixcc_queue_probe(struct ap_device *ap_dev) 347 + { 348 + struct ap_queue *aq = to_ap_queue(&ap_dev->device); 349 + struct zcrypt_queue *zq; 350 + int rc; 351 + 352 + zq = zcrypt_queue_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE); 353 + if (!zq) 354 + return -ENOMEM; 355 + zq->queue = aq; 356 + zq->online = 1; 357 + atomic_set(&zq->load, 0); 358 + rc = zcrypt_pcixcc_rng_supported(aq); 359 + if (rc < 0) { 360 + zcrypt_queue_free(zq); 361 + return rc; 362 + } 363 + if (rc) 364 + zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, 365 + MSGTYPE06_VARIANT_DEFAULT); 366 + else 367 + zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, 368 + MSGTYPE06_VARIANT_NORNG); 369 + ap_queue_init_reply(aq, &zq->reply); 370 + aq->request_timeout = PCIXCC_CLEANUP_TIME, 371 + aq->private = zq; 372 + rc = zcrypt_queue_register(zq); 373 + if (rc) { 374 + aq->private = NULL; 375 + zcrypt_queue_free(zq); 376 + } 377 + return rc; 378 + } 379 + 380 + /** 381 + * This is called to remove the PCIXCC/CEX2C queue driver information 382 + * if an AP queue device is removed. 
383 + */ 384 + static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev) 385 + { 386 + struct ap_queue *aq = to_ap_queue(&ap_dev->device); 387 + struct zcrypt_queue *zq = aq->private; 388 + 389 + ap_queue_remove(aq); 390 + if (zq) 391 + zcrypt_queue_unregister(zq); 392 + } 393 + 394 + static struct ap_driver zcrypt_pcixcc_queue_driver = { 395 + .probe = zcrypt_pcixcc_queue_probe, 396 + .remove = zcrypt_pcixcc_queue_remove, 397 + .suspend = ap_queue_suspend, 398 + .resume = ap_queue_resume, 399 + .ids = zcrypt_pcixcc_queue_ids, 400 + }; 258 401 259 402 int __init zcrypt_pcixcc_init(void) 260 403 { 261 - return ap_driver_register(&zcrypt_pcixcc_driver, THIS_MODULE, "pcixcc"); 404 + int rc; 405 + 406 + rc = ap_driver_register(&zcrypt_pcixcc_card_driver, 407 + THIS_MODULE, "pcixcccard"); 408 + if (rc) 409 + return rc; 410 + 411 + rc = ap_driver_register(&zcrypt_pcixcc_queue_driver, 412 + THIS_MODULE, "pcixccqueue"); 413 + if (rc) 414 + ap_driver_unregister(&zcrypt_pcixcc_card_driver); 415 + 416 + return rc; 262 417 } 263 418 264 419 void zcrypt_pcixcc_exit(void) 265 420 { 266 - ap_driver_unregister(&zcrypt_pcixcc_driver); 421 + ap_driver_unregister(&zcrypt_pcixcc_queue_driver); 422 + ap_driver_unregister(&zcrypt_pcixcc_card_driver); 267 423 } 268 424 269 425 module_init(zcrypt_pcixcc_init);
+226
drivers/s390/crypto/zcrypt_queue.c
··· 1 + /* 2 + * zcrypt 2.1.0 3 + * 4 + * Copyright IBM Corp. 2001, 2012 5 + * Author(s): Robert Burroughs 6 + * Eric Rossman (edrossma@us.ibm.com) 7 + * Cornelia Huck <cornelia.huck@de.ibm.com> 8 + * 9 + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 10 + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 11 + * Ralph Wuerthner <rwuerthn@de.ibm.com> 12 + * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 13 + * 14 + * This program is free software; you can redistribute it and/or modify 15 + * it under the terms of the GNU General Public License as published by 16 + * the Free Software Foundation; either version 2, or (at your option) 17 + * any later version. 18 + * 19 + * This program is distributed in the hope that it will be useful, 20 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 + * GNU General Public License for more details. 23 + */ 24 + 25 + #include <linux/module.h> 26 + #include <linux/init.h> 27 + #include <linux/interrupt.h> 28 + #include <linux/miscdevice.h> 29 + #include <linux/fs.h> 30 + #include <linux/proc_fs.h> 31 + #include <linux/seq_file.h> 32 + #include <linux/compat.h> 33 + #include <linux/slab.h> 34 + #include <linux/atomic.h> 35 + #include <linux/uaccess.h> 36 + #include <linux/hw_random.h> 37 + #include <linux/debugfs.h> 38 + #include <asm/debug.h> 39 + 40 + #include "zcrypt_debug.h" 41 + #include "zcrypt_api.h" 42 + 43 + #include "zcrypt_msgtype6.h" 44 + #include "zcrypt_msgtype50.h" 45 + 46 + /* 47 + * Device attributes common for all crypto queue devices. 
48 + */ 49 + 50 + static ssize_t zcrypt_queue_online_show(struct device *dev, 51 + struct device_attribute *attr, 52 + char *buf) 53 + { 54 + struct zcrypt_queue *zq = to_ap_queue(dev)->private; 55 + 56 + return snprintf(buf, PAGE_SIZE, "%d\n", zq->online); 57 + } 58 + 59 + static ssize_t zcrypt_queue_online_store(struct device *dev, 60 + struct device_attribute *attr, 61 + const char *buf, size_t count) 62 + { 63 + struct zcrypt_queue *zq = to_ap_queue(dev)->private; 64 + struct zcrypt_card *zc = zq->zcard; 65 + int online; 66 + 67 + if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) 68 + return -EINVAL; 69 + 70 + if (online && !zc->online) 71 + return -EINVAL; 72 + zq->online = online; 73 + 74 + ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x online=%d\n", 75 + AP_QID_CARD(zq->queue->qid), 76 + AP_QID_QUEUE(zq->queue->qid), 77 + online); 78 + 79 + if (!online) 80 + ap_flush_queue(zq->queue); 81 + return count; 82 + } 83 + 84 + static DEVICE_ATTR(online, 0644, zcrypt_queue_online_show, 85 + zcrypt_queue_online_store); 86 + 87 + static struct attribute *zcrypt_queue_attrs[] = { 88 + &dev_attr_online.attr, 89 + NULL, 90 + }; 91 + 92 + static struct attribute_group zcrypt_queue_attr_group = { 93 + .attrs = zcrypt_queue_attrs, 94 + }; 95 + 96 + void zcrypt_queue_force_online(struct zcrypt_queue *zq, int online) 97 + { 98 + zq->online = online; 99 + if (!online) 100 + ap_flush_queue(zq->queue); 101 + } 102 + 103 + struct zcrypt_queue *zcrypt_queue_alloc(size_t max_response_size) 104 + { 105 + struct zcrypt_queue *zq; 106 + 107 + zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL); 108 + if (!zq) 109 + return NULL; 110 + zq->reply.message = kmalloc(max_response_size, GFP_KERNEL); 111 + if (!zq->reply.message) 112 + goto out_free; 113 + zq->reply.length = max_response_size; 114 + INIT_LIST_HEAD(&zq->list); 115 + kref_init(&zq->refcount); 116 + return zq; 117 + 118 + out_free: 119 + kfree(zq); 120 + return NULL; 121 + } 122 + EXPORT_SYMBOL(zcrypt_queue_alloc); 123 
/* Free a queue structure and its reply buffer (counterpart of _alloc). */
void zcrypt_queue_free(struct zcrypt_queue *zq)
{
	kfree(zq->reply.message);
	kfree(zq);
}
EXPORT_SYMBOL(zcrypt_queue_free);

/* kref release callback: runs when the last reference is dropped. */
static void zcrypt_queue_release(struct kref *kref)
{
	struct zcrypt_queue *zq =
		container_of(kref, struct zcrypt_queue, refcount);
	zcrypt_queue_free(zq);
}

/* Take an additional reference on a queue. */
void zcrypt_queue_get(struct zcrypt_queue *zq)
{
	kref_get(&zq->refcount);
}
EXPORT_SYMBOL(zcrypt_queue_get);

/* Drop a reference; returns nonzero if this freed the queue. */
int zcrypt_queue_put(struct zcrypt_queue *zq)
{
	return kref_put(&zq->refcount, zcrypt_queue_release);
}
EXPORT_SYMBOL(zcrypt_queue_put);

/**
 * zcrypt_queue_register() - Register a crypto queue device.
 * @zq: Pointer to a crypto queue device
 *
 * Register a crypto queue device. Returns 0 if successful.
 *
 * Under zcrypt_list_lock the queue is linked to its card (holding a card
 * reference) and added to the card's queue list; afterwards the sysfs
 * "online" attribute group is created and a device reference is taken.
 * On any failure every step is rolled back in reverse order.
 */
int zcrypt_queue_register(struct zcrypt_queue *zq)
{
	struct zcrypt_card *zc;
	int rc;

	spin_lock(&zcrypt_list_lock);
	zc = zq->queue->card->private;
	zcrypt_card_get(zc);
	zq->zcard = zc;
	zq->online = 1;	/* New devices are online by default. */

	ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x register online=1\n",
		   AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));

	list_add_tail(&zq->list, &zc->zqueues);
	zcrypt_device_count++;
	spin_unlock(&zcrypt_list_lock);

	rc = sysfs_create_group(&zq->queue->ap_dev.device.kobj,
				&zcrypt_queue_attr_group);
	if (rc)
		goto out;
	get_device(&zq->queue->ap_dev.device);

	/* If this msgtype supports RNG, announce an hwrng device. */
	if (zq->ops->rng) {
		rc = zcrypt_rng_device_add();
		if (rc)
			goto out_unregister;
	}
	return 0;

out_unregister:
	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
			   &zcrypt_queue_attr_group);
	put_device(&zq->queue->ap_dev.device);
out:
	spin_lock(&zcrypt_list_lock);
	list_del_init(&zq->list);
	spin_unlock(&zcrypt_list_lock);
	zcrypt_card_put(zc);
	return rc;
}
EXPORT_SYMBOL(zcrypt_queue_register);

/**
 * zcrypt_queue_unregister(): Unregister a crypto queue device.
 * @zq: Pointer to crypto queue device
 *
 * Unregister a crypto queue device. Reverses zcrypt_queue_register():
 * unlink from the card list, drop the card reference, remove the RNG
 * device and sysfs group, release the device and queue references.
 */
void zcrypt_queue_unregister(struct zcrypt_queue *zq)
{
	struct zcrypt_card *zc;

	ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x unregister\n",
		   AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));

	zc = zq->zcard;
	spin_lock(&zcrypt_list_lock);
	list_del_init(&zq->list);
	zcrypt_device_count--;
	spin_unlock(&zcrypt_list_lock);
	zcrypt_card_put(zc);
	if (zq->ops->rng)
		zcrypt_rng_device_remove();
	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
			   &zcrypt_queue_attr_group);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
}
EXPORT_SYMBOL(zcrypt_queue_unregister);
+2 -1
include/linux/mod_devicetable.h
··· 175 175 kernel_ulong_t driver_info; 176 176 }; 177 177 178 - #define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01 178 + #define AP_DEVICE_ID_MATCH_CARD_TYPE 0x01 179 + #define AP_DEVICE_ID_MATCH_QUEUE_TYPE 0x02 179 180 180 181 /* s390 css bus devices (subchannels) */ 181 182 struct css_device_id {