// SPDX-License-Identifier: GPL-2.0+
/*
 *  Copyright IBM Corp. 2001, 2018
 *  Author(s): Robert Burroughs
 *	       Eric Rossman (edrossma@us.ibm.com)
 *	       Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
 *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
 *  Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
 */

#define pr_fmt(fmt) "zcrypt: " fmt

#include <linux/export.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/capability.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
#include "zcrypt_ccamisc.h"
#include "zcrypt_ep11misc.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

unsigned int zcrypt_mempool_threshold = 5;
module_param_named(mempool_threshold, zcrypt_mempool_threshold, uint, 0440);
MODULE_PARM_DESC(mempool_threshold, "CCA and EP11 request/reply mempool minimal items (min: 1)");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);

static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;

/*
 * Process a rescan of the transport layer.
 * Runs a synchronous AP bus rescan.
 * Returns true if something has changed (for example the bus scan
 * has found and set up new devices), in which case a retry is
 * worthwhile. Otherwise returns false, meaning nothing changed on
 * the AP bus level.
 */
static inline bool zcrypt_process_rescan(void)
{
	return ap_bus_force_rescan();
}
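
/*
 * A sketch of the retry pattern shared by the ioctl and in-kernel
 * entry points below ("do_send" is a placeholder, standing in for any
 * of the zcrypt_rsa_*() / _zcrypt_send_*() workers): a request is
 * retried on -EAGAIN up to TRACK_AGAIN_MAX times; on -ENODEV one
 * synchronous AP bus rescan is requested and, only if the rescan
 * reported changes, the retry loop runs once more.
 *
 *	do {
 *		rc = do_send(perms, &tr, req);
 *	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
 *	if (rc == -ENODEV && zcrypt_process_rescan())
 *		do {
 *			rc = do_send(perms, &tr, req);
 *		} while (rc == -EAGAIN &&
 *			 ++tr.again_counter < TRACK_AGAIN_MAX);
 *	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
 *		rc = -EIO;
 */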

void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}

struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if (zops->variant == variant &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);
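
/*
 * Usage sketch (assuming the name/variant constants provided by
 * zcrypt_msgtype50.h; they are not defined in this file): a card
 * driver resolves its ops table after the matching msgtype module
 * has registered itself via zcrypt_msgtype_register():
 *
 *	struct zcrypt_ops *zops;
 *
 *	zops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT);
 *	if (!zops)
 *		return -ENODEV;
 */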

/*
 * Multi device nodes extension functions.
 */

struct zcdn_device;

static void zcdn_device_release(struct device *dev);
static const struct class zcrypt_class = {
	.name = ZCRYPT_NAME,
	.dev_release = zcdn_device_release,
};
static dev_t zcrypt_devt;
static struct cdev zcrypt_cdev;

struct zcdn_device {
	struct device device;
	struct ap_perms perms;
};

#define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)

#define ZCDN_MAX_NAME 32

static int zcdn_create(const char *name);
static int zcdn_destroy(const char *name);

/*
 * Find zcdn device by name.
 * Returns a reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
{
	struct device *dev = class_find_device_by_name(&zcrypt_class, name);

	return dev ? to_zcdn_dev(dev) : NULL;
}

/*
 * Find zcdn device by devt value.
 * Returns a reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
	struct device *dev = class_find_device_by_devt(&zcrypt_class, devt);

	return dev ? to_zcdn_dev(dev) : NULL;
}

static ssize_t ioctlmask_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_attr_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_attr_mutex);

	return n;
}

static ssize_t ioctlmask_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
			       AP_IOCTLS, &ap_attr_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(ioctlmask);

static ssize_t apmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_attr_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_attr_mutex);

	return n;
}

static ssize_t apmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
			       AP_DEVICES, &ap_attr_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(apmask);

static ssize_t aqmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_attr_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_attr_mutex);

	return n;
}

static ssize_t aqmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
			       AP_DOMAINS, &ap_attr_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(aqmask);

static ssize_t admask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_attr_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_attr_mutex);

	return n;
}

static ssize_t admask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
			       AP_DOMAINS, &ap_attr_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(admask);

static struct attribute *zcdn_dev_attrs[] = {
	&dev_attr_ioctlmask.attr,
	&dev_attr_apmask.attr,
	&dev_attr_aqmask.attr,
	&dev_attr_admask.attr,
	NULL
};

static struct attribute_group zcdn_dev_attr_group = {
	.attrs = zcdn_dev_attrs
};

static const struct attribute_group *zcdn_dev_attr_groups[] = {
	&zcdn_dev_attr_group,
	NULL
};

static ssize_t zcdn_create_store(const struct class *class,
				 const struct class_attribute *attr,
				 const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strscpy(name, skip_spaces(buf), sizeof(name));

	rc = zcdn_create(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_create =
	__ATTR(create, 0600, NULL, zcdn_create_store);

static ssize_t zcdn_destroy_store(const struct class *class,
				  const struct class_attribute *attr,
				  const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strscpy(name, skip_spaces(buf), sizeof(name));

	rc = zcdn_destroy(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_destroy =
	__ATTR(destroy, 0600, NULL, zcdn_destroy_store);
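
/*
 * Usage sketch (sysfs paths follow from the class and attribute names
 * above; the exact mask syntax is whatever ap_parse_mask_str()
 * accepts, typically an absolute hex mask or relative "+/-" index
 * lists): an administrator creates a restricted device node, limits
 * it to one card/domain, and removes it again:
 *
 *	echo my_zcdn > /sys/class/zcrypt/create
 *	echo +1 > /sys/class/zcrypt/my_zcdn/apmask
 *	echo +6 > /sys/class/zcrypt/my_zcdn/aqmask
 *	echo my_zcdn > /sys/class/zcrypt/destroy
 */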

static void zcdn_device_release(struct device *dev)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n",
			__func__, MAJOR(dev->devt), MINOR(dev->devt));

	kfree(zcdndev);
}

static int zcdn_create(const char *name)
{
	dev_t devt;
	int i, rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_attr_mutex))
		return -ERESTARTSYS;

	/* check if device node with this name already exists */
	if (name[0]) {
		zcdndev = find_zcdndev_by_name(name);
		if (zcdndev) {
			put_device(&zcdndev->device);
			rc = -EEXIST;
			goto unlockout;
		}
	}

	/* find an unused minor number */
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev)
			put_device(&zcdndev->device);
		else
			break;
	}
	if (i == ZCRYPT_MAX_MINOR_NODES) {
		rc = -ENOSPC;
		goto unlockout;
	}

	/* alloc and prepare a new zcdn device */
	zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
	if (!zcdndev) {
		rc = -ENOMEM;
		goto unlockout;
	}
	zcdndev->device.release = zcdn_device_release;
	zcdndev->device.class = &zcrypt_class;
	zcdndev->device.devt = devt;
	zcdndev->device.groups = zcdn_dev_attr_groups;
	if (name[0])
		rc = dev_set_name(&zcdndev->device, "%s", name);
	else
		rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt));
	if (rc) {
		kfree(zcdndev);
		goto unlockout;
	}
	rc = device_register(&zcdndev->device);
	if (rc) {
		put_device(&zcdndev->device);
		goto unlockout;
	}

	ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n",
			__func__, MAJOR(devt), MINOR(devt));

unlockout:
	mutex_unlock(&ap_attr_mutex);
	return rc;
}

static int zcdn_destroy(const char *name)
{
	int rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_attr_mutex))
		return -ERESTARTSYS;

	/* try to find this zcdn device */
	zcdndev = find_zcdndev_by_name(name);
	if (!zcdndev) {
		rc = -ENOENT;
		goto unlockout;
	}

	/*
	 * The zcdn device is not hard destroyed. It is subject to
	 * reference counting and thus just needs to be unregistered.
	 */
	put_device(&zcdndev->device);
	device_unregister(&zcdndev->device);

unlockout:
	mutex_unlock(&ap_attr_mutex);
	return rc;
}

static void zcdn_destroy_all(void)
{
	int i;
	dev_t devt;
	struct zcdn_device *zcdndev;

	mutex_lock(&ap_attr_mutex);
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev) {
			put_device(&zcdndev->device);
			device_unregister(&zcdndev->device);
		}
	}
	mutex_unlock(&ap_attr_mutex);
}

/*
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/*
 * zcrypt_write(): Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/*
 * zcrypt_open(): Device open function; counts the number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	struct ap_perms *perms = &ap_perms;

	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		if (mutex_lock_interruptible(&ap_attr_mutex))
			return -ERESTARTSYS;
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		/* find returns a reference, no get_device() needed */
		mutex_unlock(&ap_attr_mutex);
		if (zcdndev)
			perms = &zcdndev->perms;
	}
	filp->private_data = (void *)perms;

	atomic_inc(&zcrypt_open_count);
	return stream_open(inode, filp);
}
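
/*
 * The practical effect (a sketch; the device node names depend on how
 * the zcdn nodes were created): opening the classic misc device
 * applies the global ap_perms, while opening a zcdn node applies that
 * node's private permission masks to every subsequent ioctl:
 *
 *	fd = open("/dev/z90crypt", O_RDWR);	// global ap_perms
 *	fd = open("/dev/my_zcdn", O_RDWR);	// this node's perms
 */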

/*
 * zcrypt_release(): Device close function; counts the number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		mutex_lock(&ap_attr_mutex);
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		mutex_unlock(&ap_attr_mutex);
		if (zcdndev) {
			/* 2 puts here: one for find, one for open */
			put_device(&zcdndev->device);
			put_device(&zcdndev->device);
		}
	}

	atomic_dec(&zcrypt_open_count);
	return 0;
}

static inline int zcrypt_check_ioctl(struct ap_perms *perms,
				     unsigned int cmd)
{
	int rc = -EPERM;
	int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;

	if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
		if (test_bit_inv(ioctlnr, perms->ioctlm))
			rc = 0;
	}

	if (rc)
		ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n",
				__func__, ioctlnr, rc);

	return rc;
}

static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
{
	return test_bit_inv(card, perms->apm) ? true : false;
}

static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
{
	return test_bit_inv(queue, perms->aqm) ? true : false;
}
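
/*
 * A sketch of how these admission checks are applied by the dispatch
 * loops below (qid handling abbreviated): a request is only routed to
 * an APQN whose card and queue bits are both set in the permission
 * masks attached to the calling file handle:
 *
 *	if (!zcrypt_check_card(perms, AP_QID_CARD(qid)) ||
 *	    !zcrypt_check_queue(perms, AP_QID_QUEUE(qid)))
 *		continue;	// try the next candidate APQN
 */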

static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     struct module **pmod,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
		return NULL;
	zcrypt_card_get(zc);
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	*pmod = zq->queue->ap_dev.device.driver->owner;
	return zq;
}

static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     struct module *mod,
				     unsigned int weight)
{
	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	zcrypt_card_put(zc);
	module_put(mod);
}

static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned int weight,
				       unsigned int pref_weight)
{
	if (!pref_zc)
		return true;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic64_read(&zc->card->total_request_count) <
			atomic64_read(&pref_zc->card->total_request_count);
	return weight < pref_weight;
}

static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned int weight,
					unsigned int pref_weight)
{
	if (!pref_zq)
		return true;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count <
			pref_zq->queue->total_request_count;
	return weight < pref_weight;
}
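
/*
 * Worked example of the load balancing above (numbers invented for
 * illustration): a candidate queue with static weight 10 and current
 * load 30 scores 40; a preferred queue with weight 10 and load 25
 * scores 35, so the preferred queue is kept. If both scored 40, the
 * tie breaks toward the queue with the smaller total_request_count.
 * The cpen/qpen penalties added by the callers inflate the score of
 * the card/queue that already failed a retried message, steering the
 * retry elsewhere.
 */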

/*
 * zcrypt ioctls.
 */
static long zcrypt_rsa_modexpo(struct ap_perms *perms,
			       struct zcrypt_track *tr,
			       struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	rc = ap_init_apmsg(&ap_msg, 0);
	if (rc)
		goto out;

	if (mex->outputdatalength < mex->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable accelerator or CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo ||
			    !ap_queue_usable(zq->queue))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}

static long zcrypt_rsa_crt(struct ap_perms *perms,
			   struct zcrypt_track *tr,
			   struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	rc = ap_init_apmsg(&ap_msg, 0);
	if (rc)
		goto out;

	if (crt->outputdatalength < crt->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable accelerator or CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt ||
			    !ap_queue_usable(zq->queue))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}

static long _zcrypt_send_cprb(u32 xflags, struct ap_perms *perms,
			      struct zcrypt_track *tr,
			      struct ica_xcRB *xcrb)
{
	bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE;
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	unsigned short *domain, tdom;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB);

	xcrb->status = 0;

	rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ?
			   AP_MSG_FLAG_MEMPOOL : 0);
	if (rc)
		goto out;

	rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;
	print_hex_dump_debug("ccareq: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg.msg, ap_msg.len, false);

	tdom = *domain;
	if (perms != &ap_perms && tdom < AP_DOMAINS) {
		if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
			if (!test_bit_inv(tdom, perms->adm)) {
				rc = -ENODEV;
				goto out;
			}
		} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}
	/*
	 * If a valid target domain is set and this domain is NOT a usage
	 * domain but a control only domain, autoselect target domain.
	 */
	if (tdom < AP_DOMAINS &&
	    !ap_test_config_usage_domain(tdom) &&
	    ap_test_config_ctrl_domain(tdom))
		tdom = AUTOSEL_DOM;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.cca)
			continue;
		/* Check for user selected CCA card */
		if (xcrb->user_defined != AUTOSELECT &&
		    xcrb->user_defined != zc->card->id)
			continue;
		/* check if request size exceeds card max msg size */
		if (ap_msg.len > zc->card->maxmsgsize)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check for device usable and eligible */
			if (!zq->online || !zq->ops->send_cprb ||
			    !ap_queue_usable(zq->queue) ||
			    (tdom != AUTOSEL_DOM &&
			     tdom != AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no match for address %02x.%04x => ENODEV\n",
			 xcrb->user_defined, *domain);
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == AUTOSEL_DOM)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
	if (!rc) {
		print_hex_dump_debug("ccarpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     ap_msg.msg, ap_msg.len, false);
	}

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}

long zcrypt_send_cprb(struct ica_xcRB *xcrb, u32 xflags)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	do {
		rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("rc=%d\n", rc);

	return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);
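
/*
 * In-kernel usage sketch (heavily abbreviated; real callers such as
 * the CCA misc code build complete request and reply CPRB buffers
 * before the call): the xcrb points at kernel buffers, so
 * ZCRYPT_XFLAG_USERSPACE is not set. Passing ZCRYPT_XFLAG_NOMEMALLOC
 * makes the ap_message use the preallocated mempool, for callers that
 * must not hit the page allocator:
 *
 *	struct ica_xcRB xcrb = { 0 };
 *	long rc;
 *
 *	// ... fill in agent id, user_defined target and the
 *	// request/reply buffer pointers and lengths ...
 *	rc = zcrypt_send_cprb(&xcrb, ZCRYPT_XFLAG_NOMEMALLOC);
 */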

static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
			return true;
		targets++;
	}
	return false;
}

static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);

	while (target_num-- > 0) {
		if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
		    (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
			return true;
		targets++;
	}
	return false;
}

static long _zcrypt_send_ep11_cprb(u32 xflags, struct ap_perms *perms,
				   struct zcrypt_track *tr,
				   struct ep11_urb *xcrb)
{
	bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE;
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets = NULL;
	unsigned short target_num;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0, domain;
	struct ap_message ap_msg;
	int cpen, qpen, qid = 0, rc;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ?
			   AP_MSG_FLAG_MEMPOOL : 0);
	if (rc)
		goto out;

	target_num = (unsigned short)xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	rc = -ENOMEM;
	if (target_num != 0) {
		if (userspace) {
			targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
			if (!targets)
				goto out;
			if (copy_from_user(targets, xcrb->targets,
					   target_num * sizeof(*targets))) {
				rc = -EFAULT;
				goto out;
			}
		} else {
			targets = (struct ep11_target_dev __force __kernel *)xcrb->targets;
		}
	}

	rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;
	print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg.msg, ap_msg.len, false);

	if (perms != &ap_perms && domain < AUTOSEL_DOM) {
		if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
			if (!test_bit_inv(domain, perms->adm)) {
				rc = -ENODEV;
				goto out;
			}
		} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable EP11 card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.ep11)
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* check if request size exceeds card max msg size */
		if (ap_msg.len > zc->card->maxmsgsize)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->send_ep11_cprb ||
			    !ap_queue_usable(zq->queue) ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		if (targets && target_num == 1) {
			pr_debug("no match for address %02x.%04x => ENODEV\n",
				 (int)targets->ap_id, (int)targets->dom_id);
		} else if (targets) {
			pr_debug("no match for %d target addrs => ENODEV\n",
				 (int)target_num);
		} else {
			pr_debug("no match for address ff.ffff => ENODEV\n");
		}
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
	if (!rc) {
		print_hex_dump_debug("ep11rpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     ap_msg.msg, ap_msg.len, false);
	}

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	if (userspace)
		kfree(targets);
	ap_release_apmsg(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}

long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb, u32 xflags)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	do {
		rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("rc=%d\n", rc);

	return rc;
}
EXPORT_SYMBOL(zcrypt_send_ep11_cprb);

static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code = 0;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	rc = ap_init_apmsg(&ap_msg, 0);
	if (rc)
		goto out;
	rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.cca)
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rng ||
			    !ap_queue_usable(zq->queue))
				continue;
			if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_apmsg(&ap_msg);
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			      ap_msg.psmid);
	return rc;
}

static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->hwinfo.fac >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus,
				   int maxcard, int maxqueue)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	maxcard = min_t(int, maxcard, MAX_ZDEV_CARDIDS_EXT);
	maxqueue = min_t(int, maxqueue, MAX_ZDEV_DOMAINS_EXT);

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			if (card >= maxcard || queue >= maxqueue)
				continue;
			stat = &devstatus[card * maxqueue + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->hwinfo.fac >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);

int zcrypt_device_status_ext(int card, int queue,
			     struct zcrypt_device_status_ext *devstat)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;

	memset(devstat, 0, sizeof(*devstat));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (card == AP_QID_CARD(zq->queue->qid) &&
			    queue == AP_QID_QUEUE(zq->queue->qid)) {
				devstat->hwtype = zc->card->ap_dev.device_type;
				devstat->functions = zc->card->hwinfo.fac >> 26;
				devstat->qid = zq->queue->qid;
				devstat->online = zq->online ? 0x01 : 0x00;
				spin_unlock(&zcrypt_list_lock);
				return 0;
			}
		}
	}
	spin_unlock(&zcrypt_list_lock);

	return -ENODEV;
}
EXPORT_SYMBOL(zcrypt_device_status_ext);
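
/*
 * Usage sketch for the exported status helper (card and queue are AP
 * ids, i.e. APQN values, not array indices): returns 0 when the APQN
 * is known to the zcrypt layer, -ENODEV otherwise:
 *
 *	struct zcrypt_device_status_ext devstat;
 *
 *	if (!zcrypt_device_status_ext(6, 17, &devstat) && devstat.online)
 *		pr_info("APQN 06.0011: hwtype %d\n", devstat.hwtype);
 */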

static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(status, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(qdepth, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			qdepth[card] =
				zq->queue->pendingq_count +
				zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;
	u64 cnt;

	memset(reqcnt, 0, sizeof(int) * max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			cnt = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
			reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX;
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}

static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct zcrypt_track tr;
	struct ica_rsa_modexpo mex;
	struct ica_rsa_modexpo __user *umex = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&mex, umex, sizeof(mex)))
		return -EFAULT;

	do {
		rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc) {
		pr_debug("ioctl ICARSAMODEXPO rc=%d\n", rc);
		return rc;
	}
	return put_user(mex.outputdatalength, &umex->outputdatalength);
}

static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct zcrypt_track tr;
	struct ica_rsa_modexpo_crt crt;
	struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&crt, ucrt, sizeof(crt)))
		return -EFAULT;

	do {
		rc = zcrypt_rsa_crt(perms, &tr, &crt);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_crt(perms, &tr, &crt);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc) {
		pr_debug("ioctl ICARSACRT rc=%d\n", rc);
		return rc;
	}
	return put_user(crt.outputdatalength, &ucrt->outputdatalength);
}

static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct ica_xcRB xcrb;
	struct zcrypt_track tr;
	u32 xflags = ZCRYPT_XFLAG_USERSPACE;
	struct ica_xcRB __user *uxcrb = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
		return -EFAULT;

	do {
		rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("ioctl ZSENDCPRB rc=%d status=0x%x\n",
			 rc, xcrb.status);
	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
		return -EFAULT;
	return rc;
}

static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct ep11_urb xcrb;
	struct zcrypt_track tr;
	u32 xflags = ZCRYPT_XFLAG_USERSPACE;
	struct ep11_urb __user *uxcrb = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
		return -EFAULT;

	do {
		rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("ioctl ZSENDEP11CPRB rc=%d\n", rc);
	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
		return -EFAULT;
	return rc;
}

static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *)filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	switch (cmd) {
	case ICARSAMODEXPO:
		return icarsamodexpo_ioctl(perms, arg);
	case ICARSACRT:
		return icarsacrt_ioctl(perms, arg);
	case ZSECSENDCPRB:
		return zsecsendcprb_ioctl(perms, arg);
	case ZSENDEP11CPRB:
		return zsendep11cprb_ioctl(perms, arg);
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT,
					 sizeof(struct zcrypt_device_status_ext),
					 GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status,
					      MAX_ZDEV_CARDIDS_EXT,
					      MAX_ZDEV_DOMAINS_EXT);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kvfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		u32 *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		if (copy_to_user((int __user *)arg, reqcnt,
				 sizeof(u32) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *)arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *)arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *)arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *)arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		u32 reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		pr_debug("unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
	.open		= zcrypt_open,
	.release	= zcrypt_release,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "z90crypt",
	.fops	= &zcrypt_fops,
};

static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);

static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		/* on ENODEV failure: retry once again after an AP bus rescan */
		if (rc == -ENODEV && zcrypt_process_rescan())
			rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}

static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
	.quality	= 990,
};
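
/*
 * Once registered, this source shows up in the hw_random core like any
 * other rng (paths below come from the hw_random framework, not from
 * this file):
 *
 *	cat /sys/class/misc/hw_random/rng_available
 *	echo zcrypt > /sys/class/misc/hw_random/rng_current
 *	dd if=/dev/hwrng bs=4 count=8 | xxd
 */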

int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else {
		zcrypt_rng_device_count++;
	}
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long)zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long)zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}

/*
 * Wait until the zcrypt api is operational.
 * The AP bus scan and the binding of ap devices to device drivers are
 * asynchronous jobs. This function waits until these initial jobs are
 * done, at which point the zcrypt api should be ready to serve crypto
 * requests - if there are resources available. The function uses an
 * internal timeout of 30s. The very first caller either waits until
 * the ap bus bindings are complete or until the timeout hits. This
 * outcome is remembered for further callers, which are only blocked
 * until a decision is made (timeout or bindings complete).
 * On timeout -ETIME is returned, on success the return value is 0.
 */
int zcrypt_wait_api_operational(void)
{
	static DEFINE_MUTEX(zcrypt_wait_api_lock);
	static int zcrypt_wait_api_state;
	int rc;

	rc = mutex_lock_interruptible(&zcrypt_wait_api_lock);
	if (rc)
		return rc;

	switch (zcrypt_wait_api_state) {
	case 0:
		/* initial state, invoke wait for the ap bus complete */
		rc = ap_wait_apqn_bindings_complete(
			msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS));
		switch (rc) {
		case 0:
			/* ap bus bindings are complete */
			zcrypt_wait_api_state = 1;
			break;
		case -EINTR:
			/* interrupted, go back to caller */
			break;
		case -ETIME:
			/* timeout */
			ZCRYPT_DBF_WARN("%s ap_wait_init_apqn_bindings_complete()=ETIME\n",
					__func__);
			zcrypt_wait_api_state = -ETIME;
			break;
		default:
			/* other failure */
			pr_debug("ap_wait_init_apqn_bindings_complete()=%d\n", rc);
			break;
		}
		break;
	case 1:
		/* a previous caller already found ap bus bindings complete */
		rc = 0;
		break;
	default:
		/* a previous caller had timeout or other failure */
		rc = zcrypt_wait_api_state;
		break;
	}

	mutex_unlock(&zcrypt_wait_api_lock);

	return rc;
}
EXPORT_SYMBOL(zcrypt_wait_api_operational);
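
/*
 * Usage sketch (hypothetical in-kernel consumer): code that may issue
 * its first crypto request before the initial AP bus scan has finished
 * gates that request on this helper:
 *
 *	rc = zcrypt_wait_api_operational();
 *	if (rc)
 *		return rc;	// -ETIME, -EINTR or other failure
 */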

int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
					 ZCRYPT_DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}

void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}

static int __init zcdn_init(void)
{
	int rc;

	/* create a new class 'zcrypt' */
	rc = class_register(&zcrypt_class);
	if (rc)
		goto out_class_register_failed;

	/* alloc device minor range */
	rc = alloc_chrdev_region(&zcrypt_devt,
				 0, ZCRYPT_MAX_MINOR_NODES,
				 ZCRYPT_NAME);
	if (rc)
		goto out_alloc_chrdev_failed;

	cdev_init(&zcrypt_cdev, &zcrypt_fops);
	zcrypt_cdev.owner = THIS_MODULE;
	rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	if (rc)
		goto out_cdev_add_failed;

	/* need some class specific sysfs attributes */
	rc = class_create_file(&zcrypt_class, &class_attr_zcdn_create);
	if (rc)
		goto out_class_create_file_1_failed;
	rc = class_create_file(&zcrypt_class, &class_attr_zcdn_destroy);
	if (rc)
		goto out_class_create_file_2_failed;

	return 0;

out_class_create_file_2_failed:
	class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
out_class_create_file_1_failed:
	cdev_del(&zcrypt_cdev);
out_cdev_add_failed:
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
out_alloc_chrdev_failed:
	class_unregister(&zcrypt_class);
out_class_register_failed:
	return rc;
}

static void zcdn_exit(void)
{
	class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
	class_remove_file(&zcrypt_class, &class_attr_zcdn_destroy);
	zcdn_destroy_all();
	cdev_del(&zcrypt_cdev);
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	class_unregister(&zcrypt_class);
}

/*
 * zcrypt_api_init(): Module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	/* make sure the mempool threshold is >= 1 */
	if (zcrypt_mempool_threshold < 1) {
		rc = -EINVAL;
		goto out;
	}

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	rc = zcdn_init();
	if (rc)
		goto out_zcdn_init_failed;

	rc = zcrypt_ccamisc_init();
	if (rc)
		goto out_ccamisc_init_failed;

	rc = zcrypt_ep11misc_init();
	if (rc)
		goto out_ep11misc_init_failed;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out_misc_register_failed;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();

	return 0;

out_misc_register_failed:
	zcrypt_ep11misc_exit();
out_ep11misc_init_failed:
	zcrypt_ccamisc_exit();
out_ccamisc_init_failed:
	zcdn_exit();
out_zcdn_init_failed:
	zcrypt_debug_exit();
out:
	return rc;
}

/*
 * zcrypt_api_exit(): Module termination code.
 */
void __exit zcrypt_api_exit(void)
{
	zcdn_exit();
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_ccamisc_exit();
	zcrypt_ep11misc_exit();
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);