Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/zcrypt: code cleanup

This patch tries to fix as much as possible of the
checkpatch.pl --strict findings:
CHECK: Logical continuations should be on the previous line
CHECK: No space is necessary after a cast
CHECK: Alignment should match open parenthesis
CHECK: 'useable' may be misspelled - perhaps 'usable'?
WARNING: Possible repeated word: 'is'
CHECK: spaces preferred around that '*' (ctx:VxV)
CHECK: Comparison to NULL could be written "!msg"
CHECK: Prefer kzalloc(sizeof(*zc)...) over kzalloc(sizeof(struct...)...)
CHECK: Unnecessary parentheses around resp_type->work
CHECK: Avoid CamelCase: <xcRB>

There is no functional change coming with this patch, only
code cleanup, renaming, whitespaces, indenting, ... but no
semantic change in any way. Also the API (zcrypt and pkey
header file) is semantically unchanged.

Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
Reviewed-by: Jürgen Christ <jchrist@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>

authored by

Harald Freudenberger and committed by
Heiko Carstens
2004b57c 6acb086d

+676 -645
+1 -1
arch/s390/include/uapi/asm/pkey.h
··· 171 171 #define PKEY_SKEY2PKEY _IOWR(PKEY_IOCTL_MAGIC, 0x06, struct pkey_skey2pkey) 172 172 173 173 /* 174 - * Verify the given CCA AES secure key for being able to be useable with 174 + * Verify the given CCA AES secure key for being able to be usable with 175 175 * the pkey module. Check for correct key type and check for having at 176 176 * least one crypto card being able to handle this key (master key 177 177 * or old master key verification pattern matches).
+8 -8
arch/s390/include/uapi/asm/zcrypt.h
··· 236 236 }; 237 237 238 238 #define AUTOSELECT 0xFFFFFFFF 239 - #define AUTOSEL_AP ((__u16) 0xFFFF) 240 - #define AUTOSEL_DOM ((__u16) 0xFFFF) 239 + #define AUTOSEL_AP ((__u16)0xFFFF) 240 + #define AUTOSEL_DOM ((__u16)0xFFFF) 241 241 242 242 #define ZCRYPT_IOCTL_MAGIC 'z' 243 243 ··· 303 303 /** 304 304 * Supported ioctl calls 305 305 */ 306 - #define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0) 307 - #define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) 308 - #define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) 309 - #define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0) 306 + #define ICARSAMODEXPO _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0) 307 + #define ICARSACRT _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) 308 + #define ZSECSENDCPRB _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) 309 + #define ZSENDEP11CPRB _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0) 310 310 311 - #define ZCRYPT_DEVICE_STATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x5f, 0) 311 + #define ZCRYPT_DEVICE_STATUS _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x5f, 0) 312 312 #define ZCRYPT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x58, char[MAX_ZDEV_CARDIDS_EXT]) 313 313 #define ZCRYPT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x59, char[MAX_ZDEV_CARDIDS_EXT]) 314 314 #define ZCRYPT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x5a, int[MAX_ZDEV_CARDIDS_EXT]) ··· 350 350 }; 351 351 352 352 /* Deprecated: use ZCRYPT_DEVICE_STATUS */ 353 - #define ZDEVICESTATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0) 353 + #define ZDEVICESTATUS _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0) 354 354 /* Deprecated: use ZCRYPT_STATUS_MASK */ 355 355 #define Z90STAT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x48, char[64]) 356 356 /* Deprecated: use ZCRYPT_QDEPTH_MASK */
+29 -27
drivers/s390/crypto/ap_bus.c
··· 179 179 * ap_apft_available(): Test if AP facilities test (APFT) 180 180 * facility is available. 181 181 * 182 - * Returns 1 if APFT is is available. 182 + * Returns 1 if APFT is available. 183 183 */ 184 184 static int ap_apft_available(void) 185 185 { ··· 722 722 723 723 static int __ap_calc_helper(struct device *dev, void *arg) 724 724 { 725 - struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *) arg; 725 + struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *)arg; 726 726 727 727 if (is_queue_dev(dev)) { 728 728 pctrs->apqns++; ··· 738 738 struct __ap_calc_ctrs ctrs; 739 739 740 740 memset(&ctrs, 0, sizeof(ctrs)); 741 - bus_for_each_dev(&ap_bus_type, NULL, (void *) &ctrs, __ap_calc_helper); 741 + bus_for_each_dev(&ap_bus_type, NULL, (void *)&ctrs, __ap_calc_helper); 742 742 743 743 *apqns = ctrs.apqns; 744 744 *bound = ctrs.bound; ··· 799 799 static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data) 800 800 { 801 801 if (is_queue_dev(dev) && 802 - AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long) data) 802 + AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long)data) 803 803 device_unregister(dev); 804 804 return 0; 805 805 } ··· 812 812 card = AP_QID_CARD(to_ap_queue(dev)->qid); 813 813 queue = AP_QID_QUEUE(to_ap_queue(dev)->qid); 814 814 mutex_lock(&ap_perms_mutex); 815 - devres = test_bit_inv(card, ap_perms.apm) 816 - && test_bit_inv(queue, ap_perms.aqm); 815 + devres = test_bit_inv(card, ap_perms.apm) && 816 + test_bit_inv(queue, ap_perms.aqm); 817 817 mutex_unlock(&ap_perms_mutex); 818 818 drvres = to_ap_drv(dev->driver)->flags 819 819 & AP_DRIVER_FLAG_DEFAULT; ··· 844 844 845 845 mutex_lock(&ap_perms_mutex); 846 846 847 - if (test_bit_inv(card, ap_perms.apm) 848 - && test_bit_inv(queue, ap_perms.aqm)) 847 + if (test_bit_inv(card, ap_perms.apm) && 848 + test_bit_inv(queue, ap_perms.aqm)) 849 849 rc = 1; 850 850 851 851 mutex_unlock(&ap_perms_mutex); ··· 894 894 card = AP_QID_CARD(to_ap_queue(dev)->qid); 895 895 queue = 
AP_QID_QUEUE(to_ap_queue(dev)->qid); 896 896 mutex_lock(&ap_perms_mutex); 897 - devres = test_bit_inv(card, ap_perms.apm) 898 - && test_bit_inv(queue, ap_perms.aqm); 897 + devres = test_bit_inv(card, ap_perms.apm) && 898 + test_bit_inv(queue, ap_perms.aqm); 899 899 mutex_unlock(&ap_perms_mutex); 900 900 drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; 901 901 if (!!devres != !!drvres) ··· 916 916 if (is_queue_dev(dev)) 917 917 hash_del(&to_ap_queue(dev)->hnode); 918 918 spin_unlock_bh(&ap_queues_lock); 919 - } else 919 + } else { 920 920 ap_check_bindings_complete(); 921 + } 921 922 922 923 out: 923 924 if (rc) ··· 999 998 EXPORT_SYMBOL(ap_bus_force_rescan); 1000 999 1001 1000 /* 1002 - * A config change has happened, force an ap bus rescan. 1003 - */ 1001 + * A config change has happened, force an ap bus rescan. 1002 + */ 1004 1003 void ap_bus_cfg_chg(void) 1005 1004 { 1006 1005 AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__); ··· 1124 1123 if (bits & 0x07) 1125 1124 return -EINVAL; 1126 1125 1127 - size = BITS_TO_LONGS(bits)*sizeof(unsigned long); 1126 + size = BITS_TO_LONGS(bits) * sizeof(unsigned long); 1128 1127 newmap = kmalloc(size, GFP_KERNEL); 1129 1128 if (!newmap) 1130 1129 return -ENOMEM; ··· 1260 1259 rc = ap_poll_thread_start(); 1261 1260 if (rc) 1262 1261 count = rc; 1263 - } else 1262 + } else { 1264 1263 ap_poll_thread_stop(); 1264 + } 1265 1265 return count; 1266 1266 } 1267 1267 ··· 1635 1633 apinfo.mode = (func >> 26) & 0x07; 1636 1634 apinfo.cat = AP_DEVICE_TYPE_CEX8; 1637 1635 status = ap_qact(qid, 0, &apinfo); 1638 - if (status.response_code == AP_RESPONSE_NORMAL 1639 - && apinfo.cat >= AP_DEVICE_TYPE_CEX2A 1640 - && apinfo.cat <= AP_DEVICE_TYPE_CEX8) 1636 + if (status.response_code == AP_RESPONSE_NORMAL && 1637 + apinfo.cat >= AP_DEVICE_TYPE_CEX2A && 1638 + apinfo.cat <= AP_DEVICE_TYPE_CEX8) 1641 1639 comp_type = apinfo.cat; 1642 1640 } 1643 1641 if (!comp_type) ··· 1657 1655 */ 1658 1656 static int 
__match_card_device_with_id(struct device *dev, const void *data) 1659 1657 { 1660 - return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *) data; 1658 + return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *)data; 1661 1659 } 1662 1660 1663 1661 /* ··· 1666 1664 */ 1667 1665 static int __match_queue_device_with_qid(struct device *dev, const void *data) 1668 1666 { 1669 - return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data; 1667 + return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long)data; 1670 1668 } 1671 1669 1672 1670 /* ··· 1675 1673 */ 1676 1674 static int __match_queue_device_with_queue_id(struct device *dev, const void *data) 1677 1675 { 1678 - return is_queue_dev(dev) 1679 - && AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long) data; 1676 + return is_queue_dev(dev) && 1677 + AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long)data; 1680 1678 } 1681 1679 1682 1680 /* Helper function for notify_config_changed */ ··· 1729 1727 static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac) 1730 1728 { 1731 1729 bus_for_each_dev(&ap_bus_type, NULL, 1732 - (void *)(long) ac->id, 1730 + (void *)(long)ac->id, 1733 1731 __ap_queue_devices_with_id_unregister); 1734 1732 device_unregister(&ac->ap_dev.device); 1735 1733 } ··· 1757 1755 for (dom = 0; dom <= ap_max_domain_id; dom++) { 1758 1756 qid = AP_MKQID(ac->id, dom); 1759 1757 dev = bus_find_device(&ap_bus_type, NULL, 1760 - (void *)(long) qid, 1758 + (void *)(long)qid, 1761 1759 __match_queue_device_with_qid); 1762 1760 aq = dev ? to_ap_queue(dev) : NULL; 1763 1761 if (!ap_test_config_usage_domain(dom)) { ··· 1903 1901 1904 1902 /* Is there currently a card device for this adapter ? */ 1905 1903 dev = bus_find_device(&ap_bus_type, NULL, 1906 - (void *)(long) ap, 1904 + (void *)(long)ap, 1907 1905 __match_card_device_with_id); 1908 1906 ac = dev ? 
to_ap_card(dev) : NULL; 1909 1907 ··· 2104 2102 if (ap_domain_index >= 0) { 2105 2103 struct device *dev = 2106 2104 bus_find_device(&ap_bus_type, NULL, 2107 - (void *)(long) ap_domain_index, 2105 + (void *)(long)ap_domain_index, 2108 2106 __match_queue_device_with_queue_id); 2109 2107 if (dev) 2110 2108 put_device(dev); ··· 2139 2137 2140 2138 static void __init ap_perms_init(void) 2141 2139 { 2142 - /* all resources useable if no kernel parameter string given */ 2140 + /* all resources usable if no kernel parameter string given */ 2143 2141 memset(&ap_perms.ioctlm, 0xFF, sizeof(ap_perms.ioctlm)); 2144 2142 memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm)); 2145 2143 memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm));
+1
drivers/s390/crypto/ap_bus.h
··· 317 317 unsigned long aqm[BITS_TO_LONGS(AP_DOMAINS)]; 318 318 unsigned long adm[BITS_TO_LONGS(AP_DOMAINS)]; 319 319 }; 320 + 320 321 extern struct ap_perms ap_perms; 321 322 extern struct mutex ap_perms_mutex; 322 323
+4 -3
drivers/s390/crypto/ap_queue.c
··· 99 99 { 100 100 struct ap_queue_status status; 101 101 102 - if (msg == NULL) 102 + if (!msg) 103 103 return -EINVAL; 104 104 status = ap_dqap(qid, psmid, msg, length, NULL, NULL); 105 105 switch (status.response_code) { ··· 603 603 static DEVICE_ATTR_RO(interrupt); 604 604 605 605 static ssize_t config_show(struct device *dev, 606 - struct device_attribute *attr, char *buf) 606 + struct device_attribute *attr, char *buf) 607 607 { 608 608 struct ap_queue *aq = to_ap_queue(dev); 609 609 int rc; ··· 827 827 aq->requestq_count++; 828 828 aq->total_request_count++; 829 829 atomic64_inc(&aq->card->total_request_count); 830 - } else 830 + } else { 831 831 rc = -ENODEV; 832 + } 832 833 833 834 /* Send/receive as many request from the queue as possible. */ 834 835 ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
+78 -71
drivers/s390/crypto/pkey_api.c
··· 232 232 int i, rc; 233 233 u16 card, dom; 234 234 u32 nr_apqns, *apqns = NULL; 235 - struct ep11keyblob *kb = (struct ep11keyblob *) key; 235 + struct ep11keyblob *kb = (struct ep11keyblob *)key; 236 236 237 237 zcrypt_wait_api_operational(); 238 238 ··· 267 267 u16 *pcardnr, u16 *pdomain, 268 268 u16 *pkeysize, u32 *pattributes) 269 269 { 270 - struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; 270 + struct secaeskeytoken *t = (struct secaeskeytoken *)seckey; 271 271 u16 cardnr, domain; 272 272 int rc; 273 273 274 274 /* check the secure key for valid AES secure key */ 275 - rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *) seckey, 0); 275 + rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *)seckey, 0); 276 276 if (rc) 277 277 goto out; 278 278 if (pattributes) ··· 425 425 t = (struct clearaeskeytoken *)key; 426 426 if (keylen != sizeof(*t) + t->len) 427 427 goto out; 428 - if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16) 429 - || (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24) 430 - || (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32)) 428 + if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16) || 429 + (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24) || 430 + (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32)) 431 431 memcpy(ckey.clrkey, t->clearkey, t->len); 432 432 else 433 433 goto out; ··· 541 541 542 542 DEBUG_DBG("%s rc=%d\n", __func__, rc); 543 543 return rc; 544 - 545 544 } 546 545 EXPORT_SYMBOL(pkey_keyblob2pkey); 547 546 ··· 587 588 } else if (ktype == PKEY_TYPE_CCA_DATA) { 588 589 rc = cca_genseckey(card, dom, ksize, keybuf); 589 590 *keybufsize = (rc ? 0 : SECKEYBLOBSIZE); 590 - } else /* TOKVER_CCA_VLSC */ 591 + } else { 592 + /* TOKVER_CCA_VLSC */ 591 593 rc = cca_gencipherkey(card, dom, ksize, kflags, 592 594 keybuf, keybufsize); 595 + } 593 596 if (rc == 0) 594 597 break; 595 598 } ··· 646 645 rc = cca_clr2seckey(card, dom, ksize, 647 646 clrkey, keybuf); 648 647 *keybufsize = (rc ? 
0 : SECKEYBLOBSIZE); 649 - } else /* TOKVER_CCA_VLSC */ 648 + } else { 649 + /* TOKVER_CCA_VLSC */ 650 650 rc = cca_clr2cipherkey(card, dom, ksize, kflags, 651 651 clrkey, keybuf, keybufsize); 652 + } 652 653 if (rc == 0) 653 654 break; 654 655 } ··· 670 667 if (keylen < sizeof(struct keytoken_header)) 671 668 return -EINVAL; 672 669 673 - if (hdr->type == TOKTYPE_CCA_INTERNAL 674 - && hdr->version == TOKVER_CCA_AES) { 670 + if (hdr->type == TOKTYPE_CCA_INTERNAL && 671 + hdr->version == TOKVER_CCA_AES) { 675 672 struct secaeskeytoken *t = (struct secaeskeytoken *)key; 676 673 677 674 rc = cca_check_secaeskeytoken(debug_info, 3, key, 0); ··· 680 677 if (ktype) 681 678 *ktype = PKEY_TYPE_CCA_DATA; 682 679 if (ksize) 683 - *ksize = (enum pkey_key_size) t->bitsize; 680 + *ksize = (enum pkey_key_size)t->bitsize; 684 681 685 682 rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain, 686 683 ZCRYPT_CEX3C, AES_MK_SET, t->mkvp, 0, 1); ··· 700 697 *cardnr = ((struct pkey_apqn *)_apqns)->card; 701 698 *domain = ((struct pkey_apqn *)_apqns)->domain; 702 699 703 - } else if (hdr->type == TOKTYPE_CCA_INTERNAL 704 - && hdr->version == TOKVER_CCA_VLSC) { 700 + } else if (hdr->type == TOKTYPE_CCA_INTERNAL && 701 + hdr->version == TOKVER_CCA_VLSC) { 705 702 struct cipherkeytoken *t = (struct cipherkeytoken *)key; 706 703 707 704 rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1); ··· 737 734 *cardnr = ((struct pkey_apqn *)_apqns)->card; 738 735 *domain = ((struct pkey_apqn *)_apqns)->domain; 739 736 740 - } else if (hdr->type == TOKTYPE_NON_CCA 741 - && hdr->version == TOKVER_EP11_AES) { 737 + } else if (hdr->type == TOKTYPE_NON_CCA && 738 + hdr->version == TOKVER_EP11_AES) { 742 739 struct ep11keyblob *kb = (struct ep11keyblob *)key; 743 740 744 741 rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1); ··· 760 757 *cardnr = ((struct pkey_apqn *)_apqns)->card; 761 758 *domain = ((struct pkey_apqn *)_apqns)->domain; 762 759 763 - } else 760 + } else { 764 761 rc = -EINVAL; 
762 + } 765 763 766 764 out: 767 765 kfree(_apqns); ··· 820 816 for (i = 0, rc = -ENODEV; i < nr_apqns; i++) { 821 817 card = apqns[i].card; 822 818 dom = apqns[i].domain; 823 - if (hdr->type == TOKTYPE_CCA_INTERNAL 824 - && hdr->version == TOKVER_CCA_AES) 819 + if (hdr->type == TOKTYPE_CCA_INTERNAL && 820 + hdr->version == TOKVER_CCA_AES) { 825 821 rc = cca_sec2protkey(card, dom, key, pkey->protkey, 826 822 &pkey->len, &pkey->type); 827 - else if (hdr->type == TOKTYPE_CCA_INTERNAL 828 - && hdr->version == TOKVER_CCA_VLSC) 823 + } else if (hdr->type == TOKTYPE_CCA_INTERNAL && 824 + hdr->version == TOKVER_CCA_VLSC) { 829 825 rc = cca_cipher2protkey(card, dom, key, pkey->protkey, 830 826 &pkey->len, &pkey->type); 831 - else { /* EP11 AES secure key blob */ 832 - struct ep11keyblob *kb = (struct ep11keyblob *) key; 827 + } else { 828 + /* EP11 AES secure key blob */ 829 + struct ep11keyblob *kb = (struct ep11keyblob *)key; 833 830 834 831 pkey->len = sizeof(pkey->protkey); 835 832 rc = ep11_kblob2protkey(card, dom, key, kb->head.len, ··· 856 851 857 852 zcrypt_wait_api_operational(); 858 853 859 - if (hdr->type == TOKTYPE_NON_CCA 860 - && (hdr->version == TOKVER_EP11_AES_WITH_HEADER 861 - || hdr->version == TOKVER_EP11_ECC_WITH_HEADER) 862 - && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 854 + if (hdr->type == TOKTYPE_NON_CCA && 855 + (hdr->version == TOKVER_EP11_AES_WITH_HEADER || 856 + hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && 857 + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 863 858 int minhwtype = 0, api = 0; 864 859 struct ep11keyblob *kb = (struct ep11keyblob *) 865 860 (key + sizeof(struct ep11kblob_header)); ··· 874 869 minhwtype, api, kb->wkvp); 875 870 if (rc) 876 871 goto out; 877 - } else if (hdr->type == TOKTYPE_NON_CCA 878 - && hdr->version == TOKVER_EP11_AES 879 - && is_ep11_keyblob(key)) { 872 + } else if (hdr->type == TOKTYPE_NON_CCA && 873 + hdr->version == TOKVER_EP11_AES && 874 + is_ep11_keyblob(key)) { 880 875 int 
minhwtype = 0, api = 0; 881 - struct ep11keyblob *kb = (struct ep11keyblob *) key; 876 + struct ep11keyblob *kb = (struct ep11keyblob *)key; 882 877 883 878 if (flags != PKEY_FLAGS_MATCH_CUR_MKVP) 884 879 return -EINVAL; ··· 936 931 cur_mkvp, old_mkvp, 1); 937 932 if (rc) 938 933 goto out; 939 - } else 934 + } else { 940 935 return -EINVAL; 936 + } 941 937 942 938 if (apqns) { 943 939 if (*nr_apqns < _nr_apqns) ··· 967 961 int minhwtype = ZCRYPT_CEX3C; 968 962 969 963 if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 970 - cur_mkvp = *((u64 *) cur_mkvp); 964 + cur_mkvp = *((u64 *)cur_mkvp); 971 965 if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) 972 - old_mkvp = *((u64 *) alt_mkvp); 966 + old_mkvp = *((u64 *)alt_mkvp); 973 967 if (ktype == PKEY_TYPE_CCA_CIPHER) 974 968 minhwtype = ZCRYPT_CEX6; 975 969 rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, ··· 981 975 u64 cur_mkvp = 0, old_mkvp = 0; 982 976 983 977 if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) 984 - cur_mkvp = *((u64 *) cur_mkvp); 978 + cur_mkvp = *((u64 *)cur_mkvp); 985 979 if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) 986 - old_mkvp = *((u64 *) alt_mkvp); 980 + old_mkvp = *((u64 *)alt_mkvp); 987 981 rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, 988 982 ZCRYPT_CEX7, APKA_MK_SET, 989 983 cur_mkvp, old_mkvp, 1); ··· 1002 996 if (rc) 1003 997 goto out; 1004 998 1005 - } else 999 + } else { 1006 1000 return -EINVAL; 1001 + } 1007 1002 1008 1003 if (apqns) { 1009 1004 if (*nr_apqns < _nr_apqns) ··· 1033 1026 if (keylen < sizeof(struct keytoken_header)) 1034 1027 return -EINVAL; 1035 1028 1036 - if (hdr->type == TOKTYPE_NON_CCA 1037 - && hdr->version == TOKVER_EP11_AES_WITH_HEADER 1038 - && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 1029 + if (hdr->type == TOKTYPE_NON_CCA && 1030 + hdr->version == TOKVER_EP11_AES_WITH_HEADER && 1031 + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 1039 1032 /* EP11 AES key blob with header */ 1040 1033 if (ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1)) 
1041 1034 return -EINVAL; 1042 - } else if (hdr->type == TOKTYPE_NON_CCA 1043 - && hdr->version == TOKVER_EP11_ECC_WITH_HEADER 1044 - && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 1035 + } else if (hdr->type == TOKTYPE_NON_CCA && 1036 + hdr->version == TOKVER_EP11_ECC_WITH_HEADER && 1037 + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { 1045 1038 /* EP11 ECC key blob with header */ 1046 1039 if (ep11_check_ecc_key_with_hdr(debug_info, 3, key, keylen, 1)) 1047 1040 return -EINVAL; 1048 - } else if (hdr->type == TOKTYPE_NON_CCA 1049 - && hdr->version == TOKVER_EP11_AES 1050 - && is_ep11_keyblob(key)) { 1041 + } else if (hdr->type == TOKTYPE_NON_CCA && 1042 + hdr->version == TOKVER_EP11_AES && 1043 + is_ep11_keyblob(key)) { 1051 1044 /* EP11 AES key blob with header in session field */ 1052 1045 if (ep11_check_aes_key(debug_info, 3, key, keylen, 1)) 1053 1046 return -EINVAL; ··· 1095 1088 for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { 1096 1089 card = apqns[i].card; 1097 1090 dom = apqns[i].domain; 1098 - if (hdr->type == TOKTYPE_NON_CCA 1099 - && (hdr->version == TOKVER_EP11_AES_WITH_HEADER 1100 - || hdr->version == TOKVER_EP11_ECC_WITH_HEADER) 1101 - && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) 1091 + if (hdr->type == TOKTYPE_NON_CCA && 1092 + (hdr->version == TOKVER_EP11_AES_WITH_HEADER || 1093 + hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && 1094 + is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) 1102 1095 rc = ep11_kblob2protkey(card, dom, key, hdr->len, 1103 1096 protkey, protkeylen, protkeytype); 1104 - else if (hdr->type == TOKTYPE_NON_CCA 1105 - && hdr->version == TOKVER_EP11_AES 1106 - && is_ep11_keyblob(key)) 1097 + else if (hdr->type == TOKTYPE_NON_CCA && 1098 + hdr->version == TOKVER_EP11_AES && 1099 + is_ep11_keyblob(key)) 1107 1100 rc = ep11_kblob2protkey(card, dom, key, hdr->len, 1108 1101 protkey, protkeylen, protkeytype); 1109 1102 else if (hdr->type == TOKTYPE_CCA_INTERNAL && ··· 1151 1144 1152 
1145 switch (cmd) { 1153 1146 case PKEY_GENSECK: { 1154 - struct pkey_genseck __user *ugs = (void __user *) arg; 1147 + struct pkey_genseck __user *ugs = (void __user *)arg; 1155 1148 struct pkey_genseck kgs; 1156 1149 1157 1150 if (copy_from_user(&kgs, ugs, sizeof(kgs))) ··· 1166 1159 break; 1167 1160 } 1168 1161 case PKEY_CLR2SECK: { 1169 - struct pkey_clr2seck __user *ucs = (void __user *) arg; 1162 + struct pkey_clr2seck __user *ucs = (void __user *)arg; 1170 1163 struct pkey_clr2seck kcs; 1171 1164 1172 1165 if (copy_from_user(&kcs, ucs, sizeof(kcs))) ··· 1182 1175 break; 1183 1176 } 1184 1177 case PKEY_SEC2PROTK: { 1185 - struct pkey_sec2protk __user *usp = (void __user *) arg; 1178 + struct pkey_sec2protk __user *usp = (void __user *)arg; 1186 1179 struct pkey_sec2protk ksp; 1187 1180 1188 1181 if (copy_from_user(&ksp, usp, sizeof(ksp))) ··· 1198 1191 break; 1199 1192 } 1200 1193 case PKEY_CLR2PROTK: { 1201 - struct pkey_clr2protk __user *ucp = (void __user *) arg; 1194 + struct pkey_clr2protk __user *ucp = (void __user *)arg; 1202 1195 struct pkey_clr2protk kcp; 1203 1196 1204 1197 if (copy_from_user(&kcp, ucp, sizeof(kcp))) ··· 1214 1207 break; 1215 1208 } 1216 1209 case PKEY_FINDCARD: { 1217 - struct pkey_findcard __user *ufc = (void __user *) arg; 1210 + struct pkey_findcard __user *ufc = (void __user *)arg; 1218 1211 struct pkey_findcard kfc; 1219 1212 1220 1213 if (copy_from_user(&kfc, ufc, sizeof(kfc))) ··· 1229 1222 break; 1230 1223 } 1231 1224 case PKEY_SKEY2PKEY: { 1232 - struct pkey_skey2pkey __user *usp = (void __user *) arg; 1225 + struct pkey_skey2pkey __user *usp = (void __user *)arg; 1233 1226 struct pkey_skey2pkey ksp; 1234 1227 1235 1228 if (copy_from_user(&ksp, usp, sizeof(ksp))) ··· 1243 1236 break; 1244 1237 } 1245 1238 case PKEY_VERIFYKEY: { 1246 - struct pkey_verifykey __user *uvk = (void __user *) arg; 1239 + struct pkey_verifykey __user *uvk = (void __user *)arg; 1247 1240 struct pkey_verifykey kvk; 1248 1241 1249 1242 if 
(copy_from_user(&kvk, uvk, sizeof(kvk))) ··· 1258 1251 break; 1259 1252 } 1260 1253 case PKEY_GENPROTK: { 1261 - struct pkey_genprotk __user *ugp = (void __user *) arg; 1254 + struct pkey_genprotk __user *ugp = (void __user *)arg; 1262 1255 struct pkey_genprotk kgp; 1263 1256 1264 1257 if (copy_from_user(&kgp, ugp, sizeof(kgp))) ··· 1272 1265 break; 1273 1266 } 1274 1267 case PKEY_VERIFYPROTK: { 1275 - struct pkey_verifyprotk __user *uvp = (void __user *) arg; 1268 + struct pkey_verifyprotk __user *uvp = (void __user *)arg; 1276 1269 struct pkey_verifyprotk kvp; 1277 1270 1278 1271 if (copy_from_user(&kvp, uvp, sizeof(kvp))) ··· 1282 1275 break; 1283 1276 } 1284 1277 case PKEY_KBLOB2PROTK: { 1285 - struct pkey_kblob2pkey __user *utp = (void __user *) arg; 1278 + struct pkey_kblob2pkey __user *utp = (void __user *)arg; 1286 1279 struct pkey_kblob2pkey ktp; 1287 1280 u8 *kkey; 1288 1281 ··· 1301 1294 break; 1302 1295 } 1303 1296 case PKEY_GENSECK2: { 1304 - struct pkey_genseck2 __user *ugs = (void __user *) arg; 1297 + struct pkey_genseck2 __user *ugs = (void __user *)arg; 1305 1298 struct pkey_genseck2 kgs; 1306 1299 struct pkey_apqn *apqns; 1307 1300 size_t klen = KEYBLOBBUFSIZE; ··· 1343 1336 break; 1344 1337 } 1345 1338 case PKEY_CLR2SECK2: { 1346 - struct pkey_clr2seck2 __user *ucs = (void __user *) arg; 1339 + struct pkey_clr2seck2 __user *ucs = (void __user *)arg; 1347 1340 struct pkey_clr2seck2 kcs; 1348 1341 struct pkey_apqn *apqns; 1349 1342 size_t klen = KEYBLOBBUFSIZE; ··· 1386 1379 break; 1387 1380 } 1388 1381 case PKEY_VERIFYKEY2: { 1389 - struct pkey_verifykey2 __user *uvk = (void __user *) arg; 1382 + struct pkey_verifykey2 __user *uvk = (void __user *)arg; 1390 1383 struct pkey_verifykey2 kvk; 1391 1384 u8 *kkey; 1392 1385 ··· 1407 1400 break; 1408 1401 } 1409 1402 case PKEY_KBLOB2PROTK2: { 1410 - struct pkey_kblob2pkey2 __user *utp = (void __user *) arg; 1403 + struct pkey_kblob2pkey2 __user *utp = (void __user *)arg; 1411 1404 struct 
pkey_kblob2pkey2 ktp; 1412 1405 struct pkey_apqn *apqns = NULL; 1413 1406 u8 *kkey; ··· 1434 1427 break; 1435 1428 } 1436 1429 case PKEY_APQNS4K: { 1437 - struct pkey_apqns4key __user *uak = (void __user *) arg; 1430 + struct pkey_apqns4key __user *uak = (void __user *)arg; 1438 1431 struct pkey_apqns4key kak; 1439 1432 struct pkey_apqn *apqns = NULL; 1440 1433 size_t nr_apqns, len; ··· 1483 1476 break; 1484 1477 } 1485 1478 case PKEY_APQNS4KT: { 1486 - struct pkey_apqns4keytype __user *uat = (void __user *) arg; 1479 + struct pkey_apqns4keytype __user *uat = (void __user *)arg; 1487 1480 struct pkey_apqns4keytype kat; 1488 1481 struct pkey_apqn *apqns = NULL; 1489 1482 size_t nr_apqns, len; ··· 1525 1518 break; 1526 1519 } 1527 1520 case PKEY_KBLOB2PROTK3: { 1528 - struct pkey_kblob2pkey3 __user *utp = (void __user *) arg; 1521 + struct pkey_kblob2pkey3 __user *utp = (void __user *)arg; 1529 1522 struct pkey_kblob2pkey3 ktp; 1530 1523 struct pkey_apqn *apqns = NULL; 1531 1524 u32 protkeylen = PROTKEYBLOBBUFSIZE; ··· 1715 1708 loff_t off, size_t count) 1716 1709 { 1717 1710 int rc; 1718 - struct pkey_seckey *seckey = (struct pkey_seckey *) buf; 1711 + struct pkey_seckey *seckey = (struct pkey_seckey *)buf; 1719 1712 1720 1713 if (off != 0 || count < sizeof(struct secaeskeytoken)) 1721 1714 return -EINVAL;
+108 -107
drivers/s390/crypto/zcrypt_api.c
··· 104 104 struct zcrypt_ops *zops; 105 105 106 106 list_for_each_entry(zops, &zcrypt_ops_list, list) 107 - if ((zops->variant == variant) && 107 + if (zops->variant == variant && 108 108 (!strncmp(zops->name, name, sizeof(zops->name)))) 109 109 return zops; 110 110 return NULL; ··· 438 438 strncpy(nodename, name, sizeof(nodename)); 439 439 else 440 440 snprintf(nodename, sizeof(nodename), 441 - ZCRYPT_NAME "_%d", (int) MINOR(devt)); 442 - nodename[sizeof(nodename)-1] = '\0'; 441 + ZCRYPT_NAME "_%d", (int)MINOR(devt)); 442 + nodename[sizeof(nodename) - 1] = '\0'; 443 443 if (dev_set_name(&zcdndev->device, nodename)) { 444 444 rc = -EINVAL; 445 445 goto unlockout; ··· 519 519 /* 520 520 * zcrypt_write(): Not allowed. 521 521 * 522 - * Write is is not allowed 522 + * Write is not allowed 523 523 */ 524 524 static ssize_t zcrypt_write(struct file *filp, const char __user *buf, 525 525 size_t count, loff_t *f_pos) ··· 549 549 perms = &zcdndev->perms; 550 550 } 551 551 #endif 552 - filp->private_data = (void *) perms; 552 + filp->private_data = (void *)perms; 553 553 554 554 atomic_inc(&zcrypt_open_count); 555 555 return stream_open(inode, filp); ··· 713 713 pref_zq = NULL; 714 714 spin_lock(&zcrypt_list_lock); 715 715 for_each_zcrypt_card(zc) { 716 - /* Check for useable accelarator or CCA card */ 716 + /* Check for usable accelarator or CCA card */ 717 717 if (!zc->online || !zc->card->config || zc->card->chkstop || 718 718 !(zc->card->functions & 0x18000000)) 719 719 continue; ··· 733 733 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 734 734 continue; 735 735 for_each_zcrypt_queue(zq, zc) { 736 - /* check if device is useable and eligible */ 736 + /* check if device is usable and eligible */ 737 737 if (!zq->online || !zq->ops->rsa_modexpo || 738 738 !zq->queue->config || zq->queue->chkstop) 739 739 continue; ··· 823 823 pref_zq = NULL; 824 824 spin_lock(&zcrypt_list_lock); 825 825 for_each_zcrypt_card(zc) { 826 - /* Check for useable accelarator or 
CCA card */ 826 + /* Check for usable accelarator or CCA card */ 827 827 if (!zc->online || !zc->card->config || zc->card->chkstop || 828 828 !(zc->card->functions & 0x18000000)) 829 829 continue; ··· 843 843 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 844 844 continue; 845 845 for_each_zcrypt_queue(zq, zc) { 846 - /* check if device is useable and eligible */ 846 + /* check if device is usable and eligible */ 847 847 if (!zq->online || !zq->ops->rsa_modexpo_crt || 848 848 !zq->queue->config || zq->queue->chkstop) 849 849 continue; ··· 893 893 894 894 static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, 895 895 struct zcrypt_track *tr, 896 - struct ica_xcRB *xcRB) 896 + struct ica_xcRB *xcrb) 897 897 { 898 898 struct zcrypt_card *zc, *pref_zc; 899 899 struct zcrypt_queue *zq, *pref_zq; ··· 904 904 int cpen, qpen, qid = 0, rc = -ENODEV; 905 905 struct module *mod; 906 906 907 - trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); 907 + trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB); 908 908 909 - xcRB->status = 0; 909 + xcrb->status = 0; 910 910 ap_init_message(&ap_msg); 911 911 912 912 #ifdef CONFIG_ZCRYPT_DEBUG ··· 915 915 if (tr && tr->fi.action == AP_FI_ACTION_CCA_AGENT_FF) { 916 916 ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid agent_ID 'FF'\n", 917 917 __func__, tr->fi.cmd); 918 - xcRB->agent_ID = 0x4646; 918 + xcrb->agent_ID = 0x4646; 919 919 } 920 920 #endif 921 921 922 - rc = prep_cca_ap_msg(userspace, xcRB, &ap_msg, &func_code, &domain); 922 + rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); 923 923 if (rc) 924 924 goto out; 925 925 ··· 948 948 pref_zq = NULL; 949 949 spin_lock(&zcrypt_list_lock); 950 950 for_each_zcrypt_card(zc) { 951 - /* Check for useable CCA card */ 951 + /* Check for usable CCA card */ 952 952 if (!zc->online || !zc->card->config || zc->card->chkstop || 953 953 !(zc->card->functions & 0x10000000)) 954 954 continue; 955 955 /* Check for user selected CCA card */ 956 - if 
(xcRB->user_defined != AUTOSELECT && 957 - xcRB->user_defined != zc->card->id) 956 + if (xcrb->user_defined != AUTOSELECT && 957 + xcrb->user_defined != zc->card->id) 958 958 continue; 959 959 /* check if request size exceeds card max msg size */ 960 960 if (ap_msg.len > zc->card->maxmsgsize) ··· 971 971 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 972 972 continue; 973 973 for_each_zcrypt_queue(zq, zc) { 974 - /* check for device useable and eligible */ 974 + /* check for device usable and eligible */ 975 975 if (!zq->online || !zq->ops->send_cprb || 976 976 !zq->queue->config || zq->queue->chkstop || 977 977 (tdom != AUTOSEL_DOM && ··· 998 998 999 999 if (!pref_zq) { 1000 1000 ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n", 1001 - __func__, xcRB->user_defined, *domain); 1001 + __func__, xcrb->user_defined, *domain); 1002 1002 rc = -ENODEV; 1003 1003 goto out; 1004 1004 } ··· 1016 1016 } 1017 1017 #endif 1018 1018 1019 - rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcRB, &ap_msg); 1019 + rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg); 1020 1020 1021 1021 spin_lock(&zcrypt_list_lock); 1022 1022 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); ··· 1028 1028 tr->last_rc = rc; 1029 1029 tr->last_qid = qid; 1030 1030 } 1031 - trace_s390_zcrypt_rep(xcRB, func_code, rc, 1031 + trace_s390_zcrypt_rep(xcrb, func_code, rc, 1032 1032 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 1033 1033 return rc; 1034 1034 } 1035 1035 1036 - long zcrypt_send_cprb(struct ica_xcRB *xcRB) 1036 + long zcrypt_send_cprb(struct ica_xcRB *xcrb) 1037 1037 { 1038 - return _zcrypt_send_cprb(false, &ap_perms, NULL, xcRB); 1038 + return _zcrypt_send_cprb(false, &ap_perms, NULL, xcrb); 1039 1039 } 1040 1040 EXPORT_SYMBOL(zcrypt_send_cprb); 1041 1041 ··· 1089 1089 ap_msg.fi.cmd = tr->fi.cmd; 1090 1090 #endif 1091 1091 1092 - target_num = (unsigned short) xcrb->targets_num; 1092 + target_num = (unsigned short)xcrb->targets_num; 1093 1093 1094 1094 /* empty list 
indicates autoselect (all available targets) */ 1095 1095 targets = NULL; ··· 1103 1103 goto out; 1104 1104 } 1105 1105 1106 - uptr = (struct ep11_target_dev __force __user *) xcrb->targets; 1106 + uptr = (struct ep11_target_dev __force __user *)xcrb->targets; 1107 1107 if (z_copy_from_user(userspace, targets, uptr, 1108 - target_num * sizeof(*targets))) { 1108 + target_num * sizeof(*targets))) { 1109 1109 func_code = 0; 1110 1110 rc = -EFAULT; 1111 1111 goto out_free; ··· 1132 1132 pref_zq = NULL; 1133 1133 spin_lock(&zcrypt_list_lock); 1134 1134 for_each_zcrypt_card(zc) { 1135 - /* Check for useable EP11 card */ 1135 + /* Check for usable EP11 card */ 1136 1136 if (!zc->online || !zc->card->config || zc->card->chkstop || 1137 1137 !(zc->card->functions & 0x04000000)) 1138 1138 continue; ··· 1155 1155 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 1156 1156 continue; 1157 1157 for_each_zcrypt_queue(zq, zc) { 1158 - /* check if device is useable and eligible */ 1158 + /* check if device is usable and eligible */ 1159 1159 if (!zq->online || !zq->ops->send_ep11_cprb || 1160 1160 !zq->queue->config || zq->queue->chkstop || 1161 1161 (targets && ··· 1184 1184 if (!pref_zq) { 1185 1185 if (targets && target_num == 1) { 1186 1186 ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n", 1187 - __func__, (int) targets->ap_id, 1188 - (int) targets->dom_id); 1187 + __func__, (int)targets->ap_id, 1188 + (int)targets->dom_id); 1189 1189 } else if (targets) { 1190 1190 ZCRYPT_DBF_DBG("%s no match for %d target addrs => ENODEV\n", 1191 - __func__, (int) target_num); 1191 + __func__, (int)target_num); 1192 1192 } else { 1193 1193 ZCRYPT_DBF_DBG("%s no match for address ff.ffff => ENODEV\n", 1194 1194 __func__); ··· 1245 1245 pref_zq = NULL; 1246 1246 spin_lock(&zcrypt_list_lock); 1247 1247 for_each_zcrypt_card(zc) { 1248 - /* Check for useable CCA card */ 1248 + /* Check for usable CCA card */ 1249 1249 if (!zc->online || !zc->card->config || 
zc->card->chkstop || 1250 1250 !(zc->card->functions & 0x10000000)) 1251 1251 continue; ··· 1254 1254 if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt)) 1255 1255 continue; 1256 1256 for_each_zcrypt_queue(zq, zc) { 1257 - /* check if device is useable and eligible */ 1257 + /* check if device is usable and eligible */ 1258 1258 if (!zq->online || !zq->ops->rng || 1259 1259 !zq->queue->config || zq->queue->chkstop) 1260 1260 continue; ··· 1270 1270 1271 1271 if (!pref_zq) { 1272 1272 ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n", 1273 - __func__); 1273 + __func__); 1274 1274 rc = -ENODEV; 1275 1275 goto out; 1276 1276 } ··· 1381 1381 for_each_zcrypt_card(zc) { 1382 1382 for_each_zcrypt_queue(zq, zc) { 1383 1383 card = AP_QID_CARD(zq->queue->qid); 1384 - if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index 1385 - || card >= max_adapters) 1384 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index || 1385 + card >= max_adapters) 1386 1386 continue; 1387 1387 status[card] = zc->online ? zc->user_space_type : 0x0d; 1388 1388 } ··· 1402 1402 for_each_zcrypt_card(zc) { 1403 1403 for_each_zcrypt_queue(zq, zc) { 1404 1404 card = AP_QID_CARD(zq->queue->qid); 1405 - if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index 1406 - || card >= max_adapters) 1405 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index || 1406 + card >= max_adapters) 1407 1407 continue; 1408 1408 spin_lock(&zq->queue->lock); 1409 1409 qdepth[card] = ··· 1429 1429 for_each_zcrypt_card(zc) { 1430 1430 for_each_zcrypt_queue(zq, zc) { 1431 1431 card = AP_QID_CARD(zq->queue->qid); 1432 - if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index 1433 - || card >= max_adapters) 1432 + if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index || 1433 + card >= max_adapters) 1434 1434 continue; 1435 1435 spin_lock(&zq->queue->lock); 1436 1436 cnt = zq->queue->total_request_count; 1437 1437 spin_unlock(&zq->queue->lock); 1438 - reqcnt[card] = (cnt < UINT_MAX) ? 
(u32) cnt : UINT_MAX; 1438 + reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX; 1439 1439 } 1440 1440 } 1441 1441 local_bh_enable(); ··· 1493 1493 int rc; 1494 1494 struct zcrypt_track tr; 1495 1495 struct ica_rsa_modexpo mex; 1496 - struct ica_rsa_modexpo __user *umex = (void __user *) arg; 1496 + struct ica_rsa_modexpo __user *umex = (void __user *)arg; 1497 1497 1498 1498 memset(&tr, 0, sizeof(tr)); 1499 1499 if (copy_from_user(&mex, umex, sizeof(mex))) ··· 1538 1538 int rc; 1539 1539 struct zcrypt_track tr; 1540 1540 struct ica_rsa_modexpo_crt crt; 1541 - struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg; 1541 + struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg; 1542 1542 1543 1543 memset(&tr, 0, sizeof(tr)); 1544 1544 if (copy_from_user(&crt, ucrt, sizeof(crt))) ··· 1581 1581 static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) 1582 1582 { 1583 1583 int rc; 1584 - struct ica_xcRB xcRB; 1584 + struct ica_xcRB xcrb; 1585 1585 struct zcrypt_track tr; 1586 - struct ica_xcRB __user *uxcRB = (void __user *) arg; 1586 + struct ica_xcRB __user *uxcrb = (void __user *)arg; 1587 1587 1588 1588 memset(&tr, 0, sizeof(tr)); 1589 - if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB))) 1589 + if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) 1590 1590 return -EFAULT; 1591 1591 1592 1592 #ifdef CONFIG_ZCRYPT_DEBUG 1593 - if ((xcRB.status & 0x8000FFFF) == 0x80004649 /* 'FI' */) { 1593 + if ((xcrb.status & 0x8000FFFF) == 0x80004649 /* 'FI' */) { 1594 1594 if (!capable(CAP_SYS_ADMIN)) 1595 1595 return -EPERM; 1596 - tr.fi.cmd = (u16)(xcRB.status >> 16); 1596 + tr.fi.cmd = (u16)(xcrb.status >> 16); 1597 1597 } 1598 - xcRB.status = 0; 1598 + xcrb.status = 0; 1599 1599 #endif 1600 1600 1601 1601 do { 1602 - rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB); 1602 + rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); 1603 1603 if (rc == -EAGAIN) 1604 1604 tr.again_counter++; 1605 1605 #ifdef CONFIG_ZCRYPT_DEBUG ··· 1610 1610 /* on failure: retry once 
again after a requested rescan */ 1611 1611 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1612 1612 do { 1613 - rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB); 1613 + rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); 1614 1614 if (rc == -EAGAIN) 1615 1615 tr.again_counter++; 1616 1616 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); ··· 1618 1618 rc = -EIO; 1619 1619 if (rc) 1620 1620 ZCRYPT_DBF_DBG("ioctl ZSENDCPRB rc=%d status=0x%x\n", 1621 - rc, xcRB.status); 1622 - if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) 1621 + rc, xcrb.status); 1622 + if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) 1623 1623 return -EFAULT; 1624 1624 return rc; 1625 1625 } ··· 1674 1674 { 1675 1675 int rc; 1676 1676 struct ap_perms *perms = 1677 - (struct ap_perms *) filp->private_data; 1677 + (struct ap_perms *)filp->private_data; 1678 1678 1679 1679 rc = zcrypt_check_ioctl(perms, cmd); 1680 1680 if (rc) ··· 1698 1698 if (!device_status) 1699 1699 return -ENOMEM; 1700 1700 zcrypt_device_status_mask_ext(device_status); 1701 - if (copy_to_user((char __user *) arg, device_status, 1701 + if (copy_to_user((char __user *)arg, device_status, 1702 1702 total_size)) 1703 1703 rc = -EFAULT; 1704 1704 kfree(device_status); ··· 1708 1708 char status[AP_DEVICES]; 1709 1709 1710 1710 zcrypt_status_mask(status, AP_DEVICES); 1711 - if (copy_to_user((char __user *) arg, status, sizeof(status))) 1711 + if (copy_to_user((char __user *)arg, status, sizeof(status))) 1712 1712 return -EFAULT; 1713 1713 return 0; 1714 1714 } ··· 1716 1716 char qdepth[AP_DEVICES]; 1717 1717 1718 1718 zcrypt_qdepth_mask(qdepth, AP_DEVICES); 1719 - if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth))) 1719 + if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth))) 1720 1720 return -EFAULT; 1721 1721 return 0; 1722 1722 } ··· 1727 1727 if (!reqcnt) 1728 1728 return -ENOMEM; 1729 1729 zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES); 1730 - if (copy_to_user((int __user *) arg, reqcnt, 1730 + if 
(copy_to_user((int __user *)arg, reqcnt, 1731 1731 sizeof(u32) * AP_DEVICES)) 1732 1732 rc = -EFAULT; 1733 1733 kfree(reqcnt); 1734 1734 return rc; 1735 1735 } 1736 1736 case Z90STAT_REQUESTQ_COUNT: 1737 - return put_user(zcrypt_requestq_count(), (int __user *) arg); 1737 + return put_user(zcrypt_requestq_count(), (int __user *)arg); 1738 1738 case Z90STAT_PENDINGQ_COUNT: 1739 - return put_user(zcrypt_pendingq_count(), (int __user *) arg); 1739 + return put_user(zcrypt_pendingq_count(), (int __user *)arg); 1740 1740 case Z90STAT_TOTALOPEN_COUNT: 1741 1741 return put_user(atomic_read(&zcrypt_open_count), 1742 - (int __user *) arg); 1742 + (int __user *)arg); 1743 1743 case Z90STAT_DOMAIN_INDEX: 1744 - return put_user(ap_domain_index, (int __user *) arg); 1744 + return put_user(ap_domain_index, (int __user *)arg); 1745 1745 /* 1746 1746 * Deprecated ioctls 1747 1747 */ ··· 1755 1755 if (!device_status) 1756 1756 return -ENOMEM; 1757 1757 zcrypt_device_status_mask(device_status); 1758 - if (copy_to_user((char __user *) arg, device_status, 1758 + if (copy_to_user((char __user *)arg, device_status, 1759 1759 total_size)) 1760 1760 rc = -EFAULT; 1761 1761 kfree(device_status); ··· 1766 1766 char status[MAX_ZDEV_CARDIDS]; 1767 1767 1768 1768 zcrypt_status_mask(status, MAX_ZDEV_CARDIDS); 1769 - if (copy_to_user((char __user *) arg, status, sizeof(status))) 1769 + if (copy_to_user((char __user *)arg, status, sizeof(status))) 1770 1770 return -EFAULT; 1771 1771 return 0; 1772 1772 } ··· 1775 1775 char qdepth[MAX_ZDEV_CARDIDS]; 1776 1776 1777 1777 zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS); 1778 - if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth))) 1778 + if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth))) 1779 1779 return -EFAULT; 1780 1780 return 0; 1781 1781 } ··· 1784 1784 u32 reqcnt[MAX_ZDEV_CARDIDS]; 1785 1785 1786 1786 zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS); 1787 - if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt))) 1787 + if 
(copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt))) 1788 1788 return -EFAULT; 1789 1789 return 0; 1790 1790 } ··· 1899 1899 &ucrt32->outputdatalength); 1900 1900 } 1901 1901 1902 - struct compat_ica_xcRB { 1902 + struct compat_ica_xcrb { 1903 1903 unsigned short agent_ID; 1904 1904 unsigned int user_defined; 1905 1905 unsigned short request_ID; ··· 1919 1919 unsigned int status; 1920 1920 } __packed; 1921 1921 1922 - static long trans_xcRB32(struct ap_perms *perms, struct file *filp, 1922 + static long trans_xcrb32(struct ap_perms *perms, struct file *filp, 1923 1923 unsigned int cmd, unsigned long arg) 1924 1924 { 1925 - struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg); 1926 - struct compat_ica_xcRB xcRB32; 1925 + struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg); 1926 + struct compat_ica_xcrb xcrb32; 1927 1927 struct zcrypt_track tr; 1928 - struct ica_xcRB xcRB64; 1928 + struct ica_xcRB xcrb64; 1929 1929 long rc; 1930 1930 1931 1931 memset(&tr, 0, sizeof(tr)); 1932 - if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32))) 1932 + if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32))) 1933 1933 return -EFAULT; 1934 - xcRB64.agent_ID = xcRB32.agent_ID; 1935 - xcRB64.user_defined = xcRB32.user_defined; 1936 - xcRB64.request_ID = xcRB32.request_ID; 1937 - xcRB64.request_control_blk_length = 1938 - xcRB32.request_control_blk_length; 1939 - xcRB64.request_control_blk_addr = 1940 - compat_ptr(xcRB32.request_control_blk_addr); 1941 - xcRB64.request_data_length = 1942 - xcRB32.request_data_length; 1943 - xcRB64.request_data_address = 1944 - compat_ptr(xcRB32.request_data_address); 1945 - xcRB64.reply_control_blk_length = 1946 - xcRB32.reply_control_blk_length; 1947 - xcRB64.reply_control_blk_addr = 1948 - compat_ptr(xcRB32.reply_control_blk_addr); 1949 - xcRB64.reply_data_length = xcRB32.reply_data_length; 1950 - xcRB64.reply_data_addr = 1951 - compat_ptr(xcRB32.reply_data_addr); 1952 - xcRB64.priority_window = xcRB32.priority_window; 1953 - xcRB64.status 
= xcRB32.status; 1934 + xcrb64.agent_ID = xcrb32.agent_ID; 1935 + xcrb64.user_defined = xcrb32.user_defined; 1936 + xcrb64.request_ID = xcrb32.request_ID; 1937 + xcrb64.request_control_blk_length = 1938 + xcrb32.request_control_blk_length; 1939 + xcrb64.request_control_blk_addr = 1940 + compat_ptr(xcrb32.request_control_blk_addr); 1941 + xcrb64.request_data_length = 1942 + xcrb32.request_data_length; 1943 + xcrb64.request_data_address = 1944 + compat_ptr(xcrb32.request_data_address); 1945 + xcrb64.reply_control_blk_length = 1946 + xcrb32.reply_control_blk_length; 1947 + xcrb64.reply_control_blk_addr = 1948 + compat_ptr(xcrb32.reply_control_blk_addr); 1949 + xcrb64.reply_data_length = xcrb32.reply_data_length; 1950 + xcrb64.reply_data_addr = 1951 + compat_ptr(xcrb32.reply_data_addr); 1952 + xcrb64.priority_window = xcrb32.priority_window; 1953 + xcrb64.status = xcrb32.status; 1954 1954 do { 1955 - rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64); 1955 + rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); 1956 1956 if (rc == -EAGAIN) 1957 1957 tr.again_counter++; 1958 1958 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1959 1959 /* on failure: retry once again after a requested rescan */ 1960 1960 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1961 1961 do { 1962 - rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64); 1962 + rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); 1963 1963 if (rc == -EAGAIN) 1964 1964 tr.again_counter++; 1965 1965 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1966 1966 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1967 1967 rc = -EIO; 1968 - xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; 1969 - xcRB32.reply_data_length = xcRB64.reply_data_length; 1970 - xcRB32.status = xcRB64.status; 1971 - if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32))) 1968 + xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length; 1969 + xcrb32.reply_data_length = xcrb64.reply_data_length; 
1970 + xcrb32.status = xcrb64.status; 1971 + if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32))) 1972 1972 return -EFAULT; 1973 1973 return rc; 1974 1974 } 1975 1975 1976 1976 static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, 1977 - unsigned long arg) 1977 + unsigned long arg) 1978 1978 { 1979 1979 int rc; 1980 1980 struct ap_perms *perms = 1981 - (struct ap_perms *) filp->private_data; 1981 + (struct ap_perms *)filp->private_data; 1982 1982 1983 1983 rc = zcrypt_check_ioctl(perms, cmd); 1984 1984 if (rc) ··· 1989 1989 if (cmd == ICARSACRT) 1990 1990 return trans_modexpo_crt32(perms, filp, cmd, arg); 1991 1991 if (cmd == ZSECSENDCPRB) 1992 - return trans_xcRB32(perms, filp, cmd, arg); 1992 + return trans_xcrb32(perms, filp, cmd, arg); 1993 1993 return zcrypt_unlocked_ioctl(filp, cmd, arg); 1994 1994 } 1995 1995 #endif ··· 2033 2033 * read method calls. 2034 2034 */ 2035 2035 if (zcrypt_rng_buffer_index == 0) { 2036 - rc = zcrypt_rng((char *) zcrypt_rng_buffer); 2036 + rc = zcrypt_rng((char *)zcrypt_rng_buffer); 2037 2037 /* on failure: retry once again after a requested rescan */ 2038 2038 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 2039 - rc = zcrypt_rng((char *) zcrypt_rng_buffer); 2039 + rc = zcrypt_rng((char *)zcrypt_rng_buffer); 2040 2040 if (rc < 0) 2041 2041 return -EIO; 2042 2042 zcrypt_rng_buffer_index = rc / sizeof(*data); ··· 2057 2057 2058 2058 mutex_lock(&zcrypt_rng_mutex); 2059 2059 if (zcrypt_rng_device_count == 0) { 2060 - zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL); 2060 + zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL); 2061 2061 if (!zcrypt_rng_buffer) { 2062 2062 rc = -ENOMEM; 2063 2063 goto out; ··· 2069 2069 if (rc) 2070 2070 goto out_free; 2071 2071 zcrypt_rng_device_count = 1; 2072 - } else 2072 + } else { 2073 2073 zcrypt_rng_device_count++; 2074 + } 2074 2075 mutex_unlock(&zcrypt_rng_mutex); 2075 2076 return 0; 2076 2077 2077 2078 out_free: 2078 - free_page((unsigned long) zcrypt_rng_buffer); 
2079 + free_page((unsigned long)zcrypt_rng_buffer); 2079 2080 out: 2080 2081 mutex_unlock(&zcrypt_rng_mutex); 2081 2082 return rc; ··· 2088 2087 zcrypt_rng_device_count--; 2089 2088 if (zcrypt_rng_device_count == 0) { 2090 2089 hwrng_unregister(&zcrypt_rng_dev); 2091 - free_page((unsigned long) zcrypt_rng_buffer); 2090 + free_page((unsigned long)zcrypt_rng_buffer); 2092 2091 } 2093 2092 mutex_unlock(&zcrypt_rng_mutex); 2094 2093 }
+2 -2
drivers/s390/crypto/zcrypt_api.h
··· 170 170 { 171 171 if (likely(userspace)) 172 172 return copy_from_user(to, from, n); 173 - memcpy(to, (void __force *) from, n); 173 + memcpy(to, (void __force *)from, n); 174 174 return 0; 175 175 } 176 176 ··· 181 181 { 182 182 if (likely(userspace)) 183 183 return copy_to_user(to, from, n); 184 - memcpy((void __force *) to, from, n); 184 + memcpy((void __force *)to, from, n); 185 185 return 0; 186 186 } 187 187
+1 -1
drivers/s390/crypto/zcrypt_card.c
··· 138 138 { 139 139 struct zcrypt_card *zc; 140 140 141 - zc = kzalloc(sizeof(struct zcrypt_card), GFP_KERNEL); 141 + zc = kzalloc(sizeof(*zc), GFP_KERNEL); 142 142 if (!zc) 143 143 return NULL; 144 144 INIT_LIST_HEAD(&zc->list);
+29 -29
drivers/s390/crypto/zcrypt_cca_key.h
··· 11 11 #ifndef _ZCRYPT_CCA_KEY_H_ 12 12 #define _ZCRYPT_CCA_KEY_H_ 13 13 14 - struct T6_keyBlock_hdr { 14 + struct t6_keyblock_hdr { 15 15 unsigned short blen; 16 16 unsigned short ulen; 17 17 unsigned short flags; ··· 63 63 * complement of the residue modulo 8 of the sum of 64 64 * (p_len + q_len + dp_len + dq_len + u_len). 65 65 */ 66 - struct cca_pvt_ext_CRT_sec { 66 + struct cca_pvt_ext_crt_sec { 67 67 unsigned char section_identifier; 68 68 unsigned char version; 69 69 unsigned short section_length; ··· 108 108 .section_identifier = 0x04, 109 109 }; 110 110 struct { 111 - struct T6_keyBlock_hdr t6_hdr; 112 - struct cca_token_hdr pubHdr; 113 - struct cca_public_sec pubSec; 111 + struct t6_keyblock_hdr t6_hdr; 112 + struct cca_token_hdr pubhdr; 113 + struct cca_public_sec pubsec; 114 114 char exponent[0]; 115 115 } __packed *key = p; 116 116 unsigned char *temp; ··· 127 127 128 128 memset(key, 0, sizeof(*key)); 129 129 130 - key->pubHdr = static_pub_hdr; 131 - key->pubSec = static_pub_sec; 130 + key->pubhdr = static_pub_hdr; 131 + key->pubsec = static_pub_sec; 132 132 133 133 /* key parameter block */ 134 134 temp = key->exponent; ··· 146 146 if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength)) 147 147 return -EFAULT; 148 148 149 - key->pubSec.modulus_bit_len = 8 * mex->inputdatalength; 150 - key->pubSec.modulus_byte_len = mex->inputdatalength; 151 - key->pubSec.exponent_len = mex->inputdatalength - i; 152 - key->pubSec.section_length = sizeof(key->pubSec) + 153 - 2*mex->inputdatalength - i; 154 - key->pubHdr.token_length = 155 - key->pubSec.section_length + sizeof(key->pubHdr); 156 - key->t6_hdr.ulen = key->pubHdr.token_length + 4; 157 - key->t6_hdr.blen = key->pubHdr.token_length + 6; 158 - return sizeof(*key) + 2*mex->inputdatalength - i; 149 + key->pubsec.modulus_bit_len = 8 * mex->inputdatalength; 150 + key->pubsec.modulus_byte_len = mex->inputdatalength; 151 + key->pubsec.exponent_len = mex->inputdatalength - i; 152 + 
key->pubsec.section_length = sizeof(key->pubsec) + 153 + 2 * mex->inputdatalength - i; 154 + key->pubhdr.token_length = 155 + key->pubsec.section_length + sizeof(key->pubhdr); 156 + key->t6_hdr.ulen = key->pubhdr.token_length + 4; 157 + key->t6_hdr.blen = key->pubhdr.token_length + 6; 158 + return sizeof(*key) + 2 * mex->inputdatalength - i; 159 159 } 160 160 161 161 /** ··· 177 177 }; 178 178 static char pk_exponent[3] = { 0x01, 0x00, 0x01 }; 179 179 struct { 180 - struct T6_keyBlock_hdr t6_hdr; 180 + struct t6_keyblock_hdr t6_hdr; 181 181 struct cca_token_hdr token; 182 - struct cca_pvt_ext_CRT_sec pvt; 182 + struct cca_pvt_ext_crt_sec pvt; 183 183 char key_parts[0]; 184 184 } __packed *key = p; 185 185 struct cca_public_sec *pub; ··· 198 198 199 199 short_len = (crt->inputdatalength + 1) / 2; 200 200 long_len = short_len + 8; 201 - pad_len = -(3*long_len + 2*short_len) & 7; 202 - key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength; 201 + pad_len = -(3 * long_len + 2 * short_len) & 7; 202 + key_len = 3 * long_len + 2 * short_len + pad_len + crt->inputdatalength; 203 203 size = sizeof(*key) + key_len + sizeof(*pub) + 3; 204 204 205 205 /* parameter block.key block */ ··· 223 223 /* key parts */ 224 224 if (copy_from_user(key->key_parts, crt->np_prime, long_len) || 225 225 copy_from_user(key->key_parts + long_len, 226 - crt->nq_prime, short_len) || 226 + crt->nq_prime, short_len) || 227 227 copy_from_user(key->key_parts + long_len + short_len, 228 - crt->bp_key, long_len) || 229 - copy_from_user(key->key_parts + 2*long_len + short_len, 230 - crt->bq_key, short_len) || 231 - copy_from_user(key->key_parts + 2*long_len + 2*short_len, 232 - crt->u_mult_inv, long_len)) 228 + crt->bp_key, long_len) || 229 + copy_from_user(key->key_parts + 2 * long_len + short_len, 230 + crt->bq_key, short_len) || 231 + copy_from_user(key->key_parts + 2 * long_len + 2 * short_len, 232 + crt->u_mult_inv, long_len)) 233 233 return -EFAULT; 234 - memset(key->key_parts + 
3*long_len + 2*short_len + pad_len, 234 + memset(key->key_parts + 3 * long_len + 2 * short_len + pad_len, 235 235 0xff, crt->inputdatalength); 236 236 pub = (struct cca_public_sec *)(key->key_parts + key_len); 237 237 *pub = static_cca_pub_sec; ··· 241 241 * section. So, an arbitrary public exponent of 0x010001 will be 242 242 * used. 243 243 */ 244 - memcpy((char *) (pub + 1), pk_exponent, 3); 244 + memcpy((char *)(pub + 1), pk_exponent, 3); 245 245 return size; 246 246 } 247 247
+135 -134
drivers/s390/crypto/zcrypt_ccamisc.c
··· 53 53 int cca_check_secaeskeytoken(debug_info_t *dbg, int dbflvl, 54 54 const u8 *token, int keybitsize) 55 55 { 56 - struct secaeskeytoken *t = (struct secaeskeytoken *) token; 56 + struct secaeskeytoken *t = (struct secaeskeytoken *)token; 57 57 58 58 #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) 59 59 60 60 if (t->type != TOKTYPE_CCA_INTERNAL) { 61 61 if (dbg) 62 62 DBF("%s token check failed, type 0x%02x != 0x%02x\n", 63 - __func__, (int) t->type, TOKTYPE_CCA_INTERNAL); 63 + __func__, (int)t->type, TOKTYPE_CCA_INTERNAL); 64 64 return -EINVAL; 65 65 } 66 66 if (t->version != TOKVER_CCA_AES) { 67 67 if (dbg) 68 68 DBF("%s token check failed, version 0x%02x != 0x%02x\n", 69 - __func__, (int) t->version, TOKVER_CCA_AES); 69 + __func__, (int)t->version, TOKVER_CCA_AES); 70 70 return -EINVAL; 71 71 } 72 72 if (keybitsize > 0 && t->bitsize != keybitsize) { 73 73 if (dbg) 74 74 DBF("%s token check failed, bitsize %d != %d\n", 75 - __func__, (int) t->bitsize, keybitsize); 75 + __func__, (int)t->bitsize, keybitsize); 76 76 return -EINVAL; 77 77 } 78 78 ··· 93 93 const u8 *token, int keybitsize, 94 94 int checkcpacfexport) 95 95 { 96 - struct cipherkeytoken *t = (struct cipherkeytoken *) token; 96 + struct cipherkeytoken *t = (struct cipherkeytoken *)token; 97 97 bool keybitsizeok = true; 98 98 99 99 #define DBF(...) 
debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) ··· 101 101 if (t->type != TOKTYPE_CCA_INTERNAL) { 102 102 if (dbg) 103 103 DBF("%s token check failed, type 0x%02x != 0x%02x\n", 104 - __func__, (int) t->type, TOKTYPE_CCA_INTERNAL); 104 + __func__, (int)t->type, TOKTYPE_CCA_INTERNAL); 105 105 return -EINVAL; 106 106 } 107 107 if (t->version != TOKVER_CCA_VLSC) { 108 108 if (dbg) 109 109 DBF("%s token check failed, version 0x%02x != 0x%02x\n", 110 - __func__, (int) t->version, TOKVER_CCA_VLSC); 110 + __func__, (int)t->version, TOKVER_CCA_VLSC); 111 111 return -EINVAL; 112 112 } 113 113 if (t->algtype != 0x02) { 114 114 if (dbg) 115 115 DBF("%s token check failed, algtype 0x%02x != 0x02\n", 116 - __func__, (int) t->algtype); 116 + __func__, (int)t->algtype); 117 117 return -EINVAL; 118 118 } 119 119 if (t->keytype != 0x0001) { 120 120 if (dbg) 121 121 DBF("%s token check failed, keytype 0x%04x != 0x0001\n", 122 - __func__, (int) t->keytype); 122 + __func__, (int)t->keytype); 123 123 return -EINVAL; 124 124 } 125 125 if (t->plfver != 0x00 && t->plfver != 0x01) { 126 126 if (dbg) 127 127 DBF("%s token check failed, unknown plfver 0x%02x\n", 128 - __func__, (int) t->plfver); 128 + __func__, (int)t->plfver); 129 129 return -EINVAL; 130 130 } 131 131 if (t->wpllen != 512 && t->wpllen != 576 && t->wpllen != 640) { 132 132 if (dbg) 133 133 DBF("%s token check failed, unknown wpllen %d\n", 134 - __func__, (int) t->wpllen); 134 + __func__, (int)t->wpllen); 135 135 return -EINVAL; 136 136 } 137 137 if (keybitsize > 0) { ··· 180 180 const u8 *token, size_t keysize, 181 181 int checkcpacfexport) 182 182 { 183 - struct eccprivkeytoken *t = (struct eccprivkeytoken *) token; 183 + struct eccprivkeytoken *t = (struct eccprivkeytoken *)token; 184 184 185 185 #define DBF(...) 
debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) 186 186 187 187 if (t->type != TOKTYPE_CCA_INTERNAL_PKA) { 188 188 if (dbg) 189 189 DBF("%s token check failed, type 0x%02x != 0x%02x\n", 190 - __func__, (int) t->type, TOKTYPE_CCA_INTERNAL_PKA); 190 + __func__, (int)t->type, TOKTYPE_CCA_INTERNAL_PKA); 191 191 return -EINVAL; 192 192 } 193 193 if (t->len > keysize) { 194 194 if (dbg) 195 195 DBF("%s token check failed, len %d > keysize %zu\n", 196 - __func__, (int) t->len, keysize); 196 + __func__, (int)t->len, keysize); 197 197 return -EINVAL; 198 198 } 199 199 if (t->secid != 0x20) { 200 200 if (dbg) 201 201 DBF("%s token check failed, secid 0x%02x != 0x20\n", 202 - __func__, (int) t->secid); 202 + __func__, (int)t->secid); 203 203 return -EINVAL; 204 204 } 205 205 if (checkcpacfexport && !(t->kutc & 0x01)) { ··· 222 222 * on failure. 223 223 */ 224 224 static int alloc_and_prep_cprbmem(size_t paramblen, 225 - u8 **pcprbmem, 226 - struct CPRBX **preqCPRB, 227 - struct CPRBX **prepCPRB) 225 + u8 **p_cprb_mem, 226 + struct CPRBX **p_req_cprb, 227 + struct CPRBX **p_rep_cprb) 228 228 { 229 229 u8 *cprbmem; 230 230 size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen; ··· 238 238 if (!cprbmem) 239 239 return -ENOMEM; 240 240 241 - preqcblk = (struct CPRBX *) cprbmem; 242 - prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen); 241 + preqcblk = (struct CPRBX *)cprbmem; 242 + prepcblk = (struct CPRBX *)(cprbmem + cprbplusparamblen); 243 243 244 244 /* fill request cprb struct */ 245 245 preqcblk->cprb_len = sizeof(struct CPRBX); ··· 248 248 preqcblk->rpl_msgbl = cprbplusparamblen; 249 249 if (paramblen) { 250 250 preqcblk->req_parmb = 251 - ((u8 __user *) preqcblk) + sizeof(struct CPRBX); 251 + ((u8 __user *)preqcblk) + sizeof(struct CPRBX); 252 252 preqcblk->rpl_parmb = 253 - ((u8 __user *) prepcblk) + sizeof(struct CPRBX); 253 + ((u8 __user *)prepcblk) + sizeof(struct CPRBX); 254 254 } 255 255 256 - *pcprbmem = cprbmem; 257 - *preqCPRB = preqcblk; 258 - 
*prepCPRB = prepcblk; 256 + *p_cprb_mem = cprbmem; 257 + *p_req_cprb = preqcblk; 258 + *p_rep_cprb = prepcblk; 259 259 260 260 return 0; 261 261 } ··· 286 286 pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr); 287 287 pxcrb->request_control_blk_length = 288 288 preqcblk->cprb_len + preqcblk->req_parml; 289 - pxcrb->request_control_blk_addr = (void __user *) preqcblk; 289 + pxcrb->request_control_blk_addr = (void __user *)preqcblk; 290 290 pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl; 291 - pxcrb->reply_control_blk_addr = (void __user *) prepcblk; 291 + pxcrb->reply_control_blk_addr = (void __user *)prepcblk; 292 292 } 293 293 294 294 /* ··· 345 345 preqcblk->domain = domain; 346 346 347 347 /* fill request cprb param block with KG request */ 348 - preqparm = (struct kgreqparm __force *) preqcblk->req_parmb; 348 + preqparm = (struct kgreqparm __force *)preqcblk->req_parmb; 349 349 memcpy(preqparm->subfunc_code, "KG", 2); 350 350 preqparm->rule_array_len = sizeof(preqparm->rule_array_len); 351 351 preqparm->lv1.len = sizeof(struct lv1); ··· 387 387 rc = zcrypt_send_cprb(&xcrb); 388 388 if (rc) { 389 389 DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n", 390 - __func__, (int) cardnr, (int) domain, rc); 390 + __func__, (int)cardnr, (int)domain, rc); 391 391 goto out; 392 392 } 393 393 ··· 395 395 if (prepcblk->ccp_rtcode != 0) { 396 396 DEBUG_ERR("%s secure key generate failure, card response %d/%d\n", 397 397 __func__, 398 - (int) prepcblk->ccp_rtcode, 399 - (int) prepcblk->ccp_rscode); 398 + (int)prepcblk->ccp_rtcode, 399 + (int)prepcblk->ccp_rscode); 400 400 rc = -EIO; 401 401 goto out; 402 402 } 403 403 404 404 /* process response cprb param block */ 405 - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); 406 - prepcblk->rpl_parmb = (u8 __user *) ptr; 407 - prepparm = (struct kgrepparm *) ptr; 405 + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); 406 + prepcblk->rpl_parmb = (u8 __user *)ptr; 407 + prepparm = (struct 
kgrepparm *)ptr; 408 408 409 409 /* check length of the returned secure key token */ 410 410 seckeysize = prepparm->lv3.keyblock.toklen ··· 419 419 420 420 /* check secure key token */ 421 421 rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR, 422 - prepparm->lv3.keyblock.tok, 8*keysize); 422 + prepparm->lv3.keyblock.tok, 8 * keysize); 423 423 if (rc) { 424 424 rc = -EIO; 425 425 goto out; ··· 486 486 preqcblk->domain = domain; 487 487 488 488 /* fill request cprb param block with CM request */ 489 - preqparm = (struct cmreqparm __force *) preqcblk->req_parmb; 489 + preqparm = (struct cmreqparm __force *)preqcblk->req_parmb; 490 490 memcpy(preqparm->subfunc_code, "CM", 2); 491 491 memcpy(preqparm->rule_array, "AES ", 8); 492 492 preqparm->rule_array_len = ··· 512 512 } 513 513 preqparm->lv1.len = sizeof(struct lv1) + keysize; 514 514 memcpy(preqparm->lv1.clrkey, clrkey, keysize); 515 - plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize); 515 + plv2 = (struct lv2 *)(((u8 *)&preqparm->lv2) + keysize); 516 516 plv2->len = sizeof(struct lv2); 517 517 plv2->keyid.len = sizeof(struct keyid); 518 518 plv2->keyid.attr = 0x30; ··· 525 525 rc = zcrypt_send_cprb(&xcrb); 526 526 if (rc) { 527 527 DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 528 - __func__, (int) cardnr, (int) domain, rc); 528 + __func__, (int)cardnr, (int)domain, rc); 529 529 goto out; 530 530 } 531 531 ··· 533 533 if (prepcblk->ccp_rtcode != 0) { 534 534 DEBUG_ERR("%s clear key import failure, card response %d/%d\n", 535 535 __func__, 536 - (int) prepcblk->ccp_rtcode, 537 - (int) prepcblk->ccp_rscode); 536 + (int)prepcblk->ccp_rtcode, 537 + (int)prepcblk->ccp_rscode); 538 538 rc = -EIO; 539 539 goto out; 540 540 } 541 541 542 542 /* process response cprb param block */ 543 - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); 544 - prepcblk->rpl_parmb = (u8 __user *) ptr; 545 - prepparm = (struct cmrepparm *) ptr; 543 + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); 544 + 
prepcblk->rpl_parmb = (u8 __user *)ptr; 545 + prepparm = (struct cmrepparm *)ptr; 546 546 547 547 /* check length of the returned secure key token */ 548 548 seckeysize = prepparm->lv3.keyblock.toklen ··· 557 557 558 558 /* check secure key token */ 559 559 rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR, 560 - prepparm->lv3.keyblock.tok, 8*keysize); 560 + prepparm->lv3.keyblock.tok, 8 * keysize); 561 561 if (rc) { 562 562 rc = -EIO; 563 563 goto out; ··· 632 632 preqcblk->domain = domain; 633 633 634 634 /* fill request cprb param block with USK request */ 635 - preqparm = (struct uskreqparm __force *) preqcblk->req_parmb; 635 + preqparm = (struct uskreqparm __force *)preqcblk->req_parmb; 636 636 memcpy(preqparm->subfunc_code, "US", 2); 637 637 preqparm->rule_array_len = sizeof(preqparm->rule_array_len); 638 638 preqparm->lv1.len = sizeof(struct lv1); ··· 652 652 rc = zcrypt_send_cprb(&xcrb); 653 653 if (rc) { 654 654 DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 655 - __func__, (int) cardnr, (int) domain, rc); 655 + __func__, (int)cardnr, (int)domain, rc); 656 656 goto out; 657 657 } 658 658 ··· 660 660 if (prepcblk->ccp_rtcode != 0) { 661 661 DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n", 662 662 __func__, 663 - (int) prepcblk->ccp_rtcode, 664 - (int) prepcblk->ccp_rscode); 663 + (int)prepcblk->ccp_rtcode, 664 + (int)prepcblk->ccp_rscode); 665 665 if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) 666 666 rc = -EAGAIN; 667 667 else ··· 671 671 if (prepcblk->ccp_rscode != 0) { 672 672 DEBUG_WARN("%s unwrap secure key warning, card response %d/%d\n", 673 673 __func__, 674 - (int) prepcblk->ccp_rtcode, 675 - (int) prepcblk->ccp_rscode); 674 + (int)prepcblk->ccp_rtcode, 675 + (int)prepcblk->ccp_rscode); 676 676 } 677 677 678 678 /* process response cprb param block */ 679 - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); 680 - prepcblk->rpl_parmb = (u8 __user *) ptr; 681 - prepparm = (struct uskrepparm *) 
ptr; 679 + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); 680 + prepcblk->rpl_parmb = (u8 __user *)ptr; 681 + prepparm = (struct uskrepparm *)ptr; 682 682 683 683 /* check the returned keyblock */ 684 684 if (prepparm->lv3.ckb.version != 0x01 && 685 685 prepparm->lv3.ckb.version != 0x02) { 686 686 DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n", 687 - __func__, (int) prepparm->lv3.ckb.version); 687 + __func__, (int)prepparm->lv3.ckb.version); 688 688 rc = -EIO; 689 689 goto out; 690 690 } 691 691 692 692 /* copy the tanslated protected key */ 693 693 switch (prepparm->lv3.ckb.len) { 694 - case 16+32: 694 + case 16 + 32: 695 695 /* AES 128 protected key */ 696 696 if (protkeytype) 697 697 *protkeytype = PKEY_KEYTYPE_AES_128; 698 698 break; 699 - case 24+32: 699 + case 24 + 32: 700 700 /* AES 192 protected key */ 701 701 if (protkeytype) 702 702 *protkeytype = PKEY_KEYTYPE_AES_192; 703 703 break; 704 - case 32+32: 704 + case 32 + 32: 705 705 /* AES 256 protected key */ 706 706 if (protkeytype) 707 707 *protkeytype = PKEY_KEYTYPE_AES_256; ··· 751 751 struct gkreqparm { 752 752 u8 subfunc_code[2]; 753 753 u16 rule_array_len; 754 - char rule_array[2*8]; 754 + char rule_array[2 * 8]; 755 755 struct { 756 756 u16 len; 757 757 u8 key_type_1[8]; ··· 827 827 preqcblk->req_parml = sizeof(struct gkreqparm); 828 828 829 829 /* prepare request param block with GK request */ 830 - preqparm = (struct gkreqparm __force *) preqcblk->req_parmb; 830 + preqparm = (struct gkreqparm __force *)preqcblk->req_parmb; 831 831 memcpy(preqparm->subfunc_code, "GK", 2); 832 832 preqparm->rule_array_len = sizeof(uint16_t) + 2 * 8; 833 - memcpy(preqparm->rule_array, "AES OP ", 2*8); 833 + memcpy(preqparm->rule_array, "AES OP ", 2 * 8); 834 834 835 835 /* prepare vud block */ 836 836 preqparm->vud.len = sizeof(preqparm->vud); ··· 869 869 870 870 /* patch the skeleton key token export flags inside the kb block */ 871 871 if (keygenflags) { 872 - t = (struct cipherkeytoken *) 
preqparm->kb.tlv3.gen_key_id_1; 873 - t->kmf1 |= (u16) (keygenflags & 0x0000FF00); 874 - t->kmf1 &= (u16) ~(keygenflags & 0x000000FF); 872 + t = (struct cipherkeytoken *)preqparm->kb.tlv3.gen_key_id_1; 873 + t->kmf1 |= (u16)(keygenflags & 0x0000FF00); 874 + t->kmf1 &= (u16)~(keygenflags & 0x000000FF); 875 875 } 876 876 877 877 /* prepare xcrb struct */ ··· 882 882 if (rc) { 883 883 DEBUG_ERR( 884 884 "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 885 - __func__, (int) cardnr, (int) domain, rc); 885 + __func__, (int)cardnr, (int)domain, rc); 886 886 goto out; 887 887 } 888 888 ··· 891 891 DEBUG_ERR( 892 892 "%s cipher key generate failure, card response %d/%d\n", 893 893 __func__, 894 - (int) prepcblk->ccp_rtcode, 895 - (int) prepcblk->ccp_rscode); 894 + (int)prepcblk->ccp_rtcode, 895 + (int)prepcblk->ccp_rscode); 896 896 rc = -EIO; 897 897 goto out; 898 898 } 899 899 900 900 /* process response cprb param block */ 901 - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); 902 - prepcblk->rpl_parmb = (u8 __user *) ptr; 903 - prepparm = (struct gkrepparm *) ptr; 901 + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); 902 + prepcblk->rpl_parmb = (u8 __user *)ptr; 903 + prepparm = (struct gkrepparm *)ptr; 904 904 905 905 /* do some plausibility checks on the key block */ 906 906 if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) || ··· 921 921 } 922 922 923 923 /* copy the generated vlsc key token */ 924 - t = (struct cipherkeytoken *) prepparm->kb.tlv1.gen_key; 924 + t = (struct cipherkeytoken *)prepparm->kb.tlv1.gen_key; 925 925 if (keybuf) { 926 926 if (*keybufsize >= t->len) 927 927 memcpy(keybuf, t, t->len); ··· 1006 1006 preqcblk->req_parml = 0; 1007 1007 1008 1008 /* prepare request param block with IP request */ 1009 - preq_ra_block = (struct rule_array_block __force *) preqcblk->req_parmb; 1009 + preq_ra_block = (struct rule_array_block __force *)preqcblk->req_parmb; 1010 1010 memcpy(preq_ra_block->subfunc_code, "IP", 2); 1011 1011 
preq_ra_block->rule_array_len = sizeof(uint16_t) + 2 * 8; 1012 1012 memcpy(preq_ra_block->rule_array, rule_array_1, 8); ··· 1050 1050 if (rc) { 1051 1051 DEBUG_ERR( 1052 1052 "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 1053 - __func__, (int) cardnr, (int) domain, rc); 1053 + __func__, (int)cardnr, (int)domain, rc); 1054 1054 goto out; 1055 1055 } 1056 1056 ··· 1059 1059 DEBUG_ERR( 1060 1060 "%s CSNBKPI2 failure, card response %d/%d\n", 1061 1061 __func__, 1062 - (int) prepcblk->ccp_rtcode, 1063 - (int) prepcblk->ccp_rscode); 1062 + (int)prepcblk->ccp_rtcode, 1063 + (int)prepcblk->ccp_rscode); 1064 1064 rc = -EIO; 1065 1065 goto out; 1066 1066 } 1067 1067 1068 1068 /* process response cprb param block */ 1069 - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); 1070 - prepcblk->rpl_parmb = (u8 __user *) ptr; 1071 - prepparm = (struct iprepparm *) ptr; 1069 + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); 1070 + prepcblk->rpl_parmb = (u8 __user *)ptr; 1071 + prepparm = (struct iprepparm *)ptr; 1072 1072 1073 1073 /* do some plausibility checks on the key block */ 1074 1074 if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) || ··· 1082 1082 /* do not check the key here, it may be incomplete */ 1083 1083 1084 1084 /* copy the vlsc key token back */ 1085 - t = (struct cipherkeytoken *) prepparm->kb.tlv1.key_token; 1085 + t = (struct cipherkeytoken *)prepparm->kb.tlv1.key_token; 1086 1086 memcpy(key_token, t, t->len); 1087 1087 *key_token_size = t->len; 1088 1088 ··· 1117 1117 1118 1118 /* patch the skeleton key token export flags */ 1119 1119 if (keygenflags) { 1120 - t = (struct cipherkeytoken *) token; 1121 - t->kmf1 |= (u16) (keygenflags & 0x0000FF00); 1122 - t->kmf1 &= (u16) ~(keygenflags & 0x000000FF); 1120 + t = (struct cipherkeytoken *)token; 1121 + t->kmf1 |= (u16)(keygenflags & 0x0000FF00); 1122 + t->kmf1 &= (u16)~(keygenflags & 0x000000FF); 1123 1123 } 1124 1124 1125 1125 /* ··· 1241 1241 preqcblk->domain = domain; 1242 1242 1243 1243 /* fill 
request cprb param block with AU request */ 1244 - preqparm = (struct aureqparm __force *) preqcblk->req_parmb; 1244 + preqparm = (struct aureqparm __force *)preqcblk->req_parmb; 1245 1245 memcpy(preqparm->subfunc_code, "AU", 2); 1246 1246 preqparm->rule_array_len = 1247 1247 sizeof(preqparm->rule_array_len) ··· 1267 1267 if (rc) { 1268 1268 DEBUG_ERR( 1269 1269 "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 1270 - __func__, (int) cardnr, (int) domain, rc); 1270 + __func__, (int)cardnr, (int)domain, rc); 1271 1271 goto out; 1272 1272 } 1273 1273 ··· 1276 1276 DEBUG_ERR( 1277 1277 "%s unwrap secure key failure, card response %d/%d\n", 1278 1278 __func__, 1279 - (int) prepcblk->ccp_rtcode, 1280 - (int) prepcblk->ccp_rscode); 1279 + (int)prepcblk->ccp_rtcode, 1280 + (int)prepcblk->ccp_rscode); 1281 1281 if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) 1282 1282 rc = -EAGAIN; 1283 1283 else ··· 1288 1288 DEBUG_WARN( 1289 1289 "%s unwrap secure key warning, card response %d/%d\n", 1290 1290 __func__, 1291 - (int) prepcblk->ccp_rtcode, 1292 - (int) prepcblk->ccp_rscode); 1291 + (int)prepcblk->ccp_rtcode, 1292 + (int)prepcblk->ccp_rscode); 1293 1293 } 1294 1294 1295 1295 /* process response cprb param block */ 1296 - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); 1297 - prepcblk->rpl_parmb = (u8 __user *) ptr; 1298 - prepparm = (struct aurepparm *) ptr; 1296 + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); 1297 + prepcblk->rpl_parmb = (u8 __user *)ptr; 1298 + prepparm = (struct aurepparm *)ptr; 1299 1299 1300 1300 /* check the returned keyblock */ 1301 1301 if (prepparm->vud.ckb.version != 0x01 && 1302 1302 prepparm->vud.ckb.version != 0x02) { 1303 1303 DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n", 1304 - __func__, (int) prepparm->vud.ckb.version); 1304 + __func__, (int)prepparm->vud.ckb.version); 1305 1305 rc = -EIO; 1306 1306 goto out; 1307 1307 } 1308 1308 if (prepparm->vud.ckb.algo != 0x02) { 1309 1309 DEBUG_ERR( 1310 1310 
"%s reply param keyblock algo mismatch 0x%02x != 0x02\n", 1311 - __func__, (int) prepparm->vud.ckb.algo); 1311 + __func__, (int)prepparm->vud.ckb.algo); 1312 1312 rc = -EIO; 1313 1313 goto out; 1314 1314 } 1315 1315 1316 1316 /* copy the translated protected key */ 1317 1317 switch (prepparm->vud.ckb.keylen) { 1318 - case 16+32: 1318 + case 16 + 32: 1319 1319 /* AES 128 protected key */ 1320 1320 if (protkeytype) 1321 1321 *protkeytype = PKEY_KEYTYPE_AES_128; 1322 1322 break; 1323 - case 24+32: 1323 + case 24 + 32: 1324 1324 /* AES 192 protected key */ 1325 1325 if (protkeytype) 1326 1326 *protkeytype = PKEY_KEYTYPE_AES_192; 1327 1327 break; 1328 - case 32+32: 1328 + case 32 + 32: 1329 1329 /* AES 256 protected key */ 1330 1330 if (protkeytype) 1331 1331 *protkeytype = PKEY_KEYTYPE_AES_256; ··· 1410 1410 preqcblk->domain = domain; 1411 1411 1412 1412 /* fill request cprb param block with AU request */ 1413 - preqparm = (struct aureqparm __force *) preqcblk->req_parmb; 1413 + preqparm = (struct aureqparm __force *)preqcblk->req_parmb; 1414 1414 memcpy(preqparm->subfunc_code, "AU", 2); 1415 1415 preqparm->rule_array_len = 1416 1416 sizeof(preqparm->rule_array_len) ··· 1436 1436 if (rc) { 1437 1437 DEBUG_ERR( 1438 1438 "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 1439 - __func__, (int) cardnr, (int) domain, rc); 1439 + __func__, (int)cardnr, (int)domain, rc); 1440 1440 goto out; 1441 1441 } 1442 1442 ··· 1445 1445 DEBUG_ERR( 1446 1446 "%s unwrap secure key failure, card response %d/%d\n", 1447 1447 __func__, 1448 - (int) prepcblk->ccp_rtcode, 1449 - (int) prepcblk->ccp_rscode); 1448 + (int)prepcblk->ccp_rtcode, 1449 + (int)prepcblk->ccp_rscode); 1450 1450 if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290) 1451 1451 rc = -EAGAIN; 1452 1452 else ··· 1457 1457 DEBUG_WARN( 1458 1458 "%s unwrap secure key warning, card response %d/%d\n", 1459 1459 __func__, 1460 - (int) prepcblk->ccp_rtcode, 1461 - (int) prepcblk->ccp_rscode); 1460 + 
(int)prepcblk->ccp_rtcode, 1461 + (int)prepcblk->ccp_rscode); 1462 1462 } 1463 1463 1464 1464 /* process response cprb param block */ 1465 - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); 1466 - prepcblk->rpl_parmb = (u8 __user *) ptr; 1467 - prepparm = (struct aurepparm *) ptr; 1465 + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); 1466 + prepcblk->rpl_parmb = (u8 __user *)ptr; 1467 + prepparm = (struct aurepparm *)ptr; 1468 1468 1469 1469 /* check the returned keyblock */ 1470 1470 if (prepparm->vud.ckb.version != 0x02) { 1471 1471 DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n", 1472 - __func__, (int) prepparm->vud.ckb.version); 1472 + __func__, (int)prepparm->vud.ckb.version); 1473 1473 rc = -EIO; 1474 1474 goto out; 1475 1475 } 1476 1476 if (prepparm->vud.ckb.algo != 0x81) { 1477 1477 DEBUG_ERR( 1478 1478 "%s reply param keyblock algo mismatch 0x%02x != 0x81\n", 1479 - __func__, (int) prepparm->vud.ckb.algo); 1479 + __func__, (int)prepparm->vud.ckb.algo); 1480 1480 rc = -EIO; 1481 1481 goto out; 1482 1482 } ··· 1537 1537 preqcblk->domain = domain; 1538 1538 1539 1539 /* fill request cprb param block with FQ request */ 1540 - preqparm = (struct fqreqparm __force *) preqcblk->req_parmb; 1540 + preqparm = (struct fqreqparm __force *)preqcblk->req_parmb; 1541 1541 memcpy(preqparm->subfunc_code, "FQ", 2); 1542 1542 memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array)); 1543 1543 preqparm->rule_array_len = ··· 1553 1553 rc = zcrypt_send_cprb(&xcrb); 1554 1554 if (rc) { 1555 1555 DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", 1556 - __func__, (int) cardnr, (int) domain, rc); 1556 + __func__, (int)cardnr, (int)domain, rc); 1557 1557 goto out; 1558 1558 } 1559 1559 ··· 1561 1561 if (prepcblk->ccp_rtcode != 0) { 1562 1562 DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n", 1563 1563 __func__, 1564 - (int) prepcblk->ccp_rtcode, 1565 - (int) prepcblk->ccp_rscode); 1564 + (int)prepcblk->ccp_rtcode, 
1565 + (int)prepcblk->ccp_rscode); 1566 1566 rc = -EIO; 1567 1567 goto out; 1568 1568 } 1569 1569 1570 1570 /* process response cprb param block */ 1571 - ptr = ((u8 *) prepcblk) + sizeof(struct CPRBX); 1572 - prepcblk->rpl_parmb = (u8 __user *) ptr; 1573 - prepparm = (struct fqrepparm *) ptr; 1571 + ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX); 1572 + prepcblk->rpl_parmb = (u8 __user *)ptr; 1573 + prepparm = (struct fqrepparm *)ptr; 1574 1574 ptr = prepparm->lvdata; 1575 1575 1576 1576 /* check and possibly copy reply rule array */ 1577 - len = *((u16 *) ptr); 1577 + len = *((u16 *)ptr); 1578 1578 if (len > sizeof(u16)) { 1579 1579 ptr += sizeof(u16); 1580 1580 len -= sizeof(u16); ··· 1585 1585 ptr += len; 1586 1586 } 1587 1587 /* check and possible copy reply var array */ 1588 - len = *((u16 *) ptr); 1588 + len = *((u16 *)ptr); 1589 1589 if (len > sizeof(u16)) { 1590 1590 ptr += sizeof(u16); 1591 1591 len -= sizeof(u16); ··· 1696 1696 ci->hwtype = devstat.hwtype; 1697 1697 1698 1698 /* prep page for rule array and var array use */ 1699 - pg = (u8 *) __get_free_page(GFP_KERNEL); 1699 + pg = (u8 *)__get_free_page(GFP_KERNEL); 1700 1700 if (!pg) 1701 1701 return -ENOMEM; 1702 1702 rarray = pg; 1703 - varray = pg + PAGE_SIZE/2; 1704 - rlen = vlen = PAGE_SIZE/2; 1703 + varray = pg + PAGE_SIZE / 2; 1704 + rlen = vlen = PAGE_SIZE / 2; 1705 1705 1706 1706 /* QF for this card/domain */ 1707 1707 rc = cca_query_crypto_facility(cardnr, domain, "STATICSA", 1708 1708 rarray, &rlen, varray, &vlen); 1709 - if (rc == 0 && rlen >= 10*8 && vlen >= 204) { 1709 + if (rc == 0 && rlen >= 10 * 8 && vlen >= 204) { 1710 1710 memcpy(ci->serial, rarray, 8); 1711 - ci->new_asym_mk_state = (char) rarray[4*8]; 1712 - ci->cur_asym_mk_state = (char) rarray[5*8]; 1713 - ci->old_asym_mk_state = (char) rarray[6*8]; 1711 + ci->new_asym_mk_state = (char)rarray[4 * 8]; 1712 + ci->cur_asym_mk_state = (char)rarray[5 * 8]; 1713 + ci->old_asym_mk_state = (char)rarray[6 * 8]; 1714 1714 if 
(ci->old_asym_mk_state == '2') 1715 1715 memcpy(ci->old_asym_mkvp, varray + 64, 16); 1716 1716 if (ci->cur_asym_mk_state == '2') 1717 1717 memcpy(ci->cur_asym_mkvp, varray + 84, 16); 1718 1718 if (ci->new_asym_mk_state == '3') 1719 1719 memcpy(ci->new_asym_mkvp, varray + 104, 16); 1720 - ci->new_aes_mk_state = (char) rarray[7*8]; 1721 - ci->cur_aes_mk_state = (char) rarray[8*8]; 1722 - ci->old_aes_mk_state = (char) rarray[9*8]; 1720 + ci->new_aes_mk_state = (char)rarray[7 * 8]; 1721 + ci->cur_aes_mk_state = (char)rarray[8 * 8]; 1722 + ci->old_aes_mk_state = (char)rarray[9 * 8]; 1723 1723 if (ci->old_aes_mk_state == '2') 1724 1724 memcpy(&ci->old_aes_mkvp, varray + 172, 8); 1725 1725 if (ci->cur_aes_mk_state == '2') ··· 1730 1730 } 1731 1731 if (!found) 1732 1732 goto out; 1733 - rlen = vlen = PAGE_SIZE/2; 1733 + rlen = vlen = PAGE_SIZE / 2; 1734 1734 rc = cca_query_crypto_facility(cardnr, domain, "STATICSB", 1735 1735 rarray, &rlen, varray, &vlen); 1736 - if (rc == 0 && rlen >= 13*8 && vlen >= 240) { 1737 - ci->new_apka_mk_state = (char) rarray[10*8]; 1738 - ci->cur_apka_mk_state = (char) rarray[11*8]; 1739 - ci->old_apka_mk_state = (char) rarray[12*8]; 1736 + if (rc == 0 && rlen >= 13 * 8 && vlen >= 240) { 1737 + ci->new_apka_mk_state = (char)rarray[10 * 8]; 1738 + ci->cur_apka_mk_state = (char)rarray[11 * 8]; 1739 + ci->old_apka_mk_state = (char)rarray[12 * 8]; 1740 1740 if (ci->old_apka_mk_state == '2') 1741 1741 memcpy(&ci->old_apka_mkvp, varray + 208, 8); 1742 1742 if (ci->cur_apka_mk_state == '2') ··· 1747 1747 } 1748 1748 1749 1749 out: 1750 - free_page((unsigned long) pg); 1750 + free_page((unsigned long)pg); 1751 1751 return found == 2 ? 0 : -ENOENT; 1752 1752 } 1753 1753 ··· 1855 1855 if (pdomain) 1856 1856 *pdomain = dom; 1857 1857 rc = (i < MAX_ZDEV_ENTRIES_EXT ? 
0 : 1); 1858 - } else 1858 + } else { 1859 1859 rc = -ENODEV; 1860 + } 1860 1861 1861 1862 kvfree(device_status); 1862 1863 return rc; ··· 1871 1870 { 1872 1871 u64 mkvp; 1873 1872 int minhwtype = 0; 1874 - const struct keytoken_header *hdr = (struct keytoken_header *) key; 1873 + const struct keytoken_header *hdr = (struct keytoken_header *)key; 1875 1874 1876 1875 if (hdr->type != TOKTYPE_CCA_INTERNAL) 1877 1876 return -EINVAL; ··· 1964 1963 } 1965 1964 /* apqn passed all filtering criterons, add to the array */ 1966 1965 if (_nr_apqns < 256) 1967 - _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom); 1966 + _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); 1968 1967 } 1969 1968 1970 1969 /* nothing found ? */
+5 -6
drivers/s390/crypto/zcrypt_cex2a.c
··· 34 34 35 35 #define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 bit modulus 36 36 * (max outputdatalength) + 37 - * type80_hdr*/ 37 + * type80_hdr 38 + */ 38 39 #define CEX3A_MAX_MESSAGE_SIZE sizeof(struct type50_crb3_msg) 39 40 40 - #define CEX2A_CLEANUP_TIME (15*HZ) 41 + #define CEX2A_CLEANUP_TIME (15 * HZ) 41 42 #define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME 42 43 43 44 MODULE_AUTHOR("IBM Corporation"); ··· 118 117 zc->online = 1; 119 118 120 119 rc = zcrypt_card_register(zc); 121 - if (rc) { 120 + if (rc) 122 121 zcrypt_card_free(zc); 123 - } 124 122 125 123 return rc; 126 124 } ··· 176 176 aq->request_timeout = CEX2A_CLEANUP_TIME; 177 177 dev_set_drvdata(&ap_dev->device, zq); 178 178 rc = zcrypt_queue_register(zq); 179 - if (rc) { 179 + if (rc) 180 180 zcrypt_queue_free(zq); 181 - } 182 181 183 182 return rc; 184 183 }
+5 -5
drivers/s390/crypto/zcrypt_cex2c.c
··· 31 31 #define CEX2C_MAX_MOD_SIZE 256 /* 2048 bits */ 32 32 #define CEX3C_MIN_MOD_SIZE 16 /* 128 bits */ 33 33 #define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */ 34 - #define CEX2C_MAX_XCRB_MESSAGE_SIZE (12*1024) 35 - #define CEX2C_CLEANUP_TIME (15*HZ) 34 + #define CEX2C_MAX_XCRB_MESSAGE_SIZE (12 * 1024) 35 + #define CEX2C_CLEANUP_TIME (15 * HZ) 36 36 37 37 MODULE_AUTHOR("IBM Corporation"); 38 38 MODULE_DESCRIPTION("CEX2C/CEX3C Cryptographic Coprocessor device driver, " \ ··· 200 200 int rc, i; 201 201 202 202 ap_init_message(&ap_msg); 203 - ap_msg.msg = (void *) get_zeroed_page(GFP_KERNEL); 203 + ap_msg.msg = (void *)get_zeroed_page(GFP_KERNEL); 204 204 if (!ap_msg.msg) 205 205 return -ENOMEM; 206 206 207 - rng_type6CPRB_msgX(&ap_msg, 4, &domain); 207 + rng_type6cprb_msgx(&ap_msg, 4, &domain); 208 208 209 209 msg = ap_msg.msg; 210 210 msg->cprbx.domain = AP_QID_QUEUE(aq->qid); ··· 233 233 else 234 234 rc = 0; 235 235 out_free: 236 - free_page((unsigned long) ap_msg.msg); 236 + free_page((unsigned long)ap_msg.msg); 237 237 return rc; 238 238 } 239 239
+5 -3
drivers/s390/crypto/zcrypt_cex4.c
··· 33 33 * But the maximum time limit managed by the stomper code is set to 60sec. 34 34 * Hence we have to wait at least that time period. 35 35 */ 36 - #define CEX4_CLEANUP_TIME (900*HZ) 36 + #define CEX4_CLEANUP_TIME (900 * HZ) 37 37 38 38 MODULE_AUTHOR("IBM Corporation"); 39 39 MODULE_DESCRIPTION("CEX[45678] Cryptographic Card device driver, " \ ··· 364 364 bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp)); 365 365 n += 2 * sizeof(di.cur_wkvp); 366 366 n += scnprintf(buf + n, PAGE_SIZE - n, "\n"); 367 - } else 367 + } else { 368 368 n = scnprintf(buf, PAGE_SIZE, "WK CUR: - -\n"); 369 + } 369 370 370 371 if (di.new_wk_state == '0') { 371 372 n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n", ··· 377 376 bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp)); 378 377 n += 2 * sizeof(di.new_wkvp); 379 378 n += scnprintf(buf + n, PAGE_SIZE - n, "\n"); 380 - } else 379 + } else { 381 380 n += scnprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n"); 381 + } 382 382 383 383 return n; 384 384 }
+84 -84
drivers/s390/crypto/zcrypt_ep11misc.c
··· 119 119 int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl, 120 120 const u8 *key, size_t keylen, int checkcpacfexp) 121 121 { 122 - struct ep11kblob_header *hdr = (struct ep11kblob_header *) key; 123 - struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr)); 122 + struct ep11kblob_header *hdr = (struct ep11kblob_header *)key; 123 + struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr)); 124 124 125 125 #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) 126 126 ··· 133 133 if (hdr->type != TOKTYPE_NON_CCA) { 134 134 if (dbg) 135 135 DBF("%s key check failed, type 0x%02x != 0x%02x\n", 136 - __func__, (int) hdr->type, TOKTYPE_NON_CCA); 136 + __func__, (int)hdr->type, TOKTYPE_NON_CCA); 137 137 return -EINVAL; 138 138 } 139 139 if (hdr->hver != 0x00) { 140 140 if (dbg) 141 141 DBF("%s key check failed, header version 0x%02x != 0x00\n", 142 - __func__, (int) hdr->hver); 142 + __func__, (int)hdr->hver); 143 143 return -EINVAL; 144 144 } 145 145 if (hdr->version != TOKVER_EP11_AES_WITH_HEADER) { 146 146 if (dbg) 147 147 DBF("%s key check failed, version 0x%02x != 0x%02x\n", 148 - __func__, (int) hdr->version, TOKVER_EP11_AES_WITH_HEADER); 148 + __func__, (int)hdr->version, TOKVER_EP11_AES_WITH_HEADER); 149 149 return -EINVAL; 150 150 } 151 151 if (hdr->len > keylen) { 152 152 if (dbg) 153 153 DBF("%s key check failed, header len %d keylen %zu mismatch\n", 154 - __func__, (int) hdr->len, keylen); 154 + __func__, (int)hdr->len, keylen); 155 155 return -EINVAL; 156 156 } 157 157 if (hdr->len < sizeof(*hdr) + sizeof(*kb)) { 158 158 if (dbg) 159 159 DBF("%s key check failed, header len %d < %zu\n", 160 - __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb)); 160 + __func__, (int)hdr->len, sizeof(*hdr) + sizeof(*kb)); 161 161 return -EINVAL; 162 162 } 163 163 164 164 if (kb->version != EP11_STRUCT_MAGIC) { 165 165 if (dbg) 166 166 DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n", 167 - __func__, (int) kb->version, 
EP11_STRUCT_MAGIC); 167 + __func__, (int)kb->version, EP11_STRUCT_MAGIC); 168 168 return -EINVAL; 169 169 } 170 170 if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) { ··· 186 186 int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl, 187 187 const u8 *key, size_t keylen, int checkcpacfexp) 188 188 { 189 - struct ep11kblob_header *hdr = (struct ep11kblob_header *) key; 190 - struct ep11keyblob *kb = (struct ep11keyblob *) (key + sizeof(*hdr)); 189 + struct ep11kblob_header *hdr = (struct ep11kblob_header *)key; 190 + struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr)); 191 191 192 192 #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) 193 193 ··· 200 200 if (hdr->type != TOKTYPE_NON_CCA) { 201 201 if (dbg) 202 202 DBF("%s key check failed, type 0x%02x != 0x%02x\n", 203 - __func__, (int) hdr->type, TOKTYPE_NON_CCA); 203 + __func__, (int)hdr->type, TOKTYPE_NON_CCA); 204 204 return -EINVAL; 205 205 } 206 206 if (hdr->hver != 0x00) { 207 207 if (dbg) 208 208 DBF("%s key check failed, header version 0x%02x != 0x00\n", 209 - __func__, (int) hdr->hver); 209 + __func__, (int)hdr->hver); 210 210 return -EINVAL; 211 211 } 212 212 if (hdr->version != TOKVER_EP11_ECC_WITH_HEADER) { 213 213 if (dbg) 214 214 DBF("%s key check failed, version 0x%02x != 0x%02x\n", 215 - __func__, (int) hdr->version, TOKVER_EP11_ECC_WITH_HEADER); 215 + __func__, (int)hdr->version, TOKVER_EP11_ECC_WITH_HEADER); 216 216 return -EINVAL; 217 217 } 218 218 if (hdr->len > keylen) { 219 219 if (dbg) 220 220 DBF("%s key check failed, header len %d keylen %zu mismatch\n", 221 - __func__, (int) hdr->len, keylen); 221 + __func__, (int)hdr->len, keylen); 222 222 return -EINVAL; 223 223 } 224 224 if (hdr->len < sizeof(*hdr) + sizeof(*kb)) { 225 225 if (dbg) 226 226 DBF("%s key check failed, header len %d < %zu\n", 227 - __func__, (int) hdr->len, sizeof(*hdr) + sizeof(*kb)); 227 + __func__, (int)hdr->len, sizeof(*hdr) + sizeof(*kb)); 228 228 return -EINVAL; 229 
229 } 230 230 231 231 if (kb->version != EP11_STRUCT_MAGIC) { 232 232 if (dbg) 233 233 DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n", 234 - __func__, (int) kb->version, EP11_STRUCT_MAGIC); 234 + __func__, (int)kb->version, EP11_STRUCT_MAGIC); 235 235 return -EINVAL; 236 236 } 237 237 if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) { ··· 254 254 int ep11_check_aes_key(debug_info_t *dbg, int dbflvl, 255 255 const u8 *key, size_t keylen, int checkcpacfexp) 256 256 { 257 - struct ep11keyblob *kb = (struct ep11keyblob *) key; 257 + struct ep11keyblob *kb = (struct ep11keyblob *)key; 258 258 259 259 #define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__) 260 260 ··· 267 267 if (kb->head.type != TOKTYPE_NON_CCA) { 268 268 if (dbg) 269 269 DBF("%s key check failed, type 0x%02x != 0x%02x\n", 270 - __func__, (int) kb->head.type, TOKTYPE_NON_CCA); 270 + __func__, (int)kb->head.type, TOKTYPE_NON_CCA); 271 271 return -EINVAL; 272 272 } 273 273 if (kb->head.version != TOKVER_EP11_AES) { 274 274 if (dbg) 275 275 DBF("%s key check failed, version 0x%02x != 0x%02x\n", 276 - __func__, (int) kb->head.version, TOKVER_EP11_AES); 276 + __func__, (int)kb->head.version, TOKVER_EP11_AES); 277 277 return -EINVAL; 278 278 } 279 279 if (kb->head.len > keylen) { 280 280 if (dbg) 281 281 DBF("%s key check failed, header len %d keylen %zu mismatch\n", 282 - __func__, (int) kb->head.len, keylen); 282 + __func__, (int)kb->head.len, keylen); 283 283 return -EINVAL; 284 284 } 285 285 if (kb->head.len < sizeof(*kb)) { 286 286 if (dbg) 287 287 DBF("%s key check failed, header len %d < %zu\n", 288 - __func__, (int) kb->head.len, sizeof(*kb)); 288 + __func__, (int)kb->head.len, sizeof(*kb)); 289 289 return -EINVAL; 290 290 } 291 291 292 292 if (kb->version != EP11_STRUCT_MAGIC) { 293 293 if (dbg) 294 294 DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n", 295 - __func__, (int) kb->version, EP11_STRUCT_MAGIC); 295 + __func__, (int)kb->version, EP11_STRUCT_MAGIC); 
296 296 return -EINVAL; 297 297 } 298 298 if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) { ··· 347 347 } 348 348 if (valuelen > 127) { 349 349 ptr[1] = 0x81; 350 - ptr[2] = (u8) valuelen; 350 + ptr[2] = (u8)valuelen; 351 351 memcpy(ptr + 3, pvalue, valuelen); 352 352 return 3 + valuelen; 353 353 } 354 - ptr[1] = (u8) valuelen; 354 + ptr[1] = (u8)valuelen; 355 355 memcpy(ptr + 2, pvalue, valuelen); 356 356 return 2 + valuelen; 357 357 } ··· 389 389 struct ep11_cprb *req, size_t req_len, 390 390 struct ep11_cprb *rep, size_t rep_len) 391 391 { 392 - u->targets = (u8 __user *) t; 392 + u->targets = (u8 __user *)t; 393 393 u->targets_num = nt; 394 - u->req = (u8 __user *) req; 394 + u->req = (u8 __user *)req; 395 395 u->req_len = req_len; 396 - u->resp = (u8 __user *) rep; 396 + u->resp = (u8 __user *)rep; 397 397 u->resp_len = rep_len; 398 398 } 399 399 ··· 462 462 return 0; 463 463 } 464 464 465 - 466 465 /* 467 466 * Helper function which does an ep11 query with given query type. 
468 467 */ ··· 495 496 req = alloc_cprb(sizeof(struct ep11_info_req_pl)); 496 497 if (!req) 497 498 goto out; 498 - req_pl = (struct ep11_info_req_pl *) (((u8 *) req) + sizeof(*req)); 499 + req_pl = (struct ep11_info_req_pl *)(((u8 *)req) + sizeof(*req)); 499 500 prep_head(&req_pl->head, sizeof(*req_pl), api, 38); /* get xcp info */ 500 501 req_pl->query_type_tag = 0x04; 501 502 req_pl->query_type_len = sizeof(u32); ··· 507 508 rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen); 508 509 if (!rep) 509 510 goto out; 510 - rep_pl = (struct ep11_info_rep_pl *) (((u8 *) rep) + sizeof(*rep)); 511 + rep_pl = (struct ep11_info_rep_pl *)(((u8 *)rep) + sizeof(*rep)); 511 512 512 513 /* urb and target */ 513 - urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); 514 + urb = kmalloc(sizeof(*urb), GFP_KERNEL); 514 515 if (!urb) 515 516 goto out; 516 517 target.ap_id = cardnr; ··· 523 524 if (rc) { 524 525 DEBUG_ERR( 525 526 "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", 526 - __func__, (int) cardnr, (int) domain, rc); 527 + __func__, (int)cardnr, (int)domain, rc); 527 528 goto out; 528 529 } 529 530 ··· 542 543 goto out; 543 544 } 544 545 545 - memcpy(buf, ((u8 *) rep_pl) + sizeof(*rep_pl), rep_pl->data_len); 546 + memcpy(buf, ((u8 *)rep_pl) + sizeof(*rep_pl), rep_pl->data_len); 546 547 547 548 out: 548 549 kfree(req); ··· 591 592 return -ENOMEM; 592 593 rc = ep11_query_info(card, AUTOSEL_DOM, 593 594 0x01 /* module info query */, 594 - sizeof(*pmqi), (u8 *) pmqi); 595 + sizeof(*pmqi), (u8 *)pmqi); 595 596 if (rc) { 596 597 if (rc == -ENODEV) 597 598 card_cache_scrub(card); ··· 631 632 return -ENOMEM; 632 633 633 634 rc = ep11_query_info(card, domain, 0x03 /* domain info query */, 634 - sizeof(*p_dom_info), (u8 *) p_dom_info); 635 + sizeof(*p_dom_info), (u8 *)p_dom_info); 635 636 if (rc) 636 637 goto out; 637 638 ··· 643 644 info->cur_wk_state = '1'; 644 645 memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32); 645 646 } 646 - if (p_dom_info->dom_flags & 0x04 
/* new wk present */ 647 - || p_dom_info->dom_flags & 0x08 /* new wk committed */) { 647 + if (p_dom_info->dom_flags & 0x04 || /* new wk present */ 648 + p_dom_info->dom_flags & 0x08 /* new wk committed */) { 648 649 info->new_wk_state = 649 650 p_dom_info->dom_flags & 0x08 ? '2' : '1'; 650 651 memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32); ··· 721 722 req = alloc_cprb(sizeof(struct keygen_req_pl)); 722 723 if (!req) 723 724 goto out; 724 - req_pl = (struct keygen_req_pl *) (((u8 *) req) + sizeof(*req)); 725 + req_pl = (struct keygen_req_pl *)(((u8 *)req) + sizeof(*req)); 725 726 api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1; 726 727 prep_head(&req_pl->head, sizeof(*req_pl), api, 21); /* GenerateKey */ 727 728 req_pl->var_tag = 0x04; ··· 745 746 rep = alloc_cprb(sizeof(struct keygen_rep_pl)); 746 747 if (!rep) 747 748 goto out; 748 - rep_pl = (struct keygen_rep_pl *) (((u8 *) rep) + sizeof(*rep)); 749 + rep_pl = (struct keygen_rep_pl *)(((u8 *)rep) + sizeof(*rep)); 749 750 750 751 /* urb and target */ 751 - urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); 752 + urb = kmalloc(sizeof(*urb), GFP_KERNEL); 752 753 if (!urb) 753 754 goto out; 754 755 target.ap_id = card; ··· 761 762 if (rc) { 762 763 DEBUG_ERR( 763 764 "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", 764 - __func__, (int) card, (int) domain, rc); 765 + __func__, (int)card, (int)domain, rc); 765 766 goto out; 766 767 } 767 768 ··· 783 784 /* copy key blob and set header values */ 784 785 memcpy(keybuf, rep_pl->data, rep_pl->data_len); 785 786 *keybufsize = rep_pl->data_len; 786 - kb = (struct ep11keyblob *) keybuf; 787 + kb = (struct ep11keyblob *)keybuf; 787 788 kb->head.type = TOKTYPE_NON_CCA; 788 789 kb->head.len = rep_pl->data_len; 789 790 kb->head.version = TOKVER_EP11_AES; ··· 843 844 req = alloc_cprb(req_pl_size); 844 845 if (!req) 845 846 goto out; 846 - req_pl = (struct crypt_req_pl *) (((u8 *) req) + sizeof(*req)); 847 + req_pl = (struct crypt_req_pl *)(((u8 *)req) + 
sizeof(*req)); 847 848 prep_head(&req_pl->head, req_pl_size, api, (mode ? 20 : 19)); 848 849 req_pl->var_tag = 0x04; 849 850 req_pl->var_len = sizeof(u32); ··· 851 852 req_pl->mech_tag = 0x04; 852 853 req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0); 853 854 req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */ 854 - p = ((u8 *) req_pl) + sizeof(*req_pl); 855 + p = ((u8 *)req_pl) + sizeof(*req_pl); 855 856 if (iv) { 856 857 memcpy(p, iv, 16); 857 858 p += 16; ··· 865 866 rep = alloc_cprb(rep_pl_size); 866 867 if (!rep) 867 868 goto out; 868 - rep_pl = (struct crypt_rep_pl *) (((u8 *) rep) + sizeof(*rep)); 869 + rep_pl = (struct crypt_rep_pl *)(((u8 *)rep) + sizeof(*rep)); 869 870 870 871 /* urb and target */ 871 - urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); 872 + urb = kmalloc(sizeof(*urb), GFP_KERNEL); 872 873 if (!urb) 873 874 goto out; 874 875 target.ap_id = card; ··· 881 882 if (rc) { 882 883 DEBUG_ERR( 883 884 "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", 884 - __func__, (int) card, (int) domain, rc); 885 + __func__, (int)card, (int)domain, rc); 885 886 goto out; 886 887 } 887 888 ··· 893 894 rc = -EIO; 894 895 goto out; 895 896 } 896 - p = ((u8 *) rep_pl) + sizeof(*rep_pl); 897 - if (rep_pl->data_lenfmt <= 127) 897 + p = ((u8 *)rep_pl) + sizeof(*rep_pl); 898 + if (rep_pl->data_lenfmt <= 127) { 898 899 n = rep_pl->data_lenfmt; 899 - else if (rep_pl->data_lenfmt == 0x81) 900 + } else if (rep_pl->data_lenfmt == 0x81) { 900 901 n = *p++; 901 - else if (rep_pl->data_lenfmt == 0x82) { 902 - n = *((u16 *) p); 902 + } else if (rep_pl->data_lenfmt == 0x82) { 903 + n = *((u16 *)p); 903 904 p += 2; 904 905 } else { 905 906 DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n", ··· 977 978 req = alloc_cprb(req_pl_size); 978 979 if (!req) 979 980 goto out; 980 - req_pl = (struct uw_req_pl *) (((u8 *) req) + sizeof(*req)); 981 + req_pl = (struct uw_req_pl *)(((u8 *)req) + sizeof(*req)); 981 982 api = (!keygenflags || keygenflags & 
0x00200000) ? 4 : 1; 982 983 prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */ 983 984 req_pl->attr_tag = 0x04; ··· 993 994 req_pl->mech_tag = 0x04; 994 995 req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0); 995 996 req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */ 996 - p = ((u8 *) req_pl) + sizeof(*req_pl); 997 + p = ((u8 *)req_pl) + sizeof(*req_pl); 997 998 if (iv) { 998 999 memcpy(p, iv, 16); 999 1000 p += 16; ··· 1013 1014 rep = alloc_cprb(sizeof(struct uw_rep_pl)); 1014 1015 if (!rep) 1015 1016 goto out; 1016 - rep_pl = (struct uw_rep_pl *) (((u8 *) rep) + sizeof(*rep)); 1017 + rep_pl = (struct uw_rep_pl *)(((u8 *)rep) + sizeof(*rep)); 1017 1018 1018 1019 /* urb and target */ 1019 - urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); 1020 + urb = kmalloc(sizeof(*urb), GFP_KERNEL); 1020 1021 if (!urb) 1021 1022 goto out; 1022 1023 target.ap_id = card; ··· 1029 1030 if (rc) { 1030 1031 DEBUG_ERR( 1031 1032 "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", 1032 - __func__, (int) card, (int) domain, rc); 1033 + __func__, (int)card, (int)domain, rc); 1033 1034 goto out; 1034 1035 } 1035 1036 ··· 1051 1052 /* copy key blob and set header values */ 1052 1053 memcpy(keybuf, rep_pl->data, rep_pl->data_len); 1053 1054 *keybufsize = rep_pl->data_len; 1054 - kb = (struct ep11keyblob *) keybuf; 1055 + kb = (struct ep11keyblob *)keybuf; 1055 1056 kb->head.type = TOKTYPE_NON_CCA; 1056 1057 kb->head.len = rep_pl->data_len; 1057 1058 kb->head.version = TOKVER_EP11_AES; ··· 1104 1105 u8 *p; 1105 1106 1106 1107 /* maybe the session field holds a header with key info */ 1107 - kb = (struct ep11keyblob *) key; 1108 + kb = (struct ep11keyblob *)key; 1108 1109 if (kb->head.type == TOKTYPE_NON_CCA && 1109 1110 kb->head.version == TOKVER_EP11_AES) { 1110 1111 has_header = true; ··· 1119 1120 goto out; 1120 1121 if (!mech || mech == 0x80060001) 1121 1122 req->flags |= 0x20; /* CPACF_WRAP needs special bit */ 1122 - req_pl = (struct wk_req_pl *) 
(((u8 *) req) + sizeof(*req)); 1123 + req_pl = (struct wk_req_pl *)(((u8 *)req) + sizeof(*req)); 1123 1124 api = (!mech || mech == 0x80060001) ? 4 : 1; /* CKM_IBM_CPACF_WRAP */ 1124 1125 prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */ 1125 1126 req_pl->var_tag = 0x04; ··· 1128 1129 req_pl->mech_tag = 0x04; 1129 1130 req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0); 1130 1131 req_pl->mech = (mech ? mech : 0x80060001); /* CKM_IBM_CPACF_WRAP */ 1131 - p = ((u8 *) req_pl) + sizeof(*req_pl); 1132 + p = ((u8 *)req_pl) + sizeof(*req_pl); 1132 1133 if (iv) { 1133 1134 memcpy(p, iv, 16); 1134 1135 p += 16; ··· 1151 1152 rep = alloc_cprb(sizeof(struct wk_rep_pl)); 1152 1153 if (!rep) 1153 1154 goto out; 1154 - rep_pl = (struct wk_rep_pl *) (((u8 *) rep) + sizeof(*rep)); 1155 + rep_pl = (struct wk_rep_pl *)(((u8 *)rep) + sizeof(*rep)); 1155 1156 1156 1157 /* urb and target */ 1157 - urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL); 1158 + urb = kmalloc(sizeof(*urb), GFP_KERNEL); 1158 1159 if (!urb) 1159 1160 goto out; 1160 1161 target.ap_id = card; ··· 1167 1168 if (rc) { 1168 1169 DEBUG_ERR( 1169 1170 "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", 1170 - __func__, (int) card, (int) domain, rc); 1171 + __func__, (int)card, (int)domain, rc); 1171 1172 goto out; 1172 1173 } 1173 1174 ··· 1205 1206 u8 encbuf[64], *kek = NULL; 1206 1207 size_t clrkeylen, keklen, encbuflen = sizeof(encbuf); 1207 1208 1208 - if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) 1209 + if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) { 1209 1210 clrkeylen = keybitsize / 8; 1210 - else { 1211 + } else { 1211 1212 DEBUG_ERR( 1212 1213 "%s unknown/unsupported keybitsize %d\n", 1213 1214 __func__, keybitsize); ··· 1232 1233 __func__, rc); 1233 1234 goto out; 1234 1235 } 1235 - kb = (struct ep11keyblob *) kek; 1236 + kb = (struct ep11keyblob *)kek; 1236 1237 memset(&kb->head, 0, sizeof(kb->head)); 1237 1238 1238 1239 /* Step 2: encrypt clear key value 
with the kek key */ ··· 1281 1282 struct ep11kblob_header *hdr; 1282 1283 1283 1284 /* key with or without header ? */ 1284 - hdr = (struct ep11kblob_header *) keyblob; 1285 - if (hdr->type == TOKTYPE_NON_CCA 1286 - && (hdr->version == TOKVER_EP11_AES_WITH_HEADER 1287 - || hdr->version == TOKVER_EP11_ECC_WITH_HEADER) 1288 - && is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) { 1285 + hdr = (struct ep11kblob_header *)keyblob; 1286 + if (hdr->type == TOKTYPE_NON_CCA && 1287 + (hdr->version == TOKVER_EP11_AES_WITH_HEADER || 1288 + hdr->version == TOKVER_EP11_ECC_WITH_HEADER) && 1289 + is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) { 1289 1290 /* EP11 AES or ECC key with header */ 1290 1291 key = keyblob + sizeof(struct ep11kblob_header); 1291 1292 keylen = hdr->len - sizeof(struct ep11kblob_header); 1292 - } else if (hdr->type == TOKTYPE_NON_CCA 1293 - && hdr->version == TOKVER_EP11_AES 1294 - && is_ep11_keyblob(keyblob)) { 1293 + } else if (hdr->type == TOKTYPE_NON_CCA && 1294 + hdr->version == TOKVER_EP11_AES && 1295 + is_ep11_keyblob(keyblob)) { 1295 1296 /* EP11 AES key (old style) */ 1296 1297 key = keyblob; 1297 1298 keylen = hdr->len; ··· 1299 1300 /* raw EP11 key blob */ 1300 1301 key = keyblob; 1301 1302 keylen = keybloblen; 1302 - } else 1303 + } else { 1303 1304 return -EINVAL; 1305 + } 1304 1306 1305 1307 /* alloc temp working buffer */ 1306 1308 wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1)); ··· 1318 1318 __func__, rc); 1319 1319 goto out; 1320 1320 } 1321 - wki = (struct wk_info *) wkbuf; 1321 + wki = (struct wk_info *)wkbuf; 1322 1322 1323 1323 /* check struct version and pkey type */ 1324 1324 if (wki->version != 1 || wki->pkeytype < 1 || wki->pkeytype > 5) { 1325 1325 DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n", 1326 - __func__, (int) wki->version, (int) wki->pkeytype); 1326 + __func__, (int)wki->version, (int)wki->pkeytype); 1327 1327 rc = -EIO; 1328 1328 goto out; 1329 1329 } ··· 1332 1332 
switch (wki->pkeytype) { 1333 1333 case 1: /* AES */ 1334 1334 switch (wki->pkeysize) { 1335 - case 16+32: 1335 + case 16 + 32: 1336 1336 /* AES 128 protected key */ 1337 1337 if (protkeytype) 1338 1338 *protkeytype = PKEY_KEYTYPE_AES_128; 1339 1339 break; 1340 - case 24+32: 1340 + case 24 + 32: 1341 1341 /* AES 192 protected key */ 1342 1342 if (protkeytype) 1343 1343 *protkeytype = PKEY_KEYTYPE_AES_192; 1344 1344 break; 1345 - case 32+32: 1345 + case 32 + 32: 1346 1346 /* AES 256 protected key */ 1347 1347 if (protkeytype) 1348 1348 *protkeytype = PKEY_KEYTYPE_AES_256; 1349 1349 break; 1350 1350 default: 1351 1351 DEBUG_ERR("%s unknown/unsupported AES pkeysize %d\n", 1352 - __func__, (int) wki->pkeysize); 1352 + __func__, (int)wki->pkeysize); 1353 1353 rc = -EIO; 1354 1354 goto out; 1355 1355 } ··· 1363 1363 case 2: /* TDES */ 1364 1364 default: 1365 1365 DEBUG_ERR("%s unknown/unsupported key type %d\n", 1366 - __func__, (int) wki->pkeytype); 1366 + __func__, (int)wki->pkeytype); 1367 1367 rc = -EIO; 1368 1368 goto out; 1369 1369 } ··· 1445 1445 } 1446 1446 /* apqn passed all filtering criterons, add to the array */ 1447 1447 if (_nr_apqns < 256) 1448 - _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom); 1448 + _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); 1449 1449 } 1450 1450 1451 1451 /* nothing found ? */
+1 -1
drivers/s390/crypto/zcrypt_ep11misc.h
··· 50 50 /* check ep11 key magic to find out if this is an ep11 key blob */ 51 51 static inline bool is_ep11_keyblob(const u8 *key) 52 52 { 53 - struct ep11keyblob *kb = (struct ep11keyblob *) key; 53 + struct ep11keyblob *kb = (struct ep11keyblob *)key; 54 54 55 55 return (kb->version == EP11_STRUCT_MAGIC); 56 56 }
+2 -1
drivers/s390/crypto/zcrypt_error.h
··· 121 121 ZCRYPT_DBF_WARN( 122 122 "%s dev=%02x.%04x RY=0x%02x apfs=0x%x => bus rescan, rc=EAGAIN\n", 123 123 __func__, card, queue, ehdr->reply_code, apfs); 124 - } else 124 + } else { 125 125 ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x => bus rescan, rc=EAGAIN\n", 126 126 __func__, card, queue, 127 127 ehdr->reply_code); 128 + } 128 129 return -EAGAIN; 129 130 default: 130 131 /* Assume request is valid and a retry will be worth it */
+18 -13
drivers/s390/crypto/zcrypt_msgtype50.c
··· 158 158 159 159 int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode) 160 160 { 161 - 162 161 if (!mex->inputdatalength) 163 162 return -EINVAL; 164 163 ··· 173 174 174 175 int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode) 175 176 { 176 - 177 177 if (!crt->inputdatalength) 178 178 return -EINVAL; 179 179 ··· 237 239 mod = meb3->modulus + sizeof(meb3->modulus) - mod_len; 238 240 exp = meb3->exponent + sizeof(meb3->exponent) - mod_len; 239 241 inp = meb3->message + sizeof(meb3->message) - mod_len; 240 - } else 242 + } else { 241 243 return -EINVAL; 244 + } 242 245 243 246 if (copy_from_user(mod, mex->n_modulus, mod_len) || 244 247 copy_from_user(exp, mex->b_key, mod_len) || ··· 322 323 dq = crb3->dq + sizeof(crb3->dq) - short_len; 323 324 u = crb3->u + sizeof(crb3->u) - short_len; 324 325 inp = crb3->message + sizeof(crb3->message) - mod_len; 325 - } else 326 + } else { 326 327 return -EINVAL; 328 + } 327 329 328 330 /* 329 331 * correct the offset of p, bp and mult_inv according zcrypt.h ··· 392 392 unsigned int outputdatalength) 393 393 { 394 394 /* Response type byte is the second byte in the response. 
*/ 395 - unsigned char rtype = ((unsigned char *) reply->msg)[1]; 395 + unsigned char rtype = ((unsigned char *)reply->msg)[1]; 396 396 397 397 switch (rtype) { 398 398 case TYPE82_RSP_CODE: ··· 406 406 pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 407 407 AP_QID_CARD(zq->queue->qid), 408 408 AP_QID_QUEUE(zq->queue->qid), 409 - (int) rtype); 409 + (int)rtype); 410 410 ZCRYPT_DBF_ERR( 411 411 "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 412 412 __func__, AP_QID_CARD(zq->queue->qid), 413 - AP_QID_QUEUE(zq->queue->qid), (int) rtype); 413 + AP_QID_QUEUE(zq->queue->qid), (int)rtype); 414 414 ap_send_online_uevent(&zq->queue->ap_dev, zq->online); 415 415 return -EAGAIN; 416 416 } ··· 447 447 memcpy(msg->msg, reply->msg, len); 448 448 msg->len = len; 449 449 } 450 - } else 450 + } else { 451 451 memcpy(msg->msg, reply->msg, sizeof(error_reply)); 452 + } 452 453 out: 453 - complete((struct completion *) msg->private); 454 + complete((struct completion *)msg->private); 454 455 } 455 456 456 457 static atomic_t zcrypt_step = ATOMIC_INIT(0); ··· 476 475 if (!ap_msg->msg) 477 476 return -ENOMEM; 478 477 ap_msg->receive = zcrypt_cex2a_receive; 479 - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 478 + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + 480 479 atomic_inc_return(&zcrypt_step); 481 480 ap_msg->private = &work; 482 481 rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex); ··· 493 492 rc = convert_response_cex2a(zq, ap_msg, 494 493 mex->outputdata, 495 494 mex->outputdatalength); 496 - } else 495 + } else { 497 496 /* Signal pending. 
*/ 498 497 ap_cancel_message(zq->queue, ap_msg); 498 + } 499 + 499 500 out: 500 501 ap_msg->private = NULL; 501 502 if (rc) ··· 527 524 if (!ap_msg->msg) 528 525 return -ENOMEM; 529 526 ap_msg->receive = zcrypt_cex2a_receive; 530 - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 527 + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + 531 528 atomic_inc_return(&zcrypt_step); 532 529 ap_msg->private = &work; 533 530 rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt); ··· 544 541 rc = convert_response_cex2a(zq, ap_msg, 545 542 crt->outputdata, 546 543 crt->outputdatalength); 547 - } else 544 + } else { 548 545 /* Signal pending. */ 549 546 ap_cancel_message(zq->queue, ap_msg); 547 + } 548 + 550 549 out: 551 550 ap_msg->private = NULL; 552 551 if (rc)
+146 -135
drivers/s390/crypto/zcrypt_msgtype6.c
··· 29 29 30 30 #define CEXXC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ 31 31 32 - #define CEIL4(x) ((((x)+3)/4)*4) 32 + #define CEIL4(x) ((((x) + 3) / 4) * 4) 33 33 34 34 struct response_type { 35 35 struct completion work; 36 36 int type; 37 37 }; 38 + 38 39 #define CEXXC_RESPONSE_TYPE_ICA 0 39 40 #define CEXXC_RESPONSE_TYPE_XCRB 1 40 41 #define CEXXC_RESPONSE_TYPE_EP11 2 ··· 179 178 } 180 179 } 181 180 182 - 183 181 /* 184 182 * Convert a ICAMEX message to a type6 MEX message. 185 183 * ··· 188 188 * 189 189 * Returns 0 on success or negative errno value. 190 190 */ 191 - static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq, 191 + static int icamex_msg_to_type6mex_msgx(struct zcrypt_queue *zq, 192 192 struct ap_message *ap_msg, 193 193 struct ica_rsa_modexpo *mex) 194 194 { ··· 226 226 return -EFAULT; 227 227 228 228 /* Set up key which is located after the variable length text. */ 229 - size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength); 229 + size = zcrypt_type6_mex_key_en(mex, msg->text + mex->inputdatalength); 230 230 if (size < 0) 231 231 return size; 232 232 size += sizeof(*msg) + mex->inputdatalength; 233 233 234 234 /* message header, cprbx and f&r */ 235 235 msg->hdr = static_type6_hdrX; 236 - msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); 237 - msg->hdr.FromCardLen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 236 + msg->hdr.tocardlen1 = size - sizeof(msg->hdr); 237 + msg->hdr.fromcardlen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 238 238 239 239 msg->cprbx = static_cprbx; 240 240 msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); 241 - msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1; 241 + msg->cprbx.rpl_msgbl = msg->hdr.fromcardlen1; 242 242 243 243 msg->fr = static_pke_fnr; 244 244 ··· 257 257 * 258 258 * Returns 0 on success or negative errno value. 
259 259 */ 260 - static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq, 260 + static int icacrt_msg_to_type6crt_msgx(struct zcrypt_queue *zq, 261 261 struct ap_message *ap_msg, 262 262 struct ica_rsa_modexpo_crt *crt) 263 263 { ··· 303 303 304 304 /* message header, cprbx and f&r */ 305 305 msg->hdr = static_type6_hdrX; 306 - msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); 307 - msg->hdr.FromCardLen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 306 + msg->hdr.tocardlen1 = size - sizeof(msg->hdr); 307 + msg->hdr.fromcardlen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); 308 308 309 309 msg->cprbx = static_cprbx; 310 310 msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); ··· 331 331 struct type86_fmt2_ext fmt2; 332 332 } __packed; 333 333 334 - static int XCRB_msg_to_type6CPRB_msgX(bool userspace, struct ap_message *ap_msg, 335 - struct ica_xcRB *xcRB, 334 + static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg, 335 + struct ica_xcRB *xcrb, 336 336 unsigned int *fcode, 337 337 unsigned short **dom) 338 338 { ··· 345 345 struct CPRBX cprbx; 346 346 } __packed * msg = ap_msg->msg; 347 347 348 - int rcblen = CEIL4(xcRB->request_control_blk_length); 348 + int rcblen = CEIL4(xcrb->request_control_blk_length); 349 349 int req_sumlen, resp_sumlen; 350 350 char *req_data = ap_msg->msg + sizeof(struct type6_hdr) + rcblen; 351 351 char *function_code; 352 352 353 - if (CEIL4(xcRB->request_control_blk_length) < 354 - xcRB->request_control_blk_length) 353 + if (CEIL4(xcrb->request_control_blk_length) < 354 + xcrb->request_control_blk_length) 355 355 return -EINVAL; /* overflow after alignment*/ 356 356 357 357 /* length checks */ 358 358 ap_msg->len = sizeof(struct type6_hdr) + 359 - CEIL4(xcRB->request_control_blk_length) + 360 - xcRB->request_data_length; 359 + CEIL4(xcrb->request_control_blk_length) + 360 + xcrb->request_data_length; 361 361 if (ap_msg->len > ap_msg->bufsize) 362 362 return -EINVAL; 363 363 ··· 365 365 * Overflow 
check 366 366 * sum must be greater (or equal) than the largest operand 367 367 */ 368 - req_sumlen = CEIL4(xcRB->request_control_blk_length) + 369 - xcRB->request_data_length; 370 - if ((CEIL4(xcRB->request_control_blk_length) <= 371 - xcRB->request_data_length) ? 372 - (req_sumlen < xcRB->request_data_length) : 373 - (req_sumlen < CEIL4(xcRB->request_control_blk_length))) { 368 + req_sumlen = CEIL4(xcrb->request_control_blk_length) + 369 + xcrb->request_data_length; 370 + if ((CEIL4(xcrb->request_control_blk_length) <= 371 + xcrb->request_data_length) ? 372 + req_sumlen < xcrb->request_data_length : 373 + req_sumlen < CEIL4(xcrb->request_control_blk_length)) { 374 374 return -EINVAL; 375 375 } 376 376 377 - if (CEIL4(xcRB->reply_control_blk_length) < 378 - xcRB->reply_control_blk_length) 377 + if (CEIL4(xcrb->reply_control_blk_length) < 378 + xcrb->reply_control_blk_length) 379 379 return -EINVAL; /* overflow after alignment*/ 380 380 381 381 /* 382 382 * Overflow check 383 383 * sum must be greater (or equal) than the largest operand 384 384 */ 385 - resp_sumlen = CEIL4(xcRB->reply_control_blk_length) + 386 - xcRB->reply_data_length; 387 - if ((CEIL4(xcRB->reply_control_blk_length) <= xcRB->reply_data_length) ? 388 - (resp_sumlen < xcRB->reply_data_length) : 389 - (resp_sumlen < CEIL4(xcRB->reply_control_blk_length))) { 385 + resp_sumlen = CEIL4(xcrb->reply_control_blk_length) + 386 + xcrb->reply_data_length; 387 + if ((CEIL4(xcrb->reply_control_blk_length) <= 388 + xcrb->reply_data_length) ? 
389 + resp_sumlen < xcrb->reply_data_length : 390 + resp_sumlen < CEIL4(xcrb->reply_control_blk_length)) { 390 391 return -EINVAL; 391 392 } 392 393 393 394 /* prepare type6 header */ 394 395 msg->hdr = static_type6_hdrX; 395 - memcpy(msg->hdr.agent_id, &(xcRB->agent_ID), sizeof(xcRB->agent_ID)); 396 - msg->hdr.ToCardLen1 = xcRB->request_control_blk_length; 397 - if (xcRB->request_data_length) { 396 + memcpy(msg->hdr.agent_id, &xcrb->agent_ID, sizeof(xcrb->agent_ID)); 397 + msg->hdr.tocardlen1 = xcrb->request_control_blk_length; 398 + if (xcrb->request_data_length) { 398 399 msg->hdr.offset2 = msg->hdr.offset1 + rcblen; 399 - msg->hdr.ToCardLen2 = xcRB->request_data_length; 400 + msg->hdr.tocardlen2 = xcrb->request_data_length; 400 401 } 401 - msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length; 402 - msg->hdr.FromCardLen2 = xcRB->reply_data_length; 402 + msg->hdr.fromcardlen1 = xcrb->reply_control_blk_length; 403 + msg->hdr.fromcardlen2 = xcrb->reply_data_length; 403 404 404 405 /* prepare CPRB */ 405 - if (z_copy_from_user(userspace, &(msg->cprbx), xcRB->request_control_blk_addr, 406 - xcRB->request_control_blk_length)) 406 + if (z_copy_from_user(userspace, &msg->cprbx, xcrb->request_control_blk_addr, 407 + xcrb->request_control_blk_length)) 407 408 return -EFAULT; 408 409 if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > 409 - xcRB->request_control_blk_length) 410 + xcrb->request_control_blk_length) 410 411 return -EINVAL; 411 412 function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len; 412 413 memcpy(msg->hdr.function_code, function_code, ··· 417 416 *dom = (unsigned short *)&msg->cprbx.domain; 418 417 419 418 /* check subfunction, US and AU need special flag with NQAP */ 420 - if (memcmp(function_code, "US", 2) == 0 421 - || memcmp(function_code, "AU", 2) == 0) 419 + if (memcmp(function_code, "US", 2) == 0 || 420 + memcmp(function_code, "AU", 2) == 0) 422 421 ap_msg->flags |= AP_MSG_FLAG_SPECIAL; 423 422 424 423 #ifdef 
CONFIG_ZCRYPT_DEBUG ··· 444 443 } 445 444 446 445 /* copy data block */ 447 - if (xcRB->request_data_length && 448 - z_copy_from_user(userspace, req_data, xcRB->request_data_address, 449 - xcRB->request_data_length)) 446 + if (xcrb->request_data_length && 447 + z_copy_from_user(userspace, req_data, xcrb->request_data_address, 448 + xcrb->request_data_length)) 450 449 return -EFAULT; 451 450 452 451 return 0; 453 452 } 454 453 455 454 static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap_msg, 456 - struct ep11_urb *xcRB, 455 + struct ep11_urb *xcrb, 457 456 unsigned int *fcode, 458 457 unsigned int *domain) 459 458 { ··· 483 482 unsigned int dom_val; /* domain id */ 484 483 } __packed * payload_hdr = NULL; 485 484 486 - if (CEIL4(xcRB->req_len) < xcRB->req_len) 485 + if (CEIL4(xcrb->req_len) < xcrb->req_len) 487 486 return -EINVAL; /* overflow after alignment*/ 488 487 489 488 /* length checks */ 490 - ap_msg->len = sizeof(struct type6_hdr) + CEIL4(xcRB->req_len); 489 + ap_msg->len = sizeof(struct type6_hdr) + CEIL4(xcrb->req_len); 491 490 if (ap_msg->len > ap_msg->bufsize) 492 491 return -EINVAL; 493 492 494 - if (CEIL4(xcRB->resp_len) < xcRB->resp_len) 493 + if (CEIL4(xcrb->resp_len) < xcrb->resp_len) 495 494 return -EINVAL; /* overflow after alignment*/ 496 495 497 496 /* prepare type6 header */ 498 497 msg->hdr = static_type6_ep11_hdr; 499 - msg->hdr.ToCardLen1 = xcRB->req_len; 500 - msg->hdr.FromCardLen1 = xcRB->resp_len; 498 + msg->hdr.tocardlen1 = xcrb->req_len; 499 + msg->hdr.fromcardlen1 = xcrb->resp_len; 501 500 502 501 /* Import CPRB data from the ioctl input parameter */ 503 - if (z_copy_from_user(userspace, &(msg->cprbx.cprb_len), 504 - (char __force __user *)xcRB->req, xcRB->req_len)) { 502 + if (z_copy_from_user(userspace, &msg->cprbx.cprb_len, 503 + (char __force __user *)xcrb->req, xcrb->req_len)) { 505 504 return -EFAULT; 506 505 } 507 506 ··· 519 518 } else { 520 519 lfmt = 1; /* length format #1 */ 521 520 } 522 - 
payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); 521 + payload_hdr = (struct pld_hdr *)((&msg->pld_lenfmt) + lfmt); 523 522 *fcode = payload_hdr->func_val & 0xFFFF; 524 523 525 524 /* enable special processing based on the cprbs flags special bit */ ··· 568 567 } __packed; 569 568 570 569 static int convert_type86_ica(struct zcrypt_queue *zq, 571 - struct ap_message *reply, 572 - char __user *outputdata, 573 - unsigned int outputdatalength) 570 + struct ap_message *reply, 571 + char __user *outputdata, 572 + unsigned int outputdatalength) 574 573 { 575 574 static unsigned char static_pad[] = { 576 575 0x00, 0x02, ··· 623 622 ZCRYPT_DBF_WARN("%s dev=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n", 624 623 __func__, AP_QID_CARD(zq->queue->qid), 625 624 AP_QID_QUEUE(zq->queue->qid), 626 - (int) service_rc, (int) service_rs); 625 + (int)service_rc, (int)service_rs); 627 626 return -EINVAL; 628 627 } 629 628 zq->online = 0; 630 629 pr_err("Crypto dev=%02x.%04x rc/rs=%d/%d online=0 rc=EAGAIN\n", 631 630 AP_QID_CARD(zq->queue->qid), 632 631 AP_QID_QUEUE(zq->queue->qid), 633 - (int) service_rc, (int) service_rs); 632 + (int)service_rc, (int)service_rs); 634 633 ZCRYPT_DBF_ERR("%s dev=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n", 635 634 __func__, AP_QID_CARD(zq->queue->qid), 636 635 AP_QID_QUEUE(zq->queue->qid), 637 - (int) service_rc, (int) service_rs); 636 + (int)service_rc, (int)service_rs); 638 637 ap_send_online_uevent(&zq->queue->ap_dev, zq->online); 639 638 return -EAGAIN; 640 639 } ··· 673 672 * 674 673 * @zq: crypto device pointer 675 674 * @reply: reply AP message. 676 - * @xcRB: pointer to XCRB 675 + * @xcrb: pointer to XCRB 677 676 * 678 677 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. 
679 678 */ 680 679 static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq, 681 680 struct ap_message *reply, 682 - struct ica_xcRB *xcRB) 681 + struct ica_xcRB *xcrb) 683 682 { 684 683 struct type86_fmt2_msg *msg = reply->msg; 685 684 char *data = reply->msg; 686 685 687 686 /* Copy CPRB to user */ 688 - if (xcRB->reply_control_blk_length < msg->fmt2.count1) { 687 + if (xcrb->reply_control_blk_length < msg->fmt2.count1) { 689 688 ZCRYPT_DBF_DBG("%s reply_control_blk_length %u < required %u => EMSGSIZE\n", 690 - __func__, xcRB->reply_control_blk_length, 689 + __func__, xcrb->reply_control_blk_length, 691 690 msg->fmt2.count1); 692 691 return -EMSGSIZE; 693 692 } 694 - if (z_copy_to_user(userspace, xcRB->reply_control_blk_addr, 693 + if (z_copy_to_user(userspace, xcrb->reply_control_blk_addr, 695 694 data + msg->fmt2.offset1, msg->fmt2.count1)) 696 695 return -EFAULT; 697 - xcRB->reply_control_blk_length = msg->fmt2.count1; 696 + xcrb->reply_control_blk_length = msg->fmt2.count1; 698 697 699 698 /* Copy data buffer to user */ 700 699 if (msg->fmt2.count2) { 701 - if (xcRB->reply_data_length < msg->fmt2.count2) { 700 + if (xcrb->reply_data_length < msg->fmt2.count2) { 702 701 ZCRYPT_DBF_DBG("%s reply_data_length %u < required %u => EMSGSIZE\n", 703 - __func__, xcRB->reply_data_length, 702 + __func__, xcrb->reply_data_length, 704 703 msg->fmt2.count2); 705 704 return -EMSGSIZE; 706 705 } 707 - if (z_copy_to_user(userspace, xcRB->reply_data_addr, 706 + if (z_copy_to_user(userspace, xcrb->reply_data_addr, 708 707 data + msg->fmt2.offset2, msg->fmt2.count2)) 709 708 return -EFAULT; 710 709 } 711 - xcRB->reply_data_length = msg->fmt2.count2; 710 + xcrb->reply_data_length = msg->fmt2.count2; 712 711 713 712 return 0; 714 713 } ··· 718 717 * 719 718 * @zq: crypto device pointer 720 719 * @reply: reply AP message. 
721 - * @xcRB: pointer to EP11 user request block 720 + * @xcrb: pointer to EP11 user request block 722 721 * 723 722 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. 724 723 */ 725 724 static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq, 726 725 struct ap_message *reply, 727 - struct ep11_urb *xcRB) 726 + struct ep11_urb *xcrb) 728 727 { 729 728 struct type86_fmt2_msg *msg = reply->msg; 730 729 char *data = reply->msg; 731 730 732 - if (xcRB->resp_len < msg->fmt2.count1) { 731 + if (xcrb->resp_len < msg->fmt2.count1) { 733 732 ZCRYPT_DBF_DBG("%s resp_len %u < required %u => EMSGSIZE\n", 734 - __func__, (unsigned int)xcRB->resp_len, 733 + __func__, (unsigned int)xcrb->resp_len, 735 734 msg->fmt2.count1); 736 735 return -EMSGSIZE; 737 736 } 738 737 739 738 /* Copy response CPRB to user */ 740 - if (z_copy_to_user(userspace, (char __force __user *)xcRB->resp, 739 + if (z_copy_to_user(userspace, (char __force __user *)xcrb->resp, 741 740 data + msg->fmt2.offset1, msg->fmt2.count1)) 742 741 return -EFAULT; 743 - xcRB->resp_len = msg->fmt2.count1; 742 + xcrb->resp_len = msg->fmt2.count1; 744 743 return 0; 745 744 } 746 745 747 746 static int convert_type86_rng(struct zcrypt_queue *zq, 748 - struct ap_message *reply, 749 - char *buffer) 747 + struct ap_message *reply, 748 + char *buffer) 750 749 { 751 750 struct { 752 751 struct type86_hdr hdr; ··· 762 761 } 763 762 764 763 static int convert_response_ica(struct zcrypt_queue *zq, 765 - struct ap_message *reply, 766 - char __user *outputdata, 767 - unsigned int outputdatalength) 764 + struct ap_message *reply, 765 + char __user *outputdata, 766 + unsigned int outputdatalength) 768 767 { 769 768 struct type86x_reply *msg = reply->msg; 770 769 ··· 774 773 return convert_error(zq, reply); 775 774 case TYPE86_RSP_CODE: 776 775 if (msg->cprbx.ccp_rtcode && 777 - (msg->cprbx.ccp_rscode == 0x14f) && 778 - (outputdatalength > 256)) { 776 + msg->cprbx.ccp_rscode == 0x14f && 777 + 
outputdatalength > 256) { 779 778 if (zq->zcard->max_exp_bit_length <= 17) { 780 779 zq->zcard->max_exp_bit_length = 17; 781 780 return -EAGAIN; 782 - } else 781 + } else { 783 782 return -EINVAL; 783 + } 784 784 } 785 785 if (msg->hdr.reply_code) 786 786 return convert_error(zq, reply); ··· 795 793 pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 796 794 AP_QID_CARD(zq->queue->qid), 797 795 AP_QID_QUEUE(zq->queue->qid), 798 - (int) msg->hdr.type); 796 + (int)msg->hdr.type); 799 797 ZCRYPT_DBF_ERR( 800 798 "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 801 799 __func__, AP_QID_CARD(zq->queue->qid), 802 - AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type); 800 + AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type); 803 801 ap_send_online_uevent(&zq->queue->ap_dev, zq->online); 804 802 return -EAGAIN; 805 803 } ··· 807 805 808 806 static int convert_response_xcrb(bool userspace, struct zcrypt_queue *zq, 809 807 struct ap_message *reply, 810 - struct ica_xcRB *xcRB) 808 + struct ica_xcRB *xcrb) 811 809 { 812 810 struct type86x_reply *msg = reply->msg; 813 811 814 812 switch (msg->hdr.type) { 815 813 case TYPE82_RSP_CODE: 816 814 case TYPE88_RSP_CODE: 817 - xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ 815 + xcrb->status = 0x0008044DL; /* HDD_InvalidParm */ 818 816 return convert_error(zq, reply); 819 817 case TYPE86_RSP_CODE: 820 818 if (msg->hdr.reply_code) { 821 - memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32)); 819 + memcpy(&xcrb->status, msg->fmt2.apfs, sizeof(u32)); 822 820 return convert_error(zq, reply); 823 821 } 824 822 if (msg->cprbx.cprb_ver_id == 0x02) 825 - return convert_type86_xcrb(userspace, zq, reply, xcRB); 823 + return convert_type86_xcrb(userspace, zq, reply, xcrb); 826 824 fallthrough; /* wrong cprb version is an unknown response */ 827 825 default: /* Unknown response type, this should NEVER EVER happen */ 828 - xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ 826 + xcrb->status 
= 0x0008044DL; /* HDD_InvalidParm */ 829 827 zq->online = 0; 830 828 pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 831 829 AP_QID_CARD(zq->queue->qid), 832 830 AP_QID_QUEUE(zq->queue->qid), 833 - (int) msg->hdr.type); 831 + (int)msg->hdr.type); 834 832 ZCRYPT_DBF_ERR( 835 833 "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 836 834 __func__, AP_QID_CARD(zq->queue->qid), 837 - AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type); 835 + AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type); 838 836 ap_send_online_uevent(&zq->queue->ap_dev, zq->online); 839 837 return -EAGAIN; 840 838 } 841 839 } 842 840 843 841 static int convert_response_ep11_xcrb(bool userspace, struct zcrypt_queue *zq, 844 - struct ap_message *reply, struct ep11_urb *xcRB) 842 + struct ap_message *reply, struct ep11_urb *xcrb) 845 843 { 846 844 struct type86_ep11_reply *msg = reply->msg; 847 845 ··· 853 851 if (msg->hdr.reply_code) 854 852 return convert_error(zq, reply); 855 853 if (msg->cprbx.cprb_ver_id == 0x04) 856 - return convert_type86_ep11_xcrb(userspace, zq, reply, xcRB); 854 + return convert_type86_ep11_xcrb(userspace, zq, reply, xcrb); 857 855 fallthrough; /* wrong cprb version is an unknown resp */ 858 856 default: /* Unknown response type, this should NEVER EVER happen */ 859 857 zq->online = 0; 860 858 pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 861 859 AP_QID_CARD(zq->queue->qid), 862 860 AP_QID_QUEUE(zq->queue->qid), 863 - (int) msg->hdr.type); 861 + (int)msg->hdr.type); 864 862 ZCRYPT_DBF_ERR( 865 863 "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 866 864 __func__, AP_QID_CARD(zq->queue->qid), 867 - AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type); 865 + AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type); 868 866 ap_send_online_uevent(&zq->queue->ap_dev, zq->online); 869 867 return -EAGAIN; 870 868 } 871 869 } 872 870 873 871 static int 
convert_response_rng(struct zcrypt_queue *zq, 874 - struct ap_message *reply, 875 - char *data) 872 + struct ap_message *reply, 873 + char *data) 876 874 { 877 875 struct type86x_reply *msg = reply->msg; 878 876 ··· 891 889 pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 892 890 AP_QID_CARD(zq->queue->qid), 893 891 AP_QID_QUEUE(zq->queue->qid), 894 - (int) msg->hdr.type); 892 + (int)msg->hdr.type); 895 893 ZCRYPT_DBF_ERR( 896 894 "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n", 897 895 __func__, AP_QID_CARD(zq->queue->qid), 898 - AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type); 896 + AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type); 899 897 ap_send_online_uevent(&zq->queue->ap_dev, zq->online); 900 898 return -EAGAIN; 901 899 } ··· 910 908 * @reply: pointer to the AP reply message 911 909 */ 912 910 static void zcrypt_msgtype6_receive(struct ap_queue *aq, 913 - struct ap_message *msg, 914 - struct ap_message *reply) 911 + struct ap_message *msg, 912 + struct ap_message *reply) 915 913 { 916 914 static struct error_hdr error_reply = { 917 915 .type = TYPE82_RSP_CODE, 918 916 .reply_code = REP82_ERROR_MACHINE_FAILURE, 919 917 }; 920 918 struct response_type *resp_type = 921 - (struct response_type *) msg->private; 919 + (struct response_type *)msg->private; 922 920 struct type86x_reply *t86r; 923 921 int len; 924 922 ··· 927 925 goto out; /* ap_msg->rc indicates the error */ 928 926 t86r = reply->msg; 929 927 if (t86r->hdr.type == TYPE86_RSP_CODE && 930 - t86r->cprbx.cprb_ver_id == 0x02) { 928 + t86r->cprbx.cprb_ver_id == 0x02) { 931 929 switch (resp_type->type) { 932 930 case CEXXC_RESPONSE_TYPE_ICA: 933 931 len = sizeof(struct type86x_reply) + t86r->length - 2; ··· 950 948 default: 951 949 memcpy(msg->msg, &error_reply, sizeof(error_reply)); 952 950 } 953 - } else 951 + } else { 954 952 memcpy(msg->msg, reply->msg, sizeof(error_reply)); 953 + } 955 954 out: 956 - complete(&(resp_type->work)); 955 + 
complete(&resp_type->work); 957 956 } 958 957 959 958 /* ··· 1001 998 memcpy(msg->msg, reply->msg, sizeof(error_reply)); 1002 999 } 1003 1000 out: 1004 - complete(&(resp_type->work)); 1001 + complete(&resp_type->work); 1005 1002 } 1006 1003 1007 1004 static atomic_t zcrypt_step = ATOMIC_INIT(0); ··· 1022 1019 }; 1023 1020 int rc; 1024 1021 1025 - ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL); 1022 + ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); 1026 1023 if (!ap_msg->msg) 1027 1024 return -ENOMEM; 1028 1025 ap_msg->bufsize = PAGE_SIZE; 1029 1026 ap_msg->receive = zcrypt_msgtype6_receive; 1030 - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 1027 + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + 1031 1028 atomic_inc_return(&zcrypt_step); 1032 1029 ap_msg->private = &resp_type; 1033 - rc = ICAMEX_msg_to_type6MEX_msgX(zq, ap_msg, mex); 1030 + rc = icamex_msg_to_type6mex_msgx(zq, ap_msg, mex); 1034 1031 if (rc) 1035 1032 goto out_free; 1036 1033 init_completion(&resp_type.work); ··· 1044 1041 rc = convert_response_ica(zq, ap_msg, 1045 1042 mex->outputdata, 1046 1043 mex->outputdatalength); 1047 - } else 1044 + } else { 1048 1045 /* Signal pending. 
*/ 1049 1046 ap_cancel_message(zq->queue, ap_msg); 1047 + } 1048 + 1050 1049 out_free: 1051 - free_page((unsigned long) ap_msg->msg); 1050 + free_page((unsigned long)ap_msg->msg); 1052 1051 ap_msg->private = NULL; 1053 1052 ap_msg->msg = NULL; 1054 1053 return rc; ··· 1072 1067 }; 1073 1068 int rc; 1074 1069 1075 - ap_msg->msg = (void *) get_zeroed_page(GFP_KERNEL); 1070 + ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); 1076 1071 if (!ap_msg->msg) 1077 1072 return -ENOMEM; 1078 1073 ap_msg->bufsize = PAGE_SIZE; 1079 1074 ap_msg->receive = zcrypt_msgtype6_receive; 1080 - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 1075 + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + 1081 1076 atomic_inc_return(&zcrypt_step); 1082 1077 ap_msg->private = &resp_type; 1083 - rc = ICACRT_msg_to_type6CRT_msgX(zq, ap_msg, crt); 1078 + rc = icacrt_msg_to_type6crt_msgx(zq, ap_msg, crt); 1084 1079 if (rc) 1085 1080 goto out_free; 1086 1081 init_completion(&resp_type.work); ··· 1098 1093 /* Signal pending. */ 1099 1094 ap_cancel_message(zq->queue, ap_msg); 1100 1095 } 1096 + 1101 1097 out_free: 1102 - free_page((unsigned long) ap_msg->msg); 1098 + free_page((unsigned long)ap_msg->msg); 1103 1099 ap_msg->private = NULL; 1104 1100 ap_msg->msg = NULL; 1105 1101 return rc; ··· 1115 1109 * by the caller with ap_init_message(). Also the caller has to 1116 1110 * make sure ap_release_message() is always called even on failure. 
1117 1111 */ 1118 - int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcRB, 1112 + int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcrb, 1119 1113 struct ap_message *ap_msg, 1120 1114 unsigned int *func_code, unsigned short **dom) 1121 1115 { ··· 1128 1122 if (!ap_msg->msg) 1129 1123 return -ENOMEM; 1130 1124 ap_msg->receive = zcrypt_msgtype6_receive; 1131 - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 1125 + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + 1132 1126 atomic_inc_return(&zcrypt_step); 1133 1127 ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); 1134 1128 if (!ap_msg->private) 1135 1129 return -ENOMEM; 1136 - return XCRB_msg_to_type6CPRB_msgX(userspace, ap_msg, xcRB, func_code, dom); 1130 + return xcrb_msg_to_type6cprb_msgx(userspace, ap_msg, xcrb, func_code, dom); 1137 1131 } 1138 1132 1139 1133 /* ··· 1141 1135 * device to handle a send_cprb request. 1142 1136 * @zq: pointer to zcrypt_queue structure that identifies the 1143 1137 * CEXxC device to the request distributor 1144 - * @xcRB: pointer to the send_cprb request buffer 1138 + * @xcrb: pointer to the send_cprb request buffer 1145 1139 */ 1146 1140 static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, 1147 - struct ica_xcRB *xcRB, 1141 + struct ica_xcRB *xcrb, 1148 1142 struct ap_message *ap_msg) 1149 1143 { 1150 1144 int rc; ··· 1159 1153 * Set the queue's reply buffer length minus 128 byte padding 1160 1154 * as reply limit for the card firmware. 
1161 1155 */ 1162 - msg->hdr.FromCardLen1 = min_t(unsigned int, msg->hdr.FromCardLen1, 1156 + msg->hdr.fromcardlen1 = min_t(unsigned int, msg->hdr.fromcardlen1, 1163 1157 zq->reply.bufsize - 128); 1164 - if (msg->hdr.FromCardLen2) 1165 - msg->hdr.FromCardLen2 = 1166 - zq->reply.bufsize - msg->hdr.FromCardLen1 - 128; 1158 + if (msg->hdr.fromcardlen2) 1159 + msg->hdr.fromcardlen2 = 1160 + zq->reply.bufsize - msg->hdr.fromcardlen1 - 128; 1167 1161 1168 1162 init_completion(&rtype->work); 1169 1163 rc = ap_queue_message(zq->queue, ap_msg); ··· 1173 1167 if (rc == 0) { 1174 1168 rc = ap_msg->rc; 1175 1169 if (rc == 0) 1176 - rc = convert_response_xcrb(userspace, zq, ap_msg, xcRB); 1177 - } else 1170 + rc = convert_response_xcrb(userspace, zq, ap_msg, xcrb); 1171 + } else { 1178 1172 /* Signal pending. */ 1179 1173 ap_cancel_message(zq->queue, ap_msg); 1174 + } 1175 + 1180 1176 out: 1181 1177 if (rc) 1182 1178 ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n", ··· 1209 1201 if (!ap_msg->msg) 1210 1202 return -ENOMEM; 1211 1203 ap_msg->receive = zcrypt_msgtype6_receive_ep11; 1212 - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 1204 + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + 1213 1205 atomic_inc_return(&zcrypt_step); 1214 1206 ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); 1215 1207 if (!ap_msg->private) ··· 1223 1215 * device to handle a send_ep11_cprb request. 
1224 1216 * @zq: pointer to zcrypt_queue structure that identifies the 1225 1217 * CEX4P device to the request distributor 1226 - * @xcRB: pointer to the ep11 user request block 1218 + * @xcrb: pointer to the ep11 user request block 1227 1219 */ 1228 1220 static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *zq, 1229 1221 struct ep11_urb *xcrb, ··· 1273 1265 } else { 1274 1266 lfmt = 1; /* length format #1 */ 1275 1267 } 1276 - payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt); 1268 + payload_hdr = (struct pld_hdr *)((&msg->pld_lenfmt) + lfmt); 1277 1269 payload_hdr->dom_val = (unsigned int) 1278 1270 AP_QID_QUEUE(zq->queue->qid); 1279 1271 } ··· 1282 1274 * Set the queue's reply buffer length minus the two prepend headers 1283 1275 * as reply limit for the card firmware. 1284 1276 */ 1285 - msg->hdr.FromCardLen1 = zq->reply.bufsize - 1277 + msg->hdr.fromcardlen1 = zq->reply.bufsize - 1286 1278 sizeof(struct type86_hdr) - sizeof(struct type86_fmt2_ext); 1287 1279 1288 1280 init_completion(&rtype->work); ··· 1294 1286 rc = ap_msg->rc; 1295 1287 if (rc == 0) 1296 1288 rc = convert_response_ep11_xcrb(userspace, zq, ap_msg, xcrb); 1297 - } else 1289 + } else { 1298 1290 /* Signal pending. 
*/ 1299 1291 ap_cancel_message(zq->queue, ap_msg); 1292 + } 1293 + 1300 1294 out: 1301 1295 if (rc) 1302 1296 ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n", ··· 1319 1309 if (!ap_msg->msg) 1320 1310 return -ENOMEM; 1321 1311 ap_msg->receive = zcrypt_msgtype6_receive; 1322 - ap_msg->psmid = (((unsigned long long) current->pid) << 32) + 1312 + ap_msg->psmid = (((unsigned long long)current->pid) << 32) + 1323 1313 atomic_inc_return(&zcrypt_step); 1324 1314 ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); 1325 1315 if (!ap_msg->private) 1326 1316 return -ENOMEM; 1327 1317 1328 - rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain); 1318 + rng_type6cprb_msgx(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain); 1329 1319 1330 1320 *func_code = HWRNG; 1331 1321 return 0; ··· 1364 1354 rc = ap_msg->rc; 1365 1355 if (rc == 0) 1366 1356 rc = convert_response_rng(zq, ap_msg, buffer); 1367 - } else 1357 + } else { 1368 1358 /* Signal pending. */ 1369 1359 ap_cancel_message(zq->queue, ap_msg); 1360 + } 1370 1361 out: 1371 1362 return rc; 1372 1363 }
+13 -13
drivers/s390/crypto/zcrypt_msgtype6.h
··· 45 45 unsigned char reserved5[2]; /* 0x0000 */ 46 46 unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */ 47 47 unsigned char reserved6[2]; /* 0x0000 */ 48 - unsigned int ToCardLen1; /* (request CPRB len + 3) & -4 */ 49 - unsigned int ToCardLen2; /* db len 0x00000000 for PKD */ 50 - unsigned int ToCardLen3; /* 0x00000000 */ 51 - unsigned int ToCardLen4; /* 0x00000000 */ 52 - unsigned int FromCardLen1; /* response buffer length */ 53 - unsigned int FromCardLen2; /* db len 0x00000000 for PKD */ 54 - unsigned int FromCardLen3; /* 0x00000000 */ 55 - unsigned int FromCardLen4; /* 0x00000000 */ 48 + unsigned int tocardlen1; /* (request CPRB len + 3) & -4 */ 49 + unsigned int tocardlen2; /* db len 0x00000000 for PKD */ 50 + unsigned int tocardlen3; /* 0x00000000 */ 51 + unsigned int tocardlen4; /* 0x00000000 */ 52 + unsigned int fromcardlen1; /* response buffer length */ 53 + unsigned int fromcardlen2; /* db len 0x00000000 for PKD */ 54 + unsigned int fromcardlen3; /* 0x00000000 */ 55 + unsigned int fromcardlen4; /* 0x00000000 */ 56 56 } __packed; 57 57 58 58 /** ··· 116 116 * @ap_dev: AP device pointer 117 117 * @ap_msg: pointer to AP message 118 118 */ 119 - static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg, 119 + static inline void rng_type6cprb_msgx(struct ap_message *ap_msg, 120 120 unsigned int random_number_length, 121 121 unsigned int *domain) 122 122 { ··· 134 134 .offset1 = 0x00000058, 135 135 .agent_id = {'C', 'A'}, 136 136 .function_code = {'R', 'L'}, 137 - .ToCardLen1 = sizeof(*msg) - sizeof(msg->hdr), 138 - .FromCardLen1 = sizeof(*msg) - sizeof(msg->hdr), 137 + .tocardlen1 = sizeof(*msg) - sizeof(msg->hdr), 138 + .fromcardlen1 = sizeof(*msg) - sizeof(msg->hdr), 139 139 }; 140 140 static struct CPRBX local_cprbx = { 141 141 .cprb_len = 0x00dc, ··· 147 147 }; 148 148 149 149 msg->hdr = static_type6_hdrX; 150 - msg->hdr.FromCardLen2 = random_number_length, 150 + msg->hdr.fromcardlen2 = random_number_length; 151 151 msg->cprbx = 
local_cprbx; 152 - msg->cprbx.rpl_datal = random_number_length, 152 + msg->cprbx.rpl_datal = random_number_length; 153 153 memcpy(msg->function_code, msg->hdr.function_code, 0x02); 154 154 msg->rule_length = 0x0a; 155 155 memcpy(msg->rule, "RANDOM ", 8);
+1 -1
drivers/s390/crypto/zcrypt_queue.c
··· 114 114 { 115 115 struct zcrypt_queue *zq; 116 116 117 - zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL); 117 + zq = kzalloc(sizeof(*zq), GFP_KERNEL); 118 118 if (!zq) 119 119 return NULL; 120 120 zq->reply.msg = kmalloc(reply_buf_size, GFP_KERNEL);