Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[CRYPTO] s390: Generic sha_update and sha_final

The sha_{update|final} functions are similar for every SHA variant.
Since that is error-prone and redundant, replace these functions with
a shared generic implementation for s390.

Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by: Jan Glauber
Committed by: Herbert Xu
604973f1 607424d8

+138 -171
+2 -2
arch/s390/crypto/Makefile
··· 2 2 # Cryptographic API 3 3 # 4 4 5 - obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o 6 - obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o 5 + obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o sha_common.o 6 + obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o 7 7 obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o 8 8 obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o 9 9 obj-$(CONFIG_S390_PRNG) += prng.o
+34
arch/s390/crypto/sha.h
··· 1 + /* 2 + * Cryptographic API. 3 + * 4 + * s390 generic implementation of the SHA Secure Hash Algorithms. 5 + * 6 + * Copyright IBM Corp. 2007 7 + * Author(s): Jan Glauber (jang@de.ibm.com) 8 + * 9 + * This program is free software; you can redistribute it and/or modify it 10 + * under the terms of the GNU General Public License as published by the Free 11 + * Software Foundation; either version 2 of the License, or (at your option) 12 + * any later version. 13 + * 14 + */ 15 + #ifndef _CRYPTO_ARCH_S390_SHA_H 16 + #define _CRYPTO_ARCH_S390_SHA_H 17 + 18 + #include <linux/crypto.h> 19 + #include <crypto/sha.h> 20 + 21 + /* must be big enough for the largest SHA variant */ 22 + #define SHA_MAX_BLOCK_SIZE SHA256_BLOCK_SIZE 23 + 24 + struct s390_sha_ctx { 25 + u64 count; /* message length in bytes */ 26 + u32 state[8]; 27 + u8 buf[2 * SHA_MAX_BLOCK_SIZE]; 28 + int func; /* KIMD function to use */ 29 + }; 30 + 31 + void s390_sha_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len); 32 + void s390_sha_final(struct crypto_tfm *tfm, u8 *out); 33 + 34 + #endif
+6 -85
arch/s390/crypto/sha1_s390.c
··· 29 29 #include <crypto/sha.h> 30 30 31 31 #include "crypt_s390.h" 32 - 33 - struct s390_sha1_ctx { 34 - u64 count; /* message length */ 35 - u32 state[5]; 36 - u8 buf[2 * SHA1_BLOCK_SIZE]; 37 - }; 32 + #include "sha.h" 38 33 39 34 static void sha1_init(struct crypto_tfm *tfm) 40 35 { 41 - struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); 36 + struct s390_sha_ctx *sctx = crypto_tfm_ctx(tfm); 42 37 43 38 sctx->state[0] = SHA1_H0; 44 39 sctx->state[1] = SHA1_H1; ··· 41 46 sctx->state[3] = SHA1_H3; 42 47 sctx->state[4] = SHA1_H4; 43 48 sctx->count = 0; 44 - } 45 - 46 - static void sha1_update(struct crypto_tfm *tfm, const u8 *data, 47 - unsigned int len) 48 - { 49 - struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); 50 - unsigned int index; 51 - int ret; 52 - 53 - /* how much is already in the buffer? */ 54 - index = sctx->count & 0x3f; 55 - 56 - sctx->count += len; 57 - 58 - if (index + len < SHA1_BLOCK_SIZE) 59 - goto store; 60 - 61 - /* process one stored block */ 62 - if (index) { 63 - memcpy(sctx->buf + index, data, SHA1_BLOCK_SIZE - index); 64 - ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, 65 - SHA1_BLOCK_SIZE); 66 - BUG_ON(ret != SHA1_BLOCK_SIZE); 67 - data += SHA1_BLOCK_SIZE - index; 68 - len -= SHA1_BLOCK_SIZE - index; 69 - } 70 - 71 - /* process as many blocks as possible */ 72 - if (len >= SHA1_BLOCK_SIZE) { 73 - ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, 74 - len & ~(SHA1_BLOCK_SIZE - 1)); 75 - BUG_ON(ret != (len & ~(SHA1_BLOCK_SIZE - 1))); 76 - data += ret; 77 - len -= ret; 78 - } 79 - 80 - store: 81 - /* anything left? */ 82 - if (len) 83 - memcpy(sctx->buf + index , data, len); 84 - } 85 - 86 - /* Add padding and return the message digest. */ 87 - static void sha1_final(struct crypto_tfm *tfm, u8 *out) 88 - { 89 - struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); 90 - u64 bits; 91 - unsigned int index, end; 92 - int ret; 93 - 94 - /* must perform manual padding */ 95 - index = sctx->count & 0x3f; 96 - end = (index < 56) ? 
SHA1_BLOCK_SIZE : (2 * SHA1_BLOCK_SIZE); 97 - 98 - /* start pad with 1 */ 99 - sctx->buf[index] = 0x80; 100 - 101 - /* pad with zeros */ 102 - index++; 103 - memset(sctx->buf + index, 0x00, end - index - 8); 104 - 105 - /* append message length */ 106 - bits = sctx->count * 8; 107 - memcpy(sctx->buf + end - 8, &bits, sizeof(bits)); 108 - 109 - ret = crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buf, end); 110 - BUG_ON(ret != end); 111 - 112 - /* copy digest to out */ 113 - memcpy(out, sctx->state, SHA1_DIGEST_SIZE); 114 - 115 - /* wipe context */ 116 - memset(sctx, 0, sizeof *sctx); 49 + sctx->func = KIMD_SHA_1; 117 50 } 118 51 119 52 static struct crypto_alg alg = { ··· 50 127 .cra_priority = CRYPT_S390_PRIORITY, 51 128 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 52 129 .cra_blocksize = SHA1_BLOCK_SIZE, 53 - .cra_ctxsize = sizeof(struct s390_sha1_ctx), 130 + .cra_ctxsize = sizeof(struct s390_sha_ctx), 54 131 .cra_module = THIS_MODULE, 55 132 .cra_list = LIST_HEAD_INIT(alg.cra_list), 56 133 .cra_u = { .digest = { 57 134 .dia_digestsize = SHA1_DIGEST_SIZE, 58 135 .dia_init = sha1_init, 59 - .dia_update = sha1_update, 60 - .dia_final = sha1_final } } 136 + .dia_update = s390_sha_update, 137 + .dia_final = s390_sha_final } } 61 138 }; 62 139 63 140 static int __init sha1_s390_init(void) 64 141 { 65 142 if (!crypt_s390_func_available(KIMD_SHA_1)) 66 143 return -EOPNOTSUPP; 67 - 68 144 return crypto_register_alg(&alg); 69 145 } 70 146 ··· 76 154 module_exit(sha1_s390_fini); 77 155 78 156 MODULE_ALIAS("sha1"); 79 - 80 157 MODULE_LICENSE("GPL"); 81 158 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
+6 -84
arch/s390/crypto/sha256_s390.c
··· 22 22 #include <crypto/sha.h> 23 23 24 24 #include "crypt_s390.h" 25 - 26 - struct s390_sha256_ctx { 27 - u64 count; /* message length */ 28 - u32 state[8]; 29 - u8 buf[2 * SHA256_BLOCK_SIZE]; 30 - }; 25 + #include "sha.h" 31 26 32 27 static void sha256_init(struct crypto_tfm *tfm) 33 28 { 34 - struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm); 29 + struct s390_sha_ctx *sctx = crypto_tfm_ctx(tfm); 35 30 36 31 sctx->state[0] = SHA256_H0; 37 32 sctx->state[1] = SHA256_H1; ··· 37 42 sctx->state[6] = SHA256_H6; 38 43 sctx->state[7] = SHA256_H7; 39 44 sctx->count = 0; 40 - } 41 - 42 - static void sha256_update(struct crypto_tfm *tfm, const u8 *data, 43 - unsigned int len) 44 - { 45 - struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm); 46 - unsigned int index; 47 - int ret; 48 - 49 - /* how much is already in the buffer? */ 50 - index = sctx->count & 0x3f; 51 - 52 - sctx->count += len; 53 - 54 - if ((index + len) < SHA256_BLOCK_SIZE) 55 - goto store; 56 - 57 - /* process one stored block */ 58 - if (index) { 59 - memcpy(sctx->buf + index, data, SHA256_BLOCK_SIZE - index); 60 - ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, 61 - SHA256_BLOCK_SIZE); 62 - BUG_ON(ret != SHA256_BLOCK_SIZE); 63 - data += SHA256_BLOCK_SIZE - index; 64 - len -= SHA256_BLOCK_SIZE - index; 65 - } 66 - 67 - /* process as many blocks as possible */ 68 - if (len >= SHA256_BLOCK_SIZE) { 69 - ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, data, 70 - len & ~(SHA256_BLOCK_SIZE - 1)); 71 - BUG_ON(ret != (len & ~(SHA256_BLOCK_SIZE - 1))); 72 - data += ret; 73 - len -= ret; 74 - } 75 - 76 - store: 77 - /* anything left? 
*/ 78 - if (len) 79 - memcpy(sctx->buf + index , data, len); 80 - } 81 - 82 - /* Add padding and return the message digest */ 83 - static void sha256_final(struct crypto_tfm *tfm, u8 *out) 84 - { 85 - struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm); 86 - u64 bits; 87 - unsigned int index, end; 88 - int ret; 89 - 90 - /* must perform manual padding */ 91 - index = sctx->count & 0x3f; 92 - end = (index < 56) ? SHA256_BLOCK_SIZE : (2 * SHA256_BLOCK_SIZE); 93 - 94 - /* start pad with 1 */ 95 - sctx->buf[index] = 0x80; 96 - 97 - /* pad with zeros */ 98 - index++; 99 - memset(sctx->buf + index, 0x00, end - index - 8); 100 - 101 - /* append message length */ 102 - bits = sctx->count * 8; 103 - memcpy(sctx->buf + end - 8, &bits, sizeof(bits)); 104 - 105 - ret = crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf, end); 106 - BUG_ON(ret != end); 107 - 108 - /* copy digest to out */ 109 - memcpy(out, sctx->state, SHA256_DIGEST_SIZE); 110 - 111 - /* wipe context */ 112 - memset(sctx, 0, sizeof *sctx); 45 + sctx->func = KIMD_SHA_256; 113 46 } 114 47 115 48 static struct crypto_alg alg = { ··· 46 123 .cra_priority = CRYPT_S390_PRIORITY, 47 124 .cra_flags = CRYPTO_ALG_TYPE_DIGEST, 48 125 .cra_blocksize = SHA256_BLOCK_SIZE, 49 - .cra_ctxsize = sizeof(struct s390_sha256_ctx), 126 + .cra_ctxsize = sizeof(struct s390_sha_ctx), 50 127 .cra_module = THIS_MODULE, 51 128 .cra_list = LIST_HEAD_INIT(alg.cra_list), 52 129 .cra_u = { .digest = { 53 130 .dia_digestsize = SHA256_DIGEST_SIZE, 54 131 .dia_init = sha256_init, 55 - .dia_update = sha256_update, 56 - .dia_final = sha256_final } } 132 + .dia_update = s390_sha_update, 133 + .dia_final = s390_sha_final } } 57 134 }; 58 135 59 136 static int sha256_s390_init(void) ··· 73 150 module_exit(sha256_s390_fini); 74 151 75 152 MODULE_ALIAS("sha256"); 76 - 77 153 MODULE_LICENSE("GPL"); 78 154 MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm");
+90
arch/s390/crypto/sha_common.c
··· 1 + /* 2 + * Cryptographic API. 3 + * 4 + * s390 generic implementation of the SHA Secure Hash Algorithms. 5 + * 6 + * Copyright IBM Corp. 2007 7 + * Author(s): Jan Glauber (jang@de.ibm.com) 8 + * 9 + * This program is free software; you can redistribute it and/or modify it 10 + * under the terms of the GNU General Public License as published by the Free 11 + * Software Foundation; either version 2 of the License, or (at your option) 12 + * any later version. 13 + * 14 + */ 15 + 16 + #include <linux/crypto.h> 17 + #include "sha.h" 18 + #include "crypt_s390.h" 19 + 20 + void s390_sha_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) 21 + { 22 + struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm); 23 + unsigned int bsize = crypto_tfm_alg_blocksize(tfm); 24 + unsigned int index; 25 + int ret; 26 + 27 + /* how much is already in the buffer? */ 28 + index = ctx->count & (bsize - 1); 29 + ctx->count += len; 30 + 31 + if ((index + len) < bsize) 32 + goto store; 33 + 34 + /* process one stored block */ 35 + if (index) { 36 + memcpy(ctx->buf + index, data, bsize - index); 37 + ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize); 38 + BUG_ON(ret != bsize); 39 + data += bsize - index; 40 + len -= bsize - index; 41 + } 42 + 43 + /* process as many blocks as possible */ 44 + if (len >= bsize) { 45 + ret = crypt_s390_kimd(ctx->func, ctx->state, data, 46 + len & ~(bsize - 1)); 47 + BUG_ON(ret != (len & ~(bsize - 1))); 48 + data += ret; 49 + len -= ret; 50 + } 51 + store: 52 + if (len) 53 + memcpy(ctx->buf + index , data, len); 54 + } 55 + EXPORT_SYMBOL_GPL(s390_sha_update); 56 + 57 + void s390_sha_final(struct crypto_tfm *tfm, u8 *out) 58 + { 59 + struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm); 60 + unsigned int bsize = crypto_tfm_alg_blocksize(tfm); 61 + u64 bits; 62 + unsigned int index, end; 63 + int ret; 64 + 65 + /* must perform manual padding */ 66 + index = ctx->count & (bsize - 1); 67 + end = (index < bsize - 8) ? 
bsize : (2 * bsize); 68 + 69 + /* start pad with 1 */ 70 + ctx->buf[index] = 0x80; 71 + index++; 72 + 73 + /* pad with zeros */ 74 + memset(ctx->buf + index, 0x00, end - index - 8); 75 + 76 + bits = ctx->count * 8; 77 + memcpy(ctx->buf + end - 8, &bits, sizeof(bits)); 78 + 79 + ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end); 80 + BUG_ON(ret != end); 81 + 82 + /* copy digest to out */ 83 + memcpy(out, ctx->state, crypto_hash_digestsize(crypto_hash_cast(tfm))); 84 + /* wipe context */ 85 + memset(ctx, 0, sizeof *ctx); 86 + } 87 + EXPORT_SYMBOL_GPL(s390_sha_final); 88 + 89 + MODULE_LICENSE("GPL"); 90 + MODULE_DESCRIPTION("s390 SHA cipher common functions");