Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: arm/sha2-ce - move SHA-224/256 ARMv8 implementation to base layer

This removes all the boilerplate from the existing implementation,
and replaces it with calls into the base layer.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Ard Biesheuvel; committed by Herbert Xu.
9205b949 b59e2ae3

+42 -140
+1 -1
arch/arm/crypto/Kconfig
··· 39 39 config CRYPTO_SHA2_ARM_CE 40 40 tristate "SHA-224/256 digest algorithm (ARM v8 Crypto Extensions)" 41 41 depends on KERNEL_MODE_NEON 42 - select CRYPTO_SHA256 42 + select CRYPTO_SHA256_ARM 43 43 select CRYPTO_HASH 44 44 help 45 45 SHA-256 secure hash standard (DFIPS 180-2) implemented
+5 -14
arch/arm/crypto/sha2-ce-core.S
··· 69 69 .word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 70 70 71 71 /* 72 - * void sha2_ce_transform(int blocks, u8 const *src, u32 *state, 73 - * u8 *head); 72 + * void sha2_ce_transform(struct sha256_state *sst, u8 const *src, 73 + int blocks); 74 74 */ 75 75 ENTRY(sha2_ce_transform) 76 76 /* load state */ 77 - vld1.32 {dga-dgb}, [r2] 78 - 79 - /* load partial input (if supplied) */ 80 - teq r3, #0 81 - beq 0f 82 - vld1.32 {q0-q1}, [r3]! 83 - vld1.32 {q2-q3}, [r3] 84 - teq r0, #0 85 - b 1f 77 + vld1.32 {dga-dgb}, [r0] 86 78 87 79 /* load input */ 88 80 0: vld1.32 {q0-q1}, [r1]! 89 81 vld1.32 {q2-q3}, [r1]! 90 - subs r0, r0, #1 82 + subs r2, r2, #1 91 83 92 - 1: 93 84 #ifndef CONFIG_CPU_BIG_ENDIAN 94 85 vrev32.8 q0, q0 95 86 vrev32.8 q1, q1 ··· 120 129 bne 0b 121 130 122 131 /* store new state */ 123 - vst1.32 {dga-dgb}, [r2] 132 + vst1.32 {dga-dgb}, [r0] 124 133 bx lr 125 134 ENDPROC(sha2_ce_transform)
+36 -125
arch/arm/crypto/sha2-ce-glue.c
··· 10 10 11 11 #include <crypto/internal/hash.h> 12 12 #include <crypto/sha.h> 13 + #include <crypto/sha256_base.h> 13 14 #include <linux/crypto.h> 14 15 #include <linux/module.h> 15 16 ··· 19 18 #include <asm/neon.h> 20 19 #include <asm/unaligned.h> 21 20 21 + #include "sha256_glue.h" 22 + 22 23 MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions"); 23 24 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); 24 25 MODULE_LICENSE("GPL v2"); 25 26 26 - asmlinkage void sha2_ce_transform(int blocks, u8 const *src, u32 *state, 27 - u8 *head); 27 + asmlinkage void sha2_ce_transform(struct sha256_state *sst, u8 const *src, 28 + int blocks); 28 29 29 - static int sha224_init(struct shash_desc *desc) 30 + static int sha2_ce_update(struct shash_desc *desc, const u8 *data, 31 + unsigned int len) 30 32 { 31 33 struct sha256_state *sctx = shash_desc_ctx(desc); 32 34 33 - *sctx = (struct sha256_state){ 34 - .state = { 35 - SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, 36 - SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, 37 - } 38 - }; 35 + if (!may_use_simd() || 36 + (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE) 37 + return crypto_sha256_arm_update(desc, data, len); 38 + 39 + kernel_neon_begin(); 40 + sha256_base_do_update(desc, data, len, 41 + (sha256_block_fn *)sha2_ce_transform); 42 + kernel_neon_end(); 43 + 39 44 return 0; 40 45 } 41 46 42 - static int sha256_init(struct shash_desc *desc) 47 + static int sha2_ce_finup(struct shash_desc *desc, const u8 *data, 48 + unsigned int len, u8 *out) 43 49 { 44 - struct sha256_state *sctx = shash_desc_ctx(desc); 45 - 46 - *sctx = (struct sha256_state){ 47 - .state = { 48 - SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, 49 - SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, 50 - } 51 - }; 52 - return 0; 53 - } 54 - 55 - static int sha2_update(struct shash_desc *desc, const u8 *data, 56 - unsigned int len) 57 - { 58 - struct sha256_state *sctx = shash_desc_ctx(desc); 59 - unsigned int partial; 60 - 61 
50 if (!may_use_simd()) 62 - return crypto_sha256_update(desc, data, len); 51 + return crypto_sha256_arm_finup(desc, data, len, out); 63 52 64 - partial = sctx->count % SHA256_BLOCK_SIZE; 65 - sctx->count += len; 66 - 67 - if ((partial + len) >= SHA256_BLOCK_SIZE) { 68 - int blocks; 69 - 70 - if (partial) { 71 - int p = SHA256_BLOCK_SIZE - partial; 72 - 73 - memcpy(sctx->buf + partial, data, p); 74 - data += p; 75 - len -= p; 76 - } 77 - 78 - blocks = len / SHA256_BLOCK_SIZE; 79 - len %= SHA256_BLOCK_SIZE; 80 - 81 - kernel_neon_begin(); 82 - sha2_ce_transform(blocks, data, sctx->state, 83 - partial ? sctx->buf : NULL); 84 - kernel_neon_end(); 85 - 86 - data += blocks * SHA256_BLOCK_SIZE; 87 - partial = 0; 88 - } 53 + kernel_neon_begin(); 89 54 if (len) 90 - memcpy(sctx->buf + partial, data, len); 91 - return 0; 55 + sha256_base_do_update(desc, data, len, 56 + (sha256_block_fn *)sha2_ce_transform); 57 + sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform); 58 + kernel_neon_end(); 59 + 60 + return sha256_base_finish(desc, out); 92 61 } 93 62 94 - static void sha2_final(struct shash_desc *desc) 63 + static int sha2_ce_final(struct shash_desc *desc, u8 *out) 95 64 { 96 - static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, }; 97 - 98 - struct sha256_state *sctx = shash_desc_ctx(desc); 99 - __be64 bits = cpu_to_be64(sctx->count << 3); 100 - u32 padlen = SHA256_BLOCK_SIZE 101 - - ((sctx->count + sizeof(bits)) % SHA256_BLOCK_SIZE); 102 - 103 - sha2_update(desc, padding, padlen); 104 - sha2_update(desc, (const u8 *)&bits, sizeof(bits)); 105 - } 106 - 107 - static int sha224_final(struct shash_desc *desc, u8 *out) 108 - { 109 - struct sha256_state *sctx = shash_desc_ctx(desc); 110 - __be32 *dst = (__be32 *)out; 111 - int i; 112 - 113 - sha2_final(desc); 114 - 115 - for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(__be32); i++) 116 - put_unaligned_be32(sctx->state[i], dst++); 117 - 118 - *sctx = (struct sha256_state){}; 119 - return 0; 120 - } 121 - 122 - static 
int sha256_final(struct shash_desc *desc, u8 *out) 123 - { 124 - struct sha256_state *sctx = shash_desc_ctx(desc); 125 - __be32 *dst = (__be32 *)out; 126 - int i; 127 - 128 - sha2_final(desc); 129 - 130 - for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(__be32); i++) 131 - put_unaligned_be32(sctx->state[i], dst++); 132 - 133 - *sctx = (struct sha256_state){}; 134 - return 0; 135 - } 136 - 137 - static int sha2_export(struct shash_desc *desc, void *out) 138 - { 139 - struct sha256_state *sctx = shash_desc_ctx(desc); 140 - struct sha256_state *dst = out; 141 - 142 - *dst = *sctx; 143 - return 0; 144 - } 145 - 146 - static int sha2_import(struct shash_desc *desc, const void *in) 147 - { 148 - struct sha256_state *sctx = shash_desc_ctx(desc); 149 - struct sha256_state const *src = in; 150 - 151 - *sctx = *src; 152 - return 0; 65 + return sha2_ce_finup(desc, NULL, 0, out); 153 66 } 154 67 155 68 static struct shash_alg algs[] = { { 156 - .init = sha224_init, 157 - .update = sha2_update, 158 - .final = sha224_final, 159 - .export = sha2_export, 160 - .import = sha2_import, 69 + .init = sha224_base_init, 70 + .update = sha2_ce_update, 71 + .final = sha2_ce_final, 72 + .finup = sha2_ce_finup, 161 73 .descsize = sizeof(struct sha256_state), 162 74 .digestsize = SHA224_DIGEST_SIZE, 163 - .statesize = sizeof(struct sha256_state), 164 75 .base = { 165 76 .cra_name = "sha224", 166 77 .cra_driver_name = "sha224-ce", ··· 82 169 .cra_module = THIS_MODULE, 83 170 } 84 171 }, { 85 - .init = sha256_init, 86 - .update = sha2_update, 87 - .final = sha256_final, 88 - .export = sha2_export, 89 - .import = sha2_import, 172 + .init = sha256_base_init, 173 + .update = sha2_ce_update, 174 + .final = sha2_ce_final, 175 + .finup = sha2_ce_finup, 90 176 .descsize = sizeof(struct sha256_state), 91 177 .digestsize = SHA256_DIGEST_SIZE, 92 - .statesize = sizeof(struct sha256_state), 93 178 .base = { 94 179 .cra_name = "sha256", 95 180 .cra_driver_name = "sha256-ce",