
crypto: sha512 - implement base layer for SHA-512

To reduce the number of copies of boilerplate code throughout
the tree, this patch implements generic glue for the SHA-512
algorithm. This allows a specific arch or hardware implementation
to only implement the special handling that it needs.

The users need to supply an implementation of

void (sha512_block_fn)(struct sha512_state *sst, u8 const *src, int blocks)

and pass it to the SHA-512 base functions. For easy casting between the
prototype above and existing block functions that take a 'u64 state[]'
as their first argument, the 'state' member of struct sha512_state is
moved to the base of the struct.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
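
To see how an implementation plugs into this base layer, consider a rough
sketch of an arch driver. Everything outside the sha512_base_* helpers is
hypothetical: sha512_arch_transform stands in for whatever block function the
arch provides, and the driver names are placeholders.

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/sha512_base.h>
#include <linux/module.h>

/* hypothetical arch-provided block function, e.g. written in assembly */
void sha512_arch_transform(struct sha512_state *sst, u8 const *src,
			   int blocks);

static int sha512_arch_update(struct shash_desc *desc, const u8 *data,
			      unsigned int len)
{
	return sha512_base_do_update(desc, data, len, sha512_arch_transform);
}

static int sha512_arch_final(struct shash_desc *desc, u8 *out)
{
	sha512_base_do_finalize(desc, sha512_arch_transform);
	return sha512_base_finish(desc, out);
}

static struct shash_alg sha512_arch_alg = {
	.digestsize	= SHA512_DIGEST_SIZE,
	.init		= sha512_base_init,	/* base helper used directly */
	.update		= sha512_arch_update,
	.final		= sha512_arch_final,
	.descsize	= sizeof(struct sha512_state),
	.base		= {
		.cra_name	 = "sha512",
		.cra_driver_name = "sha512-arch",	/* placeholder */
		.cra_blocksize	 = SHA512_BLOCK_SIZE,
		.cra_module	 = THIS_MODULE,
	},
};

static int __init sha512_arch_mod_init(void)
{
	return crypto_register_shash(&sha512_arch_alg);
}
module_init(sha512_arch_mod_init);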

Authored by Ard Biesheuvel and committed by Herbert Xu (b84a2a0b, 11b8d5ef)

 include/crypto/sha.h         |   2 +-
 include/crypto/sha512_base.h | 131 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 132 insertions(+), 1 deletion(-)

diff --git a/include/crypto/sha.h b/include/crypto/sha.h
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -77,7 +77,7 @@
 };
 
 struct sha512_state {
-	u64 count[2];
 	u64 state[SHA512_DIGEST_SIZE / 8];
+	u64 count[2];
 	u8 buf[SHA512_BLOCK_SIZE];
 };
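
This reordering is what makes the casting mentioned in the commit message
legal: with 'state' at offset 0, a pointer to struct sha512_state and a
pointer to its state array share the same address. A hypothetical
illustration, where sha512_blocks_u64 is a made-up pre-existing block
function:

/* hypothetical legacy block function taking a bare state array */
void sha512_blocks_u64(u64 state[], const u8 *src, int blocks);

/* adapter matching the sha512_block_fn prototype */
static void sha512_block_adapter(struct sha512_state *sst, u8 const *src,
				 int blocks)
{
	/*
	 * Safe only because 'state' is now the first member of
	 * struct sha512_state.
	 */
	sha512_blocks_u64((u64 *)sst, src, blocks);
}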
diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h
new file mode 100644
--- /dev/null
+++ b/include/crypto/sha512_base.h
@@ -0,0 +1,131 @@
+/*
+ * sha512_base.h - core logic for SHA-512 implementations
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src,
+			       int blocks);
+
+static inline int sha384_base_init(struct shash_desc *desc)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA384_H0;
+	sctx->state[1] = SHA384_H1;
+	sctx->state[2] = SHA384_H2;
+	sctx->state[3] = SHA384_H3;
+	sctx->state[4] = SHA384_H4;
+	sctx->state[5] = SHA384_H5;
+	sctx->state[6] = SHA384_H6;
+	sctx->state[7] = SHA384_H7;
+	sctx->count[0] = sctx->count[1] = 0;
+
+	return 0;
+}
+
+static inline int sha512_base_init(struct shash_desc *desc)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+
+	sctx->state[0] = SHA512_H0;
+	sctx->state[1] = SHA512_H1;
+	sctx->state[2] = SHA512_H2;
+	sctx->state[3] = SHA512_H3;
+	sctx->state[4] = SHA512_H4;
+	sctx->state[5] = SHA512_H5;
+	sctx->state[6] = SHA512_H6;
+	sctx->state[7] = SHA512_H7;
+	sctx->count[0] = sctx->count[1] = 0;
+
+	return 0;
+}
+
+static inline int sha512_base_do_update(struct shash_desc *desc,
+					const u8 *data,
+					unsigned int len,
+					sha512_block_fn *block_fn)
+{
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+	sctx->count[0] += len;
+	if (sctx->count[0] < len)
+		sctx->count[1]++;
+
+	if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) {
+		int blocks;
+
+		if (partial) {
+			int p = SHA512_BLOCK_SIZE - partial;
+
+			memcpy(sctx->buf + partial, data, p);
+			data += p;
+			len -= p;
+
+			block_fn(sctx, sctx->buf, 1);
+		}
+
+		blocks = len / SHA512_BLOCK_SIZE;
+		len %= SHA512_BLOCK_SIZE;
+
+		if (blocks) {
+			block_fn(sctx, data, blocks);
+			data += blocks * SHA512_BLOCK_SIZE;
+		}
+		partial = 0;
+	}
+	if (len)
+		memcpy(sctx->buf + partial, data, len);
+
+	return 0;
+}
+
+static inline int sha512_base_do_finalize(struct shash_desc *desc,
+					  sha512_block_fn *block_fn)
+{
+	const int bit_offset = SHA512_BLOCK_SIZE - sizeof(__be64[2]);
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+	__be64 *bits = (__be64 *)(sctx->buf + bit_offset);
+	unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+	sctx->buf[partial++] = 0x80;
+	if (partial > bit_offset) {
+		memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial);
+		partial = 0;
+
+		block_fn(sctx, sctx->buf, 1);
+	}
+
+	memset(sctx->buf + partial, 0x0, bit_offset - partial);
+	bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
+	bits[1] = cpu_to_be64(sctx->count[0] << 3);
+	block_fn(sctx, sctx->buf, 1);
+
+	return 0;
+}
+
+static inline int sha512_base_finish(struct shash_desc *desc, u8 *out)
+{
+	unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+	struct sha512_state *sctx = shash_desc_ctx(desc);
+	__be64 *digest = (__be64 *)out;
+	int i;
+
+	for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64))
+		put_unaligned_be64(sctx->state[i], digest++);
+
+	*sctx = (struct sha512_state){};
+	return 0;
+}
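
Two details worth noting. In sha512_base_do_finalize, the trailing length is
the 128-bit big-endian bit count required by SHA-512 padding, assembled from
the two 64-bit byte counters: multiplying by 8 is a left shift by 3, so
count[1] << 3 | count[0] >> 61 yields the high word and count[0] << 3 the low
word. And once a driver built on these helpers is registered, callers hash
through the normal shash API. The following one-shot sketch is not part of
the patch; sha512_demo is a hypothetical caller written against the API as it
stood at the time of this commit.

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>

/* hypothetical caller: one-shot SHA-512 of a buffer */
static int sha512_demo(const u8 *data, unsigned int len,
		       u8 digest[SHA512_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha512", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;	/* shash_desc still had flags then */
		err = crypto_shash_digest(desc, data, len, digest);
	}

	crypto_free_shash(tfm);
	return err;
}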