Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: x86/sha - Eliminate casts on asm implementations

In order to avoid CFI function prototype mismatches, this removes the
casts on assembly implementations of sha1/256/512 accelerators. The
safety checks from BUILD_BUG_ON() remain.

Additionally, this renames various arguments for clarity, as suggested
by Eric Biggers.

Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Kees Cook and committed by Herbert Xu.
41419a28 e0437dc6

+102 -102
+3 -3
arch/x86/crypto/sha1_avx2_x86_64_asm.S
··· 62 62 *Visit http://software.intel.com/en-us/articles/ 63 63 *and refer to improving-the-performance-of-the-secure-hash-algorithm-1/ 64 64 * 65 - *Updates 20-byte SHA-1 record in 'hash' for even number of 66 - *'num_blocks' consecutive 64-byte blocks 65 + *Updates 20-byte SHA-1 record at start of 'state', from 'input', for 66 + *even number of 'blocks' consecutive 64-byte blocks. 67 67 * 68 68 *extern "C" void sha1_transform_avx2( 69 - * int *hash, const char* input, size_t num_blocks ); 69 + * struct sha1_state *state, const u8* input, int blocks ); 70 70 */ 71 71 72 72 #include <linux/linkage.h>
+9 -5
arch/x86/crypto/sha1_ssse3_asm.S
··· 457 457 movdqu \a,\b 458 458 .endm 459 459 460 - /* SSSE3 optimized implementation: 461 - * extern "C" void sha1_transform_ssse3(u32 *digest, const char *data, u32 *ws, 462 - * unsigned int rounds); 460 + /* 461 + * SSSE3 optimized implementation: 462 + * 463 + * extern "C" void sha1_transform_ssse3(struct sha1_state *state, 464 + * const u8 *data, int blocks); 465 + * 466 + * Note that struct sha1_state is assumed to begin with u32 state[5]. 463 467 */ 464 468 SHA1_VECTOR_ASM sha1_transform_ssse3 465 469 ··· 549 545 550 546 551 547 /* AVX optimized implementation: 552 - * extern "C" void sha1_transform_avx(u32 *digest, const char *data, u32 *ws, 553 - * unsigned int rounds); 548 + * extern "C" void sha1_transform_avx(struct sha1_state *state, 549 + * const u8 *data, int blocks); 554 550 */ 555 551 SHA1_VECTOR_ASM sha1_transform_avx 556 552
+30 -40
arch/x86/crypto/sha1_ssse3_glue.c
··· 27 27 #include <crypto/sha1_base.h> 28 28 #include <asm/simd.h> 29 29 30 - typedef void (sha1_transform_fn)(u32 *digest, const char *data, 31 - unsigned int rounds); 32 - 33 30 static int sha1_update(struct shash_desc *desc, const u8 *data, 34 - unsigned int len, sha1_transform_fn *sha1_xform) 31 + unsigned int len, sha1_block_fn *sha1_xform) 35 32 { 36 33 struct sha1_state *sctx = shash_desc_ctx(desc); 37 34 ··· 36 39 (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE) 37 40 return crypto_sha1_update(desc, data, len); 38 41 39 - /* make sure casting to sha1_block_fn() is safe */ 42 + /* 43 + * Make sure struct sha1_state begins directly with the SHA1 44 + * 160-bit internal state, as this is what the asm functions expect. 45 + */ 40 46 BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0); 41 47 42 48 kernel_fpu_begin(); 43 - sha1_base_do_update(desc, data, len, 44 - (sha1_block_fn *)sha1_xform); 49 + sha1_base_do_update(desc, data, len, sha1_xform); 45 50 kernel_fpu_end(); 46 51 47 52 return 0; 48 53 } 49 54 50 55 static int sha1_finup(struct shash_desc *desc, const u8 *data, 51 - unsigned int len, u8 *out, sha1_transform_fn *sha1_xform) 56 + unsigned int len, u8 *out, sha1_block_fn *sha1_xform) 52 57 { 53 58 if (!crypto_simd_usable()) 54 59 return crypto_sha1_finup(desc, data, len, out); 55 60 56 61 kernel_fpu_begin(); 57 62 if (len) 58 - sha1_base_do_update(desc, data, len, 59 - (sha1_block_fn *)sha1_xform); 60 - sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_xform); 63 + sha1_base_do_update(desc, data, len, sha1_xform); 64 + sha1_base_do_finalize(desc, sha1_xform); 61 65 kernel_fpu_end(); 62 66 63 67 return sha1_base_finish(desc, out); 64 68 } 65 69 66 - asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data, 67 - unsigned int rounds); 70 + asmlinkage void sha1_transform_ssse3(struct sha1_state *state, 71 + const u8 *data, int blocks); 68 72 69 73 static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data, 70 74 unsigned int 
len) 71 75 { 72 - return sha1_update(desc, data, len, 73 - (sha1_transform_fn *) sha1_transform_ssse3); 76 + return sha1_update(desc, data, len, sha1_transform_ssse3); 74 77 } 75 78 76 79 static int sha1_ssse3_finup(struct shash_desc *desc, const u8 *data, 77 80 unsigned int len, u8 *out) 78 81 { 79 - return sha1_finup(desc, data, len, out, 80 - (sha1_transform_fn *) sha1_transform_ssse3); 82 + return sha1_finup(desc, data, len, out, sha1_transform_ssse3); 81 83 } 82 84 83 85 /* Add padding and return the message digest. */ ··· 115 119 } 116 120 117 121 #ifdef CONFIG_AS_AVX 118 - asmlinkage void sha1_transform_avx(u32 *digest, const char *data, 119 - unsigned int rounds); 122 + asmlinkage void sha1_transform_avx(struct sha1_state *state, 123 + const u8 *data, int blocks); 120 124 121 125 static int sha1_avx_update(struct shash_desc *desc, const u8 *data, 122 126 unsigned int len) 123 127 { 124 - return sha1_update(desc, data, len, 125 - (sha1_transform_fn *) sha1_transform_avx); 128 + return sha1_update(desc, data, len, sha1_transform_avx); 126 129 } 127 130 128 131 static int sha1_avx_finup(struct shash_desc *desc, const u8 *data, 129 132 unsigned int len, u8 *out) 130 133 { 131 - return sha1_finup(desc, data, len, out, 132 - (sha1_transform_fn *) sha1_transform_avx); 134 + return sha1_finup(desc, data, len, out, sha1_transform_avx); 133 135 } 134 136 135 137 static int sha1_avx_final(struct shash_desc *desc, u8 *out) ··· 184 190 #if defined(CONFIG_AS_AVX2) && (CONFIG_AS_AVX) 185 191 #define SHA1_AVX2_BLOCK_OPTSIZE 4 /* optimal 4*64 bytes of SHA1 blocks */ 186 192 187 - asmlinkage void sha1_transform_avx2(u32 *digest, const char *data, 188 - unsigned int rounds); 193 + asmlinkage void sha1_transform_avx2(struct sha1_state *state, 194 + const u8 *data, int blocks); 189 195 190 196 static bool avx2_usable(void) 191 197 { ··· 197 203 return false; 198 204 } 199 205 200 - static void sha1_apply_transform_avx2(u32 *digest, const char *data, 201 - unsigned int rounds) 
206 + static void sha1_apply_transform_avx2(struct sha1_state *state, 207 + const u8 *data, int blocks) 202 208 { 203 209 /* Select the optimal transform based on data block size */ 204 - if (rounds >= SHA1_AVX2_BLOCK_OPTSIZE) 205 - sha1_transform_avx2(digest, data, rounds); 210 + if (blocks >= SHA1_AVX2_BLOCK_OPTSIZE) 211 + sha1_transform_avx2(state, data, blocks); 206 212 else 207 - sha1_transform_avx(digest, data, rounds); 213 + sha1_transform_avx(state, data, blocks); 208 214 } 209 215 210 216 static int sha1_avx2_update(struct shash_desc *desc, const u8 *data, 211 217 unsigned int len) 212 218 { 213 - return sha1_update(desc, data, len, 214 - (sha1_transform_fn *) sha1_apply_transform_avx2); 219 + return sha1_update(desc, data, len, sha1_apply_transform_avx2); 215 220 } 216 221 217 222 static int sha1_avx2_finup(struct shash_desc *desc, const u8 *data, 218 223 unsigned int len, u8 *out) 219 224 { 220 - return sha1_finup(desc, data, len, out, 221 - (sha1_transform_fn *) sha1_apply_transform_avx2); 225 + return sha1_finup(desc, data, len, out, sha1_apply_transform_avx2); 222 226 } 223 227 224 228 static int sha1_avx2_final(struct shash_desc *desc, u8 *out) ··· 259 267 #endif 260 268 261 269 #ifdef CONFIG_AS_SHA1_NI 262 - asmlinkage void sha1_ni_transform(u32 *digest, const char *data, 263 - unsigned int rounds); 270 + asmlinkage void sha1_ni_transform(struct sha1_state *digest, const u8 *data, 271 + int rounds); 264 272 265 273 static int sha1_ni_update(struct shash_desc *desc, const u8 *data, 266 274 unsigned int len) 267 275 { 268 - return sha1_update(desc, data, len, 269 - (sha1_transform_fn *) sha1_ni_transform); 276 + return sha1_update(desc, data, len, sha1_ni_transform); 270 277 } 271 278 272 279 static int sha1_ni_finup(struct shash_desc *desc, const u8 *data, 273 280 unsigned int len, u8 *out) 274 281 { 275 - return sha1_finup(desc, data, len, out, 276 - (sha1_transform_fn *) sha1_ni_transform); 282 + return sha1_finup(desc, data, len, out, 
sha1_ni_transform); 277 283 } 278 284 279 285 static int sha1_ni_final(struct shash_desc *desc, u8 *out)
+2 -2
arch/x86/crypto/sha256-avx-asm.S
··· 341 341 .endm 342 342 343 343 ######################################################################## 344 - ## void sha256_transform_avx(void *input_data, UINT32 digest[8], UINT64 num_blks) 345 - ## arg 1 : pointer to digest 344 + ## void sha256_transform_avx(struct sha256_state *state, const u8 *data, int blocks) 345 + ## arg 1 : pointer to state 346 346 ## arg 2 : pointer to input data 347 347 ## arg 3 : Num blocks 348 348 ########################################################################
+2 -2
arch/x86/crypto/sha256-avx2-asm.S
··· 520 520 .endm 521 521 522 522 ######################################################################## 523 - ## void sha256_transform_rorx(void *input_data, UINT32 digest[8], UINT64 num_blks) 524 - ## arg 1 : pointer to digest 523 + ## void sha256_transform_rorx(struct sha256_state *state, const u8 *data, int blocks) 524 + ## arg 1 : pointer to state 525 525 ## arg 2 : pointer to input data 526 526 ## arg 3 : Num blocks 527 527 ########################################################################
+4 -2
arch/x86/crypto/sha256-ssse3-asm.S
··· 347 347 .endm 348 348 349 349 ######################################################################## 350 - ## void sha256_transform_ssse3(void *input_data, UINT32 digest[8], UINT64 num_blks) 351 - ## arg 1 : pointer to digest 350 + ## void sha256_transform_ssse3(struct sha256_state *state, const u8 *data, 351 + ## int blocks); 352 + ## arg 1 : pointer to state 353 + ## (struct sha256_state is assumed to begin with u32 state[8]) 352 354 ## arg 2 : pointer to input data 353 355 ## arg 3 : Num blocks 354 356 ########################################################################
+17 -17
arch/x86/crypto/sha256_ssse3_glue.c
··· 41 41 #include <linux/string.h> 42 42 #include <asm/simd.h> 43 43 44 - asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data, 45 - u64 rounds); 46 - typedef void (sha256_transform_fn)(u32 *digest, const char *data, u64 rounds); 44 + asmlinkage void sha256_transform_ssse3(struct sha256_state *state, 45 + const u8 *data, int blocks); 47 46 48 47 static int _sha256_update(struct shash_desc *desc, const u8 *data, 49 - unsigned int len, sha256_transform_fn *sha256_xform) 48 + unsigned int len, sha256_block_fn *sha256_xform) 50 49 { 51 50 struct sha256_state *sctx = shash_desc_ctx(desc); 52 51 ··· 53 54 (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE) 54 55 return crypto_sha256_update(desc, data, len); 55 56 56 - /* make sure casting to sha256_block_fn() is safe */ 57 + /* 58 + * Make sure struct sha256_state begins directly with the SHA256 59 + * 256-bit internal state, as this is what the asm functions expect. 60 + */ 57 61 BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0); 58 62 59 63 kernel_fpu_begin(); 60 - sha256_base_do_update(desc, data, len, 61 - (sha256_block_fn *)sha256_xform); 64 + sha256_base_do_update(desc, data, len, sha256_xform); 62 65 kernel_fpu_end(); 63 66 64 67 return 0; 65 68 } 66 69 67 70 static int sha256_finup(struct shash_desc *desc, const u8 *data, 68 - unsigned int len, u8 *out, sha256_transform_fn *sha256_xform) 71 + unsigned int len, u8 *out, sha256_block_fn *sha256_xform) 69 72 { 70 73 if (!crypto_simd_usable()) 71 74 return crypto_sha256_finup(desc, data, len, out); 72 75 73 76 kernel_fpu_begin(); 74 77 if (len) 75 - sha256_base_do_update(desc, data, len, 76 - (sha256_block_fn *)sha256_xform); 77 - sha256_base_do_finalize(desc, (sha256_block_fn *)sha256_xform); 78 + sha256_base_do_update(desc, data, len, sha256_xform); 79 + sha256_base_do_finalize(desc, sha256_xform); 78 80 kernel_fpu_end(); 79 81 80 82 return sha256_base_finish(desc, out); ··· 145 145 } 146 146 147 147 #ifdef CONFIG_AS_AVX 148 - 
asmlinkage void sha256_transform_avx(u32 *digest, const char *data, 149 - u64 rounds); 148 + asmlinkage void sha256_transform_avx(struct sha256_state *state, 149 + const u8 *data, int blocks); 150 150 151 151 static int sha256_avx_update(struct shash_desc *desc, const u8 *data, 152 152 unsigned int len) ··· 227 227 #endif 228 228 229 229 #if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX) 230 - asmlinkage void sha256_transform_rorx(u32 *digest, const char *data, 231 - u64 rounds); 230 + asmlinkage void sha256_transform_rorx(struct sha256_state *state, 231 + const u8 *data, int blocks); 232 232 233 233 static int sha256_avx2_update(struct shash_desc *desc, const u8 *data, 234 234 unsigned int len) ··· 307 307 #endif 308 308 309 309 #ifdef CONFIG_AS_SHA256_NI 310 - asmlinkage void sha256_ni_transform(u32 *digest, const char *data, 311 - u64 rounds); /*unsigned int rounds);*/ 310 + asmlinkage void sha256_ni_transform(struct sha256_state *digest, 311 + const u8 *data, int rounds); 312 312 313 313 static int sha256_ni_update(struct shash_desc *desc, const u8 *data, 314 314 unsigned int len)
+6 -5
arch/x86/crypto/sha512-avx-asm.S
··· 271 271 .endm 272 272 273 273 ######################################################################## 274 - # void sha512_transform_avx(void* D, const void* M, u64 L) 275 - # Purpose: Updates the SHA512 digest stored at D with the message stored in M. 276 - # The size of the message pointed to by M must be an integer multiple of SHA512 277 - # message blocks. 278 - # L is the message length in SHA512 blocks 274 + # void sha512_transform_avx(sha512_state *state, const u8 *data, int blocks) 275 + # Purpose: Updates the SHA512 digest stored at "state" with the message 276 + # stored in "data". 277 + # The size of the message pointed to by "data" must be an integer multiple 278 + # of SHA512 message blocks. 279 + # "blocks" is the message length in SHA512 blocks 279 280 ######################################################################## 280 281 SYM_FUNC_START(sha512_transform_avx) 281 282 cmp $0, msglen
+6 -5
arch/x86/crypto/sha512-avx2-asm.S
··· 563 563 .endm 564 564 565 565 ######################################################################## 566 - # void sha512_transform_rorx(void* D, const void* M, uint64_t L)# 567 - # Purpose: Updates the SHA512 digest stored at D with the message stored in M. 568 - # The size of the message pointed to by M must be an integer multiple of SHA512 569 - # message blocks. 570 - # L is the message length in SHA512 blocks 566 + # void sha512_transform_rorx(sha512_state *state, const u8 *data, int blocks) 567 + # Purpose: Updates the SHA512 digest stored at "state" with the message 568 + # stored in "data". 569 + # The size of the message pointed to by "data" must be an integer multiple 570 + # of SHA512 message blocks. 571 + # "blocks" is the message length in SHA512 blocks 571 572 ######################################################################## 572 573 SYM_FUNC_START(sha512_transform_rorx) 573 574 # Allocate Stack Space
+8 -5
arch/x86/crypto/sha512-ssse3-asm.S
··· 269 269 .endm 270 270 271 271 ######################################################################## 272 - # void sha512_transform_ssse3(void* D, const void* M, u64 L)# 273 - # Purpose: Updates the SHA512 digest stored at D with the message stored in M. 274 - # The size of the message pointed to by M must be an integer multiple of SHA512 275 - # message blocks. 276 - # L is the message length in SHA512 blocks. 272 + ## void sha512_transform_ssse3(struct sha512_state *state, const u8 *data, 273 + ## int blocks); 274 + # (struct sha512_state is assumed to begin with u64 state[8]) 275 + # Purpose: Updates the SHA512 digest stored at "state" with the message 276 + # stored in "data". 277 + # The size of the message pointed to by "data" must be an integer multiple 278 + # of SHA512 message blocks. 279 + # "blocks" is the message length in SHA512 blocks. 277 280 ######################################################################## 278 281 SYM_FUNC_START(sha512_transform_ssse3) 279 282
+15 -16
arch/x86/crypto/sha512_ssse3_glue.c
··· 39 39 #include <crypto/sha512_base.h> 40 40 #include <asm/simd.h> 41 41 42 - asmlinkage void sha512_transform_ssse3(u64 *digest, const char *data, 43 - u64 rounds); 44 - 45 - typedef void (sha512_transform_fn)(u64 *digest, const char *data, u64 rounds); 42 + asmlinkage void sha512_transform_ssse3(struct sha512_state *state, 43 + const u8 *data, int blocks); 46 44 47 45 static int sha512_update(struct shash_desc *desc, const u8 *data, 48 - unsigned int len, sha512_transform_fn *sha512_xform) 46 + unsigned int len, sha512_block_fn *sha512_xform) 49 47 { 50 48 struct sha512_state *sctx = shash_desc_ctx(desc); 51 49 ··· 51 53 (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE) 52 54 return crypto_sha512_update(desc, data, len); 53 55 54 - /* make sure casting to sha512_block_fn() is safe */ 56 + /* 57 + * Make sure struct sha512_state begins directly with the SHA512 58 + * 512-bit internal state, as this is what the asm functions expect. 59 + */ 55 60 BUILD_BUG_ON(offsetof(struct sha512_state, state) != 0); 56 61 57 62 kernel_fpu_begin(); 58 - sha512_base_do_update(desc, data, len, 59 - (sha512_block_fn *)sha512_xform); 63 + sha512_base_do_update(desc, data, len, sha512_xform); 60 64 kernel_fpu_end(); 61 65 62 66 return 0; 63 67 } 64 68 65 69 static int sha512_finup(struct shash_desc *desc, const u8 *data, 66 - unsigned int len, u8 *out, sha512_transform_fn *sha512_xform) 70 + unsigned int len, u8 *out, sha512_block_fn *sha512_xform) 67 71 { 68 72 if (!crypto_simd_usable()) 69 73 return crypto_sha512_finup(desc, data, len, out); 70 74 71 75 kernel_fpu_begin(); 72 76 if (len) 73 - sha512_base_do_update(desc, data, len, 74 - (sha512_block_fn *)sha512_xform); 75 - sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_xform); 77 + sha512_base_do_update(desc, data, len, sha512_xform); 78 + sha512_base_do_finalize(desc, sha512_xform); 76 79 kernel_fpu_end(); 77 80 78 81 return sha512_base_finish(desc, out); ··· 143 144 } 144 145 145 146 #ifdef CONFIG_AS_AVX 
146 - asmlinkage void sha512_transform_avx(u64 *digest, const char *data, 147 - u64 rounds); 147 + asmlinkage void sha512_transform_avx(struct sha512_state *state, 148 + const u8 *data, int blocks); 148 149 static bool avx_usable(void) 149 150 { 150 151 if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) { ··· 224 225 #endif 225 226 226 227 #if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX) 227 - asmlinkage void sha512_transform_rorx(u64 *digest, const char *data, 228 - u64 rounds); 228 + asmlinkage void sha512_transform_rorx(struct sha512_state *state, 229 + const u8 *data, int blocks); 229 230 230 231 static int sha512_avx2_update(struct shash_desc *desc, const u8 *data, 231 232 unsigned int len)