Linux kernel mirror (for testing): https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

crypto: cast5 - use unaligned accessors instead of alignmask

Instead of using an alignmask of 0x3 to ensure 32-bit alignment of the
CAST5 input and output blocks, which propagates to mode drivers, and
results in pointless copying on architectures that don't care about
alignment, use the unaligned accessors, which will do the right thing on
each respective architecture, avoiding the need for double buffering.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by: Ard Biesheuvel
Committed by: Herbert Xu
Commit: 24a2ee44 (parent: 83385415)

Diffstat: +9 -14
crypto/cast5_generic.c
··· 13 13 */ 14 14 15 15 16 - #include <asm/byteorder.h> 16 + #include <asm/unaligned.h> 17 17 #include <linux/init.h> 18 18 #include <linux/crypto.h> 19 19 #include <linux/module.h> ··· 302 302 303 303 void __cast5_encrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) 304 304 { 305 - const __be32 *src = (const __be32 *)inbuf; 306 - __be32 *dst = (__be32 *)outbuf; 307 305 u32 l, r, t; 308 306 u32 I; /* used by the Fx macros */ 309 307 u32 *Km; ··· 313 315 /* (L0,R0) <-- (m1...m64). (Split the plaintext into left and 314 316 * right 32-bit halves L0 = m1...m32 and R0 = m33...m64.) 315 317 */ 316 - l = be32_to_cpu(src[0]); 317 - r = be32_to_cpu(src[1]); 318 + l = get_unaligned_be32(inbuf); 319 + r = get_unaligned_be32(inbuf + 4); 318 320 319 321 /* (16 rounds) for i from 1 to 16, compute Li and Ri as follows: 320 322 * Li = Ri-1; ··· 345 347 346 348 /* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and 347 349 * concatenate to form the ciphertext.) */ 348 - dst[0] = cpu_to_be32(r); 349 - dst[1] = cpu_to_be32(l); 350 + put_unaligned_be32(r, outbuf); 351 + put_unaligned_be32(l, outbuf + 4); 350 352 } 351 353 EXPORT_SYMBOL_GPL(__cast5_encrypt); 352 354 ··· 357 359 358 360 void __cast5_decrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) 359 361 { 360 - const __be32 *src = (const __be32 *)inbuf; 361 - __be32 *dst = (__be32 *)outbuf; 362 362 u32 l, r, t; 363 363 u32 I; 364 364 u32 *Km; ··· 365 369 Km = c->Km; 366 370 Kr = c->Kr; 367 371 368 - l = be32_to_cpu(src[0]); 369 - r = be32_to_cpu(src[1]); 372 + l = get_unaligned_be32(inbuf); 373 + r = get_unaligned_be32(inbuf + 4); 370 374 371 375 if (!(c->rr)) { 372 376 t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); ··· 387 391 t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); 388 392 t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); 389 393 390 - dst[0] = cpu_to_be32(r); 391 - dst[1] = cpu_to_be32(l); 394 + put_unaligned_be32(r, outbuf); 395 + put_unaligned_be32(l, outbuf + 4); 392 396 } 393 397 
EXPORT_SYMBOL_GPL(__cast5_decrypt); 394 398 ··· 509 513 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 510 514 .cra_blocksize = CAST5_BLOCK_SIZE, 511 515 .cra_ctxsize = sizeof(struct cast5_ctx), 512 - .cra_alignmask = 3, 513 516 .cra_module = THIS_MODULE, 514 517 .cra_u = { 515 518 .cipher = {