Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: gf128mul - switch gf128mul_x_ble to le128

Currently, gf128mul_x_ble works with pointers to be128, even though it
actually interprets the words as little-endian. Consequently, it uses
cpu_to_le64/le64_to_cpu on fields of type __be64, which is incorrect.

This patch fixes that by changing the function to accept pointers to
le128 and updating all users accordingly.

Signed-off-by: Ondrej Mosnacek <omosnacek@gmail.com>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Ondrej Mosnáček; committed by Herbert Xu.
e55318c8 acb9b159

+30 -30
+2 -2
arch/x86/crypto/camellia_glue.c
··· 1522 1522 struct scatterlist *src, unsigned int nbytes) 1523 1523 { 1524 1524 struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 1525 - be128 buf[2 * 4]; 1525 + le128 buf[2 * 4]; 1526 1526 struct xts_crypt_req req = { 1527 1527 .tbuf = buf, 1528 1528 .tbuflen = sizeof(buf), ··· 1540 1540 struct scatterlist *src, unsigned int nbytes) 1541 1541 { 1542 1542 struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 1543 - be128 buf[2 * 4]; 1543 + le128 buf[2 * 4]; 1544 1544 struct xts_crypt_req req = { 1545 1545 .tbuf = buf, 1546 1546 .tbuflen = sizeof(buf),
+2 -2
arch/x86/crypto/serpent_sse2_glue.c
··· 328 328 struct scatterlist *src, unsigned int nbytes) 329 329 { 330 330 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 331 - be128 buf[SERPENT_PARALLEL_BLOCKS]; 331 + le128 buf[SERPENT_PARALLEL_BLOCKS]; 332 332 struct crypt_priv crypt_ctx = { 333 333 .ctx = &ctx->crypt_ctx, 334 334 .fpu_enabled = false, ··· 355 355 struct scatterlist *src, unsigned int nbytes) 356 356 { 357 357 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 358 - be128 buf[SERPENT_PARALLEL_BLOCKS]; 358 + le128 buf[SERPENT_PARALLEL_BLOCKS]; 359 359 struct crypt_priv crypt_ctx = { 360 360 .ctx = &ctx->crypt_ctx, 361 361 .fpu_enabled = false,
+2 -2
arch/x86/crypto/twofish_glue_3way.c
··· 296 296 struct scatterlist *src, unsigned int nbytes) 297 297 { 298 298 struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 299 - be128 buf[3]; 299 + le128 buf[3]; 300 300 struct xts_crypt_req req = { 301 301 .tbuf = buf, 302 302 .tbuflen = sizeof(buf), ··· 314 314 struct scatterlist *src, unsigned int nbytes) 315 315 { 316 316 struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); 317 - be128 buf[3]; 317 + le128 buf[3]; 318 318 struct xts_crypt_req req = { 319 319 .tbuf = buf, 320 320 .tbuflen = sizeof(buf),
+19 -19
crypto/xts.c
··· 39 39 }; 40 40 41 41 struct rctx { 42 - be128 buf[XTS_BUFFER_SIZE / sizeof(be128)]; 42 + le128 buf[XTS_BUFFER_SIZE / sizeof(le128)]; 43 43 44 - be128 t; 44 + le128 t; 45 45 46 - be128 *ext; 46 + le128 *ext; 47 47 48 48 struct scatterlist srcbuf[2]; 49 49 struct scatterlist dstbuf[2]; ··· 99 99 static int post_crypt(struct skcipher_request *req) 100 100 { 101 101 struct rctx *rctx = skcipher_request_ctx(req); 102 - be128 *buf = rctx->ext ?: rctx->buf; 102 + le128 *buf = rctx->ext ?: rctx->buf; 103 103 struct skcipher_request *subreq; 104 104 const int bs = XTS_BLOCK_SIZE; 105 105 struct skcipher_walk w; ··· 112 112 113 113 while (w.nbytes) { 114 114 unsigned int avail = w.nbytes; 115 - be128 *wdst; 115 + le128 *wdst; 116 116 117 117 wdst = w.dst.virt.addr; 118 118 119 119 do { 120 - be128_xor(wdst, buf++, wdst); 120 + le128_xor(wdst, buf++, wdst); 121 121 wdst++; 122 122 } while ((avail -= bs) >= bs); 123 123 ··· 150 150 static int pre_crypt(struct skcipher_request *req) 151 151 { 152 152 struct rctx *rctx = skcipher_request_ctx(req); 153 - be128 *buf = rctx->ext ?: rctx->buf; 153 + le128 *buf = rctx->ext ?: rctx->buf; 154 154 struct skcipher_request *subreq; 155 155 const int bs = XTS_BLOCK_SIZE; 156 156 struct skcipher_walk w; ··· 174 174 175 175 while (w.nbytes) { 176 176 unsigned int avail = w.nbytes; 177 - be128 *wsrc; 178 - be128 *wdst; 177 + le128 *wsrc; 178 + le128 *wdst; 179 179 180 180 wsrc = w.src.virt.addr; 181 181 wdst = w.dst.virt.addr; 182 182 183 183 do { 184 184 *buf++ = rctx->t; 185 - be128_xor(wdst++, &rctx->t, wsrc++); 185 + le128_xor(wdst++, &rctx->t, wsrc++); 186 186 gf128mul_x_ble(&rctx->t, &rctx->t); 187 187 } while ((avail -= bs) >= bs); 188 188 ··· 353 353 const unsigned int max_blks = req->tbuflen / bsize; 354 354 struct blkcipher_walk walk; 355 355 unsigned int nblocks; 356 - be128 *src, *dst, *t; 357 - be128 *t_buf = req->tbuf; 356 + le128 *src, *dst, *t; 357 + le128 *t_buf = req->tbuf; 358 358 int err, i; 359 359 360 360 
BUG_ON(max_blks < 1); ··· 367 367 return err; 368 368 369 369 nblocks = min(nbytes / bsize, max_blks); 370 - src = (be128 *)walk.src.virt.addr; 371 - dst = (be128 *)walk.dst.virt.addr; 370 + src = (le128 *)walk.src.virt.addr; 371 + dst = (le128 *)walk.dst.virt.addr; 372 372 373 373 /* calculate first value of T */ 374 374 req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv); ··· 384 384 t = &t_buf[i]; 385 385 386 386 /* PP <- T xor P */ 387 - be128_xor(dst + i, t, src + i); 387 + le128_xor(dst + i, t, src + i); 388 388 } 389 389 390 390 /* CC <- E(Key2,PP) */ ··· 393 393 394 394 /* C <- T xor CC */ 395 395 for (i = 0; i < nblocks; i++) 396 - be128_xor(dst + i, dst + i, &t_buf[i]); 396 + le128_xor(dst + i, dst + i, &t_buf[i]); 397 397 398 398 src += nblocks; 399 399 dst += nblocks; ··· 401 401 nblocks = min(nbytes / bsize, max_blks); 402 402 } while (nblocks > 0); 403 403 404 - *(be128 *)walk.iv = *t; 404 + *(le128 *)walk.iv = *t; 405 405 406 406 err = blkcipher_walk_done(desc, &walk, nbytes); 407 407 nbytes = walk.nbytes; ··· 409 409 break; 410 410 411 411 nblocks = min(nbytes / bsize, max_blks); 412 - src = (be128 *)walk.src.virt.addr; 413 - dst = (be128 *)walk.dst.virt.addr; 412 + src = (le128 *)walk.src.virt.addr; 413 + dst = (le128 *)walk.dst.virt.addr; 414 414 } 415 415 416 416 return err;
+4 -4
include/crypto/gf128mul.h
··· 205 205 } 206 206 207 207 /* needed by XTS */ 208 - static inline void gf128mul_x_ble(be128 *r, const be128 *x) 208 + static inline void gf128mul_x_ble(le128 *r, const le128 *x) 209 209 { 210 210 u64 a = le64_to_cpu(x->a); 211 211 u64 b = le64_to_cpu(x->b); 212 212 213 213 /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */ 214 - u64 _tt = gf128mul_mask_from_bit(b, 63) & 0x87; 214 + u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87; 215 215 216 - r->a = cpu_to_le64((a << 1) ^ _tt); 217 - r->b = cpu_to_le64((b << 1) | (a >> 63)); 216 + r->a = cpu_to_le64((a << 1) | (b >> 63)); 217 + r->b = cpu_to_le64((b << 1) ^ _tt); 218 218 } 219 219 220 220 /* 4k table optimization */
+1 -1
include/crypto/xts.h
··· 11 11 #define XTS_BLOCK_SIZE 16 12 12 13 13 struct xts_crypt_req { 14 - be128 *tbuf; 14 + le128 *tbuf; 15 15 unsigned int tbuflen; 16 16 17 17 void *tweak_ctx;