Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: lrw - add interface for parallelized cipher implementations

Export gf128mul table initialization routines and add lrw_crypt() function
that can be used by cipher implementations that can benefit from parallelized
cipher operations.

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by Jussi Kivilinna, committed by Herbert Xu
6c2205b8 171c0204

+129 -20
+86 -20
crypto/lrw.c
··· 3 3 * 4 4 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org> 5 5 * 6 - * Based om ecb.c 6 + * Based on ecb.c 7 7 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> 8 8 * 9 9 * This program is free software; you can redistribute it and/or modify it ··· 16 16 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html 17 17 * 18 18 * The test vectors are included in the testing module tcrypt.[ch] */ 19 + 19 20 #include <crypto/algapi.h> 20 21 #include <linux/err.h> 21 22 #include <linux/init.h> ··· 27 26 28 27 #include <crypto/b128ops.h> 29 28 #include <crypto/gf128mul.h> 30 - 31 - #define LRW_BLOCK_SIZE 16 32 - 33 - struct lrw_table_ctx { 34 - /* optimizes multiplying a random (non incrementing, as at the 35 - * start of a new sector) value with key2, we could also have 36 - * used 4k optimization tables or no optimization at all. In the 37 - * latter case we would have to store key2 here */ 38 - struct gf128mul_64k *table; 39 - /* stores: 40 - * key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 }, 41 - * key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 } 42 - * key2*{ 0,0,...1,1,1,1,1 }, etc 43 - * needed for optimized multiplication of incrementing values 44 - * with key2 */ 45 - be128 mulinc[128]; 46 - }; 29 + #include <crypto/lrw.h> 47 30 48 31 struct priv { 49 32 struct crypto_cipher *child; ··· 45 60 ), b); 46 61 } 47 62 48 - static int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak) 63 + int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak) 49 64 { 50 65 be128 tmp = { 0 }; 51 66 int i; ··· 67 82 68 83 return 0; 69 84 } 85 + EXPORT_SYMBOL_GPL(lrw_init_table); 70 86 71 - static void lrw_free_table(struct lrw_table_ctx *ctx) 87 + void lrw_free_table(struct lrw_table_ctx *ctx) 72 88 { 73 89 if (ctx->table) 74 90 gf128mul_free_64k(ctx->table); 75 91 } 92 + EXPORT_SYMBOL_GPL(lrw_free_table); 76 93 77 94 static int setkey(struct crypto_tfm *parent, const u8 *key, 78 95 unsigned int keylen) ··· 213 226 return 
crypt(desc, &w, ctx, 214 227 crypto_cipher_alg(ctx->child)->cia_decrypt); 215 228 } 229 + 230 + int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst, 231 + struct scatterlist *ssrc, unsigned int nbytes, 232 + struct lrw_crypt_req *req) 233 + { 234 + const unsigned int bsize = LRW_BLOCK_SIZE; 235 + const unsigned int max_blks = req->tbuflen / bsize; 236 + struct lrw_table_ctx *ctx = req->table_ctx; 237 + struct blkcipher_walk walk; 238 + unsigned int nblocks; 239 + be128 *iv, *src, *dst, *t; 240 + be128 *t_buf = req->tbuf; 241 + int err, i; 242 + 243 + BUG_ON(max_blks < 1); 244 + 245 + blkcipher_walk_init(&walk, sdst, ssrc, nbytes); 246 + 247 + err = blkcipher_walk_virt(desc, &walk); 248 + nbytes = walk.nbytes; 249 + if (!nbytes) 250 + return err; 251 + 252 + nblocks = min(walk.nbytes / bsize, max_blks); 253 + src = (be128 *)walk.src.virt.addr; 254 + dst = (be128 *)walk.dst.virt.addr; 255 + 256 + /* calculate first value of T */ 257 + iv = (be128 *)walk.iv; 258 + t_buf[0] = *iv; 259 + 260 + /* T <- I*Key2 */ 261 + gf128mul_64k_bbe(&t_buf[0], ctx->table); 262 + 263 + i = 0; 264 + goto first; 265 + 266 + for (;;) { 267 + do { 268 + for (i = 0; i < nblocks; i++) { 269 + /* T <- I*Key2, using the optimization 270 + * discussed in the specification */ 271 + be128_xor(&t_buf[i], t, 272 + &ctx->mulinc[get_index128(iv)]); 273 + inc(iv); 274 + first: 275 + t = &t_buf[i]; 276 + 277 + /* PP <- T xor P */ 278 + be128_xor(dst + i, t, src + i); 279 + } 280 + 281 + /* CC <- E(Key2,PP) */ 282 + req->crypt_fn(req->crypt_ctx, (u8 *)dst, 283 + nblocks * bsize); 284 + 285 + /* C <- T xor CC */ 286 + for (i = 0; i < nblocks; i++) 287 + be128_xor(dst + i, dst + i, &t_buf[i]); 288 + 289 + src += nblocks; 290 + dst += nblocks; 291 + nbytes -= nblocks * bsize; 292 + nblocks = min(nbytes / bsize, max_blks); 293 + } while (nblocks > 0); 294 + 295 + err = blkcipher_walk_done(desc, &walk, nbytes); 296 + nbytes = walk.nbytes; 297 + if (!nbytes) 298 + break; 299 + 300 + nblocks = 
min(nbytes / bsize, max_blks); 301 + src = (be128 *)walk.src.virt.addr; 302 + dst = (be128 *)walk.dst.virt.addr; 303 + } 304 + 305 + return err; 306 + } 307 + EXPORT_SYMBOL_GPL(lrw_crypt); 216 308 217 309 static int init_tfm(struct crypto_tfm *tfm) 218 310 {
+43
include/crypto/lrw.h
··· 1 + #ifndef _CRYPTO_LRW_H 2 + #define _CRYPTO_LRW_H 3 + 4 + #include <crypto/b128ops.h> 5 + 6 + struct scatterlist; 7 + struct gf128mul_64k; 8 + struct blkcipher_desc; 9 + 10 + #define LRW_BLOCK_SIZE 16 11 + 12 + struct lrw_table_ctx { 13 + /* optimizes multiplying a random (non incrementing, as at the 14 + * start of a new sector) value with key2, we could also have 15 + * used 4k optimization tables or no optimization at all. In the 16 + * latter case we would have to store key2 here */ 17 + struct gf128mul_64k *table; 18 + /* stores: 19 + * key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 }, 20 + * key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 } 21 + * key2*{ 0,0,...1,1,1,1,1 }, etc 22 + * needed for optimized multiplication of incrementing values 23 + * with key2 */ 24 + be128 mulinc[128]; 25 + }; 26 + 27 + int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak); 28 + void lrw_free_table(struct lrw_table_ctx *ctx); 29 + 30 + struct lrw_crypt_req { 31 + be128 *tbuf; 32 + unsigned int tbuflen; 33 + 34 + struct lrw_table_ctx *table_ctx; 35 + void *crypt_ctx; 36 + void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes); 37 + }; 38 + 39 + int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, 40 + struct scatterlist *src, unsigned int nbytes, 41 + struct lrw_crypt_req *req); 42 + 43 + #endif /* _CRYPTO_LRW_H */