Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: lrw - Convert to skcipher

This patch converts lrw over to the skcipher interface. It also
optimises the implementation to be based on ECB instead of the
underlying cipher. For compatibility the existing naming scheme
of lrw(aes) is maintained as opposed to the more obvious one of
lrw(ecb(aes)).

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

+390 -137
crypto/lrw.c
··· 17 17 * 18 18 * The test vectors are included in the testing module tcrypt.[ch] */ 19 19 20 - #include <crypto/algapi.h> 20 + #include <crypto/internal/skcipher.h> 21 + #include <crypto/scatterwalk.h> 21 22 #include <linux/err.h> 22 23 #include <linux/init.h> 23 24 #include <linux/kernel.h> ··· 30 29 #include <crypto/gf128mul.h> 31 30 #include <crypto/lrw.h> 32 31 32 + #define LRW_BUFFER_SIZE 128u 33 + 33 34 struct priv { 34 - struct crypto_cipher *child; 35 + struct crypto_skcipher *child; 35 36 struct lrw_table_ctx table; 37 + }; 38 + 39 + struct rctx { 40 + be128 buf[LRW_BUFFER_SIZE / sizeof(be128)]; 41 + 42 + be128 t; 43 + 44 + be128 *ext; 45 + 46 + struct scatterlist srcbuf[2]; 47 + struct scatterlist dstbuf[2]; 48 + struct scatterlist *src; 49 + struct scatterlist *dst; 50 + 51 + unsigned int left; 52 + 53 + struct skcipher_request subreq; 36 54 }; 37 55 38 56 static inline void setbit128_bbe(void *b, int bit) ··· 96 76 } 97 77 EXPORT_SYMBOL_GPL(lrw_free_table); 98 78 99 - static int setkey(struct crypto_tfm *parent, const u8 *key, 79 + static int setkey(struct crypto_skcipher *parent, const u8 *key, 100 80 unsigned int keylen) 101 81 { 102 - struct priv *ctx = crypto_tfm_ctx(parent); 103 - struct crypto_cipher *child = ctx->child; 82 + struct priv *ctx = crypto_skcipher_ctx(parent); 83 + struct crypto_skcipher *child = ctx->child; 104 84 int err, bsize = LRW_BLOCK_SIZE; 105 85 const u8 *tweak = key + keylen - bsize; 106 86 107 - crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); 108 - crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & 109 - CRYPTO_TFM_REQ_MASK); 110 - err = crypto_cipher_setkey(child, key, keylen - bsize); 87 + crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); 88 + crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & 89 + CRYPTO_TFM_REQ_MASK); 90 + err = crypto_skcipher_setkey(child, key, keylen - bsize); 91 + crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) & 92 + 
CRYPTO_TFM_RES_MASK); 111 93 if (err) 112 94 return err; 113 - crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & 114 - CRYPTO_TFM_RES_MASK); 115 95 116 96 return lrw_init_table(&ctx->table, tweak); 117 97 } 118 - 119 - struct sinfo { 120 - be128 t; 121 - struct crypto_tfm *tfm; 122 - void (*fn)(struct crypto_tfm *, u8 *, const u8 *); 123 - }; 124 98 125 99 static inline void inc(be128 *iv) 126 100 { 127 101 be64_add_cpu(&iv->b, 1); 128 102 if (!iv->b) 129 103 be64_add_cpu(&iv->a, 1); 130 - } 131 - 132 - static inline void lrw_round(struct sinfo *s, void *dst, const void *src) 133 - { 134 - be128_xor(dst, &s->t, src); /* PP <- T xor P */ 135 - s->fn(s->tfm, dst, dst); /* CC <- E(Key2,PP) */ 136 - be128_xor(dst, dst, &s->t); /* C <- T xor CC */ 137 104 } 138 105 139 106 /* this returns the number of consequative 1 bits starting ··· 142 135 return x; 143 136 } 144 137 145 - static int crypt(struct blkcipher_desc *d, 146 - struct blkcipher_walk *w, struct priv *ctx, 147 - void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) 138 + static int post_crypt(struct skcipher_request *req) 148 139 { 149 - int err; 150 - unsigned int avail; 140 + struct rctx *rctx = skcipher_request_ctx(req); 141 + be128 *buf = rctx->ext ?: rctx->buf; 142 + struct skcipher_request *subreq; 151 143 const int bs = LRW_BLOCK_SIZE; 152 - struct sinfo s = { 153 - .tfm = crypto_cipher_tfm(ctx->child), 154 - .fn = fn 155 - }; 156 - be128 *iv; 157 - u8 *wsrc; 158 - u8 *wdst; 144 + struct skcipher_walk w; 145 + struct scatterlist *sg; 146 + unsigned offset; 147 + int err; 159 148 160 - err = blkcipher_walk_virt(d, w); 161 - if (!(avail = w->nbytes)) 162 - return err; 149 + subreq = &rctx->subreq; 150 + err = skcipher_walk_virt(&w, subreq, false); 163 151 164 - wsrc = w->src.virt.addr; 165 - wdst = w->dst.virt.addr; 152 + while (w.nbytes) { 153 + unsigned int avail = w.nbytes; 154 + be128 *wdst; 166 155 167 - /* calculate first value of T */ 168 - iv = (be128 *)w->iv; 169 - s.t = *iv; 156 + wdst 
= w.dst.virt.addr; 170 157 171 - /* T <- I*Key2 */ 172 - gf128mul_64k_bbe(&s.t, ctx->table.table); 173 - 174 - goto first; 175 - 176 - for (;;) { 177 158 do { 178 - /* T <- I*Key2, using the optimization 179 - * discussed in the specification */ 180 - be128_xor(&s.t, &s.t, 181 - &ctx->table.mulinc[get_index128(iv)]); 182 - inc(iv); 183 - 184 - first: 185 - lrw_round(&s, wdst, wsrc); 186 - 187 - wsrc += bs; 188 - wdst += bs; 159 + be128_xor(wdst, buf++, wdst); 160 + wdst++; 189 161 } while ((avail -= bs) >= bs); 190 162 191 - err = blkcipher_walk_done(d, w, avail); 192 - if (!(avail = w->nbytes)) 193 - break; 194 - 195 - wsrc = w->src.virt.addr; 196 - wdst = w->dst.virt.addr; 163 + err = skcipher_walk_done(&w, avail); 197 164 } 198 165 166 + rctx->left -= subreq->cryptlen; 167 + 168 + if (err || !rctx->left) 169 + goto out; 170 + 171 + rctx->dst = rctx->dstbuf; 172 + 173 + scatterwalk_done(&w.out, 0, 1); 174 + sg = w.out.sg; 175 + offset = w.out.offset; 176 + 177 + if (rctx->dst != sg) { 178 + rctx->dst[0] = *sg; 179 + sg_unmark_end(rctx->dst); 180 + scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2); 181 + } 182 + rctx->dst[0].length -= offset - sg->offset; 183 + rctx->dst[0].offset = offset; 184 + 185 + out: 199 186 return err; 200 187 } 201 188 202 - static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 203 - struct scatterlist *src, unsigned int nbytes) 189 + static int pre_crypt(struct skcipher_request *req) 204 190 { 205 - struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); 206 - struct blkcipher_walk w; 191 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 192 + struct rctx *rctx = skcipher_request_ctx(req); 193 + struct priv *ctx = crypto_skcipher_ctx(tfm); 194 + be128 *buf = rctx->ext ?: rctx->buf; 195 + struct skcipher_request *subreq; 196 + const int bs = LRW_BLOCK_SIZE; 197 + struct skcipher_walk w; 198 + struct scatterlist *sg; 199 + unsigned cryptlen; 200 + unsigned offset; 201 + be128 *iv; 202 + bool more; 203 + int 
err; 207 204 208 - blkcipher_walk_init(&w, dst, src, nbytes); 209 - return crypt(desc, &w, ctx, 210 - crypto_cipher_alg(ctx->child)->cia_encrypt); 205 + subreq = &rctx->subreq; 206 + skcipher_request_set_tfm(subreq, tfm); 207 + 208 + cryptlen = subreq->cryptlen; 209 + more = rctx->left > cryptlen; 210 + if (!more) 211 + cryptlen = rctx->left; 212 + 213 + skcipher_request_set_crypt(subreq, rctx->src, rctx->dst, 214 + cryptlen, req->iv); 215 + 216 + err = skcipher_walk_virt(&w, subreq, false); 217 + iv = w.iv; 218 + 219 + while (w.nbytes) { 220 + unsigned int avail = w.nbytes; 221 + be128 *wsrc; 222 + be128 *wdst; 223 + 224 + wsrc = w.src.virt.addr; 225 + wdst = w.dst.virt.addr; 226 + 227 + do { 228 + *buf++ = rctx->t; 229 + be128_xor(wdst++, &rctx->t, wsrc++); 230 + 231 + /* T <- I*Key2, using the optimization 232 + * discussed in the specification */ 233 + be128_xor(&rctx->t, &rctx->t, 234 + &ctx->table.mulinc[get_index128(iv)]); 235 + inc(iv); 236 + } while ((avail -= bs) >= bs); 237 + 238 + err = skcipher_walk_done(&w, avail); 239 + } 240 + 241 + skcipher_request_set_tfm(subreq, ctx->child); 242 + skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst, 243 + cryptlen, NULL); 244 + 245 + if (err || !more) 246 + goto out; 247 + 248 + rctx->src = rctx->srcbuf; 249 + 250 + scatterwalk_done(&w.in, 0, 1); 251 + sg = w.in.sg; 252 + offset = w.in.offset; 253 + 254 + if (rctx->src != sg) { 255 + rctx->src[0] = *sg; 256 + sg_unmark_end(rctx->src); 257 + scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2); 258 + } 259 + rctx->src[0].length -= offset - sg->offset; 260 + rctx->src[0].offset = offset; 261 + 262 + out: 263 + return err; 211 264 } 212 265 213 - static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 214 - struct scatterlist *src, unsigned int nbytes) 266 + static int init_crypt(struct skcipher_request *req, crypto_completion_t done) 215 267 { 216 - struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); 217 - struct blkcipher_walk w; 268 + 
struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); 269 + struct rctx *rctx = skcipher_request_ctx(req); 270 + struct skcipher_request *subreq; 271 + gfp_t gfp; 218 272 219 - blkcipher_walk_init(&w, dst, src, nbytes); 220 - return crypt(desc, &w, ctx, 221 - crypto_cipher_alg(ctx->child)->cia_decrypt); 273 + subreq = &rctx->subreq; 274 + skcipher_request_set_callback(subreq, req->base.flags, done, req); 275 + 276 + gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 277 + GFP_ATOMIC; 278 + rctx->ext = NULL; 279 + 280 + subreq->cryptlen = LRW_BUFFER_SIZE; 281 + if (req->cryptlen > LRW_BUFFER_SIZE) { 282 + subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE); 283 + rctx->ext = kmalloc(subreq->cryptlen, gfp); 284 + } 285 + 286 + rctx->src = req->src; 287 + rctx->dst = req->dst; 288 + rctx->left = req->cryptlen; 289 + 290 + /* calculate first value of T */ 291 + memcpy(&rctx->t, req->iv, sizeof(rctx->t)); 292 + 293 + /* T <- I*Key2 */ 294 + gf128mul_64k_bbe(&rctx->t, ctx->table.table); 295 + 296 + return 0; 297 + } 298 + 299 + static void exit_crypt(struct skcipher_request *req) 300 + { 301 + struct rctx *rctx = skcipher_request_ctx(req); 302 + 303 + rctx->left = 0; 304 + 305 + if (rctx->ext) 306 + kfree(rctx->ext); 307 + } 308 + 309 + static int do_encrypt(struct skcipher_request *req, int err) 310 + { 311 + struct rctx *rctx = skcipher_request_ctx(req); 312 + struct skcipher_request *subreq; 313 + 314 + subreq = &rctx->subreq; 315 + 316 + while (!err && rctx->left) { 317 + err = pre_crypt(req) ?: 318 + crypto_skcipher_encrypt(subreq) ?: 319 + post_crypt(req); 320 + 321 + if (err == -EINPROGRESS || 322 + (err == -EBUSY && 323 + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 324 + return err; 325 + } 326 + 327 + exit_crypt(req); 328 + return err; 329 + } 330 + 331 + static void encrypt_done(struct crypto_async_request *areq, int err) 332 + { 333 + struct skcipher_request *req = areq->data; 334 + struct skcipher_request *subreq; 335 + 
struct rctx *rctx; 336 + 337 + rctx = skcipher_request_ctx(req); 338 + subreq = &rctx->subreq; 339 + subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; 340 + 341 + err = do_encrypt(req, err ?: post_crypt(req)); 342 + if (rctx->left) 343 + return; 344 + 345 + skcipher_request_complete(req, err); 346 + } 347 + 348 + static int encrypt(struct skcipher_request *req) 349 + { 350 + return do_encrypt(req, init_crypt(req, encrypt_done)); 351 + } 352 + 353 + static int do_decrypt(struct skcipher_request *req, int err) 354 + { 355 + struct rctx *rctx = skcipher_request_ctx(req); 356 + struct skcipher_request *subreq; 357 + 358 + subreq = &rctx->subreq; 359 + 360 + while (!err && rctx->left) { 361 + err = pre_crypt(req) ?: 362 + crypto_skcipher_decrypt(subreq) ?: 363 + post_crypt(req); 364 + 365 + if (err == -EINPROGRESS || 366 + (err == -EBUSY && 367 + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 368 + return err; 369 + } 370 + 371 + exit_crypt(req); 372 + return err; 373 + } 374 + 375 + static void decrypt_done(struct crypto_async_request *areq, int err) 376 + { 377 + struct skcipher_request *req = areq->data; 378 + struct skcipher_request *subreq; 379 + struct rctx *rctx; 380 + 381 + rctx = skcipher_request_ctx(req); 382 + subreq = &rctx->subreq; 383 + subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; 384 + 385 + err = do_decrypt(req, err ?: post_crypt(req)); 386 + if (rctx->left) 387 + return; 388 + 389 + skcipher_request_complete(req, err); 390 + } 391 + 392 + static int decrypt(struct skcipher_request *req) 393 + { 394 + return do_decrypt(req, init_crypt(req, decrypt_done)); 222 395 } 223 396 224 397 int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst, ··· 480 293 } 481 294 EXPORT_SYMBOL_GPL(lrw_crypt); 482 295 483 - static int init_tfm(struct crypto_tfm *tfm) 296 + static int init_tfm(struct crypto_skcipher *tfm) 484 297 { 485 - struct crypto_cipher *cipher; 486 - struct crypto_instance *inst = (void *)tfm->__crt_alg; 487 - struct crypto_spawn *spawn = 
crypto_instance_ctx(inst); 488 - struct priv *ctx = crypto_tfm_ctx(tfm); 489 - u32 *flags = &tfm->crt_flags; 298 + struct skcipher_instance *inst = skcipher_alg_instance(tfm); 299 + struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); 300 + struct priv *ctx = crypto_skcipher_ctx(tfm); 301 + struct crypto_skcipher *cipher; 490 302 491 - cipher = crypto_spawn_cipher(spawn); 303 + cipher = crypto_spawn_skcipher(spawn); 492 304 if (IS_ERR(cipher)) 493 305 return PTR_ERR(cipher); 494 306 495 - if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) { 496 - *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; 497 - crypto_free_cipher(cipher); 498 - return -EINVAL; 499 - } 500 - 501 307 ctx->child = cipher; 308 + 309 + crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) + 310 + sizeof(struct rctx)); 311 + 502 312 return 0; 503 313 } 504 314 505 - static void exit_tfm(struct crypto_tfm *tfm) 315 + static void exit_tfm(struct crypto_skcipher *tfm) 506 316 { 507 - struct priv *ctx = crypto_tfm_ctx(tfm); 317 + struct priv *ctx = crypto_skcipher_ctx(tfm); 508 318 509 319 lrw_free_table(&ctx->table); 510 - crypto_free_cipher(ctx->child); 320 + crypto_free_skcipher(ctx->child); 511 321 } 512 322 513 - static struct crypto_instance *alloc(struct rtattr **tb) 323 + static void free(struct skcipher_instance *inst) 514 324 { 515 - struct crypto_instance *inst; 516 - struct crypto_alg *alg; 325 + crypto_drop_skcipher(skcipher_instance_ctx(inst)); 326 + kfree(inst); 327 + } 328 + 329 + static int create(struct crypto_template *tmpl, struct rtattr **tb) 330 + { 331 + struct crypto_skcipher_spawn *spawn; 332 + struct skcipher_instance *inst; 333 + struct crypto_attr_type *algt; 334 + struct skcipher_alg *alg; 335 + const char *cipher_name; 336 + char ecb_name[CRYPTO_MAX_ALG_NAME]; 517 337 int err; 518 338 519 - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); 339 + algt = crypto_get_attr_type(tb); 340 + if (IS_ERR(algt)) 341 + return PTR_ERR(algt); 342 + 343 + 
if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) 344 + return -EINVAL; 345 + 346 + cipher_name = crypto_attr_alg_name(tb[1]); 347 + if (IS_ERR(cipher_name)) 348 + return PTR_ERR(cipher_name); 349 + 350 + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); 351 + if (!inst) 352 + return -ENOMEM; 353 + 354 + spawn = skcipher_instance_ctx(inst); 355 + 356 + crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst)); 357 + err = crypto_grab_skcipher(spawn, cipher_name, 0, 358 + crypto_requires_sync(algt->type, 359 + algt->mask)); 360 + if (err == -ENOENT) { 361 + err = -ENAMETOOLONG; 362 + if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", 363 + cipher_name) >= CRYPTO_MAX_ALG_NAME) 364 + goto err_free_inst; 365 + 366 + err = crypto_grab_skcipher(spawn, ecb_name, 0, 367 + crypto_requires_sync(algt->type, 368 + algt->mask)); 369 + } 370 + 520 371 if (err) 521 - return ERR_PTR(err); 372 + goto err_free_inst; 522 373 523 - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, 524 - CRYPTO_ALG_TYPE_MASK); 525 - if (IS_ERR(alg)) 526 - return ERR_CAST(alg); 374 + alg = crypto_skcipher_spawn_alg(spawn); 527 375 528 - inst = crypto_alloc_instance("lrw", alg); 529 - if (IS_ERR(inst)) 530 - goto out_put_alg; 376 + err = -EINVAL; 377 + if (alg->base.cra_blocksize != LRW_BLOCK_SIZE) 378 + goto err_drop_spawn; 531 379 532 - inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; 533 - inst->alg.cra_priority = alg->cra_priority; 534 - inst->alg.cra_blocksize = alg->cra_blocksize; 380 + if (crypto_skcipher_alg_ivsize(alg)) 381 + goto err_drop_spawn; 535 382 536 - if (alg->cra_alignmask < 7) inst->alg.cra_alignmask = 7; 537 - else inst->alg.cra_alignmask = alg->cra_alignmask; 538 - inst->alg.cra_type = &crypto_blkcipher_type; 383 + err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw", 384 + &alg->base); 385 + if (err) 386 + goto err_drop_spawn; 539 387 540 - if (!(alg->cra_blocksize % 4)) 541 - inst->alg.cra_alignmask |= 3; 542 - 
inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; 543 - inst->alg.cra_blkcipher.min_keysize = 544 - alg->cra_cipher.cia_min_keysize + alg->cra_blocksize; 545 - inst->alg.cra_blkcipher.max_keysize = 546 - alg->cra_cipher.cia_max_keysize + alg->cra_blocksize; 388 + err = -EINVAL; 389 + cipher_name = alg->base.cra_name; 547 390 548 - inst->alg.cra_ctxsize = sizeof(struct priv); 391 + /* Alas we screwed up the naming so we have to mangle the 392 + * cipher name. 393 + */ 394 + if (!strncmp(cipher_name, "ecb(", 4)) { 395 + unsigned len; 549 396 550 - inst->alg.cra_init = init_tfm; 551 - inst->alg.cra_exit = exit_tfm; 397 + len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); 398 + if (len < 2 || len >= sizeof(ecb_name)) 399 + goto err_drop_spawn; 552 400 553 - inst->alg.cra_blkcipher.setkey = setkey; 554 - inst->alg.cra_blkcipher.encrypt = encrypt; 555 - inst->alg.cra_blkcipher.decrypt = decrypt; 401 + if (ecb_name[len - 1] != ')') 402 + goto err_drop_spawn; 556 403 557 - out_put_alg: 558 - crypto_mod_put(alg); 559 - return inst; 560 - } 404 + ecb_name[len - 1] = 0; 561 405 562 - static void free(struct crypto_instance *inst) 563 - { 564 - crypto_drop_spawn(crypto_instance_ctx(inst)); 406 + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, 407 + "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) 408 + return -ENAMETOOLONG; 409 + } 410 + 411 + inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; 412 + inst->alg.base.cra_priority = alg->base.cra_priority; 413 + inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; 414 + inst->alg.base.cra_alignmask = alg->base.cra_alignmask | 415 + (__alignof__(u64) - 1); 416 + 417 + inst->alg.ivsize = LRW_BLOCK_SIZE; 418 + inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) + 419 + LRW_BLOCK_SIZE; 420 + inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) + 421 + LRW_BLOCK_SIZE; 422 + 423 + inst->alg.base.cra_ctxsize = sizeof(struct priv); 424 + 425 + inst->alg.init = init_tfm; 426 + inst->alg.exit = 
exit_tfm; 427 + 428 + inst->alg.setkey = setkey; 429 + inst->alg.encrypt = encrypt; 430 + inst->alg.decrypt = decrypt; 431 + 432 + inst->free = free; 433 + 434 + err = skcipher_register_instance(tmpl, inst); 435 + if (err) 436 + goto err_drop_spawn; 437 + 438 + out: 439 + return err; 440 + 441 + err_drop_spawn: 442 + crypto_drop_skcipher(spawn); 443 + err_free_inst: 565 444 kfree(inst); 445 + goto out; 566 446 } 567 447 568 448 static struct crypto_template crypto_tmpl = { 569 449 .name = "lrw", 570 - .alloc = alloc, 571 - .free = free, 450 + .create = create, 572 451 .module = THIS_MODULE, 573 452 }; 574 453