Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: ablkcipher - remove deprecated and unused ablkcipher support

Now that all users of the deprecated ablkcipher interface have been
moved to the skcipher interface, ablkcipher is no longer used and
can be removed.

Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Ard Biesheuvel and committed by Herbert Xu
d63007eb 809abaef

+4 -1150
+1 -19
Documentation/crypto/api-skcipher.rst
··· 5 5 :doc: Block Cipher Algorithm Definitions 6 6 7 7 .. kernel-doc:: include/linux/crypto.h 8 - :functions: crypto_alg ablkcipher_alg cipher_alg compress_alg 8 + :functions: crypto_alg cipher_alg compress_alg 9 9 10 10 Symmetric Key Cipher API 11 11 ------------------------ ··· 33 33 34 34 .. kernel-doc:: include/linux/crypto.h 35 35 :functions: crypto_alloc_cipher crypto_free_cipher crypto_has_cipher crypto_cipher_blocksize crypto_cipher_setkey crypto_cipher_encrypt_one crypto_cipher_decrypt_one 36 - 37 - Asynchronous Block Cipher API - Deprecated 38 - ------------------------------------------ 39 - 40 - .. kernel-doc:: include/linux/crypto.h 41 - :doc: Asynchronous Block Cipher API 42 - 43 - .. kernel-doc:: include/linux/crypto.h 44 - :functions: crypto_free_ablkcipher crypto_ablkcipher_ivsize crypto_ablkcipher_blocksize crypto_ablkcipher_setkey crypto_ablkcipher_reqtfm crypto_ablkcipher_encrypt crypto_ablkcipher_decrypt 45 - 46 - Asynchronous Cipher Request Handle - Deprecated 47 - ----------------------------------------------- 48 - 49 - .. kernel-doc:: include/linux/crypto.h 50 - :doc: Asynchronous Cipher Request Handle 51 - 52 - .. kernel-doc:: include/linux/crypto.h 53 - :functions: crypto_ablkcipher_reqsize ablkcipher_request_set_tfm ablkcipher_request_alloc ablkcipher_request_free ablkcipher_request_set_callback ablkcipher_request_set_crypt
-2
Documentation/crypto/architecture.rst
··· 201 201 - CRYPTO_ALG_TYPE_AEAD Authenticated Encryption with Associated Data 202 202 (MAC) 203 203 204 - - CRYPTO_ALG_TYPE_ABLKCIPHER Asynchronous multi-block cipher 205 - 206 204 - CRYPTO_ALG_TYPE_KPP Key-agreement Protocol Primitive (KPP) such as 207 205 an ECDH or DH implementation 208 206
-4
Documentation/crypto/crypto_engine.rst
··· 63 63 When your driver receives a crypto_request, you must to transfer it to 64 64 the crypto engine via one of: 65 65 66 - * crypto_transfer_ablkcipher_request_to_engine() 67 - 68 66 * crypto_transfer_aead_request_to_engine() 69 67 70 68 * crypto_transfer_akcipher_request_to_engine() ··· 72 74 * crypto_transfer_skcipher_request_to_engine() 73 75 74 76 At the end of the request process, a call to one of the following functions is needed: 75 - 76 - * crypto_finalize_ablkcipher_request() 77 77 78 78 * crypto_finalize_aead_request() 79 79
+1 -3
crypto/Makefile
··· 16 16 obj-$(CONFIG_CRYPTO_AEAD2) += aead.o 17 17 obj-$(CONFIG_CRYPTO_AEAD2) += geniv.o 18 18 19 - crypto_skcipher-y := ablkcipher.o 20 - crypto_skcipher-y += skcipher.o 21 - obj-$(CONFIG_CRYPTO_SKCIPHER2) += crypto_skcipher.o 19 + obj-$(CONFIG_CRYPTO_SKCIPHER2) += skcipher.o 22 20 obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o 23 21 obj-$(CONFIG_CRYPTO_ECHAINIV) += echainiv.o 24 22
-407
crypto/ablkcipher.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 2 - /* 3 - * Asynchronous block chaining cipher operations. 4 - * 5 - * This is the asynchronous version of blkcipher.c indicating completion 6 - * via a callback. 7 - * 8 - * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> 9 - */ 10 - 11 - #include <crypto/internal/skcipher.h> 12 - #include <linux/err.h> 13 - #include <linux/kernel.h> 14 - #include <linux/slab.h> 15 - #include <linux/seq_file.h> 16 - #include <linux/cryptouser.h> 17 - #include <linux/compiler.h> 18 - #include <net/netlink.h> 19 - 20 - #include <crypto/scatterwalk.h> 21 - 22 - #include "internal.h" 23 - 24 - struct ablkcipher_buffer { 25 - struct list_head entry; 26 - struct scatter_walk dst; 27 - unsigned int len; 28 - void *data; 29 - }; 30 - 31 - enum { 32 - ABLKCIPHER_WALK_SLOW = 1 << 0, 33 - }; 34 - 35 - static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p) 36 - { 37 - scatterwalk_copychunks(p->data, &p->dst, p->len, 1); 38 - } 39 - 40 - void __ablkcipher_walk_complete(struct ablkcipher_walk *walk) 41 - { 42 - struct ablkcipher_buffer *p, *tmp; 43 - 44 - list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { 45 - ablkcipher_buffer_write(p); 46 - list_del(&p->entry); 47 - kfree(p); 48 - } 49 - } 50 - EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete); 51 - 52 - static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk, 53 - struct ablkcipher_buffer *p) 54 - { 55 - p->dst = walk->out; 56 - list_add_tail(&p->entry, &walk->buffers); 57 - } 58 - 59 - /* Get a spot of the specified length that does not straddle a page. 60 - * The caller needs to ensure that there is enough space for this operation. 
61 - */ 62 - static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len) 63 - { 64 - u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); 65 - 66 - return max(start, end_page); 67 - } 68 - 69 - static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk, 70 - unsigned int n) 71 - { 72 - for (;;) { 73 - unsigned int len_this_page = scatterwalk_pagelen(&walk->out); 74 - 75 - if (len_this_page > n) 76 - len_this_page = n; 77 - scatterwalk_advance(&walk->out, n); 78 - if (n == len_this_page) 79 - break; 80 - n -= len_this_page; 81 - scatterwalk_start(&walk->out, sg_next(walk->out.sg)); 82 - } 83 - } 84 - 85 - static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk, 86 - unsigned int n) 87 - { 88 - scatterwalk_advance(&walk->in, n); 89 - scatterwalk_advance(&walk->out, n); 90 - } 91 - 92 - static int ablkcipher_walk_next(struct ablkcipher_request *req, 93 - struct ablkcipher_walk *walk); 94 - 95 - int ablkcipher_walk_done(struct ablkcipher_request *req, 96 - struct ablkcipher_walk *walk, int err) 97 - { 98 - struct crypto_tfm *tfm = req->base.tfm; 99 - unsigned int n; /* bytes processed */ 100 - bool more; 101 - 102 - if (unlikely(err < 0)) 103 - goto finish; 104 - 105 - n = walk->nbytes - err; 106 - walk->total -= n; 107 - more = (walk->total != 0); 108 - 109 - if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) { 110 - ablkcipher_done_fast(walk, n); 111 - } else { 112 - if (WARN_ON(err)) { 113 - /* unexpected case; didn't process all bytes */ 114 - err = -EINVAL; 115 - goto finish; 116 - } 117 - ablkcipher_done_slow(walk, n); 118 - } 119 - 120 - scatterwalk_done(&walk->in, 0, more); 121 - scatterwalk_done(&walk->out, 1, more); 122 - 123 - if (more) { 124 - crypto_yield(req->base.flags); 125 - return ablkcipher_walk_next(req, walk); 126 - } 127 - err = 0; 128 - finish: 129 - walk->nbytes = 0; 130 - if (walk->iv != req->info) 131 - memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize); 132 - kfree(walk->iv_buffer); 133 - 
return err; 134 - } 135 - EXPORT_SYMBOL_GPL(ablkcipher_walk_done); 136 - 137 - static inline int ablkcipher_next_slow(struct ablkcipher_request *req, 138 - struct ablkcipher_walk *walk, 139 - unsigned int bsize, 140 - unsigned int alignmask, 141 - void **src_p, void **dst_p) 142 - { 143 - unsigned aligned_bsize = ALIGN(bsize, alignmask + 1); 144 - struct ablkcipher_buffer *p; 145 - void *src, *dst, *base; 146 - unsigned int n; 147 - 148 - n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1); 149 - n += (aligned_bsize * 3 - (alignmask + 1) + 150 - (alignmask & ~(crypto_tfm_ctx_alignment() - 1))); 151 - 152 - p = kmalloc(n, GFP_ATOMIC); 153 - if (!p) 154 - return ablkcipher_walk_done(req, walk, -ENOMEM); 155 - 156 - base = p + 1; 157 - 158 - dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1); 159 - src = dst = ablkcipher_get_spot(dst, bsize); 160 - 161 - p->len = bsize; 162 - p->data = dst; 163 - 164 - scatterwalk_copychunks(src, &walk->in, bsize, 0); 165 - 166 - ablkcipher_queue_write(walk, p); 167 - 168 - walk->nbytes = bsize; 169 - walk->flags |= ABLKCIPHER_WALK_SLOW; 170 - 171 - *src_p = src; 172 - *dst_p = dst; 173 - 174 - return 0; 175 - } 176 - 177 - static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk, 178 - struct crypto_tfm *tfm, 179 - unsigned int alignmask) 180 - { 181 - unsigned bs = walk->blocksize; 182 - unsigned int ivsize = tfm->crt_ablkcipher.ivsize; 183 - unsigned aligned_bs = ALIGN(bs, alignmask + 1); 184 - unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) - 185 - (alignmask + 1); 186 - u8 *iv; 187 - 188 - size += alignmask & ~(crypto_tfm_ctx_alignment() - 1); 189 - walk->iv_buffer = kmalloc(size, GFP_ATOMIC); 190 - if (!walk->iv_buffer) 191 - return -ENOMEM; 192 - 193 - iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1); 194 - iv = ablkcipher_get_spot(iv, bs) + aligned_bs; 195 - iv = ablkcipher_get_spot(iv, bs) + aligned_bs; 196 - iv = ablkcipher_get_spot(iv, ivsize); 197 - 198 - walk->iv = 
memcpy(iv, walk->iv, ivsize); 199 - return 0; 200 - } 201 - 202 - static inline int ablkcipher_next_fast(struct ablkcipher_request *req, 203 - struct ablkcipher_walk *walk) 204 - { 205 - walk->src.page = scatterwalk_page(&walk->in); 206 - walk->src.offset = offset_in_page(walk->in.offset); 207 - walk->dst.page = scatterwalk_page(&walk->out); 208 - walk->dst.offset = offset_in_page(walk->out.offset); 209 - 210 - return 0; 211 - } 212 - 213 - static int ablkcipher_walk_next(struct ablkcipher_request *req, 214 - struct ablkcipher_walk *walk) 215 - { 216 - struct crypto_tfm *tfm = req->base.tfm; 217 - unsigned int alignmask, bsize, n; 218 - void *src, *dst; 219 - int err; 220 - 221 - alignmask = crypto_tfm_alg_alignmask(tfm); 222 - n = walk->total; 223 - if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) { 224 - req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; 225 - return ablkcipher_walk_done(req, walk, -EINVAL); 226 - } 227 - 228 - walk->flags &= ~ABLKCIPHER_WALK_SLOW; 229 - src = dst = NULL; 230 - 231 - bsize = min(walk->blocksize, n); 232 - n = scatterwalk_clamp(&walk->in, n); 233 - n = scatterwalk_clamp(&walk->out, n); 234 - 235 - if (n < bsize || 236 - !scatterwalk_aligned(&walk->in, alignmask) || 237 - !scatterwalk_aligned(&walk->out, alignmask)) { 238 - err = ablkcipher_next_slow(req, walk, bsize, alignmask, 239 - &src, &dst); 240 - goto set_phys_lowmem; 241 - } 242 - 243 - walk->nbytes = n; 244 - 245 - return ablkcipher_next_fast(req, walk); 246 - 247 - set_phys_lowmem: 248 - if (err >= 0) { 249 - walk->src.page = virt_to_page(src); 250 - walk->dst.page = virt_to_page(dst); 251 - walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1)); 252 - walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1)); 253 - } 254 - 255 - return err; 256 - } 257 - 258 - static int ablkcipher_walk_first(struct ablkcipher_request *req, 259 - struct ablkcipher_walk *walk) 260 - { 261 - struct crypto_tfm *tfm = req->base.tfm; 262 - unsigned int alignmask; 263 - 264 - alignmask = 
crypto_tfm_alg_alignmask(tfm); 265 - if (WARN_ON_ONCE(in_irq())) 266 - return -EDEADLK; 267 - 268 - walk->iv = req->info; 269 - walk->nbytes = walk->total; 270 - if (unlikely(!walk->total)) 271 - return 0; 272 - 273 - walk->iv_buffer = NULL; 274 - if (unlikely(((unsigned long)walk->iv & alignmask))) { 275 - int err = ablkcipher_copy_iv(walk, tfm, alignmask); 276 - 277 - if (err) 278 - return err; 279 - } 280 - 281 - scatterwalk_start(&walk->in, walk->in.sg); 282 - scatterwalk_start(&walk->out, walk->out.sg); 283 - 284 - return ablkcipher_walk_next(req, walk); 285 - } 286 - 287 - int ablkcipher_walk_phys(struct ablkcipher_request *req, 288 - struct ablkcipher_walk *walk) 289 - { 290 - walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm); 291 - return ablkcipher_walk_first(req, walk); 292 - } 293 - EXPORT_SYMBOL_GPL(ablkcipher_walk_phys); 294 - 295 - static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, 296 - unsigned int keylen) 297 - { 298 - struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm); 299 - unsigned long alignmask = crypto_ablkcipher_alignmask(tfm); 300 - int ret; 301 - u8 *buffer, *alignbuffer; 302 - unsigned long absize; 303 - 304 - absize = keylen + alignmask; 305 - buffer = kmalloc(absize, GFP_ATOMIC); 306 - if (!buffer) 307 - return -ENOMEM; 308 - 309 - alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); 310 - memcpy(alignbuffer, key, keylen); 311 - ret = cipher->setkey(tfm, alignbuffer, keylen); 312 - memset(alignbuffer, 0, keylen); 313 - kfree(buffer); 314 - return ret; 315 - } 316 - 317 - static int setkey(struct crypto_ablkcipher *tfm, const u8 *key, 318 - unsigned int keylen) 319 - { 320 - struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm); 321 - unsigned long alignmask = crypto_ablkcipher_alignmask(tfm); 322 - 323 - if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) { 324 - crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 325 - return -EINVAL; 326 - } 327 - 328 - if 
((unsigned long)key & alignmask) 329 - return setkey_unaligned(tfm, key, keylen); 330 - 331 - return cipher->setkey(tfm, key, keylen); 332 - } 333 - 334 - static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type, 335 - u32 mask) 336 - { 337 - return alg->cra_ctxsize; 338 - } 339 - 340 - static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type, 341 - u32 mask) 342 - { 343 - struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher; 344 - struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher; 345 - 346 - if (alg->ivsize > PAGE_SIZE / 8) 347 - return -EINVAL; 348 - 349 - crt->setkey = setkey; 350 - crt->encrypt = alg->encrypt; 351 - crt->decrypt = alg->decrypt; 352 - crt->base = __crypto_ablkcipher_cast(tfm); 353 - crt->ivsize = alg->ivsize; 354 - 355 - return 0; 356 - } 357 - 358 - #ifdef CONFIG_NET 359 - static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) 360 - { 361 - struct crypto_report_blkcipher rblkcipher; 362 - 363 - memset(&rblkcipher, 0, sizeof(rblkcipher)); 364 - 365 - strscpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type)); 366 - strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv)); 367 - 368 - rblkcipher.blocksize = alg->cra_blocksize; 369 - rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize; 370 - rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize; 371 - rblkcipher.ivsize = alg->cra_ablkcipher.ivsize; 372 - 373 - return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, 374 - sizeof(rblkcipher), &rblkcipher); 375 - } 376 - #else 377 - static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg) 378 - { 379 - return -ENOSYS; 380 - } 381 - #endif 382 - 383 - static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) 384 - __maybe_unused; 385 - static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) 386 - { 387 - struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher; 388 - 389 - seq_printf(m, "type : 
ablkcipher\n"); 390 - seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? 391 - "yes" : "no"); 392 - seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); 393 - seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize); 394 - seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize); 395 - seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize); 396 - seq_printf(m, "geniv : <default>\n"); 397 - } 398 - 399 - const struct crypto_type crypto_ablkcipher_type = { 400 - .ctxsize = crypto_ablkcipher_ctxsize, 401 - .init = crypto_init_ablkcipher_ops, 402 - #ifdef CONFIG_PROC_FS 403 - .show = crypto_ablkcipher_show, 404 - #endif 405 - .report = crypto_ablkcipher_report, 406 - }; 407 - EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
-26
crypto/algapi.c
··· 1052 1052 } 1053 1053 EXPORT_SYMBOL_GPL(crypto_stats_get); 1054 1054 1055 - void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, 1056 - struct crypto_alg *alg) 1057 - { 1058 - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { 1059 - atomic64_inc(&alg->stats.cipher.err_cnt); 1060 - } else { 1061 - atomic64_inc(&alg->stats.cipher.encrypt_cnt); 1062 - atomic64_add(nbytes, &alg->stats.cipher.encrypt_tlen); 1063 - } 1064 - crypto_alg_put(alg); 1065 - } 1066 - EXPORT_SYMBOL_GPL(crypto_stats_ablkcipher_encrypt); 1067 - 1068 - void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, 1069 - struct crypto_alg *alg) 1070 - { 1071 - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { 1072 - atomic64_inc(&alg->stats.cipher.err_cnt); 1073 - } else { 1074 - atomic64_inc(&alg->stats.cipher.decrypt_cnt); 1075 - atomic64_add(nbytes, &alg->stats.cipher.decrypt_tlen); 1076 - } 1077 - crypto_alg_put(alg); 1078 - } 1079 - EXPORT_SYMBOL_GPL(crypto_stats_ablkcipher_decrypt); 1080 - 1081 1055 void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, 1082 1056 int ret) 1083 1057 {
-29
crypto/crypto_engine.c
··· 214 214 } 215 215 216 216 /** 217 - * crypto_transfer_ablkcipher_request_to_engine - transfer one ablkcipher_request 218 - * to list into the engine queue 219 - * @engine: the hardware engine 220 - * @req: the request need to be listed into the engine queue 221 - * TODO: Remove this function when skcipher conversion is finished 222 - */ 223 - int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine, 224 - struct ablkcipher_request *req) 225 - { 226 - return crypto_transfer_request_to_engine(engine, &req->base); 227 - } 228 - EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine); 229 - 230 - /** 231 217 * crypto_transfer_aead_request_to_engine - transfer one aead_request 232 218 * to list into the engine queue 233 219 * @engine: the hardware engine ··· 264 278 return crypto_transfer_request_to_engine(engine, &req->base); 265 279 } 266 280 EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine); 267 - 268 - /** 269 - * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if 270 - * the request is done 271 - * @engine: the hardware engine 272 - * @req: the request need to be finalized 273 - * @err: error number 274 - * TODO: Remove this function when skcipher conversion is finished 275 - */ 276 - void crypto_finalize_ablkcipher_request(struct crypto_engine *engine, 277 - struct ablkcipher_request *req, int err) 278 - { 279 - return crypto_finalize_request(engine, &req->base, err); 280 - } 281 - EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request); 282 281 283 282 /** 284 283 * crypto_finalize_aead_request - finalize one aead_request if
-106
crypto/skcipher.c
··· 580 580 581 581 static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) 582 582 { 583 - if (alg->cra_type == &crypto_ablkcipher_type) 584 - return sizeof(struct crypto_ablkcipher *); 585 - 586 583 return crypto_alg_extsize(alg); 587 584 } 588 585 ··· 587 590 { 588 591 if (tfm->keysize) 589 592 crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY); 590 - } 591 - 592 - static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm, 593 - const u8 *key, unsigned int keylen) 594 - { 595 - struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm); 596 - struct crypto_ablkcipher *ablkcipher = *ctx; 597 - int err; 598 - 599 - crypto_ablkcipher_clear_flags(ablkcipher, ~0); 600 - crypto_ablkcipher_set_flags(ablkcipher, 601 - crypto_skcipher_get_flags(tfm) & 602 - CRYPTO_TFM_REQ_MASK); 603 - err = crypto_ablkcipher_setkey(ablkcipher, key, keylen); 604 - crypto_skcipher_set_flags(tfm, 605 - crypto_ablkcipher_get_flags(ablkcipher) & 606 - CRYPTO_TFM_RES_MASK); 607 - if (unlikely(err)) { 608 - skcipher_set_needkey(tfm); 609 - return err; 610 - } 611 - 612 - crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); 613 - return 0; 614 - } 615 - 616 - static int skcipher_crypt_ablkcipher(struct skcipher_request *req, 617 - int (*crypt)(struct ablkcipher_request *)) 618 - { 619 - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 620 - struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm); 621 - struct ablkcipher_request *subreq = skcipher_request_ctx(req); 622 - 623 - ablkcipher_request_set_tfm(subreq, *ctx); 624 - ablkcipher_request_set_callback(subreq, skcipher_request_flags(req), 625 - req->base.complete, req->base.data); 626 - ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, 627 - req->iv); 628 - 629 - return crypt(subreq); 630 - } 631 - 632 - static int skcipher_encrypt_ablkcipher(struct skcipher_request *req) 633 - { 634 - struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 635 - struct crypto_tfm *tfm = 
crypto_skcipher_tfm(skcipher); 636 - struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher; 637 - 638 - return skcipher_crypt_ablkcipher(req, alg->encrypt); 639 - } 640 - 641 - static int skcipher_decrypt_ablkcipher(struct skcipher_request *req) 642 - { 643 - struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 644 - struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); 645 - struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher; 646 - 647 - return skcipher_crypt_ablkcipher(req, alg->decrypt); 648 - } 649 - 650 - static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) 651 - { 652 - struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm); 653 - 654 - crypto_free_ablkcipher(*ctx); 655 - } 656 - 657 - static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) 658 - { 659 - struct crypto_alg *calg = tfm->__crt_alg; 660 - struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); 661 - struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm); 662 - struct crypto_ablkcipher *ablkcipher; 663 - struct crypto_tfm *abtfm; 664 - 665 - if (!crypto_mod_get(calg)) 666 - return -EAGAIN; 667 - 668 - abtfm = __crypto_alloc_tfm(calg, 0, 0); 669 - if (IS_ERR(abtfm)) { 670 - crypto_mod_put(calg); 671 - return PTR_ERR(abtfm); 672 - } 673 - 674 - ablkcipher = __crypto_ablkcipher_cast(abtfm); 675 - *ctx = ablkcipher; 676 - tfm->exit = crypto_exit_skcipher_ops_ablkcipher; 677 - 678 - skcipher->setkey = skcipher_setkey_ablkcipher; 679 - skcipher->encrypt = skcipher_encrypt_ablkcipher; 680 - skcipher->decrypt = skcipher_decrypt_ablkcipher; 681 - 682 - skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher); 683 - skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) + 684 - sizeof(struct ablkcipher_request); 685 - skcipher->keysize = calg->cra_ablkcipher.max_keysize; 686 - 687 - skcipher_set_needkey(skcipher); 688 - 689 - return 0; 690 593 } 691 594 692 595 static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm, ··· 682 785 
{ 683 786 struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm); 684 787 struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); 685 - 686 - if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type) 687 - return crypto_init_skcipher_ops_ablkcipher(tfm); 688 788 689 789 skcipher->setkey = skcipher_setkey; 690 790 skcipher->encrypt = alg->encrypt;
+1 -1
include/crypto/aead.h
··· 321 321 322 322 /** 323 323 * crypto_aead_decrypt() - decrypt ciphertext 324 - * @req: reference to the ablkcipher_request handle that holds all information 324 + * @req: reference to the aead_request handle that holds all information 325 325 * needed to perform the cipher operation 326 326 * 327 327 * Decrypt ciphertext data using the aead_request handle. That data structure
-75
include/crypto/algapi.h
··· 85 85 unsigned int offset; 86 86 }; 87 87 88 - struct ablkcipher_walk { 89 - struct { 90 - struct page *page; 91 - unsigned int offset; 92 - } src, dst; 93 - 94 - struct scatter_walk in; 95 - unsigned int nbytes; 96 - struct scatter_walk out; 97 - unsigned int total; 98 - struct list_head buffers; 99 - u8 *iv_buffer; 100 - u8 *iv; 101 - int flags; 102 - unsigned int blocksize; 103 - }; 104 - 105 - extern const struct crypto_type crypto_ablkcipher_type; 106 - 107 88 void crypto_mod_put(struct crypto_alg *alg); 108 89 109 90 int crypto_register_template(struct crypto_template *tmpl); ··· 183 202 } 184 203 } 185 204 186 - int ablkcipher_walk_done(struct ablkcipher_request *req, 187 - struct ablkcipher_walk *walk, int err); 188 - int ablkcipher_walk_phys(struct ablkcipher_request *req, 189 - struct ablkcipher_walk *walk); 190 - void __ablkcipher_walk_complete(struct ablkcipher_walk *walk); 191 - 192 205 static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) 193 206 { 194 207 return PTR_ALIGN(crypto_tfm_ctx(tfm), ··· 200 225 return inst->__ctx; 201 226 } 202 227 203 - static inline struct ablkcipher_alg *crypto_ablkcipher_alg( 204 - struct crypto_ablkcipher *tfm) 205 - { 206 - return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher; 207 - } 208 - 209 - static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm) 210 - { 211 - return crypto_tfm_ctx(&tfm->base); 212 - } 213 - 214 - static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm) 215 - { 216 - return crypto_tfm_ctx_aligned(&tfm->base); 217 - } 218 - 219 228 static inline struct crypto_cipher *crypto_spawn_cipher( 220 229 struct crypto_spawn *spawn) 221 230 { ··· 214 255 return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher; 215 256 } 216 257 217 - static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk, 218 - struct scatterlist *dst, 219 - struct scatterlist *src, 220 - unsigned int nbytes) 221 - { 222 - walk->in.sg = src; 223 - walk->out.sg = 
dst; 224 - walk->total = nbytes; 225 - INIT_LIST_HEAD(&walk->buffers); 226 - } 227 - 228 - static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk) 229 - { 230 - if (unlikely(!list_empty(&walk->buffers))) 231 - __ablkcipher_walk_complete(walk); 232 - } 233 - 234 258 static inline struct crypto_async_request *crypto_get_backlog( 235 259 struct crypto_queue *queue) 236 260 { 237 261 return queue->backlog == &queue->list ? NULL : 238 262 container_of(queue->backlog, struct crypto_async_request, list); 239 - } 240 - 241 - static inline int ablkcipher_enqueue_request(struct crypto_queue *queue, 242 - struct ablkcipher_request *request) 243 - { 244 - return crypto_enqueue_request(queue, &request->base); 245 - } 246 - 247 - static inline struct ablkcipher_request *ablkcipher_dequeue_request( 248 - struct crypto_queue *queue) 249 - { 250 - return ablkcipher_request_cast(crypto_dequeue_request(queue)); 251 - } 252 - 253 - static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req) 254 - { 255 - return req->__ctx; 256 263 } 257 264 258 265 static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
-4
include/crypto/engine.h
··· 83 83 struct crypto_engine_op op; 84 84 }; 85 85 86 - int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine, 87 - struct ablkcipher_request *req); 88 86 int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine, 89 87 struct aead_request *req); 90 88 int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine, ··· 91 93 struct ahash_request *req); 92 94 int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine, 93 95 struct skcipher_request *req); 94 - void crypto_finalize_ablkcipher_request(struct crypto_engine *engine, 95 - struct ablkcipher_request *req, int err); 96 96 void crypto_finalize_aead_request(struct crypto_engine *engine, 97 97 struct aead_request *req, int err); 98 98 void crypto_finalize_akcipher_request(struct crypto_engine *engine,
+1 -1
include/crypto/hash.h
··· 227 227 * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto) 228 228 * 229 229 * The asynchronous cipher operation discussion provided for the 230 - * CRYPTO_ALG_TYPE_ABLKCIPHER API applies here as well. 230 + * CRYPTO_ALG_TYPE_SKCIPHER API applies here as well. 231 231 */ 232 232 233 233 static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
-12
include/crypto/internal/des.h
··· 117 117 return crypto_des3_ede_verify_key(crypto_skcipher_tfm(tfm), key); 118 118 } 119 119 120 - static inline int verify_ablkcipher_des_key(struct crypto_ablkcipher *tfm, 121 - const u8 *key) 122 - { 123 - return crypto_des_verify_key(crypto_ablkcipher_tfm(tfm), key); 124 - } 125 - 126 - static inline int verify_ablkcipher_des3_key(struct crypto_ablkcipher *tfm, 127 - const u8 *key) 128 - { 129 - return crypto_des3_ede_verify_key(crypto_ablkcipher_tfm(tfm), key); 130 - } 131 - 132 120 static inline int verify_aead_des_key(struct crypto_aead *tfm, const u8 *key, 133 121 int keylen) 134 122 {
-20
include/crypto/internal/skcipher.h
··· 153 153 skcipher_walk_done(walk, -ECANCELED); 154 154 } 155 155 156 - static inline void ablkcipher_request_complete(struct ablkcipher_request *req, 157 - int err) 158 - { 159 - req->base.complete(&req->base, err); 160 - } 161 - 162 - static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req) 163 - { 164 - return req->base.flags; 165 - } 166 - 167 156 static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm) 168 157 { 169 158 return crypto_tfm_ctx(&tfm->base); ··· 171 182 static inline unsigned int crypto_skcipher_alg_min_keysize( 172 183 struct skcipher_alg *alg) 173 184 { 174 - if (alg->base.cra_ablkcipher.encrypt) 175 - return alg->base.cra_ablkcipher.min_keysize; 176 - 177 185 return alg->min_keysize; 178 186 } 179 187 180 188 static inline unsigned int crypto_skcipher_alg_max_keysize( 181 189 struct skcipher_alg *alg) 182 190 { 183 - if (alg->base.cra_ablkcipher.encrypt) 184 - return alg->base.cra_ablkcipher.max_keysize; 185 - 186 191 return alg->max_keysize; 187 192 } 188 193 189 194 static inline unsigned int crypto_skcipher_alg_walksize( 190 195 struct skcipher_alg *alg) 191 196 { 192 - if (alg->base.cra_ablkcipher.encrypt) 193 - return alg->base.cra_blocksize; 194 - 195 197 return alg->walksize; 196 198 } 197 199
-6
include/crypto/skcipher.h
··· 241 241 242 242 static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg) 243 243 { 244 - if (alg->base.cra_ablkcipher.encrypt) 245 - return alg->base.cra_ablkcipher.ivsize; 246 - 247 244 return alg->ivsize; 248 245 } 249 246 ··· 283 286 static inline unsigned int crypto_skcipher_alg_chunksize( 284 287 struct skcipher_alg *alg) 285 288 { 286 - if (alg->base.cra_ablkcipher.encrypt) 287 - return alg->base.cra_blocksize; 288 - 289 289 return alg->chunksize; 290 290 } 291 291
-435
include/linux/crypto.h
··· 41 41 #define CRYPTO_ALG_TYPE_CIPHER 0x00000001 42 42 #define CRYPTO_ALG_TYPE_COMPRESS 0x00000002 43 43 #define CRYPTO_ALG_TYPE_AEAD 0x00000003 44 - #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 45 44 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 46 45 #define CRYPTO_ALG_TYPE_KPP 0x00000008 47 46 #define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a ··· 136 137 #define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN))) 137 138 138 139 struct scatterlist; 139 - struct crypto_ablkcipher; 140 140 struct crypto_async_request; 141 141 struct crypto_tfm; 142 142 struct crypto_type; ··· 158 160 u32 flags; 159 161 }; 160 162 161 - struct ablkcipher_request { 162 - struct crypto_async_request base; 163 - 164 - unsigned int nbytes; 165 - 166 - void *info; 167 - 168 - struct scatterlist *src; 169 - struct scatterlist *dst; 170 - 171 - void *__ctx[] CRYPTO_MINALIGN_ATTR; 172 - }; 173 - 174 163 /** 175 164 * DOC: Block Cipher Algorithm Definitions 176 165 * 177 166 * These data structures define modular crypto algorithm implementations, 178 167 * managed via crypto_register_alg() and crypto_unregister_alg(). 179 168 */ 180 - 181 - /** 182 - * struct ablkcipher_alg - asynchronous block cipher definition 183 - * @min_keysize: Minimum key size supported by the transformation. This is the 184 - * smallest key length supported by this transformation algorithm. 185 - * This must be set to one of the pre-defined values as this is 186 - * not hardware specific. Possible values for this field can be 187 - * found via git grep "_MIN_KEY_SIZE" include/crypto/ 188 - * @max_keysize: Maximum key size supported by the transformation. This is the 189 - * largest key length supported by this transformation algorithm. 190 - * This must be set to one of the pre-defined values as this is 191 - * not hardware specific. Possible values for this field can be 192 - * found via git grep "_MAX_KEY_SIZE" include/crypto/ 193 - * @setkey: Set key for the transformation. 
This function is used to either 194 - * program a supplied key into the hardware or store the key in the 195 - * transformation context for programming it later. Note that this 196 - * function does modify the transformation context. This function can 197 - * be called multiple times during the existence of the transformation 198 - * object, so one must make sure the key is properly reprogrammed into 199 - * the hardware. This function is also responsible for checking the key 200 - * length for validity. In case a software fallback was put in place in 201 - * the @cra_init call, this function might need to use the fallback if 202 - * the algorithm doesn't support all of the key sizes. 203 - * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt 204 - * the supplied scatterlist containing the blocks of data. The crypto 205 - * API consumer is responsible for aligning the entries of the 206 - * scatterlist properly and making sure the chunks are correctly 207 - * sized. In case a software fallback was put in place in the 208 - * @cra_init call, this function might need to use the fallback if 209 - * the algorithm doesn't support all of the key sizes. In case the 210 - * key was stored in transformation context, the key might need to be 211 - * re-programmed into the hardware in this function. This function 212 - * shall not modify the transformation context, as this function may 213 - * be called in parallel with the same transformation object. 214 - * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt 215 - * and the conditions are exactly the same. 216 - * @ivsize: IV size applicable for transformation. The consumer must provide an 217 - * IV of exactly that size to perform the encrypt or decrypt operation. 218 - * 219 - * All fields except @ivsize are mandatory and must be filled. 
220 - */ 221 - struct ablkcipher_alg { 222 - int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, 223 - unsigned int keylen); 224 - int (*encrypt)(struct ablkcipher_request *req); 225 - int (*decrypt)(struct ablkcipher_request *req); 226 - 227 - unsigned int min_keysize; 228 - unsigned int max_keysize; 229 - unsigned int ivsize; 230 - }; 231 169 232 170 /** 233 171 * struct cipher_alg - single-block symmetric ciphers definition ··· 349 415 }; 350 416 #endif /* CONFIG_CRYPTO_STATS */ 351 417 352 - #define cra_ablkcipher cra_u.ablkcipher 353 418 #define cra_cipher cra_u.cipher 354 419 #define cra_compress cra_u.compress 355 420 ··· 416 483 * @cra_exit: Deinitialize the cryptographic transformation object. This is a 417 484 * counterpart to @cra_init, used to remove various changes set in 418 485 * @cra_init. 419 - * @cra_u.ablkcipher: Union member which contains an asynchronous block cipher 420 - * definition. See @struct @ablkcipher_alg. 421 486 * @cra_u.cipher: Union member which contains a single-block symmetric cipher 422 487 * definition. See @struct @cipher_alg. 423 488 * @cra_u.compress: Union member which contains a (de)compression algorithm. 
··· 457 526 const struct crypto_type *cra_type; 458 527 459 528 union { 460 - struct ablkcipher_alg ablkcipher; 461 529 struct cipher_alg cipher; 462 530 struct compress_alg compress; 463 531 } cra_u; ··· 484 554 #ifdef CONFIG_CRYPTO_STATS 485 555 void crypto_stats_init(struct crypto_alg *alg); 486 556 void crypto_stats_get(struct crypto_alg *alg); 487 - void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg); 488 - void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg); 489 557 void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret); 490 558 void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret); 491 559 void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg); ··· 505 577 static inline void crypto_stats_init(struct crypto_alg *alg) 506 578 {} 507 579 static inline void crypto_stats_get(struct crypto_alg *alg) 508 - {} 509 - static inline void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg) 510 - {} 511 - static inline void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg) 512 580 {} 513 581 static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret) 514 582 {} ··· 599 675 * crypto_free_*(), as well as the various helpers below. 
600 676 */ 601 677 602 - struct ablkcipher_tfm { 603 - int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, 604 - unsigned int keylen); 605 - int (*encrypt)(struct ablkcipher_request *req); 606 - int (*decrypt)(struct ablkcipher_request *req); 607 - 608 - struct crypto_ablkcipher *base; 609 - 610 - unsigned int ivsize; 611 - unsigned int reqsize; 612 - }; 613 - 614 678 struct cipher_tfm { 615 679 int (*cit_setkey)(struct crypto_tfm *tfm, 616 680 const u8 *key, unsigned int keylen); ··· 615 703 u8 *dst, unsigned int *dlen); 616 704 }; 617 705 618 - #define crt_ablkcipher crt_u.ablkcipher 619 706 #define crt_cipher crt_u.cipher 620 707 #define crt_compress crt_u.compress 621 708 ··· 623 712 u32 crt_flags; 624 713 625 714 union { 626 - struct ablkcipher_tfm ablkcipher; 627 715 struct cipher_tfm cipher; 628 716 struct compress_tfm compress; 629 717 } crt_u; ··· 632 722 struct crypto_alg *__crt_alg; 633 723 634 724 void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; 635 - }; 636 - 637 - struct crypto_ablkcipher { 638 - struct crypto_tfm base; 639 725 }; 640 726 641 727 struct crypto_cipher { ··· 739 833 { 740 834 struct crypto_tfm *tfm; 741 835 return __alignof__(tfm->__crt_ctx); 742 - } 743 - 744 - /* 745 - * API wrappers. 746 - */ 747 - static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast( 748 - struct crypto_tfm *tfm) 749 - { 750 - return (struct crypto_ablkcipher *)tfm; 751 - } 752 - 753 - /** 754 - * DOC: Asynchronous Block Cipher API 755 - * 756 - * Asynchronous block cipher API is used with the ciphers of type 757 - * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto). 758 - * 759 - * Asynchronous cipher operations imply that the function invocation for a 760 - * cipher request returns immediately before the completion of the operation. 761 - * The cipher request is scheduled as a separate kernel thread and therefore 762 - * load-balanced on the different CPUs via the process scheduler. 
To allow 763 - * the kernel crypto API to inform the caller about the completion of a cipher 764 - * request, the caller must provide a callback function. That function is 765 - * invoked with the cipher handle when the request completes. 766 - * 767 - * To support the asynchronous operation, additional information than just the 768 - * cipher handle must be supplied to the kernel crypto API. That additional 769 - * information is given by filling in the ablkcipher_request data structure. 770 - * 771 - * For the asynchronous block cipher API, the state is maintained with the tfm 772 - * cipher handle. A single tfm can be used across multiple calls and in 773 - * parallel. For asynchronous block cipher calls, context data supplied and 774 - * only used by the caller can be referenced the request data structure in 775 - * addition to the IV used for the cipher request. The maintenance of such 776 - * state information would be important for a crypto driver implementer to 777 - * have, because when calling the callback function upon completion of the 778 - * cipher operation, that callback function may need some information about 779 - * which operation just finished if it invoked multiple in parallel. This 780 - * state information is unused by the kernel crypto API. 
781 - */ 782 - 783 - static inline struct crypto_tfm *crypto_ablkcipher_tfm( 784 - struct crypto_ablkcipher *tfm) 785 - { 786 - return &tfm->base; 787 - } 788 - 789 - /** 790 - * crypto_free_ablkcipher() - zeroize and free cipher handle 791 - * @tfm: cipher handle to be freed 792 - */ 793 - static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) 794 - { 795 - crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); 796 - } 797 - 798 - static inline struct ablkcipher_tfm *crypto_ablkcipher_crt( 799 - struct crypto_ablkcipher *tfm) 800 - { 801 - return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; 802 - } 803 - 804 - /** 805 - * crypto_ablkcipher_ivsize() - obtain IV size 806 - * @tfm: cipher handle 807 - * 808 - * The size of the IV for the ablkcipher referenced by the cipher handle is 809 - * returned. This IV size may be zero if the cipher does not need an IV. 810 - * 811 - * Return: IV size in bytes 812 - */ 813 - static inline unsigned int crypto_ablkcipher_ivsize( 814 - struct crypto_ablkcipher *tfm) 815 - { 816 - return crypto_ablkcipher_crt(tfm)->ivsize; 817 - } 818 - 819 - /** 820 - * crypto_ablkcipher_blocksize() - obtain block size of cipher 821 - * @tfm: cipher handle 822 - * 823 - * The block size for the ablkcipher referenced with the cipher handle is 824 - * returned. 
The caller may use that information to allocate appropriate 825 - * memory for the data returned by the encryption or decryption operation 826 - * 827 - * Return: block size of cipher 828 - */ 829 - static inline unsigned int crypto_ablkcipher_blocksize( 830 - struct crypto_ablkcipher *tfm) 831 - { 832 - return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm)); 833 - } 834 - 835 - static inline unsigned int crypto_ablkcipher_alignmask( 836 - struct crypto_ablkcipher *tfm) 837 - { 838 - return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm)); 839 - } 840 - 841 - static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm) 842 - { 843 - return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm)); 844 - } 845 - 846 - static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm, 847 - u32 flags) 848 - { 849 - crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags); 850 - } 851 - 852 - static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm, 853 - u32 flags) 854 - { 855 - crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); 856 - } 857 - 858 - /** 859 - * crypto_ablkcipher_setkey() - set key for cipher 860 - * @tfm: cipher handle 861 - * @key: buffer holding the key 862 - * @keylen: length of the key in bytes 863 - * 864 - * The caller provided key is set for the ablkcipher referenced by the cipher 865 - * handle. 866 - * 867 - * Note, the key length determines the cipher type. Many block ciphers implement 868 - * different cipher modes depending on the key size, such as AES-128 vs AES-192 869 - * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 870 - * is performed. 
871 - * 872 - * Return: 0 if the setting of the key was successful; < 0 if an error occurred 873 - */ 874 - static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, 875 - const u8 *key, unsigned int keylen) 876 - { 877 - struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm); 878 - 879 - return crt->setkey(crt->base, key, keylen); 880 - } 881 - 882 - /** 883 - * crypto_ablkcipher_reqtfm() - obtain cipher handle from request 884 - * @req: ablkcipher_request out of which the cipher handle is to be obtained 885 - * 886 - * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request 887 - * data structure. 888 - * 889 - * Return: crypto_ablkcipher handle 890 - */ 891 - static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( 892 - struct ablkcipher_request *req) 893 - { 894 - return __crypto_ablkcipher_cast(req->base.tfm); 895 - } 896 - 897 - /** 898 - * crypto_ablkcipher_encrypt() - encrypt plaintext 899 - * @req: reference to the ablkcipher_request handle that holds all information 900 - * needed to perform the cipher operation 901 - * 902 - * Encrypt plaintext data using the ablkcipher_request handle. That data 903 - * structure and how it is filled with data is discussed with the 904 - * ablkcipher_request_* functions. 
905 - * 906 - * Return: 0 if the cipher operation was successful; < 0 if an error occurred 907 - */ 908 - static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) 909 - { 910 - struct ablkcipher_tfm *crt = 911 - crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); 912 - struct crypto_alg *alg = crt->base->base.__crt_alg; 913 - unsigned int nbytes = req->nbytes; 914 - int ret; 915 - 916 - crypto_stats_get(alg); 917 - ret = crt->encrypt(req); 918 - crypto_stats_ablkcipher_encrypt(nbytes, ret, alg); 919 - return ret; 920 - } 921 - 922 - /** 923 - * crypto_ablkcipher_decrypt() - decrypt ciphertext 924 - * @req: reference to the ablkcipher_request handle that holds all information 925 - * needed to perform the cipher operation 926 - * 927 - * Decrypt ciphertext data using the ablkcipher_request handle. That data 928 - * structure and how it is filled with data is discussed with the 929 - * ablkcipher_request_* functions. 930 - * 931 - * Return: 0 if the cipher operation was successful; < 0 if an error occurred 932 - */ 933 - static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) 934 - { 935 - struct ablkcipher_tfm *crt = 936 - crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); 937 - struct crypto_alg *alg = crt->base->base.__crt_alg; 938 - unsigned int nbytes = req->nbytes; 939 - int ret; 940 - 941 - crypto_stats_get(alg); 942 - ret = crt->decrypt(req); 943 - crypto_stats_ablkcipher_decrypt(nbytes, ret, alg); 944 - return ret; 945 - } 946 - 947 - /** 948 - * DOC: Asynchronous Cipher Request Handle 949 - * 950 - * The ablkcipher_request data structure contains all pointers to data 951 - * required for the asynchronous cipher operation. This includes the cipher 952 - * handle (which can be used by multiple ablkcipher_request instances), pointer 953 - * to plaintext and ciphertext, asynchronous callback function, etc. 
It acts 954 - * as a handle to the ablkcipher_request_* API calls in a similar way as 955 - * ablkcipher handle to the crypto_ablkcipher_* API calls. 956 - */ 957 - 958 - /** 959 - * crypto_ablkcipher_reqsize() - obtain size of the request data structure 960 - * @tfm: cipher handle 961 - * 962 - * Return: number of bytes 963 - */ 964 - static inline unsigned int crypto_ablkcipher_reqsize( 965 - struct crypto_ablkcipher *tfm) 966 - { 967 - return crypto_ablkcipher_crt(tfm)->reqsize; 968 - } 969 - 970 - /** 971 - * ablkcipher_request_set_tfm() - update cipher handle reference in request 972 - * @req: request handle to be modified 973 - * @tfm: cipher handle that shall be added to the request handle 974 - * 975 - * Allow the caller to replace the existing ablkcipher handle in the request 976 - * data structure with a different one. 977 - */ 978 - static inline void ablkcipher_request_set_tfm( 979 - struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) 980 - { 981 - req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base); 982 - } 983 - 984 - static inline struct ablkcipher_request *ablkcipher_request_cast( 985 - struct crypto_async_request *req) 986 - { 987 - return container_of(req, struct ablkcipher_request, base); 988 - } 989 - 990 - /** 991 - * ablkcipher_request_alloc() - allocate request data structure 992 - * @tfm: cipher handle to be registered with the request 993 - * @gfp: memory allocation flag that is handed to kmalloc by the API call. 994 - * 995 - * Allocate the request data structure that must be used with the ablkcipher 996 - * encrypt and decrypt API calls. During the allocation, the provided ablkcipher 997 - * handle is registered in the request data structure. 
998 - * 999 - * Return: allocated request handle in case of success, or NULL if out of memory 1000 - */ 1001 - static inline struct ablkcipher_request *ablkcipher_request_alloc( 1002 - struct crypto_ablkcipher *tfm, gfp_t gfp) 1003 - { 1004 - struct ablkcipher_request *req; 1005 - 1006 - req = kmalloc(sizeof(struct ablkcipher_request) + 1007 - crypto_ablkcipher_reqsize(tfm), gfp); 1008 - 1009 - if (likely(req)) 1010 - ablkcipher_request_set_tfm(req, tfm); 1011 - 1012 - return req; 1013 - } 1014 - 1015 - /** 1016 - * ablkcipher_request_free() - zeroize and free request data structure 1017 - * @req: request data structure cipher handle to be freed 1018 - */ 1019 - static inline void ablkcipher_request_free(struct ablkcipher_request *req) 1020 - { 1021 - kzfree(req); 1022 - } 1023 - 1024 - /** 1025 - * ablkcipher_request_set_callback() - set asynchronous callback function 1026 - * @req: request handle 1027 - * @flags: specify zero or an ORing of the flags 1028 - * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and 1029 - * increase the wait queue beyond the initial maximum size; 1030 - * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep 1031 - * @compl: callback function pointer to be registered with the request handle 1032 - * @data: The data pointer refers to memory that is not used by the kernel 1033 - * crypto API, but provided to the callback function for it to use. Here, 1034 - * the caller can provide a reference to memory the callback function can 1035 - * operate on. As the callback function is invoked asynchronously to the 1036 - * related functionality, it may need to access data structures of the 1037 - * related functionality which can be referenced using this pointer. The 1038 - * callback function can access the memory via the "data" field in the 1039 - * crypto_async_request data structure provided to the callback function. 
1040 - * 1041 - * This function allows setting the callback function that is triggered once the 1042 - * cipher operation completes. 1043 - * 1044 - * The callback function is registered with the ablkcipher_request handle and 1045 - * must comply with the following template:: 1046 - * 1047 - * void callback_function(struct crypto_async_request *req, int error) 1048 - */ 1049 - static inline void ablkcipher_request_set_callback( 1050 - struct ablkcipher_request *req, 1051 - u32 flags, crypto_completion_t compl, void *data) 1052 - { 1053 - req->base.complete = compl; 1054 - req->base.data = data; 1055 - req->base.flags = flags; 1056 - } 1057 - 1058 - /** 1059 - * ablkcipher_request_set_crypt() - set data buffers 1060 - * @req: request handle 1061 - * @src: source scatter / gather list 1062 - * @dst: destination scatter / gather list 1063 - * @nbytes: number of bytes to process from @src 1064 - * @iv: IV for the cipher operation which must comply with the IV size defined 1065 - * by crypto_ablkcipher_ivsize 1066 - * 1067 - * This function allows setting of the source data and destination data 1068 - * scatter / gather lists. 1069 - * 1070 - * For encryption, the source is treated as the plaintext and the 1071 - * destination is the ciphertext. For a decryption operation, the use is 1072 - * reversed - the source is the ciphertext and the destination is the plaintext. 1073 - */ 1074 - static inline void ablkcipher_request_set_crypt( 1075 - struct ablkcipher_request *req, 1076 - struct scatterlist *src, struct scatterlist *dst, 1077 - unsigned int nbytes, void *iv) 1078 - { 1079 - req->src = src; 1080 - req->dst = dst; 1081 - req->nbytes = nbytes; 1082 - req->info = iv; 1083 836 } 1084 837 1085 838 /**