/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/*
 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
 * arbitrary modules to be loaded. Loading from userspace may still need the
 * unprefixed names, so those aliases are retained as well.
 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
 * expands twice on the same line. Instead, use a separate base name for the
 * alias.
 */
#define MODULE_ALIAS_CRYPTO(name)	\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)
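
/*
 * Example (illustrative, not part of the original header; "foo" is a
 * hypothetical algorithm name): a module implementing "foo" declares the
 * alias once and gets both the prefixed and unprefixed forms, so that
 * request_module("crypto-foo") as well as request_module("foo") resolve:
 *
 *	MODULE_ALIAS_CRYPTO("foo");
 */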

/*
 * Algorithm masks and types.
 */
#define CRYPTO_ALG_TYPE_MASK		0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER		0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
#define CRYPTO_ALG_TYPE_AEAD		0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER	0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER	0x00000006
#define CRYPTO_ALG_TYPE_DIGEST		0x00000008
#define CRYPTO_ALG_TYPE_HASH		0x00000008
#define CRYPTO_ALG_TYPE_SHASH		0x00000009
#define CRYPTO_ALG_TYPE_AHASH		0x0000000a
#define CRYPTO_ALG_TYPE_RNG		0x0000000c
#define CRYPTO_ALG_TYPE_PCOMPRESS	0x0000000f

#define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000c
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK	0x0000000c

#define CRYPTO_ALG_LARVAL		0x00000010
#define CRYPTO_ALG_DEAD			0x00000020
#define CRYPTO_ALG_DYING		0x00000040
#define CRYPTO_ALG_ASYNC		0x00000080

/*
 * Set this bit if and only if the algorithm requires another algorithm of
 * the same type to handle corner cases.
 */
#define CRYPTO_ALG_NEED_FALLBACK	0x00000100

/*
 * This bit is set for symmetric key ciphers that have already been wrapped
 * with a generic IV generator to prevent them from being wrapped again.
 */
#define CRYPTO_ALG_GENIV		0x00000200

/*
 * Set if the algorithm has passed automated run-time testing. Note that
 * if there is no run-time testing for a given algorithm it is considered
 * to have passed.
 */
#define CRYPTO_ALG_TESTED		0x00000400

/*
 * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE		0x00000800

/* Set this bit if the algorithm provided is hardware accelerated but
 * not available to userspace via an instruction set or similar.
 */
#define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000

/*
 * Transform masks and values (for crt_flags).
 */
#define CRYPTO_TFM_REQ_MASK		0x000fff00
#define CRYPTO_TFM_RES_MASK		0xfff00000

#define CRYPTO_TFM_REQ_WEAK_KEY		0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400
#define CRYPTO_TFM_RES_WEAK_KEY		0x00100000
#define CRYPTO_TFM_RES_BAD_KEY_LEN	0x00200000
#define CRYPTO_TFM_RES_BAD_KEY_SCHED	0x00400000
#define CRYPTO_TFM_RES_BAD_BLOCK_LEN	0x00800000
#define CRYPTO_TFM_RES_BAD_FLAGS	0x01000000

/*
 * Miscellaneous stuff.
 */
#define CRYPTO_MAX_ALG_NAME		64

/*
 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
 * declaration) is used to ensure that the crypto_tfm context structure is
 * aligned correctly for the given architecture so that there are no alignment
 * faults for C data types. In particular, this is required on platforms such
 * as arm where pointers are 32-bit aligned but there are data types such as
 * u64 which require 64-bit alignment.
 */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))

struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
struct crypto_rng;
struct crypto_tfm;
struct crypto_type;
struct aead_givcrypt_request;
struct skcipher_givcrypt_request;

typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);

/**
 * DOC: Block Cipher Context Data Structures
 *
 * These data structures define the operating context for each block cipher
 * type.
 */

struct crypto_async_request {
	struct list_head list;
	crypto_completion_t complete;
	void *data;
	struct crypto_tfm *tfm;

	u32 flags;
};

struct ablkcipher_request {
	struct crypto_async_request base;

	unsigned int nbytes;

	void *info;

	struct scatterlist *src;
	struct scatterlist *dst;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

/**
 * struct aead_request - AEAD request
 * @base: Common attributes for async crypto requests
 * @assoclen: Length in bytes of associated data for authentication
 * @cryptlen: Length of data to be encrypted or decrypted
 * @iv: Initialisation vector
 * @assoc: Associated data
 * @src: Source data
 * @dst: Destination data
 * @__ctx: Start of private context data
 */
struct aead_request {
	struct crypto_async_request base;

	unsigned int assoclen;
	unsigned int cryptlen;

	u8 *iv;

	struct scatterlist *assoc;
	struct scatterlist *src;
	struct scatterlist *dst;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct blkcipher_desc {
	struct crypto_blkcipher *tfm;
	void *info;
	u32 flags;
};

struct cipher_desc {
	struct crypto_tfm *tfm;
	void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
			     const u8 *src, unsigned int nbytes);
	void *info;
};

struct hash_desc {
	struct crypto_hash *tfm;
	u32 flags;
};

/**
 * DOC: Block Cipher Algorithm Definitions
 *
 * These data structures define modular crypto algorithm implementations,
 * managed via crypto_register_alg() and crypto_unregister_alg().
 */

/**
 * struct ablkcipher_alg - asynchronous block cipher definition
 * @min_keysize: Minimum key size supported by the transformation. This is the
 *		 smallest key length supported by this transformation algorithm.
 *		 This must be set to one of the pre-defined values as this is
 *		 not hardware specific. Possible values for this field can be
 *		 found via git grep "_MIN_KEY_SIZE" include/crypto/
 * @max_keysize: Maximum key size supported by the transformation. This is the
 *		 largest key length supported by this transformation algorithm.
 *		 This must be set to one of the pre-defined values as this is
 *		 not hardware specific. Possible values for this field can be
 *		 found via git grep "_MAX_KEY_SIZE" include/crypto/
 * @setkey: Set key for the transformation. This function is used to either
 *	    program a supplied key into the hardware or store the key in the
 *	    transformation context for programming it later. Note that this
 *	    function does modify the transformation context. This function can
 *	    be called multiple times during the existence of the transformation
 *	    object, so one must make sure the key is properly reprogrammed into
 *	    the hardware. This function is also responsible for checking the
 *	    key length for validity. In case a software fallback was put in
 *	    place in the @cra_init call, this function might need to use the
 *	    fallback if the algorithm doesn't support all of the key sizes.
 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
 *	     the supplied scatterlist containing the blocks of data. The crypto
 *	     API consumer is responsible for aligning the entries of the
 *	     scatterlist properly and making sure the chunks are correctly
 *	     sized. In case a software fallback was put in place in the
 *	     @cra_init call, this function might need to use the fallback if
 *	     the algorithm doesn't support all of the key sizes. In case the
 *	     key was stored in the transformation context, the key might need
 *	     to be re-programmed into the hardware in this function. This
 *	     function shall not modify the transformation context, as this
 *	     function may be called in parallel with the same transformation
 *	     object.
 * @decrypt: Decrypt a scatterlist of blocks. This is the reverse counterpart
 *	     to @encrypt and the conditions are exactly the same.
 * @givencrypt: Update the IV for encryption. With this function, a cipher
 *		implementation may provide the function on how to update the IV
 *		for encryption.
 * @givdecrypt: Update the IV for decryption. This is the reverse of
 *		@givencrypt.
 * @geniv: The transformation implementation may use an "IV generator" provided
 *	   by the kernel crypto API. Several use cases have a predefined
 *	   approach for how IVs are to be updated. For such use cases, the
 *	   kernel crypto API provides ready-to-use implementations that can be
 *	   referenced with this variable.
 * @ivsize: IV size applicable for the transformation. The consumer must
 *	    provide an IV of exactly that size to perform the encrypt or
 *	    decrypt operation.
 *
 * All fields except @givencrypt, @givdecrypt, @geniv and @ivsize are
 * mandatory and must be filled.
 */
struct ablkcipher_alg {
	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct ablkcipher_request *req);
	int (*decrypt)(struct ablkcipher_request *req);
	int (*givencrypt)(struct skcipher_givcrypt_request *req);
	int (*givdecrypt)(struct skcipher_givcrypt_request *req);

	const char *geniv;

	unsigned int min_keysize;
	unsigned int max_keysize;
	unsigned int ivsize;
};

/**
 * struct aead_alg - AEAD cipher definition
 * @maxauthsize: Set the maximum authentication tag size supported by the
 *		 transformation. A transformation may support smaller tag sizes.
 *		 As the authentication tag is a message digest to ensure the
 *		 integrity of the encrypted data, a consumer typically wants the
 *		 largest authentication tag possible as defined by this
 *		 variable.
 * @setauthsize: Set authentication size for the AEAD transformation. This
 *		 function is used to specify the consumer requested size of the
 *		 authentication tag to be either generated by the transformation
 *		 during encryption or the size of the authentication tag to be
 *		 supplied during the decryption operation. This function is also
 *		 responsible for checking the authentication tag size for
 *		 validity.
 * @setkey: see struct ablkcipher_alg
 * @encrypt: see struct ablkcipher_alg
 * @decrypt: see struct ablkcipher_alg
 * @givencrypt: see struct ablkcipher_alg
 * @givdecrypt: see struct ablkcipher_alg
 * @geniv: see struct ablkcipher_alg
 * @ivsize: see struct ablkcipher_alg
 *
 * All fields except @givencrypt, @givdecrypt, @geniv and @ivsize are
 * mandatory and must be filled.
 */
struct aead_alg {
	int (*setkey)(struct crypto_aead *tfm, const u8 *key,
		      unsigned int keylen);
	int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
	int (*encrypt)(struct aead_request *req);
	int (*decrypt)(struct aead_request *req);
	int (*givencrypt)(struct aead_givcrypt_request *req);
	int (*givdecrypt)(struct aead_givcrypt_request *req);

	const char *geniv;

	unsigned int ivsize;
	unsigned int maxauthsize;
};

/**
 * struct blkcipher_alg - synchronous block cipher definition
 * @min_keysize: see struct ablkcipher_alg
 * @max_keysize: see struct ablkcipher_alg
 * @setkey: see struct ablkcipher_alg
 * @encrypt: see struct ablkcipher_alg
 * @decrypt: see struct ablkcipher_alg
 * @geniv: see struct ablkcipher_alg
 * @ivsize: see struct ablkcipher_alg
 *
 * All fields except @geniv and @ivsize are mandatory and must be filled.
 */
struct blkcipher_alg {
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes);
	int (*decrypt)(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes);

	const char *geniv;

	unsigned int min_keysize;
	unsigned int max_keysize;
	unsigned int ivsize;
};
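
/*
 * Example (illustrative sketch, not part of the original header): a driver
 * providing a hypothetical 16-byte-block "foo" cipher in CBC mode might fill
 * a struct blkcipher_alg like this, with foo_setkey/foo_encrypt/foo_decrypt
 * being hypothetical helpers of that driver:
 *
 *	static struct blkcipher_alg foo_cbc_ops = {
 *		.min_keysize	= 16,
 *		.max_keysize	= 32,
 *		.ivsize		= 16,
 *		.setkey		= foo_setkey,
 *		.encrypt	= foo_encrypt,
 *		.decrypt	= foo_decrypt,
 *	};
 *
 * In practice this structure is embedded in struct crypto_alg through the
 * cra_u union defined further down in this header.
 */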

/**
 * struct cipher_alg - single-block symmetric cipher definition
 * @cia_min_keysize: Minimum key size supported by the transformation. This is
 *		     the smallest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MIN_KEY_SIZE"
 *		     include/crypto/
 * @cia_max_keysize: Maximum key size supported by the transformation. This is
 *		     the largest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MAX_KEY_SIZE"
 *		     include/crypto/
 * @cia_setkey: Set key for the transformation. This function is used to either
 *		program a supplied key into the hardware or store the key in the
 *		transformation context for programming it later. Note that this
 *		function does modify the transformation context. This function
 *		can be called multiple times during the existence of the
 *		transformation object, so one must make sure the key is properly
 *		reprogrammed into the hardware. This function is also
 *		responsible for checking the key length for validity.
 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
 *		 single block of data, which must be @cra_blocksize big. This
 *		 always operates on a full @cra_blocksize and it is not possible
 *		 to encrypt a block of smaller size. The supplied buffers must
 *		 therefore also be at least of @cra_blocksize size. Both the
 *		 input and output buffers are always aligned to @cra_alignmask.
 *		 In case either of the input or output buffer supplied by the
 *		 user of the crypto API is not aligned to @cra_alignmask, the
 *		 crypto API will re-align the buffers. The re-alignment means
 *		 that a new buffer will be allocated, the data will be copied
 *		 into the new buffer, the processing will happen on the new
 *		 buffer, the data will be copied back into the original buffer
 *		 and finally the new buffer will be freed. In case a software
 *		 fallback was put in place in the @cra_init call, this function
 *		 might need to use the fallback if the algorithm doesn't support
 *		 all of the key sizes. In case the key was stored in the
 *		 transformation context, the key might need to be re-programmed
 *		 into the hardware in this function. This function shall not
 *		 modify the transformation context, as this function may be
 *		 called in parallel with the same transformation object.
 * @cia_decrypt: Decrypt a single block. This is the reverse counterpart to
 *		 @cia_encrypt, and the conditions are exactly the same.
 *
 * All fields are mandatory and must be filled.
 */
struct cipher_alg {
	unsigned int cia_min_keysize;
	unsigned int cia_max_keysize;
	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen);
	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct compress_alg {
	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
			    unsigned int slen, u8 *dst, unsigned int *dlen);
	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
			      unsigned int slen, u8 *dst, unsigned int *dlen);
};

/**
 * struct rng_alg - random number generator definition
 * @rng_make_random: The function defined by this variable obtains a random
 *		     number. The random number generator transform must
 *		     generate the random number out of the context provided
 *		     with this call.
 * @rng_reset: Reset of the random number generator by clearing the entire
 *	       state.
 *	       With the invocation of this function call, the random number
 *	       generator shall completely reinitialize its state. If the random
 *	       number generator requires a seed for setting up a new state,
 *	       the seed must be provided by the consumer while invoking this
 *	       function. The required size of the seed is defined with
 *	       @seedsize.
 * @seedsize: The seed size required for a random number generator
 *	      initialization defined with this variable. Some random number
 *	      generators like the SP800-90A DRBG do not require a seed, as the
 *	      seeding is implemented internally without the need of support by
 *	      the consumer. In this case, the seed size is set to zero.
 */
struct rng_alg {
	int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
			       unsigned int dlen);
	int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);

	unsigned int seedsize;
};


#define cra_ablkcipher	cra_u.ablkcipher
#define cra_aead	cra_u.aead
#define cra_blkcipher	cra_u.blkcipher
#define cra_cipher	cra_u.cipher
#define cra_compress	cra_u.compress
#define cra_rng		cra_u.rng

/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
 *	       CRYPTO_ALG_* flags for the flags which go in here. Those are
 *	       used for fine-tuning the description of the transformation
 *	       algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *		   of the smallest possible unit which can be transformed with
 *		   this algorithm. The users must respect this value.
 *		   In case of a HASH transformation, it is possible for a
 *		   smaller block than @cra_blocksize to be passed to the crypto
 *		   API for transformation; in case of any other transformation
 *		   type, an error will be returned upon any attempt to transform
 *		   smaller than @cra_blocksize chunks.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *		 value informs the kernel crypto API about the memory size
 *		 needed to be allocated for the transformation context.
 * @cra_alignmask: Alignment mask for the input and output data buffer. The
 *		   data buffer containing the input data for the algorithm must
 *		   be aligned to this alignment mask. The data buffer for the
 *		   output data must be aligned to this alignment mask. Note that
 *		   the Crypto API will do the re-alignment in software, but
 *		   only under special conditions and there is a performance hit.
 *		   The re-alignment happens at these occasions for different
 *		   @cra_u types: cipher -- For both input data and output data
 *		   buffer; ahash -- For output hash destination buf; shash --
 *		   For output hash destination buf.
 *		   This is needed on hardware which is flawed by design and
 *		   cannot pick data from arbitrary addresses.
 * @cra_priority: Priority of this transformation implementation. In case
 *		  multiple transformations with the same @cra_name are
 *		  available to the Crypto API, the kernel will use the one with
 *		  the highest @cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 *	      transformation algorithm. This is the name of the transformation
 *	      itself. This field is used by the kernel when looking up the
 *	      providers of a particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 *		     name of the provider of the transformation.
 *		     This can be any arbitrary value, but in the usual case,
 *		     this contains the name of the chip or provider and the
 *		     name of the transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 *	      struct crypto_type, which implements callbacks common for all
 *	      transformation types. There are multiple options:
 *	      &crypto_blkcipher_type, &crypto_ablkcipher_type,
 *	      &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
 *	      This field might be empty. In that case, there are no common
 *	      callbacks. This is the case for: cipher, compress, shash.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 *	   multiple structures. Depending on the type of transformation selected
 *	   by @cra_type and @cra_flags above, the associated structure must be
 *	   filled with callbacks. This field might be empty. This is the case
 *	   for ahash, shash.
 * @cra_init: Initialize the cryptographic transformation object. This function
 *	      is used to initialize the cryptographic transformation object.
 *	      This function is called only once at the instantiation time, right
 *	      after the transformation context was allocated. In case the
 *	      cryptographic hardware has some special requirements which need to
 *	      be handled by software, this function shall check for the precise
 *	      requirement of the transformation and put any software fallbacks
 *	      in place.
 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
 *	      counterpart to @cra_init, used to remove various changes set in
 *	      @cra_init.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
 */
struct crypto_alg {
	struct list_head cra_list;
	struct list_head cra_users;

	u32 cra_flags;
	unsigned int cra_blocksize;
	unsigned int cra_ctxsize;
	unsigned int cra_alignmask;

	int cra_priority;
	atomic_t cra_refcnt;

	char cra_name[CRYPTO_MAX_ALG_NAME];
	char cra_driver_name[CRYPTO_MAX_ALG_NAME];

	const struct crypto_type *cra_type;

	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
		struct rng_alg rng;
	} cra_u;

	int (*cra_init)(struct crypto_tfm *tfm);
	void (*cra_exit)(struct crypto_tfm *tfm);
	void (*cra_destroy)(struct crypto_alg *alg);

	struct module *cra_module;
};

/*
 * Algorithm registration interface.
 */
int crypto_register_alg(struct crypto_alg *alg);
int crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
int crypto_unregister_algs(struct crypto_alg *algs, int count);

/*
 * Algorithm query interface.
 */
int crypto_has_alg(const char *name, u32 type, u32 mask);
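
/*
 * Example (illustrative sketch, not part of the original header): a module
 * typically registers its algorithm(s) from its init function and removes
 * them again on exit. foo_alg is a hypothetical, statically defined
 * struct crypto_alg:
 *
 *	static int __init foo_mod_init(void)
 *	{
 *		return crypto_register_alg(&foo_alg);
 *	}
 *
 *	static void __exit foo_mod_exit(void)
 *	{
 *		crypto_unregister_alg(&foo_alg);
 *	}
 */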

/*
 * Transforms: user-instantiated objects which encapsulate algorithms
 * and core processing logic. Managed via crypto_alloc_*() and
 * crypto_free_*(), as well as the various helpers below.
 */

struct ablkcipher_tfm {
	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct ablkcipher_request *req);
	int (*decrypt)(struct ablkcipher_request *req);
	int (*givencrypt)(struct skcipher_givcrypt_request *req);
	int (*givdecrypt)(struct skcipher_givcrypt_request *req);

	struct crypto_ablkcipher *base;

	unsigned int ivsize;
	unsigned int reqsize;
};

struct aead_tfm {
	int (*setkey)(struct crypto_aead *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct aead_request *req);
	int (*decrypt)(struct aead_request *req);
	int (*givencrypt)(struct aead_givcrypt_request *req);
	int (*givdecrypt)(struct aead_givcrypt_request *req);

	struct crypto_aead *base;

	unsigned int ivsize;
	unsigned int authsize;
	unsigned int reqsize;
};

struct blkcipher_tfm {
	void *iv;
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes);
	int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes);
};

struct cipher_tfm {
	int (*cit_setkey)(struct crypto_tfm *tfm,
			  const u8 *key, unsigned int keylen);
	void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct hash_tfm {
	int (*init)(struct hash_desc *desc);
	int (*update)(struct hash_desc *desc,
		      struct scatterlist *sg, unsigned int nsg);
	int (*final)(struct hash_desc *desc, u8 *out);
	int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
		      unsigned int nsg, u8 *out);
	int (*setkey)(struct crypto_hash *tfm, const u8 *key,
		      unsigned int keylen);
	unsigned int digestsize;
};

struct compress_tfm {
	int (*cot_compress)(struct crypto_tfm *tfm,
			    const u8 *src, unsigned int slen,
			    u8 *dst, unsigned int *dlen);
	int (*cot_decompress)(struct crypto_tfm *tfm,
			      const u8 *src, unsigned int slen,
			      u8 *dst, unsigned int *dlen);
};

struct rng_tfm {
	int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
			      unsigned int dlen);
	int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
};

#define crt_ablkcipher	crt_u.ablkcipher
#define crt_aead	crt_u.aead
#define crt_blkcipher	crt_u.blkcipher
#define crt_cipher	crt_u.cipher
#define crt_hash	crt_u.hash
#define crt_compress	crt_u.compress
#define crt_rng		crt_u.rng

struct crypto_tfm {

	u32 crt_flags;

	union {
		struct ablkcipher_tfm ablkcipher;
		struct aead_tfm aead;
		struct blkcipher_tfm blkcipher;
		struct cipher_tfm cipher;
		struct hash_tfm hash;
		struct compress_tfm compress;
		struct rng_tfm rng;
	} crt_u;

	void (*exit)(struct crypto_tfm *tfm);

	struct crypto_alg *__crt_alg;

	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_ablkcipher {
	struct crypto_tfm base;
};

struct crypto_aead {
	struct crypto_tfm base;
};

struct crypto_blkcipher {
	struct crypto_tfm base;
};

struct crypto_cipher {
	struct crypto_tfm base;
};

struct crypto_comp {
	struct crypto_tfm base;
};

struct crypto_hash {
	struct crypto_tfm base;
};

struct crypto_rng {
	struct crypto_tfm base;
};

enum {
	CRYPTOA_UNSPEC,
	CRYPTOA_ALG,
	CRYPTOA_TYPE,
	CRYPTOA_U32,
	__CRYPTOA_MAX,
};

#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)

/* Maximum number of (rtattr) parameters for each template. */
#define CRYPTO_MAX_ATTRS 32

struct crypto_attr_alg {
	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
	u32 type;
	u32 mask;
};

struct crypto_attr_u32 {
	u32 num;
};

/*
 * Transform user interface.
 */

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
	return crypto_destroy_tfm(tfm, tfm);
}

int alg_test(const char *driver, const char *alg, u32 type, u32 mask);

/*
 * Transform helpers which query the underlying algorithm.
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_driver_name;
}

static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_priority;
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_alignmask;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
	return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags &= ~flags;
}

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
	return tfm->__crt_ctx;
}

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
	struct crypto_tfm *tfm;
	return __alignof__(tfm->__crt_ctx);
}

/*
 * API wrappers.
 */
static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_ablkcipher *)tfm;
}

static inline u32 crypto_skcipher_type(u32 type)
{
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	return type;
}

static inline u32 crypto_skcipher_mask(u32 mask)
{
	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
	return mask;
}

/**
 * DOC: Asynchronous Block Cipher API
 *
 * The asynchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
 *
 * Asynchronous cipher operations imply that the function invocation for a
 * cipher request returns immediately before the completion of the operation.
 * The cipher request is scheduled as a separate kernel thread and therefore
 * load-balanced on the different CPUs via the process scheduler. To allow
 * the kernel crypto API to inform the caller about the completion of a cipher
 * request, the caller must provide a callback function. That function is
 * invoked with the cipher handle when the request completes.
 *
 * To support the asynchronous operation, more information than just the
 * cipher handle must be supplied to the kernel crypto API. That additional
 * information is given by filling in the ablkcipher_request data structure.
 *
 * For the asynchronous block cipher API, the state is maintained with the tfm
 * cipher handle. A single tfm can be used across multiple calls and in
 * parallel. For asynchronous block cipher calls, context data supplied and
 * only used by the caller can be referenced in the request data structure in
 * addition to the IV used for the cipher request. The maintenance of such
 * state information would be important for a crypto driver implementer to
 * have, because when calling the callback function upon completion of the
 * cipher operation, that callback function may need some information about
 * which operation just finished, if it invoked multiple operations in
 * parallel. This state information is unused by the kernel crypto API.
 */
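
/*
 * Example (illustrative sketch, not part of the original header): a typical
 * asynchronous encryption sequence, with error handling abbreviated. The
 * "cbc(aes)" cipher string, the key/iv buffers, the scatterlists src_sg and
 * dst_sg, and the my_complete callback are all assumed to be provided by the
 * caller:
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *	int rc;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_ablkcipher_setkey(tfm, key, 16);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_complete, my_data);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	rc = crypto_ablkcipher_encrypt(req);
 *
 * rc may be -EINPROGRESS or -EBUSY for asynchronous implementations; the
 * final result is then delivered through the my_complete callback.
 */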

/**
 * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      ablkcipher cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for an ablkcipher. The returned struct
 * crypto_ablkcipher is the cipher handle that is required for any subsequent
 * API invocation for that ablkcipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask);

static inline struct crypto_tfm *crypto_ablkcipher_tfm(
	struct crypto_ablkcipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_ablkcipher() - zeroize and free cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
{
	crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
}

/**
 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      ablkcipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the ablkcipher is known to the kernel crypto API; false
 *	   otherwise
 */
static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_has_alg(alg_name, crypto_skcipher_type(type),
			      crypto_skcipher_mask(mask));
}

static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
}

/**
 * crypto_ablkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the ablkcipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_ablkcipher_ivsize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_ablkcipher_crt(tfm)->ivsize;
}

/**
 * crypto_ablkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the ablkcipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_ablkcipher_blocksize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
}

static inline unsigned int crypto_ablkcipher_alignmask(
	struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
}

static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
}

static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
					       u32 flags)
{
	crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
}

static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
						 u32 flags)
{
	crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
}

/**
 * crypto_ablkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the ablkcipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher variant. Many block ciphers
 * support several key sizes, such as AES-128 vs AES-192 vs. AES-256. When
 * providing a 16 byte key for an AES cipher handle, AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);

	return crt->setkey(crt->base, key, keylen);
}

/**
 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
 * @req: ablkcipher_request out of which the cipher handle is to be obtained
 *
 * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
 * data structure.
 *
 * Return: crypto_ablkcipher handle
 */
static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
	struct ablkcipher_request *req)
{
	return __crypto_ablkcipher_cast(req->base.tfm);
}

/**
 * crypto_ablkcipher_encrypt() - encrypt plaintext
 * @req: reference to the ablkcipher_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Encrypt plaintext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_tfm *crt =
		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
	return crt->encrypt(req);
}

/**
 * crypto_ablkcipher_decrypt() - decrypt ciphertext
 * @req: reference to the ablkcipher_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Decrypt ciphertext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_tfm *crt =
		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
	return crt->decrypt(req);
}

/**
 * DOC: Asynchronous Cipher Request Handle
 *
 * The ablkcipher_request data structure contains all pointers to data
 * required for the asynchronous cipher operation. This includes the cipher
 * handle (which can be used by multiple ablkcipher_request instances), pointer
 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
 * as a handle to the ablkcipher_request_* API calls in a similar way as the
 * ablkcipher handle to the crypto_ablkcipher_* API calls.
 */

/**
 * crypto_ablkcipher_reqsize() - obtain size of the request data structure
 * @tfm: cipher handle
 *
 * Return: number of bytes
 */
static inline unsigned int crypto_ablkcipher_reqsize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_ablkcipher_crt(tfm)->reqsize;
}

/**
 * ablkcipher_request_set_tfm() - update cipher handle reference in request
 * @req: request handle to be modified
 * @tfm: cipher handle that shall be added to the request handle
 *
 * Allow the caller to replace the existing ablkcipher handle in the request
 * data structure with a different one.
 */
static inline void ablkcipher_request_set_tfm(
	struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
{
	req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
}

static inline struct ablkcipher_request *ablkcipher_request_cast(
	struct crypto_async_request *req)
{
	return container_of(req, struct ablkcipher_request, base);
}

/**
 * ablkcipher_request_alloc() - allocate request data structure
 * @tfm: cipher handle to be registered with the request
 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
 *
 * Allocate the request data structure that must be used with the ablkcipher
 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
 * handle is registered in the request data structure.
 *
 * Return: allocated request handle in case of success, or NULL if out of
 *	   memory
 */
static inline struct ablkcipher_request *ablkcipher_request_alloc(
	struct crypto_ablkcipher *tfm, gfp_t gfp)
{
	struct ablkcipher_request *req;

	req = kmalloc(sizeof(struct ablkcipher_request) +
		      crypto_ablkcipher_reqsize(tfm), gfp);

	if (likely(req))
		ablkcipher_request_set_tfm(req, tfm);

	return req;
}

/**
 * ablkcipher_request_free() - zeroize and free request data structure
 * @req: request data structure cipher handle to be freed
 */
static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{
	kzfree(req);
}

/**
 * ablkcipher_request_set_callback() - set asynchronous callback function
 * @req: request handle
 * @flags: specify zero or an ORing of the flags
 *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
 *	   increase the wait queue beyond the initial maximum size;
 *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
 * @compl: callback function pointer to be registered with the request handle
 * @data: The data pointer refers to memory that is not used by the kernel
 *	  crypto API, but provided to the callback function for it to use. Here,
 *	  the caller can provide a reference to memory the callback function can
 *	  operate on. As the callback function is invoked asynchronously to the
 *	  related functionality, it may need to access data structures of the
 *	  related functionality which can be referenced using this pointer. The
 *	  callback function can access the memory via the "data" field in the
 *	  crypto_async_request data structure provided to the callback function.
 *
 * This function allows setting the callback function that is triggered once
 * the cipher operation completes.
 *
 * The callback function is registered with the ablkcipher_request handle and
 * must comply with the following template:
 *
 *	void callback_function(struct crypto_async_request *req, int error)
 */
static inline void ablkcipher_request_set_callback(
	struct ablkcipher_request *req,
	u32 flags, crypto_completion_t compl, void *data)
{
	req->base.complete = compl;
	req->base.data = data;
	req->base.flags = flags;
}
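
/*
 * Example (illustrative sketch, not part of the original header): a
 * completion callback matching the template above. my_op is a hypothetical
 * per-request context handed in via the data pointer:
 *
 *	static void my_complete(struct crypto_async_request *req, int error)
 *	{
 *		struct my_op *op = req->data;
 *
 *		if (error == -EINPROGRESS)
 *			return;	// backlogged request was moved to the queue
 *		op->err = error;
 *		complete(&op->done);
 *	}
 */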

/**
 * ablkcipher_request_set_crypt() - set data buffers
 * @req: request handle
 * @src: source scatter / gather list
 * @dst: destination scatter / gather list
 * @nbytes: number of bytes to process from @src
 * @iv: IV for the cipher operation which must comply with the IV size defined
 *	by crypto_ablkcipher_ivsize
 *
 * This function allows setting of the source data and destination data
 * scatter / gather lists.
 *
 * For encryption, the source is treated as the plaintext and the
 * destination is the ciphertext. For a decryption operation, the use is
 * reversed: the source is the ciphertext and the destination is the plaintext.
 */
static inline void ablkcipher_request_set_crypt(
	struct ablkcipher_request *req,
	struct scatterlist *src, struct scatterlist *dst,
	unsigned int nbytes, void *iv)
{
	req->src = src;
	req->dst = dst;
	req->nbytes = nbytes;
	req->info = iv;
}

/**
 * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
 *
 * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
 * (listed as type "aead" in /proc/crypto).
 *
 * The most prominent examples for this type of encryption are GCM and CCM.
 * However, the kernel supports other types of AEAD ciphers which are defined
 * with the following cipher string:
 *
 *	authenc(keyed message digest, block cipher)
 *
 * For example: authenc(hmac(sha256), cbc(aes))
 *
 * The example code provided for the asynchronous block cipher operation
 * applies here as well. Naturally all *ablkcipher* symbols must be exchanged
 * for the *aead* pendants discussed in the following. In addition, for the
 * AEAD operation, the aead_request_set_assoc function must be used to set the
 * pointer to the associated data memory location before performing the
 * encryption or decryption operation. For an encryption, the associated data
 * is included in the computation of the authentication tag. For decryption,
 * the associated data memory must contain the data that is used to verify the
 * integrity of the decrypted data. Another deviation from the asynchronous
 * block cipher operation is that the caller should explicitly check for
 * -EBADMSG returned by crypto_aead_decrypt. That error indicates an
 * authentication error, i.e. a breach in the integrity of the message. In
 * essence, that -EBADMSG error code is the key bonus an AEAD cipher has over
 * "standard" block chaining modes.
 */
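
/*
 * Example (illustrative, not part of the original header): allocating an
 * AEAD handle for an authenc construction with crypto_alloc_aead(), which is
 * declared below; error handling is abbreviated:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */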

static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_aead *)tfm;
}

/**
 * crypto_alloc_aead() - allocate AEAD cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      AEAD cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for an AEAD. The returned struct
 * crypto_aead is the cipher handle that is required for any subsequent
 * API invocation for that AEAD.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);

static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_aead() - zeroize and free aead handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_aead(struct crypto_aead *tfm)
{
	crypto_free_tfm(crypto_aead_tfm(tfm));
}

static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
{
	return &crypto_aead_tfm(tfm)->crt_aead;
}

/**
 * crypto_aead_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the aead referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
{
	return crypto_aead_crt(tfm)->ivsize;
}

/**
 * crypto_aead_authsize() - obtain maximum authentication data size
 * @tfm: cipher handle
 *
 * The maximum size of the authentication data for the AEAD cipher referenced
 * by the AEAD cipher handle is returned. The authentication data size may be
 * zero if the cipher implements a hard-coded maximum.
 *
 * The authentication data may also be known as "tag value".
 *
 * Return: authentication data size / tag size in bytes
 */
static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
{
	return crypto_aead_crt(tfm)->authsize;
}

/**
 * crypto_aead_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the AEAD referenced with the cipher handle is returned.
 * The caller may use that information to allocate appropriate memory for the
 * data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
}

static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
}

static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm)
{
	return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
}

static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
{
	crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
}

static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
{
	crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
}

/**
 * crypto_aead_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the AEAD referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher variant. Many block ciphers
 * support several key sizes, such as AES-128 vs AES-192 vs. AES-256. When
 * providing a 16 byte key for an AES cipher handle, AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct aead_tfm *crt = crypto_aead_crt(tfm);

	return crt->setkey(crt->base, key, keylen);
}
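
/*
 * Example (illustrative): keying an AEAD handle. key/keylen are assumed to
 * be provided by the caller, a 16-byte tag is requested, and
 * crypto_aead_setauthsize() is declared just below:
 *
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, 16);
 */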

/**
 * crypto_aead_setauthsize() - set authentication data size
 * @tfm: cipher handle
 * @authsize: size of the authentication data / tag in bytes
 *
 * Set the authentication data size / tag size. AEAD requires an authentication
 * tag (or MAC) in addition to the associated data.
 *
 * Return: 0 if the setting of the authentication size was successful; < 0 if
 *	   an error occurred
 */
int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);

static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
{
	return __crypto_aead_cast(req->base.tfm);
}

/**
 * crypto_aead_encrypt() - encrypt plaintext
 * @req: reference to the aead_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Encrypt plaintext data using the aead_request handle. That data structure
 * and how it is filled with data is discussed with the aead_request_*
 * functions.
 *
 * IMPORTANT NOTE The encryption operation creates the authentication data /
 *		  tag. That data is concatenated with the created ciphertext.
 *		  The ciphertext memory size is therefore the given number of
 *		  block cipher blocks + the size defined by the
 *		  crypto_aead_setauthsize invocation. The caller must ensure
 *		  that sufficient memory is available for the ciphertext and
 *		  the authentication tag.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_aead_encrypt(struct aead_request *req)
{
	return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
}

/**
 * crypto_aead_decrypt() - decrypt ciphertext
 * @req: reference to the aead_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Decrypt ciphertext data using the aead_request handle. That data structure
 * and how it is filled with data is discussed with the aead_request_*
 * functions.
 *
 * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
 *		  authentication data / tag. That authentication data / tag
 *		  must have the size defined by the crypto_aead_setauthsize
 *		  invocation.
 *
 * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
 *	   cipher operation performs the authentication of the data during the
 *	   decryption operation. Therefore, the function returns this error if
 *	   the authentication of the ciphertext was unsuccessful (i.e. the
 *	   integrity of the ciphertext or the associated data was violated);
 *	   < 0 if an error occurred.
 */
static inline int crypto_aead_decrypt(struct aead_request *req)
{
	return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
}
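
/*
 * Example (illustrative): distinguishing an authentication failure from
 * other errors after decryption; request setup is omitted:
 *
 *	rc = crypto_aead_decrypt(req);
 *	if (rc == -EBADMSG) {
 *		// integrity of the ciphertext or the associated data was
 *		// violated; the plaintext must not be used
 *	} else if (rc) {
 *		// some other error occurred
 *	}
 */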

/**
 * DOC: Asynchronous AEAD Request Handle
 *
 * The aead_request data structure contains all pointers to data required for
 * the AEAD cipher operation. This includes the cipher handle (which can be
 * used by multiple aead_request instances), pointer to plaintext and
 * ciphertext, asynchronous callback function, etc. It acts as a handle to the
 * aead_request_* API calls in a similar way as the AEAD handle to the
 * crypto_aead_* API calls.
 */

/**
 * crypto_aead_reqsize() - obtain size of the request data structure
 * @tfm: cipher handle
 *
 * Return: number of bytes
 */
static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
{
	return crypto_aead_crt(tfm)->reqsize;
}

/**
 * aead_request_set_tfm() - update cipher handle reference in request
 * @req: request handle to be modified
 * @tfm: cipher handle that shall be added to the request handle
 *
 * Allow the caller to replace the existing aead handle in the request
 * data structure with a different one.
 */
static inline void aead_request_set_tfm(struct aead_request *req,
					struct crypto_aead *tfm)
{
	req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
}

/**
 * aead_request_alloc() - allocate request data structure
 * @tfm: cipher handle to be registered with the request
 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
 *
 * Allocate the request data structure that must be used with the AEAD
 * encrypt and decrypt API calls. During the allocation, the provided aead
 * handle is registered in the request data structure.
 *
 * Return: allocated request handle in case of success, or NULL if out of
 *	   memory
 */
static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
						      gfp_t gfp)
{
	struct aead_request *req;

	req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);

	if (likely(req))
		aead_request_set_tfm(req, tfm);

	return req;
}

/**
 * aead_request_free() - zeroize and free request data structure
 * @req: request data structure cipher handle to be freed
 */
static inline void aead_request_free(struct aead_request *req)
{
	kzfree(req);
}

/**
 * aead_request_set_callback() - set asynchronous callback function
 * @req: request handle
 * @flags: specify zero or an ORing of the flags
 *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
 *	   increase the wait queue beyond the initial maximum size;
 *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
 * @compl: callback function pointer to be registered with the request handle
 * @data: The data pointer refers to memory that is not used by the kernel
 *	  crypto API, but provided to the callback function for it to use. Here,
 *	  the caller can provide a reference to memory the callback function can
 *	  operate on. As the callback function is invoked asynchronously to the
 *	  related functionality, it may need to access data structures of the
 *	  related functionality which can be referenced using this pointer. The
 *	  callback function can access the memory via the "data" field in the
 *	  crypto_async_request data structure provided to the callback function.
 *
 * This function sets the callback function that is triggered once the cipher
 * operation completes.
 *
 * The callback function is registered with the aead_request handle and
 * must comply with the following template:
 *
 *	void callback_function(struct crypto_async_request *req, int error)
 */
static inline void aead_request_set_callback(struct aead_request *req,
					     u32 flags,
					     crypto_completion_t compl,
					     void *data)
{
	req->base.complete = compl;
	req->base.data = data;
	req->base.flags = flags;
}

/**
 * aead_request_set_crypt - set data buffers
 * @req: request handle
 * @src: source scatter / gather list
 * @dst: destination scatter / gather list
 * @cryptlen: number of bytes to process from @src
 * @iv: IV for the cipher operation which must comply with the IV size defined
 *	by crypto_aead_ivsize()
 *
 * This function sets the source data and destination data scatter / gather
 * lists.
 *
 * For encryption, the source is treated as the plaintext and the
 * destination is the ciphertext. For a decryption operation, the use is
 * reversed: the source is the ciphertext and the destination is the plaintext.
 *
 * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
 *		  the caller must concatenate the ciphertext followed by the
 *		  authentication tag and provide the entire data stream to the
 *		  decryption operation (i.e. the data length used for the
 *		  initialization of the scatterlist and the data length for the
 *		  decryption operation is identical). For encryption, however,
 *		  the authentication tag is created while encrypting the data.
 *		  The destination buffer must hold sufficient space for the
 *		  ciphertext and the authentication tag while the encryption
 *		  invocation must only point to the plaintext data size. The
 *		  following code snippet illustrates the memory usage:
 *
 *		  buffer = kmalloc(ptbuflen + (enc ? authsize : 0), GFP_KERNEL);
 *		  sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
 *		  aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
 */
static inline void aead_request_set_crypt(struct aead_request *req,
					  struct scatterlist *src,
					  struct scatterlist *dst,
					  unsigned int cryptlen, u8 *iv)
{
	req->src = src;
	req->dst = dst;
	req->cryptlen = cryptlen;
	req->iv = iv;
}

/**
 * aead_request_set_assoc() - set the associated data scatter / gather list
 * @req: request handle
 * @assoc: associated data scatter / gather list
 * @assoclen: number of bytes to process from @assoc
 *
 * For both encryption and decryption, the memory must contain the associated
 * data to be authenticated; for decryption it is used to verify the integrity
 * of the decrypted data.
 */
static inline void aead_request_set_assoc(struct aead_request *req,
					  struct scatterlist *assoc,
					  unsigned int assoclen)
{
	req->assoc = assoc;
	req->assoclen = assoclen;
}
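
/*
 * Example (illustrative): supplying associated data ahead of an AEAD
 * operation; assoc_sg and assoclen are assumed to be set up by the caller:
 *
 *	aead_request_set_assoc(req, assoc_sg, assoclen);
 *	rc = crypto_aead_encrypt(req);
 */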
/**
 * DOC: Synchronous Block Cipher API
 *
 * The synchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
 *
 * Synchronous calls have a context in the tfm. But since a single tfm can be
 * used by multiple calls in parallel, this info should not be changed
 * (unless a lock is used). This applies, for example, to the symmetric key.
 * However, the IV is changeable, so there is an iv field in the blkcipher_tfm
 * structure for the synchronous blkcipher API. It is the only state info that
 * can be kept for synchronous calls without using a big lock across a tfm.
 *
 * The block cipher API allows the use of a complete cipher, i.e. a cipher
 * consisting of a template (a block chaining mode) and a single block cipher
 * primitive (e.g. AES).
 *
 * The plaintext data buffer and the ciphertext data buffer are pointed to
 * by using scatter/gather lists. The cipher operation is performed
 * on all segments of the provided scatter/gather lists.
 *
 * The kernel crypto API supports a cipher operation "in-place" which means
 * that the caller may provide the same scatter/gather list for the plaintext
 * and ciphertext. After the completion of the cipher operation, the plaintext
 * data is replaced with the ciphertext data in case of an encryption and vice
 * versa for a decryption. The caller must ensure that the scatter/gather lists
 * for the output data point to sufficiently large buffers, i.e. multiples of
 * the block size of the cipher.
 */

static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_blkcipher *)tfm;
}

static inline struct crypto_blkcipher *crypto_blkcipher_cast(
	struct crypto_tfm *tfm)
{
	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
	return __crypto_blkcipher_cast(tfm);
}

/**
 * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      blkcipher cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a block cipher. The returned struct
 * crypto_blkcipher is the cipher handle that is required for any subsequent
 * API invocation for that block cipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
	const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
}
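/*
 * Example: allocating and releasing a synchronous block cipher handle.
 * Illustrative sketch; "cbc(aes)" merely stands in for any cipher known to
 * the kernel crypto API, and passing CRYPTO_ALG_ASYNC in the mask requests
 * a synchronous implementation:
 *
 *	struct crypto_blkcipher *tfm;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_blkcipher(tfm);
 */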
static inline struct crypto_tfm *crypto_blkcipher_tfm(
	struct crypto_blkcipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_blkcipher() - zeroize and free the block cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
{
	crypto_free_tfm(crypto_blkcipher_tfm(tfm));
}

/**
 * crypto_has_blkcipher() - Search for the availability of a block cipher
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the block cipher is known to the kernel crypto API; false
 *	   otherwise
 */
static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

/**
 * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
 * @tfm: cipher handle
 *
 * Return: The character string holding the name of the cipher
 */
static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
}

static inline struct blkcipher_tfm *crypto_blkcipher_crt(
	struct crypto_blkcipher *tfm)
{
	return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
}

static inline struct blkcipher_alg *crypto_blkcipher_alg(
	struct crypto_blkcipher *tfm)
{
	return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
}

/**
 * crypto_blkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the block cipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
{
	return crypto_blkcipher_alg(tfm)->ivsize;
}

/**
 * crypto_blkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the block cipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_blkcipher_blocksize(
	struct crypto_blkcipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
}

static inline unsigned int crypto_blkcipher_alignmask(
	struct crypto_blkcipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
}

static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
}

static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
					      u32 flags)
{
	crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
}

static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
						u32 flags)
{
	crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
}

/**
 * crypto_blkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the block cipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher variant. Many block ciphers
 * implement different variants depending on the key size, such as AES-128 vs
 * AES-192 vs AES-256. When providing a 16 byte key for an AES cipher handle,
 * AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
					  const u8 *key, unsigned int keylen)
{
	return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
						 key, keylen);
}
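/*
 * Example: setting a key and inspecting the failure cause. Illustrative
 * sketch; the 16-byte "key" buffer is an assumption and, for an AES
 * handle, selects AES-128 as noted above:
 *
 *	rc = crypto_blkcipher_setkey(tfm, key, 16);
 *	if (rc) {
 *		if (crypto_blkcipher_get_flags(tfm) &
 *		    CRYPTO_TFM_RES_BAD_KEY_LEN)
 *			pr_err("invalid key length\n");
 *		return rc;
 *	}
 */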
/**
 * crypto_blkcipher_encrypt() - encrypt plaintext
 * @desc: reference to the block cipher handle with metadata
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	 ciphertext
 * @src: scatter/gather list that holds the plaintext
 * @nbytes: number of bytes of the plaintext to encrypt.
 *
 * Encrypt plaintext data using the IV set by the caller with a preceding
 * call of crypto_blkcipher_set_iv().
 *
 * The blkcipher_desc data structure must be filled by the caller and can
 * reside on the stack. The caller must fill desc as follows: desc.tfm is
 * filled with the block cipher handle; desc.flags is filled with either
 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
					   struct scatterlist *dst,
					   struct scatterlist *src,
					   unsigned int nbytes)
{
	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
 * @desc: reference to the block cipher handle with metadata
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	 ciphertext
 * @src: scatter/gather list that holds the plaintext
 * @nbytes: number of bytes of the plaintext to encrypt.
 *
 * Encrypt plaintext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller and can
 * reside on the stack. The caller must fill desc as follows: desc.tfm is
 * filled with the block cipher handle; desc.info is filled with the IV to be
 * used for the current operation; desc.flags is filled with either
 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
					      struct scatterlist *dst,
					      struct scatterlist *src,
					      unsigned int nbytes)
{
	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}
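/*
 * Example: in-place encryption with a stack-allocated blkcipher_desc.
 * Illustrative sketch; "buffer" and "nbytes" are assumptions, nbytes is
 * assumed to be a multiple of the block size, and the IV is assumed to
 * have been set beforehand with crypto_blkcipher_set_iv():
 *
 *	struct blkcipher_desc desc = {
 *		.tfm	= tfm,
 *		.flags	= CRYPTO_TFM_REQ_MAY_SLEEP,
 *	};
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buffer, nbytes);
 *	rc = crypto_blkcipher_encrypt(&desc, &sg, &sg, nbytes);
 */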
/**
 * crypto_blkcipher_decrypt() - decrypt ciphertext
 * @desc: reference to the block cipher handle with metadata
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	 plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt.
 *
 * Decrypt ciphertext data using the IV set by the caller with a preceding
 * call of crypto_blkcipher_set_iv().
 *
 * The blkcipher_desc data structure must be filled by the caller as documented
 * for the crypto_blkcipher_encrypt() call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
					   struct scatterlist *dst,
					   struct scatterlist *src,
					   unsigned int nbytes)
{
	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
 * @desc: reference to the block cipher handle with metadata
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	 plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt.
 *
 * Decrypt ciphertext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller as documented
 * for the crypto_blkcipher_encrypt_iv() call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
					      struct scatterlist *dst,
					      struct scatterlist *src,
					      unsigned int nbytes)
{
	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_set_iv() - set IV for cipher
 * @tfm: cipher handle
 * @src: buffer holding the IV
 * @len: length of the IV in bytes
 *
 * The caller provided IV is set for the block cipher referenced by the cipher
 * handle.
 */
static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
					   const u8 *src, unsigned int len)
{
	memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
}

/**
 * crypto_blkcipher_get_iv() - obtain IV from cipher
 * @tfm: cipher handle
 * @dst: buffer filled with the IV
 * @len: length of the buffer dst
 *
 * The caller can obtain the IV set for the block cipher referenced by the
 * cipher handle and store it into the user-provided buffer. If the buffer
 * is smaller than the IV, the IV is truncated to fit the buffer.
 */
static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
					   u8 *dst, unsigned int len)
{
	memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
}
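/*
 * Example: decryption with a dedicated, per-operation IV. Illustrative
 * sketch; "iv" is assumed to be a caller-provided buffer of
 * crypto_blkcipher_ivsize(tfm) bytes and is not retained in the tfm:
 *
 *	struct blkcipher_desc desc = {
 *		.tfm	= tfm,
 *		.info	= iv,
 *		.flags	= 0,
 *	};
 *
 *	rc = crypto_blkcipher_decrypt_iv(&desc, &dst_sg, &src_sg, nbytes);
 */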
/**
 * DOC: Single Block Cipher API
 *
 * The single block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
 *
 * Using the single block cipher API calls, operations with the basic cipher
 * primitive can be implemented. These cipher primitives exclude any block
 * chaining operations including IV handling.
 *
 * The purpose of this single block cipher API is to support the implementation
 * of templates or other concepts that only need to perform the cipher
 * operation on one block at a time. Templates invoke the underlying cipher
 * primitive block-wise and process either the input or the output data of
 * these cipher operations.
 */

static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_cipher *)tfm;
}

static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
{
	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
	return __crypto_cipher_cast(tfm);
}

/**
 * crypto_alloc_cipher() - allocate single block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a single block cipher. The returned struct
 * crypto_cipher is the cipher handle that is required for any subsequent API
 * invocation for that single block cipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
							u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
}
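/*
 * Example: allocating a bare cipher primitive. Illustrative sketch; "aes"
 * stands in for any single block cipher. The resulting handle performs no
 * chaining and no IV handling:
 *
 *	struct crypto_cipher *tfm;
 *
 *	tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */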
static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_cipher() - zeroize and free the single block cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_cipher(struct crypto_cipher *tfm)
{
	crypto_free_tfm(crypto_cipher_tfm(tfm));
}

/**
 * crypto_has_cipher() - Search for the availability of a single block cipher
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the single block cipher is known to the kernel crypto API;
 *	   false otherwise
 */
static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->crt_cipher;
}

/**
 * crypto_cipher_blocksize() - obtain block size for cipher
 * @tfm: cipher handle
 *
 * The block size for the single block cipher referenced with the cipher handle
 * tfm is returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
}

static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
}

static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
{
	return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
}

static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
					   u32 flags)
{
	crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
}

static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
					     u32 flags)
{
	crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
}

/**
 * crypto_cipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the single block cipher referenced by the
 * cipher handle.
 *
 * Note, the key length determines the cipher variant. Many block ciphers
 * implement different variants depending on the key size, such as AES-128 vs
 * AES-192 vs AES-256. When providing a 16 byte key for an AES cipher handle,
 * AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
						  key, keylen);
}
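/*
 * Example: a 16-byte key selects AES-128 on an "aes" handle. Illustrative
 * sketch; the random key is only a placeholder for real key material:
 *
 *	u8 key[16];
 *
 *	get_random_bytes(key, sizeof(key));
 *	rc = crypto_cipher_setkey(tfm, key, sizeof(key));
 */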
/**
 * crypto_cipher_encrypt_one() - encrypt one block of plaintext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the ciphertext
 * @src: buffer holding the plaintext to be encrypted
 *
 * Invoke the encryption operation of one block. The caller must ensure that
 * the plaintext and ciphertext buffers are at least one block in size.
 */
static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
					     u8 *dst, const u8 *src)
{
	crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
						dst, src);
}

/**
 * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the plaintext
 * @src: buffer holding the ciphertext to be decrypted
 *
 * Invoke the decryption operation of one block. The caller must ensure that
 * the plaintext and ciphertext buffers are at least one block in size.
 */
static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
					     u8 *dst, const u8 *src)
{
	crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
						dst, src);
}
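/*
 * Example: what a template does internally -- walking a buffer one block
 * at a time. Illustrative sketch; "buf" and "len" are assumptions, len is
 * assumed to be a multiple of the block size, and encrypting blocks
 * independently like this is ECB and shown for illustration only:
 *
 *	unsigned int bs = crypto_cipher_blocksize(tfm);
 *	unsigned int i;
 *
 *	for (i = 0; i < len; i += bs)
 *		crypto_cipher_encrypt_one(tfm, buf + i, buf + i);
 */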
/**
 * DOC: Synchronous Message Digest API
 *
 * The synchronous message digest API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto)
 */

static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_hash *)tfm;
}

static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
{
	BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_HASH) &
	       CRYPTO_ALG_TYPE_HASH_MASK);
	return __crypto_hash_cast(tfm);
}

/**
 * crypto_alloc_hash() - allocate synchronous message digest handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      message digest cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a message digest. The returned struct
 * crypto_hash is the cipher handle that is required for any subsequent
 * API invocation for that message digest.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
						    u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_HASH;
	mask |= CRYPTO_ALG_TYPE_HASH_MASK;

	return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_hash() - zeroize and free message digest handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_hash(struct crypto_hash *tfm)
{
	crypto_free_tfm(crypto_hash_tfm(tfm));
}

/**
 * crypto_has_hash() - Search for the availability of a message digest
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      message digest cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the message digest cipher is known to the kernel crypto
 *	   API; false otherwise
 */
static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_HASH;
	mask |= CRYPTO_ALG_TYPE_HASH_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
{
	return &crypto_hash_tfm(tfm)->crt_hash;
}

/**
 * crypto_hash_blocksize() - obtain block size for message digest
 * @tfm: cipher handle
 *
 * The block size for the message digest cipher referenced with the cipher
 * handle is returned.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
}

static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
}

/**
 * crypto_hash_digestsize() - obtain message digest size
 * @tfm: cipher handle
 *
 * The size for the message digest created by the message digest cipher
 * referenced with the cipher handle is returned.
 *
 * Return: message digest size
 */
static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
{
	return crypto_hash_crt(tfm)->digestsize;
}

static inline u32 crypto_hash_get_flags(struct crypto_hash *tfm)
{
	return crypto_tfm_get_flags(crypto_hash_tfm(tfm));
}

static inline void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags)
{
	crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags);
}

static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
{
	crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
}

/**
 * crypto_hash_init() - (re)initialize message digest handle
 * @desc: cipher request handle that is to be filled by the caller --
 *	  desc.tfm is filled with the hash cipher handle;
 *	  desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0.
 *
 * The call (re-)initializes the message digest referenced by the hash cipher
 * request handle. Any potentially existing state created by previous
 * operations is discarded.
 *
 * Return: 0 if the message digest initialization was successful; < 0 if an
 *	   error occurred
 */
static inline int crypto_hash_init(struct hash_desc *desc)
{
	return crypto_hash_crt(desc->tfm)->init(desc);
}

/**
 * crypto_hash_update() - add data to message digest for processing
 * @desc: cipher request handle
 * @sg: scatter / gather list pointing to the data to be added to the message
 *	digest
 * @nbytes: number of bytes to be processed from @sg
 *
 * Updates the message digest state of the cipher handle pointed to by the
 * hash cipher request handle with the input data pointed to by the
 * scatter/gather list.
 *
 * Return: 0 if the message digest update was successful; < 0 if an error
 *	   occurred
 */
static inline int crypto_hash_update(struct hash_desc *desc,
				     struct scatterlist *sg,
				     unsigned int nbytes)
{
	return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
}

/**
 * crypto_hash_final() - calculate message digest
 * @desc: cipher request handle
 * @out: message digest output buffer -- The caller must ensure that the out
 *	 buffer has a sufficient size (e.g. by using the
 *	 crypto_hash_digestsize() function).
 *
 * Finalize the message digest operation and create the message digest
 * based on all data added to the cipher handle. The message digest is placed
 * into the output buffer.
 *
 * Return: 0 if the message digest creation was successful; < 0 if an error
 *	   occurred
 */
static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
{
	return crypto_hash_crt(desc->tfm)->final(desc, out);
}

/**
 * crypto_hash_digest() - calculate message digest for a buffer
 * @desc: see crypto_hash_final()
 * @sg: see crypto_hash_update()
 * @nbytes: see crypto_hash_update()
 * @out: see crypto_hash_final()
 *
 * This function is a "short-hand" for the function calls of crypto_hash_init,
 * crypto_hash_update and crypto_hash_final. The parameters have the same
 * meaning as discussed for those separate three functions.
 *
 * Return: 0 if the message digest creation was successful; < 0 if an error
 *	   occurred
 */
static inline int crypto_hash_digest(struct hash_desc *desc,
				     struct scatterlist *sg,
				     unsigned int nbytes, u8 *out)
{
	return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
}
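/*
 * Example: one-shot message digest of a linear buffer. Illustrative
 * sketch; "tfm" is assumed to have been allocated with crypto_alloc_hash()
 * and "out" to be at least crypto_hash_digestsize(tfm) bytes. The same
 * result can be produced with separate init/update/final calls:
 *
 *	struct hash_desc desc = {
 *		.tfm	= tfm,
 *		.flags	= CRYPTO_TFM_REQ_MAY_SLEEP,
 *	};
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	rc = crypto_hash_digest(&desc, &sg, len, out);
 */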
/**
 * crypto_hash_setkey() - set key for message digest
 * @hash: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the message digest cipher. The cipher
 * handle must point to a keyed hash in order for this function to succeed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_hash_setkey(struct crypto_hash *hash,
				     const u8 *key, unsigned int keylen)
{
	return crypto_hash_crt(hash)->setkey(hash, key, keylen);
}

static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_comp *)tfm;
}

static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
{
	BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
	       CRYPTO_ALG_TYPE_MASK);
	return __crypto_comp_cast(tfm);
}

static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
						    u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
{
	return &tfm->base;
}

static inline void crypto_free_comp(struct crypto_comp *tfm)
{
	crypto_free_tfm(crypto_comp_tfm(tfm));
}

static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline const char *crypto_comp_name(struct crypto_comp *tfm)
{
	return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}

static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
{
	return &crypto_comp_tfm(tfm)->crt_compress;
}

static inline int crypto_comp_compress(struct crypto_comp *tfm,
				       const u8 *src, unsigned int slen,
				       u8 *dst, unsigned int *dlen)
{
	return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
						  src, slen, dst, dlen);
}

static inline int crypto_comp_decompress(struct crypto_comp *tfm,
					 const u8 *src, unsigned int slen,
					 u8 *dst, unsigned int *dlen)
{
	return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
						    src, slen, dst, dlen);
}
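/*
 * Example: compressing a buffer. Illustrative sketch; "deflate" stands in
 * for any compression algorithm, and "dlen" must be initialized to the
 * destination buffer size before the call and holds the produced length
 * afterwards:
 *
 *	struct crypto_comp *tfm;
 *	unsigned int dlen = dst_size;
 *
 *	tfm = crypto_alloc_comp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	rc = crypto_comp_compress(tfm, src, slen, dst, &dlen);
 */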
#endif	/* _LINUX_CRYPTO_H */