at v4.2 66 kB view raw
1/* 2 * Scatterlist Cryptographic API. 3 * 4 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> 5 * Copyright (c) 2002 David S. Miller (davem@redhat.com) 6 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> 7 * 8 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no> 9 * and Nettle, by Niels Möller. 10 * 11 * This program is free software; you can redistribute it and/or modify it 12 * under the terms of the GNU General Public License as published by the Free 13 * Software Foundation; either version 2 of the License, or (at your option) 14 * any later version. 15 * 16 */ 17#ifndef _LINUX_CRYPTO_H 18#define _LINUX_CRYPTO_H 19 20#include <linux/atomic.h> 21#include <linux/kernel.h> 22#include <linux/list.h> 23#include <linux/bug.h> 24#include <linux/slab.h> 25#include <linux/string.h> 26#include <linux/uaccess.h> 27 28/* 29 * Autoloaded crypto modules should only use a prefixed name to avoid allowing 30 * arbitrary modules to be loaded. Loading from userspace may still need the 31 * unprefixed names, so retains those aliases as well. 32 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3 33 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro 34 * expands twice on the same line. Instead, use a separate base name for the 35 * alias. 36 */ 37#define MODULE_ALIAS_CRYPTO(name) \ 38 __MODULE_INFO(alias, alias_userspace, name); \ 39 __MODULE_INFO(alias, alias_crypto, "crypto-" name) 40 41/* 42 * Algorithm masks and types. 43 */ 44#define CRYPTO_ALG_TYPE_MASK 0x0000000f 45#define CRYPTO_ALG_TYPE_CIPHER 0x00000001 46#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002 47#define CRYPTO_ALG_TYPE_AEAD 0x00000003 48#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 49#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 50#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 51#define CRYPTO_ALG_TYPE_DIGEST 0x00000008 52#define CRYPTO_ALG_TYPE_HASH 0x00000008 53#define CRYPTO_ALG_TYPE_SHASH 0x00000009 54#define CRYPTO_ALG_TYPE_AHASH 0x0000000a 55#define CRYPTO_ALG_TYPE_RNG 0x0000000c 56#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d 57#define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f 58 59#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e 60#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c 61#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c 62 63#define CRYPTO_ALG_LARVAL 0x00000010 64#define CRYPTO_ALG_DEAD 0x00000020 65#define CRYPTO_ALG_DYING 0x00000040 66#define CRYPTO_ALG_ASYNC 0x00000080 67 68/* 69 * Set this bit if and only if the algorithm requires another algorithm of 70 * the same type to handle corner cases. 71 */ 72#define CRYPTO_ALG_NEED_FALLBACK 0x00000100 73 74/* 75 * This bit is set for symmetric key ciphers that have already been wrapped 76 * with a generic IV generator to prevent them from being wrapped again. 77 */ 78#define CRYPTO_ALG_GENIV 0x00000200 79 80/* 81 * Set if the algorithm has passed automated run-time testing. Note that 82 * if there is no run-time testing for a given algorithm it is considered 83 * to have passed. 84 */ 85 86#define CRYPTO_ALG_TESTED 0x00000400 87 88/* 89 * Set if the algorithm is an instance that is build from templates. 90 */ 91#define CRYPTO_ALG_INSTANCE 0x00000800 92 93/* Set this bit if the algorithm provided is hardware accelerated but 94 * not available to userspace via instruction set or so. 95 */ 96#define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000 97 98/* 99 * Mark a cipher as a service implementation only usable by another 100 * cipher and never by a normal user of the kernel crypto API 101 */ 102#define CRYPTO_ALG_INTERNAL 0x00002000 103 104/* 105 * Temporary flag used to prevent legacy AEAD implementations from 106 * being used by user-space. 107 */ 108#define CRYPTO_ALG_AEAD_NEW 0x00004000 109 110/* 111 * Transform masks and values (for crt_flags). 112 */ 113#define CRYPTO_TFM_REQ_MASK 0x000fff00 114#define CRYPTO_TFM_RES_MASK 0xfff00000 115 116#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100 117#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 118#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400 119#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000 120#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000 121#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000 122#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000 123#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000 124 125/* 126 * Miscellaneous stuff. 127 */ 128#define CRYPTO_MAX_ALG_NAME 64 129 130/* 131 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual 132 * declaration) is used to ensure that the crypto_tfm context structure is 133 * aligned correctly for the given architecture so that there are no alignment 134 * faults for C data types. In particular, this is required on platforms such 135 * as arm where pointers are 32-bit aligned but there are data types such as 136 * u64 which require 64-bit alignment. 137 */ 138#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN 139 140#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN))) 141 142struct scatterlist; 143struct crypto_ablkcipher; 144struct crypto_async_request; 145struct crypto_aead; 146struct crypto_blkcipher; 147struct crypto_hash; 148struct crypto_tfm; 149struct crypto_type; 150struct aead_request; 151struct aead_givcrypt_request; 152struct skcipher_givcrypt_request; 153 154typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); 155 156/** 157 * DOC: Block Cipher Context Data Structures 158 * 159 * These data structures define the operating context for each block cipher 160 * type. 161 */ 162 163struct crypto_async_request { 164 struct list_head list; 165 crypto_completion_t complete; 166 void *data; 167 struct crypto_tfm *tfm; 168 169 u32 flags; 170}; 171 172struct ablkcipher_request { 173 struct crypto_async_request base; 174 175 unsigned int nbytes; 176 177 void *info; 178 179 struct scatterlist *src; 180 struct scatterlist *dst; 181 182 void *__ctx[] CRYPTO_MINALIGN_ATTR; 183}; 184 185struct blkcipher_desc { 186 struct crypto_blkcipher *tfm; 187 void *info; 188 u32 flags; 189}; 190 191struct cipher_desc { 192 struct crypto_tfm *tfm; 193 void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 194 unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst, 195 const u8 *src, unsigned int nbytes); 196 void *info; 197}; 198 199struct hash_desc { 200 struct crypto_hash *tfm; 201 u32 flags; 202}; 203 204/** 205 * DOC: Block Cipher Algorithm Definitions 206 * 207 * These data structures define modular crypto algorithm implementations, 208 * managed via crypto_register_alg() and crypto_unregister_alg(). 209 */ 210 211/** 212 * struct ablkcipher_alg - asynchronous block cipher definition 213 * @min_keysize: Minimum key size supported by the transformation. This is the 214 * smallest key length supported by this transformation algorithm. 215 * This must be set to one of the pre-defined values as this is 216 * not hardware specific. Possible values for this field can be 217 * found via git grep "_MIN_KEY_SIZE" include/crypto/ 218 * @max_keysize: Maximum key size supported by the transformation. This is the 219 * largest key length supported by this transformation algorithm. 220 * This must be set to one of the pre-defined values as this is 221 * not hardware specific. Possible values for this field can be 222 * found via git grep "_MAX_KEY_SIZE" include/crypto/ 223 * @setkey: Set key for the transformation. This function is used to either 224 * program a supplied key into the hardware or store the key in the 225 * transformation context for programming it later. Note that this 226 * function does modify the transformation context. This function can 227 * be called multiple times during the existence of the transformation 228 * object, so one must make sure the key is properly reprogrammed into 229 * the hardware. This function is also responsible for checking the key 230 * length for validity. In case a software fallback was put in place in 231 * the @cra_init call, this function might need to use the fallback if 232 * the algorithm doesn't support all of the key sizes. 233 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt 234 * the supplied scatterlist containing the blocks of data. The crypto 235 * API consumer is responsible for aligning the entries of the 236 * scatterlist properly and making sure the chunks are correctly 237 * sized. In case a software fallback was put in place in the 238 * @cra_init call, this function might need to use the fallback if 239 * the algorithm doesn't support all of the key sizes. In case the 240 * key was stored in transformation context, the key might need to be 241 * re-programmed into the hardware in this function. This function 242 * shall not modify the transformation context, as this function may 243 * be called in parallel with the same transformation object. 244 * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt 245 * and the conditions are exactly the same. 246 * @givencrypt: Update the IV for encryption. With this function, a cipher 247 * implementation may provide the function on how to update the IV 248 * for encryption. 249 * @givdecrypt: Update the IV for decryption. This is the reverse of 250 * @givencrypt . 251 * @geniv: The transformation implementation may use an "IV generator" provided 252 * by the kernel crypto API. Several use cases have a predefined 253 * approach how IVs are to be updated. For such use cases, the kernel 254 * crypto API provides ready-to-use implementations that can be 255 * referenced with this variable. 256 * @ivsize: IV size applicable for transformation. The consumer must provide an 257 * IV of exactly that size to perform the encrypt or decrypt operation. 258 * 259 * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are 260 * mandatory and must be filled. 261 */ 262struct ablkcipher_alg { 263 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, 264 unsigned int keylen); 265 int (*encrypt)(struct ablkcipher_request *req); 266 int (*decrypt)(struct ablkcipher_request *req); 267 int (*givencrypt)(struct skcipher_givcrypt_request *req); 268 int (*givdecrypt)(struct skcipher_givcrypt_request *req); 269 270 const char *geniv; 271 272 unsigned int min_keysize; 273 unsigned int max_keysize; 274 unsigned int ivsize; 275}; 276 277/** 278 * struct old_aead_alg - AEAD cipher definition 279 * @maxauthsize: Set the maximum authentication tag size supported by the 280 * transformation. A transformation may support smaller tag sizes. 281 * As the authentication tag is a message digest to ensure the 282 * integrity of the encrypted data, a consumer typically wants the 283 * largest authentication tag possible as defined by this 284 * variable. 285 * @setauthsize: Set authentication size for the AEAD transformation. This 286 * function is used to specify the consumer requested size of the 287 * authentication tag to be either generated by the transformation 288 * during encryption or the size of the authentication tag to be 289 * supplied during the decryption operation. This function is also 290 * responsible for checking the authentication tag size for 291 * validity. 292 * @setkey: see struct ablkcipher_alg 293 * @encrypt: see struct ablkcipher_alg 294 * @decrypt: see struct ablkcipher_alg 295 * @givencrypt: see struct ablkcipher_alg 296 * @givdecrypt: see struct ablkcipher_alg 297 * @geniv: see struct ablkcipher_alg 298 * @ivsize: see struct ablkcipher_alg 299 * 300 * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are 301 * mandatory and must be filled. 302 */ 303struct old_aead_alg { 304 int (*setkey)(struct crypto_aead *tfm, const u8 *key, 305 unsigned int keylen); 306 int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize); 307 int (*encrypt)(struct aead_request *req); 308 int (*decrypt)(struct aead_request *req); 309 int (*givencrypt)(struct aead_givcrypt_request *req); 310 int (*givdecrypt)(struct aead_givcrypt_request *req); 311 312 const char *geniv; 313 314 unsigned int ivsize; 315 unsigned int maxauthsize; 316}; 317 318/** 319 * struct blkcipher_alg - synchronous block cipher definition 320 * @min_keysize: see struct ablkcipher_alg 321 * @max_keysize: see struct ablkcipher_alg 322 * @setkey: see struct ablkcipher_alg 323 * @encrypt: see struct ablkcipher_alg 324 * @decrypt: see struct ablkcipher_alg 325 * @geniv: see struct ablkcipher_alg 326 * @ivsize: see struct ablkcipher_alg 327 * 328 * All fields except @geniv and @ivsize are mandatory and must be filled. 329 */ 330struct blkcipher_alg { 331 int (*setkey)(struct crypto_tfm *tfm, const u8 *key, 332 unsigned int keylen); 333 int (*encrypt)(struct blkcipher_desc *desc, 334 struct scatterlist *dst, struct scatterlist *src, 335 unsigned int nbytes); 336 int (*decrypt)(struct blkcipher_desc *desc, 337 struct scatterlist *dst, struct scatterlist *src, 338 unsigned int nbytes); 339 340 const char *geniv; 341 342 unsigned int min_keysize; 343 unsigned int max_keysize; 344 unsigned int ivsize; 345}; 346 347/** 348 * struct cipher_alg - single-block symmetric ciphers definition 349 * @cia_min_keysize: Minimum key size supported by the transformation. This is 350 * the smallest key length supported by this transformation 351 * algorithm. This must be set to one of the pre-defined 352 * values as this is not hardware specific. Possible values 353 * for this field can be found via git grep "_MIN_KEY_SIZE" 354 * include/crypto/ 355 * @cia_max_keysize: Maximum key size supported by the transformation. This is 356 * the largest key length supported by this transformation 357 * algorithm. This must be set to one of the pre-defined values 358 * as this is not hardware specific. Possible values for this 359 * field can be found via git grep "_MAX_KEY_SIZE" 360 * include/crypto/ 361 * @cia_setkey: Set key for the transformation. This function is used to either 362 * program a supplied key into the hardware or store the key in the 363 * transformation context for programming it later. Note that this 364 * function does modify the transformation context. This function 365 * can be called multiple times during the existence of the 366 * transformation object, so one must make sure the key is properly 367 * reprogrammed into the hardware. This function is also 368 * responsible for checking the key length for validity. 369 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a 370 * single block of data, which must be @cra_blocksize big. This 371 * always operates on a full @cra_blocksize and it is not possible 372 * to encrypt a block of smaller size. The supplied buffers must 373 * therefore also be at least of @cra_blocksize size. Both the 374 * input and output buffers are always aligned to @cra_alignmask. 375 * In case either of the input or output buffer supplied by user 376 * of the crypto API is not aligned to @cra_alignmask, the crypto 377 * API will re-align the buffers. The re-alignment means that a 378 * new buffer will be allocated, the data will be copied into the 379 * new buffer, then the processing will happen on the new buffer, 380 * then the data will be copied back into the original buffer and 381 * finally the new buffer will be freed. In case a software 382 * fallback was put in place in the @cra_init call, this function 383 * might need to use the fallback if the algorithm doesn't support 384 * all of the key sizes. In case the key was stored in 385 * transformation context, the key might need to be re-programmed 386 * into the hardware in this function. This function shall not 387 * modify the transformation context, as this function may be 388 * called in parallel with the same transformation object. 389 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to 390 * @cia_encrypt, and the conditions are exactly the same. 391 * 392 * All fields are mandatory and must be filled. 393 */ 394struct cipher_alg { 395 unsigned int cia_min_keysize; 396 unsigned int cia_max_keysize; 397 int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key, 398 unsigned int keylen); 399 void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 400 void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 401}; 402 403struct compress_alg { 404 int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src, 405 unsigned int slen, u8 *dst, unsigned int *dlen); 406 int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src, 407 unsigned int slen, u8 *dst, unsigned int *dlen); 408}; 409 410 411#define cra_ablkcipher cra_u.ablkcipher 412#define cra_aead cra_u.aead 413#define cra_blkcipher cra_u.blkcipher 414#define cra_cipher cra_u.cipher 415#define cra_compress cra_u.compress 416 417/** 418 * struct crypto_alg - definition of a cryptograpic cipher algorithm 419 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h 420 * CRYPTO_ALG_* flags for the flags which go in here. Those are 421 * used for fine-tuning the description of the transformation 422 * algorithm. 423 * @cra_blocksize: Minimum block size of this transformation. The size in bytes 424 * of the smallest possible unit which can be transformed with 425 * this algorithm. The users must respect this value. 426 * In case of HASH transformation, it is possible for a smaller 427 * block than @cra_blocksize to be passed to the crypto API for 428 * transformation, in case of any other transformation type, an 429 * error will be returned upon any attempt to transform smaller 430 * than @cra_blocksize chunks. 431 * @cra_ctxsize: Size of the operational context of the transformation. This 432 * value informs the kernel crypto API about the memory size 433 * needed to be allocated for the transformation context. 434 * @cra_alignmask: Alignment mask for the input and output data buffer. The data 435 * buffer containing the input data for the algorithm must be 436 * aligned to this alignment mask. The data buffer for the 437 * output data must be aligned to this alignment mask. Note that 438 * the Crypto API will do the re-alignment in software, but 439 * only under special conditions and there is a performance hit. 440 * The re-alignment happens at these occasions for different 441 * @cra_u types: cipher -- For both input data and output data 442 * buffer; ahash -- For output hash destination buf; shash -- 443 * For output hash destination buf. 444 * This is needed on hardware which is flawed by design and 445 * cannot pick data from arbitrary addresses. 446 * @cra_priority: Priority of this transformation implementation. In case 447 * multiple transformations with same @cra_name are available to 448 * the Crypto API, the kernel will use the one with highest 449 * @cra_priority. 450 * @cra_name: Generic name (usable by multiple implementations) of the 451 * transformation algorithm. This is the name of the transformation 452 * itself. This field is used by the kernel when looking up the 453 * providers of particular transformation. 454 * @cra_driver_name: Unique name of the transformation provider. This is the 455 * name of the provider of the transformation. This can be any 456 * arbitrary value, but in the usual case, this contains the 457 * name of the chip or provider and the name of the 458 * transformation algorithm. 459 * @cra_type: Type of the cryptographic transformation. This is a pointer to 460 * struct crypto_type, which implements callbacks common for all 461 * transformation types. There are multiple options: 462 * &crypto_blkcipher_type, &crypto_ablkcipher_type, 463 * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type. 464 * This field might be empty. In that case, there are no common 465 * callbacks. This is the case for: cipher, compress, shash. 466 * @cra_u: Callbacks implementing the transformation. This is a union of 467 * multiple structures. Depending on the type of transformation selected 468 * by @cra_type and @cra_flags above, the associated structure must be 469 * filled with callbacks. This field might be empty. This is the case 470 * for ahash, shash. 471 * @cra_init: Initialize the cryptographic transformation object. This function 472 * is used to initialize the cryptographic transformation object. 473 * This function is called only once at the instantiation time, right 474 * after the transformation context was allocated. In case the 475 * cryptographic hardware has some special requirements which need to 476 * be handled by software, this function shall check for the precise 477 * requirement of the transformation and put any software fallbacks 478 * in place. 479 * @cra_exit: Deinitialize the cryptographic transformation object. This is a 480 * counterpart to @cra_init, used to remove various changes set in 481 * @cra_init. 482 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE 483 * @cra_list: internally used 484 * @cra_users: internally used 485 * @cra_refcnt: internally used 486 * @cra_destroy: internally used 487 * 488 * The struct crypto_alg describes a generic Crypto API algorithm and is common 489 * for all of the transformations. Any variable not documented here shall not 490 * be used by a cipher implementation as it is internal to the Crypto API. 491 */ 492struct crypto_alg { 493 struct list_head cra_list; 494 struct list_head cra_users; 495 496 u32 cra_flags; 497 unsigned int cra_blocksize; 498 unsigned int cra_ctxsize; 499 unsigned int cra_alignmask; 500 501 int cra_priority; 502 atomic_t cra_refcnt; 503 504 char cra_name[CRYPTO_MAX_ALG_NAME]; 505 char cra_driver_name[CRYPTO_MAX_ALG_NAME]; 506 507 const struct crypto_type *cra_type; 508 509 union { 510 struct ablkcipher_alg ablkcipher; 511 struct old_aead_alg aead; 512 struct blkcipher_alg blkcipher; 513 struct cipher_alg cipher; 514 struct compress_alg compress; 515 } cra_u; 516 517 int (*cra_init)(struct crypto_tfm *tfm); 518 void (*cra_exit)(struct crypto_tfm *tfm); 519 void (*cra_destroy)(struct crypto_alg *alg); 520 521 struct module *cra_module; 522} CRYPTO_MINALIGN_ATTR; 523 524/* 525 * Algorithm registration interface. 526 */ 527int crypto_register_alg(struct crypto_alg *alg); 528int crypto_unregister_alg(struct crypto_alg *alg); 529int crypto_register_algs(struct crypto_alg *algs, int count); 530int crypto_unregister_algs(struct crypto_alg *algs, int count); 531 532/* 533 * Algorithm query interface. 534 */ 535int crypto_has_alg(const char *name, u32 type, u32 mask); 536 537/* 538 * Transforms: user-instantiated objects which encapsulate algorithms 539 * and core processing logic. Managed via crypto_alloc_*() and 540 * crypto_free_*(), as well as the various helpers below. 541 */ 542 543struct ablkcipher_tfm { 544 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, 545 unsigned int keylen); 546 int (*encrypt)(struct ablkcipher_request *req); 547 int (*decrypt)(struct ablkcipher_request *req); 548 int (*givencrypt)(struct skcipher_givcrypt_request *req); 549 int (*givdecrypt)(struct skcipher_givcrypt_request *req); 550 551 struct crypto_ablkcipher *base; 552 553 unsigned int ivsize; 554 unsigned int reqsize; 555}; 556 557struct blkcipher_tfm { 558 void *iv; 559 int (*setkey)(struct crypto_tfm *tfm, const u8 *key, 560 unsigned int keylen); 561 int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, 562 struct scatterlist *src, unsigned int nbytes); 563 int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, 564 struct scatterlist *src, unsigned int nbytes); 565}; 566 567struct cipher_tfm { 568 int (*cit_setkey)(struct crypto_tfm *tfm, 569 const u8 *key, unsigned int keylen); 570 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 571 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); 572}; 573 574struct hash_tfm { 575 int (*init)(struct hash_desc *desc); 576 int (*update)(struct hash_desc *desc, 577 struct scatterlist *sg, unsigned int nsg); 578 int (*final)(struct hash_desc *desc, u8 *out); 579 int (*digest)(struct hash_desc *desc, struct scatterlist *sg, 580 unsigned int nsg, u8 *out); 581 int (*setkey)(struct crypto_hash *tfm, const u8 *key, 582 unsigned int keylen); 583 unsigned int digestsize; 584}; 585 586struct compress_tfm { 587 int (*cot_compress)(struct crypto_tfm *tfm, 588 const u8 *src, unsigned int slen, 589 u8 *dst, unsigned int *dlen); 590 int (*cot_decompress)(struct crypto_tfm *tfm, 591 const u8 *src, unsigned int slen, 592 u8 *dst, unsigned int *dlen); 593}; 594 595#define crt_ablkcipher crt_u.ablkcipher 596#define crt_blkcipher crt_u.blkcipher 597#define crt_cipher crt_u.cipher 598#define crt_hash crt_u.hash 599#define crt_compress crt_u.compress 600 601struct crypto_tfm { 602 603 u32 crt_flags; 604 605 union { 606 struct ablkcipher_tfm ablkcipher; 607 struct blkcipher_tfm blkcipher; 608 struct cipher_tfm cipher; 609 struct hash_tfm hash; 610 struct compress_tfm compress; 611 } crt_u; 612 613 void (*exit)(struct crypto_tfm *tfm); 614 615 struct crypto_alg *__crt_alg; 616 617 void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; 618}; 619 620struct crypto_ablkcipher { 621 struct crypto_tfm base; 622}; 623 624struct crypto_blkcipher { 625 struct crypto_tfm base; 626}; 627 628struct crypto_cipher { 629 struct crypto_tfm base; 630}; 631 632struct crypto_comp { 633 struct crypto_tfm base; 634}; 635 636struct crypto_hash { 637 struct crypto_tfm base; 638}; 639 640enum { 641 CRYPTOA_UNSPEC, 642 CRYPTOA_ALG, 643 CRYPTOA_TYPE, 644 CRYPTOA_U32, 645 __CRYPTOA_MAX, 646}; 647 648#define CRYPTOA_MAX (__CRYPTOA_MAX - 1) 649 650/* Maximum number of (rtattr) parameters for each template. */ 651#define CRYPTO_MAX_ATTRS 32 652 653struct crypto_attr_alg { 654 char name[CRYPTO_MAX_ALG_NAME]; 655}; 656 657struct crypto_attr_type { 658 u32 type; 659 u32 mask; 660}; 661 662struct crypto_attr_u32 { 663 u32 num; 664}; 665 666/* 667 * Transform user interface. 668 */ 669 670struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); 671void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm); 672 673static inline void crypto_free_tfm(struct crypto_tfm *tfm) 674{ 675 return crypto_destroy_tfm(tfm, tfm); 676} 677 678int alg_test(const char *driver, const char *alg, u32 type, u32 mask); 679 680/* 681 * Transform helpers which query the underlying algorithm. 682 */ 683static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm) 684{ 685 return tfm->__crt_alg->cra_name; 686} 687 688static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm) 689{ 690 return tfm->__crt_alg->cra_driver_name; 691} 692 693static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm) 694{ 695 return tfm->__crt_alg->cra_priority; 696} 697 698static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm) 699{ 700 return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK; 701} 702 703static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm) 704{ 705 return tfm->__crt_alg->cra_blocksize; 706} 707 708static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm) 709{ 710 return tfm->__crt_alg->cra_alignmask; 711} 712 713static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm) 714{ 715 return tfm->crt_flags; 716} 717 718static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags) 719{ 720 tfm->crt_flags |= flags; 721} 722 723static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags) 724{ 725 tfm->crt_flags &= ~flags; 726} 727 728static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm) 729{ 730 return tfm->__crt_ctx; 731} 732 733static inline unsigned int crypto_tfm_ctx_alignment(void) 734{ 735 struct crypto_tfm *tfm; 736 return __alignof__(tfm->__crt_ctx); 737} 738 739/* 740 * API wrappers. 741 */ 742static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast( 743 struct crypto_tfm *tfm) 744{ 745 return (struct crypto_ablkcipher *)tfm; 746} 747 748static inline u32 crypto_skcipher_type(u32 type) 749{ 750 type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); 751 type |= CRYPTO_ALG_TYPE_BLKCIPHER; 752 return type; 753} 754 755static inline u32 crypto_skcipher_mask(u32 mask) 756{ 757 mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); 758 mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK; 759 return mask; 760} 761 762/** 763 * DOC: Asynchronous Block Cipher API 764 * 765 * Asynchronous block cipher API is used with the ciphers of type 766 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto). 767 * 768 * Asynchronous cipher operations imply that the function invocation for a 769 * cipher request returns immediately before the completion of the operation. 770 * The cipher request is scheduled as a separate kernel thread and therefore 771 * load-balanced on the different CPUs via the process scheduler. To allow 772 * the kernel crypto API to inform the caller about the completion of a cipher 773 * request, the caller must provide a callback function. That function is 774 * invoked with the cipher handle when the request completes. 775 * 776 * To support the asynchronous operation, additional information than just the 777 * cipher handle must be supplied to the kernel crypto API. That additional 778 * information is given by filling in the ablkcipher_request data structure. 779 * 780 * For the asynchronous block cipher API, the state is maintained with the tfm 781 * cipher handle. A single tfm can be used across multiple calls and in 782 * parallel. For asynchronous block cipher calls, context data supplied and 783 * only used by the caller can be referenced the request data structure in 784 * addition to the IV used for the cipher request. The maintenance of such 785 * state information would be important for a crypto driver implementer to 786 * have, because when calling the callback function upon completion of the 787 * cipher operation, that callback function may need some information about 788 * which operation just finished if it invoked multiple in parallel. This 789 * state information is unused by the kernel crypto API. 790 */ 791 792/** 793 * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle 794 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the 795 * ablkcipher cipher 796 * @type: specifies the type of the cipher 797 * @mask: specifies the mask for the cipher 798 * 799 * Allocate a cipher handle for an ablkcipher. The returned struct 800 * crypto_ablkcipher is the cipher handle that is required for any subsequent 801 * API invocation for that ablkcipher. 802 * 803 * Return: allocated cipher handle in case of success; IS_ERR() is true in case 804 * of an error, PTR_ERR() returns the error code. 805 */ 806struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, 807 u32 type, u32 mask); 808 809static inline struct crypto_tfm *crypto_ablkcipher_tfm( 810 struct crypto_ablkcipher *tfm) 811{ 812 return &tfm->base; 813} 814 815/** 816 * crypto_free_ablkcipher() - zeroize and free cipher handle 817 * @tfm: cipher handle to be freed 818 */ 819static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) 820{ 821 crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); 822} 823 824/** 825 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher. 826 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the 827 * ablkcipher 828 * @type: specifies the type of the cipher 829 * @mask: specifies the mask for the cipher 830 * 831 * Return: true when the ablkcipher is known to the kernel crypto API; false 832 * otherwise 833 */ 834static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, 835 u32 mask) 836{ 837 return crypto_has_alg(alg_name, crypto_skcipher_type(type), 838 crypto_skcipher_mask(mask)); 839} 840 841static inline struct ablkcipher_tfm *crypto_ablkcipher_crt( 842 struct crypto_ablkcipher *tfm) 843{ 844 return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; 845} 846 847/** 848 * crypto_ablkcipher_ivsize() - obtain IV size 849 * @tfm: cipher handle 850 * 851 * The size of the IV for the ablkcipher referenced by the cipher handle is 852 * returned. This IV size may be zero if the cipher does not need an IV. 853 * 854 * Return: IV size in bytes 855 */ 856static inline unsigned int crypto_ablkcipher_ivsize( 857 struct crypto_ablkcipher *tfm) 858{ 859 return crypto_ablkcipher_crt(tfm)->ivsize; 860} 861 862/** 863 * crypto_ablkcipher_blocksize() - obtain block size of cipher 864 * @tfm: cipher handle 865 * 866 * The block size for the ablkcipher referenced with the cipher handle is 867 * returned. The caller may use that information to allocate appropriate 868 * memory for the data returned by the encryption or decryption operation 869 * 870 * Return: block size of cipher 871 */ 872static inline unsigned int crypto_ablkcipher_blocksize( 873 struct crypto_ablkcipher *tfm) 874{ 875 return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm)); 876} 877 878static inline unsigned int crypto_ablkcipher_alignmask( 879 struct crypto_ablkcipher *tfm) 880{ 881 return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm)); 882} 883 884static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm) 885{ 886 return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm)); 887} 888 889static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm, 890 u32 flags) 891{ 892 crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags); 893} 894 895static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm, 896 u32 flags) 897{ 898 crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); 899} 900 901/** 902 * crypto_ablkcipher_setkey() - set key for cipher 903 * @tfm: cipher handle 904 * @key: buffer holding the key 905 * @keylen: length of the key in bytes 906 * 907 * The caller provided key is set for the ablkcipher referenced by the cipher 908 * handle. 909 * 910 * Note, the key length determines the cipher type. Many block ciphers implement 911 * different cipher modes depending on the key size, such as AES-128 vs AES-192 912 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 913 * is performed. 914 * 915 * Return: 0 if the setting of the key was successful; < 0 if an error occurred 916 */ 917static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, 918 const u8 *key, unsigned int keylen) 919{ 920 struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm); 921 922 return crt->setkey(crt->base, key, keylen); 923} 924 925/** 926 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request 927 * @req: ablkcipher_request out of which the cipher handle is to be obtained 928 * 929 * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request 930 * data structure. 931 * 932 * Return: crypto_ablkcipher handle 933 */ 934static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( 935 struct ablkcipher_request *req) 936{ 937 return __crypto_ablkcipher_cast(req->base.tfm); 938} 939 940/** 941 * crypto_ablkcipher_encrypt() - encrypt plaintext 942 * @req: reference to the ablkcipher_request handle that holds all information 943 * needed to perform the cipher operation 944 * 945 * Encrypt plaintext data using the ablkcipher_request handle. That data 946 * structure and how it is filled with data is discussed with the 947 * ablkcipher_request_* functions. 948 * 949 * Return: 0 if the cipher operation was successful; < 0 if an error occurred 950 */ 951static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) 952{ 953 struct ablkcipher_tfm *crt = 954 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); 955 return crt->encrypt(req); 956} 957 958/** 959 * crypto_ablkcipher_decrypt() - decrypt ciphertext 960 * @req: reference to the ablkcipher_request handle that holds all information 961 * needed to perform the cipher operation 962 * 963 * Decrypt ciphertext data using the ablkcipher_request handle. That data 964 * structure and how it is filled with data is discussed with the 965 * ablkcipher_request_* functions. 966 * 967 * Return: 0 if the cipher operation was successful; < 0 if an error occurred 968 */ 969static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) 970{ 971 struct ablkcipher_tfm *crt = 972 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); 973 return crt->decrypt(req); 974} 975 976/** 977 * DOC: Asynchronous Cipher Request Handle 978 * 979 * The ablkcipher_request data structure contains all pointers to data 980 * required for the asynchronous cipher operation. This includes the cipher 981 * handle (which can be used by multiple ablkcipher_request instances), pointer 982 * to plaintext and ciphertext, asynchronous callback function, etc. It acts 983 * as a handle to the ablkcipher_request_* API calls in a similar way as 984 * ablkcipher handle to the crypto_ablkcipher_* API calls. 985 */ 986 987/** 988 * crypto_ablkcipher_reqsize() - obtain size of the request data structure 989 * @tfm: cipher handle 990 * 991 * Return: number of bytes 992 */ 993static inline unsigned int crypto_ablkcipher_reqsize( 994 struct crypto_ablkcipher *tfm) 995{ 996 return crypto_ablkcipher_crt(tfm)->reqsize; 997} 998 999/** 1000 * ablkcipher_request_set_tfm() - update cipher handle reference in request 1001 * @req: request handle to be modified 1002 * @tfm: cipher handle that shall be added to the request handle 1003 * 1004 * Allow the caller to replace the existing ablkcipher handle in the request 1005 * data structure with a different one. 1006 */ 1007static inline void ablkcipher_request_set_tfm( 1008 struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) 1009{ 1010 req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base); 1011} 1012 1013static inline struct ablkcipher_request *ablkcipher_request_cast( 1014 struct crypto_async_request *req) 1015{ 1016 return container_of(req, struct ablkcipher_request, base); 1017} 1018 1019/** 1020 * ablkcipher_request_alloc() - allocate request data structure 1021 * @tfm: cipher handle to be registered with the request 1022 * @gfp: memory allocation flag that is handed to kmalloc by the API call. 1023 * 1024 * Allocate the request data structure that must be used with the ablkcipher 1025 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher 1026 * handle is registered in the request data structure. 1027 * 1028 * Return: allocated request handle in case of success; IS_ERR() is true in case 1029 * of an error, PTR_ERR() returns the error code. 1030 */ 1031static inline struct ablkcipher_request *ablkcipher_request_alloc( 1032 struct crypto_ablkcipher *tfm, gfp_t gfp) 1033{ 1034 struct ablkcipher_request *req; 1035 1036 req = kmalloc(sizeof(struct ablkcipher_request) + 1037 crypto_ablkcipher_reqsize(tfm), gfp); 1038 1039 if (likely(req)) 1040 ablkcipher_request_set_tfm(req, tfm); 1041 1042 return req; 1043} 1044 1045/** 1046 * ablkcipher_request_free() - zeroize and free request data structure 1047 * @req: request data structure cipher handle to be freed 1048 */ 1049static inline void ablkcipher_request_free(struct ablkcipher_request *req) 1050{ 1051 kzfree(req); 1052} 1053 1054/** 1055 * ablkcipher_request_set_callback() - set asynchronous callback function 1056 * @req: request handle 1057 * @flags: specify zero or an ORing of the flags 1058 * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and 1059 * increase the wait queue beyond the initial maximum size; 1060 * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep 1061 * @compl: callback function pointer to be registered with the request handle 1062 * @data: The data pointer refers to memory that is not used by the kernel 1063 * crypto API, but provided to the callback function for it to use. Here, 1064 * the caller can provide a reference to memory the callback function can 1065 * operate on. As the callback function is invoked asynchronously to the 1066 * related functionality, it may need to access data structures of the 1067 * related functionality which can be referenced using this pointer. The 1068 * callback function can access the memory via the "data" field in the 1069 * crypto_async_request data structure provided to the callback function. 1070 * 1071 * This function allows setting the callback function that is triggered once the 1072 * cipher operation completes. 1073 * 1074 * The callback function is registered with the ablkcipher_request handle and 1075 * must comply with the following template 1076 * 1077 * void callback_function(struct crypto_async_request *req, int error) 1078 */ 1079static inline void ablkcipher_request_set_callback( 1080 struct ablkcipher_request *req, 1081 u32 flags, crypto_completion_t compl, void *data) 1082{ 1083 req->base.complete = compl; 1084 req->base.data = data; 1085 req->base.flags = flags; 1086} 1087 1088/** 1089 * ablkcipher_request_set_crypt() - set data buffers 1090 * @req: request handle 1091 * @src: source scatter / gather list 1092 * @dst: destination scatter / gather list 1093 * @nbytes: number of bytes to process from @src 1094 * @iv: IV for the cipher operation which must comply with the IV size defined 1095 * by crypto_ablkcipher_ivsize 1096 * 1097 * This function allows setting of the source data and destination data 1098 * scatter / gather lists. 1099 * 1100 * For encryption, the source is treated as the plaintext and the 1101 * destination is the ciphertext. For a decryption operation, the use is 1102 * reversed - the source is the ciphertext and the destination is the plaintext. 1103 */ 1104static inline void ablkcipher_request_set_crypt( 1105 struct ablkcipher_request *req, 1106 struct scatterlist *src, struct scatterlist *dst, 1107 unsigned int nbytes, void *iv) 1108{ 1109 req->src = src; 1110 req->dst = dst; 1111 req->nbytes = nbytes; 1112 req->info = iv; 1113} 1114 1115/** 1116 * DOC: Synchronous Block Cipher API 1117 * 1118 * The synchronous block cipher API is used with the ciphers of type 1119 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto) 1120 * 1121 * Synchronous calls, have a context in the tfm. But since a single tfm can be 1122 * used in multiple calls and in parallel, this info should not be changeable 1123 * (unless a lock is used). This applies, for example, to the symmetric key. 1124 * However, the IV is changeable, so there is an iv field in blkcipher_tfm 1125 * structure for synchronous blkcipher api. So, its the only state info that can 1126 * be kept for synchronous calls without using a big lock across a tfm. 1127 * 1128 * The block cipher API allows the use of a complete cipher, i.e. a cipher 1129 * consisting of a template (a block chaining mode) and a single block cipher 1130 * primitive (e.g. AES). 1131 * 1132 * The plaintext data buffer and the ciphertext data buffer are pointed to 1133 * by using scatter/gather lists. The cipher operation is performed 1134 * on all segments of the provided scatter/gather lists. 1135 * 1136 * The kernel crypto API supports a cipher operation "in-place" which means that 1137 * the caller may provide the same scatter/gather list for the plaintext and 1138 * cipher text. After the completion of the cipher operation, the plaintext 1139 * data is replaced with the ciphertext data in case of an encryption and vice 1140 * versa for a decryption. The caller must ensure that the scatter/gather lists 1141 * for the output data point to sufficiently large buffers, i.e. multiples of 1142 * the block size of the cipher. 1143 */ 1144 1145static inline struct crypto_blkcipher *__crypto_blkcipher_cast( 1146 struct crypto_tfm *tfm) 1147{ 1148 return (struct crypto_blkcipher *)tfm; 1149} 1150 1151static inline struct crypto_blkcipher *crypto_blkcipher_cast( 1152 struct crypto_tfm *tfm) 1153{ 1154 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER); 1155 return __crypto_blkcipher_cast(tfm); 1156} 1157 1158/** 1159 * crypto_alloc_blkcipher() - allocate synchronous block cipher handle 1160 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the 1161 * blkcipher cipher 1162 * @type: specifies the type of the cipher 1163 * @mask: specifies the mask for the cipher 1164 * 1165 * Allocate a cipher handle for a block cipher. The returned struct 1166 * crypto_blkcipher is the cipher handle that is required for any subsequent 1167 * API invocation for that block cipher. 1168 * 1169 * Return: allocated cipher handle in case of success; IS_ERR() is true in case 1170 * of an error, PTR_ERR() returns the error code. 1171 */ 1172static inline struct crypto_blkcipher *crypto_alloc_blkcipher( 1173 const char *alg_name, u32 type, u32 mask) 1174{ 1175 type &= ~CRYPTO_ALG_TYPE_MASK; 1176 type |= CRYPTO_ALG_TYPE_BLKCIPHER; 1177 mask |= CRYPTO_ALG_TYPE_MASK; 1178 1179 return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask)); 1180} 1181 1182static inline struct crypto_tfm *crypto_blkcipher_tfm( 1183 struct crypto_blkcipher *tfm) 1184{ 1185 return &tfm->base; 1186} 1187 1188/** 1189 * crypto_free_blkcipher() - zeroize and free the block cipher handle 1190 * @tfm: cipher handle to be freed 1191 */ 1192static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) 1193{ 1194 crypto_free_tfm(crypto_blkcipher_tfm(tfm)); 1195} 1196 1197/** 1198 * crypto_has_blkcipher() - Search for the availability of a block cipher 1199 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the 1200 * block cipher 1201 * @type: specifies the type of the cipher 1202 * @mask: specifies the mask for the cipher 1203 * 1204 * Return: true when the block cipher is known to the kernel crypto API; false 1205 * otherwise 1206 */ 1207static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) 1208{ 1209 type &= ~CRYPTO_ALG_TYPE_MASK; 1210 type |= CRYPTO_ALG_TYPE_BLKCIPHER; 1211 mask |= CRYPTO_ALG_TYPE_MASK; 1212 1213 return crypto_has_alg(alg_name, type, mask); 1214} 1215 1216/** 1217 * crypto_blkcipher_name() - return the name / cra_name from the cipher handle 1218 * @tfm: cipher handle 1219 * 1220 * Return: The character string holding the name of the cipher 1221 */ 1222static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) 1223{ 1224 return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); 1225} 1226 1227static inline struct blkcipher_tfm *crypto_blkcipher_crt( 1228 struct crypto_blkcipher *tfm) 1229{ 1230 return &crypto_blkcipher_tfm(tfm)->crt_blkcipher; 1231} 1232 1233static inline struct blkcipher_alg *crypto_blkcipher_alg( 1234 struct crypto_blkcipher *tfm) 1235{ 1236 return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; 1237} 1238 1239/** 1240 * crypto_blkcipher_ivsize() - obtain IV size 1241 * @tfm: cipher handle 1242 * 1243 * The size of the IV for the block cipher referenced by the cipher handle is 1244 * returned. This IV size may be zero if the cipher does not need an IV. 1245 * 1246 * Return: IV size in bytes 1247 */ 1248static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) 1249{ 1250 return crypto_blkcipher_alg(tfm)->ivsize; 1251} 1252 1253/** 1254 * crypto_blkcipher_blocksize() - obtain block size of cipher 1255 * @tfm: cipher handle 1256 * 1257 * The block size for the block cipher referenced with the cipher handle is 1258 * returned. The caller may use that information to allocate appropriate 1259 * memory for the data returned by the encryption or decryption operation. 1260 * 1261 * Return: block size of cipher 1262 */ 1263static inline unsigned int crypto_blkcipher_blocksize( 1264 struct crypto_blkcipher *tfm) 1265{ 1266 return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm)); 1267} 1268 1269static inline unsigned int crypto_blkcipher_alignmask( 1270 struct crypto_blkcipher *tfm) 1271{ 1272 return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm)); 1273} 1274 1275static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm) 1276{ 1277 return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm)); 1278} 1279 1280static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm, 1281 u32 flags) 1282{ 1283 crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags); 1284} 1285 1286static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm, 1287 u32 flags) 1288{ 1289 crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); 1290} 1291 1292/** 1293 * crypto_blkcipher_setkey() - set key for cipher 1294 * @tfm: cipher handle 1295 * @key: buffer holding the key 1296 * @keylen: length of the key in bytes 1297 * 1298 * The caller provided key is set for the block cipher referenced by the cipher 1299 * handle. 1300 * 1301 * Note, the key length determines the cipher type. Many block ciphers implement 1302 * different cipher modes depending on the key size, such as AES-128 vs AES-192 1303 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 1304 * is performed. 1305 * 1306 * Return: 0 if the setting of the key was successful; < 0 if an error occurred 1307 */ 1308static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, 1309 const u8 *key, unsigned int keylen) 1310{ 1311 return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm), 1312 key, keylen); 1313} 1314 1315/** 1316 * crypto_blkcipher_encrypt() - encrypt plaintext 1317 * @desc: reference to the block cipher handle with meta data 1318 * @dst: scatter/gather list that is filled by the cipher operation with the 1319 * ciphertext 1320 * @src: scatter/gather list that holds the plaintext 1321 * @nbytes: number of bytes of the plaintext to encrypt. 1322 * 1323 * Encrypt plaintext data using the IV set by the caller with a preceding 1324 * call of crypto_blkcipher_set_iv. 1325 * 1326 * The blkcipher_desc data structure must be filled by the caller and can 1327 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled 1328 * with the block cipher handle; desc.flags is filled with either 1329 * CRYPTO_TFM_REQ_MAY_SLEEP or 0. 1330 * 1331 * Return: 0 if the cipher operation was successful; < 0 if an error occurred 1332 */ 1333static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, 1334 struct scatterlist *dst, 1335 struct scatterlist *src, 1336 unsigned int nbytes) 1337{ 1338 desc->info = crypto_blkcipher_crt(desc->tfm)->iv; 1339 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); 1340} 1341 1342/** 1343 * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV 1344 * @desc: reference to the block cipher handle with meta data 1345 * @dst: scatter/gather list that is filled by the cipher operation with the 1346 * ciphertext 1347 * @src: scatter/gather list that holds the plaintext 1348 * @nbytes: number of bytes of the plaintext to encrypt. 1349 * 1350 * Encrypt plaintext data with the use of an IV that is solely used for this 1351 * cipher operation. Any previously set IV is not used. 1352 * 1353 * The blkcipher_desc data structure must be filled by the caller and can 1354 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled 1355 * with the block cipher handle; desc.info is filled with the IV to be used for 1356 * the current operation; desc.flags is filled with either 1357 * CRYPTO_TFM_REQ_MAY_SLEEP or 0. 1358 * 1359 * Return: 0 if the cipher operation was successful; < 0 if an error occurred 1360 */ 1361static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, 1362 struct scatterlist *dst, 1363 struct scatterlist *src, 1364 unsigned int nbytes) 1365{ 1366 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); 1367} 1368 1369/** 1370 * crypto_blkcipher_decrypt() - decrypt ciphertext 1371 * @desc: reference to the block cipher handle with meta data 1372 * @dst: scatter/gather list that is filled by the cipher operation with the 1373 * plaintext 1374 * @src: scatter/gather list that holds the ciphertext 1375 * @nbytes: number of bytes of the ciphertext to decrypt. 1376 * 1377 * Decrypt ciphertext data using the IV set by the caller with a preceding 1378 * call of crypto_blkcipher_set_iv. 1379 * 1380 * The blkcipher_desc data structure must be filled by the caller as documented 1381 * for the crypto_blkcipher_encrypt call above. 1382 * 1383 * Return: 0 if the cipher operation was successful; < 0 if an error occurred 1384 * 1385 */ 1386static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, 1387 struct scatterlist *dst, 1388 struct scatterlist *src, 1389 unsigned int nbytes) 1390{ 1391 desc->info = crypto_blkcipher_crt(desc->tfm)->iv; 1392 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); 1393} 1394 1395/** 1396 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV 1397 * @desc: reference to the block cipher handle with meta data 1398 * @dst: scatter/gather list that is filled by the cipher operation with the 1399 * plaintext 1400 * @src: scatter/gather list that holds the ciphertext 1401 * @nbytes: number of bytes of the ciphertext to decrypt. 1402 * 1403 * Decrypt ciphertext data with the use of an IV that is solely used for this 1404 * cipher operation. Any previously set IV is not used. 1405 * 1406 * The blkcipher_desc data structure must be filled by the caller as documented 1407 * for the crypto_blkcipher_encrypt_iv call above. 1408 * 1409 * Return: 0 if the cipher operation was successful; < 0 if an error occurred 1410 */ 1411static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, 1412 struct scatterlist *dst, 1413 struct scatterlist *src, 1414 unsigned int nbytes) 1415{ 1416 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); 1417} 1418 1419/** 1420 * crypto_blkcipher_set_iv() - set IV for cipher 1421 * @tfm: cipher handle 1422 * @src: buffer holding the IV 1423 * @len: length of the IV in bytes 1424 * 1425 * The caller provided IV is set for the block cipher referenced by the cipher 1426 * handle. 1427 */ 1428static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, 1429 const u8 *src, unsigned int len) 1430{ 1431 memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); 1432} 1433 1434/** 1435 * crypto_blkcipher_get_iv() - obtain IV from cipher 1436 * @tfm: cipher handle 1437 * @dst: buffer filled with the IV 1438 * @len: length of the buffer dst 1439 * 1440 * The caller can obtain the IV set for the block cipher referenced by the 1441 * cipher handle and store it into the user-provided buffer. If the buffer 1442 * has an insufficient space, the IV is truncated to fit the buffer. 1443 */ 1444static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, 1445 u8 *dst, unsigned int len) 1446{ 1447 memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); 1448} 1449 1450/** 1451 * DOC: Single Block Cipher API 1452 * 1453 * The single block cipher API is used with the ciphers of type 1454 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto). 1455 * 1456 * Using the single block cipher API calls, operations with the basic cipher 1457 * primitive can be implemented. These cipher primitives exclude any block 1458 * chaining operations including IV handling. 1459 * 1460 * The purpose of this single block cipher API is to support the implementation 1461 * of templates or other concepts that only need to perform the cipher operation 1462 * on one block at a time. Templates invoke the underlying cipher primitive 1463 * block-wise and process either the input or the output data of these cipher 1464 * operations. 1465 */ 1466 1467static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) 1468{ 1469 return (struct crypto_cipher *)tfm; 1470} 1471 1472static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm) 1473{ 1474 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); 1475 return __crypto_cipher_cast(tfm); 1476} 1477 1478/** 1479 * crypto_alloc_cipher() - allocate single block cipher handle 1480 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the 1481 * single block cipher 1482 * @type: specifies the type of the cipher 1483 * @mask: specifies the mask for the cipher 1484 * 1485 * Allocate a cipher handle for a single block cipher. The returned struct 1486 * crypto_cipher is the cipher handle that is required for any subsequent API 1487 * invocation for that single block cipher. 1488 * 1489 * Return: allocated cipher handle in case of success; IS_ERR() is true in case 1490 * of an error, PTR_ERR() returns the error code. 1491 */ 1492static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, 1493 u32 type, u32 mask) 1494{ 1495 type &= ~CRYPTO_ALG_TYPE_MASK; 1496 type |= CRYPTO_ALG_TYPE_CIPHER; 1497 mask |= CRYPTO_ALG_TYPE_MASK; 1498 1499 return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask)); 1500} 1501 1502static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) 1503{ 1504 return &tfm->base; 1505} 1506 1507/** 1508 * crypto_free_cipher() - zeroize and free the single block cipher handle 1509 * @tfm: cipher handle to be freed 1510 */ 1511static inline void crypto_free_cipher(struct crypto_cipher *tfm) 1512{ 1513 crypto_free_tfm(crypto_cipher_tfm(tfm)); 1514} 1515 1516/** 1517 * crypto_has_cipher() - Search for the availability of a single block cipher 1518 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the 1519 * single block cipher 1520 * @type: specifies the type of the cipher 1521 * @mask: specifies the mask for the cipher 1522 * 1523 * Return: true when the single block cipher is known to the kernel crypto API; 1524 * false otherwise 1525 */ 1526static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) 1527{ 1528 type &= ~CRYPTO_ALG_TYPE_MASK; 1529 type |= CRYPTO_ALG_TYPE_CIPHER; 1530 mask |= CRYPTO_ALG_TYPE_MASK; 1531 1532 return crypto_has_alg(alg_name, type, mask); 1533} 1534 1535static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm) 1536{ 1537 return &crypto_cipher_tfm(tfm)->crt_cipher; 1538} 1539 1540/** 1541 * crypto_cipher_blocksize() - obtain block size for cipher 1542 * @tfm: cipher handle 1543 * 1544 * The block size for the single block cipher referenced with the cipher handle 1545 * tfm is returned. The caller may use that information to allocate appropriate 1546 * memory for the data returned by the encryption or decryption operation 1547 * 1548 * Return: block size of cipher 1549 */ 1550static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) 1551{ 1552 return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); 1553} 1554 1555static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm) 1556{ 1557 return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm)); 1558} 1559 1560static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm) 1561{ 1562 return crypto_tfm_get_flags(crypto_cipher_tfm(tfm)); 1563} 1564 1565static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm, 1566 u32 flags) 1567{ 1568 crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags); 1569} 1570 1571static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, 1572 u32 flags) 1573{ 1574 crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); 1575} 1576 1577/** 1578 * crypto_cipher_setkey() - set key for cipher 1579 * @tfm: cipher handle 1580 * @key: buffer holding the key 1581 * @keylen: length of the key in bytes 1582 * 1583 * The caller provided key is set for the single block cipher referenced by the 1584 * cipher handle. 1585 * 1586 * Note, the key length determines the cipher type. Many block ciphers implement 1587 * different cipher modes depending on the key size, such as AES-128 vs AES-192 1588 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 1589 * is performed. 1590 * 1591 * Return: 0 if the setting of the key was successful; < 0 if an error occurred 1592 */ 1593static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, 1594 const u8 *key, unsigned int keylen) 1595{ 1596 return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm), 1597 key, keylen); 1598} 1599 1600/** 1601 * crypto_cipher_encrypt_one() - encrypt one block of plaintext 1602 * @tfm: cipher handle 1603 * @dst: points to the buffer that will be filled with the ciphertext 1604 * @src: buffer holding the plaintext to be encrypted 1605 * 1606 * Invoke the encryption operation of one block. The caller must ensure that 1607 * the plaintext and ciphertext buffers are at least one block in size. 1608 */ 1609static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, 1610 u8 *dst, const u8 *src) 1611{ 1612 crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm), 1613 dst, src); 1614} 1615 1616/** 1617 * crypto_cipher_decrypt_one() - decrypt one block of ciphertext 1618 * @tfm: cipher handle 1619 * @dst: points to the buffer that will be filled with the plaintext 1620 * @src: buffer holding the ciphertext to be decrypted 1621 * 1622 * Invoke the decryption operation of one block. The caller must ensure that 1623 * the plaintext and ciphertext buffers are at least one block in size. 1624 */ 1625static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, 1626 u8 *dst, const u8 *src) 1627{ 1628 crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm), 1629 dst, src); 1630} 1631 1632/** 1633 * DOC: Synchronous Message Digest API 1634 * 1635 * The synchronous message digest API is used with the ciphers of type 1636 * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto) 1637 */ 1638 1639static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm) 1640{ 1641 return (struct crypto_hash *)tfm; 1642} 1643 1644static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm) 1645{ 1646 BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_HASH) & 1647 CRYPTO_ALG_TYPE_HASH_MASK); 1648 return __crypto_hash_cast(tfm); 1649} 1650 1651/** 1652 * crypto_alloc_hash() - allocate synchronous message digest handle 1653 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the 1654 * message digest cipher 1655 * @type: specifies the type of the cipher 1656 * @mask: specifies the mask for the cipher 1657 * 1658 * Allocate a cipher handle for a message digest. The returned struct 1659 * crypto_hash is the cipher handle that is required for any subsequent 1660 * API invocation for that message digest. 1661 * 1662 * Return: allocated cipher handle in case of success; IS_ERR() is true in case 1663 * of an error, PTR_ERR() returns the error code. 1664 */ 1665static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, 1666 u32 type, u32 mask) 1667{ 1668 type &= ~CRYPTO_ALG_TYPE_MASK; 1669 mask &= ~CRYPTO_ALG_TYPE_MASK; 1670 type |= CRYPTO_ALG_TYPE_HASH; 1671 mask |= CRYPTO_ALG_TYPE_HASH_MASK; 1672 1673 return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask)); 1674} 1675 1676static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm) 1677{ 1678 return &tfm->base; 1679} 1680 1681/** 1682 * crypto_free_hash() - zeroize and free message digest handle 1683 * @tfm: cipher handle to be freed 1684 */ 1685static inline void crypto_free_hash(struct crypto_hash *tfm) 1686{ 1687 crypto_free_tfm(crypto_hash_tfm(tfm)); 1688} 1689 1690/** 1691 * crypto_has_hash() - Search for the availability of a message digest 1692 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the 1693 * message digest cipher 1694 * @type: specifies the type of the cipher 1695 * @mask: specifies the mask for the cipher 1696 * 1697 * Return: true when the message digest cipher is known to the kernel crypto 1698 * API; false otherwise 1699 */ 1700static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) 1701{ 1702 type &= ~CRYPTO_ALG_TYPE_MASK; 1703 mask &= ~CRYPTO_ALG_TYPE_MASK; 1704 type |= CRYPTO_ALG_TYPE_HASH; 1705 mask |= CRYPTO_ALG_TYPE_HASH_MASK; 1706 1707 return crypto_has_alg(alg_name, type, mask); 1708} 1709 1710static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm) 1711{ 1712 return &crypto_hash_tfm(tfm)->crt_hash; 1713} 1714 1715/** 1716 * crypto_hash_blocksize() - obtain block size for message digest 1717 * @tfm: cipher handle 1718 * 1719 * The block size for the message digest cipher referenced with the cipher 1720 * handle is returned. 1721 * 1722 * Return: block size of cipher 1723 */ 1724static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm) 1725{ 1726 return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm)); 1727} 1728 1729static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm) 1730{ 1731 return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm)); 1732} 1733 1734/** 1735 * crypto_hash_digestsize() - obtain message digest size 1736 * @tfm: cipher handle 1737 * 1738 * The size for the message digest created by the message digest cipher 1739 * referenced with the cipher handle is returned. 1740 * 1741 * Return: message digest size 1742 */ 1743static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm) 1744{ 1745 return crypto_hash_crt(tfm)->digestsize; 1746} 1747 1748static inline u32 crypto_hash_get_flags(struct crypto_hash *tfm) 1749{ 1750 return crypto_tfm_get_flags(crypto_hash_tfm(tfm)); 1751} 1752 1753static inline void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags) 1754{ 1755 crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags); 1756} 1757 1758static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags) 1759{ 1760 crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags); 1761} 1762 1763/** 1764 * crypto_hash_init() - (re)initialize message digest handle 1765 * @desc: cipher request handle that to be filled by caller -- 1766 * desc.tfm is filled with the hash cipher handle; 1767 * desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0. 1768 * 1769 * The call (re-)initializes the message digest referenced by the hash cipher 1770 * request handle. Any potentially existing state created by previous 1771 * operations is discarded. 1772 * 1773 * Return: 0 if the message digest initialization was successful; < 0 if an 1774 * error occurred 1775 */ 1776static inline int crypto_hash_init(struct hash_desc *desc) 1777{ 1778 return crypto_hash_crt(desc->tfm)->init(desc); 1779} 1780 1781/** 1782 * crypto_hash_update() - add data to message digest for processing 1783 * @desc: cipher request handle 1784 * @sg: scatter / gather list pointing to the data to be added to the message 1785 * digest 1786 * @nbytes: number of bytes to be processed from @sg 1787 * 1788 * Updates the message digest state of the cipher handle pointed to by the 1789 * hash cipher request handle with the input data pointed to by the 1790 * scatter/gather list. 1791 * 1792 * Return: 0 if the message digest update was successful; < 0 if an error 1793 * occurred 1794 */ 1795static inline int crypto_hash_update(struct hash_desc *desc, 1796 struct scatterlist *sg, 1797 unsigned int nbytes) 1798{ 1799 return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes); 1800} 1801 1802/** 1803 * crypto_hash_final() - calculate message digest 1804 * @desc: cipher request handle 1805 * @out: message digest output buffer -- The caller must ensure that the out 1806 * buffer has a sufficient size (e.g. by using the crypto_hash_digestsize 1807 * function). 1808 * 1809 * Finalize the message digest operation and create the message digest 1810 * based on all data added to the cipher handle. The message digest is placed 1811 * into the output buffer. 1812 * 1813 * Return: 0 if the message digest creation was successful; < 0 if an error 1814 * occurred 1815 */ 1816static inline int crypto_hash_final(struct hash_desc *desc, u8 *out) 1817{ 1818 return crypto_hash_crt(desc->tfm)->final(desc, out); 1819} 1820 1821/** 1822 * crypto_hash_digest() - calculate message digest for a buffer 1823 * @desc: see crypto_hash_final() 1824 * @sg: see crypto_hash_update() 1825 * @nbytes: see crypto_hash_update() 1826 * @out: see crypto_hash_final() 1827 * 1828 * This function is a "short-hand" for the function calls of crypto_hash_init, 1829 * crypto_hash_update and crypto_hash_final. The parameters have the same 1830 * meaning as discussed for those separate three functions. 1831 * 1832 * Return: 0 if the message digest creation was successful; < 0 if an error 1833 * occurred 1834 */ 1835static inline int crypto_hash_digest(struct hash_desc *desc, 1836 struct scatterlist *sg, 1837 unsigned int nbytes, u8 *out) 1838{ 1839 return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out); 1840} 1841 1842/** 1843 * crypto_hash_setkey() - set key for message digest 1844 * @hash: cipher handle 1845 * @key: buffer holding the key 1846 * @keylen: length of the key in bytes 1847 * 1848 * The caller provided key is set for the message digest cipher. The cipher 1849 * handle must point to a keyed hash in order for this function to succeed. 1850 * 1851 * Return: 0 if the setting of the key was successful; < 0 if an error occurred 1852 */ 1853static inline int crypto_hash_setkey(struct crypto_hash *hash, 1854 const u8 *key, unsigned int keylen) 1855{ 1856 return crypto_hash_crt(hash)->setkey(hash, key, keylen); 1857} 1858 1859static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm) 1860{ 1861 return (struct crypto_comp *)tfm; 1862} 1863 1864static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm) 1865{ 1866 BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) & 1867 CRYPTO_ALG_TYPE_MASK); 1868 return __crypto_comp_cast(tfm); 1869} 1870 1871static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name, 1872 u32 type, u32 mask) 1873{ 1874 type &= ~CRYPTO_ALG_TYPE_MASK; 1875 type |= CRYPTO_ALG_TYPE_COMPRESS; 1876 mask |= CRYPTO_ALG_TYPE_MASK; 1877 1878 return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask)); 1879} 1880 1881static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm) 1882{ 1883 return &tfm->base; 1884} 1885 1886static inline void crypto_free_comp(struct crypto_comp *tfm) 1887{ 1888 crypto_free_tfm(crypto_comp_tfm(tfm)); 1889} 1890 1891static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask) 1892{ 1893 type &= ~CRYPTO_ALG_TYPE_MASK; 1894 type |= CRYPTO_ALG_TYPE_COMPRESS; 1895 mask |= CRYPTO_ALG_TYPE_MASK; 1896 1897 return crypto_has_alg(alg_name, type, mask); 1898} 1899 1900static inline const char *crypto_comp_name(struct crypto_comp *tfm) 1901{ 1902 return crypto_tfm_alg_name(crypto_comp_tfm(tfm)); 1903} 1904 1905static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm) 1906{ 1907 return &crypto_comp_tfm(tfm)->crt_compress; 1908} 1909 1910static inline int crypto_comp_compress(struct crypto_comp *tfm, 1911 const u8 *src, unsigned int slen, 1912 u8 *dst, unsigned int *dlen) 1913{ 1914 return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm), 1915 src, slen, dst, dlen); 1916} 1917 1918static inline int crypto_comp_decompress(struct crypto_comp *tfm, 1919 const u8 *src, unsigned int slen, 1920 u8 *dst, unsigned int *dlen) 1921{ 1922 return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm), 1923 src, slen, dst, dlen); 1924} 1925 1926#endif /* _LINUX_CRYPTO_H */ 1927