/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 */
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/refcount_types.h>
#include <linux/slab.h>
#include <linux/types.h>

/*
 * Algorithm masks and types.
 */
#define CRYPTO_ALG_TYPE_MASK		0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER		0x00000001
#define CRYPTO_ALG_TYPE_AEAD		0x00000003
#define CRYPTO_ALG_TYPE_LSKCIPHER	0x00000004
#define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_AKCIPHER	0x00000006
#define CRYPTO_ALG_TYPE_SIG		0x00000007
#define CRYPTO_ALG_TYPE_KPP		0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS	0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS	0x0000000b
#define CRYPTO_ALG_TYPE_RNG		0x0000000c
#define CRYPTO_ALG_TYPE_HASH		0x0000000e
#define CRYPTO_ALG_TYPE_SHASH		0x0000000e
#define CRYPTO_ALG_TYPE_AHASH		0x0000000f

#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK	0x0000000e

#define CRYPTO_ALG_LARVAL		0x00000010
#define CRYPTO_ALG_DEAD			0x00000020
#define CRYPTO_ALG_DYING		0x00000040
#define CRYPTO_ALG_ASYNC		0x00000080

/*
 * Set if the algorithm (or an algorithm which it uses) requires another
 * algorithm of the same type to handle corner cases.
 */
#define CRYPTO_ALG_NEED_FALLBACK	0x00000100

/*
 * Set if the algorithm data structure should be duplicated into
 * kmalloc memory before registration. This is useful for hardware
 * that can be disconnected at will. Do not use this if the data
 * structure is embedded into a bigger one. Duplicate the overall
 * data structure in the driver in that case.
 */
#define CRYPTO_ALG_DUP_FIRST		0x00000200

/*
 * Set if the algorithm has passed automated run-time testing. Note that
 * if there is no run-time testing for a given algorithm it is considered
 * to have passed.
 */

#define CRYPTO_ALG_TESTED		0x00000400

/*
 * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE		0x00000800

/* Set this bit if the algorithm provided is hardware accelerated but
 * not available to userspace via instruction set or so.
 */
#define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000

/*
 * Mark a cipher as a service implementation only usable by another
 * cipher and never by a normal user of the kernel crypto API
 */
#define CRYPTO_ALG_INTERNAL		0x00002000

/*
 * Set if the algorithm has a ->setkey() method but can be used without
 * calling it first, i.e. there is a default key.
 */
#define CRYPTO_ALG_OPTIONAL_KEY	0x00004000

/*
 * Don't trigger module loading
 */
#define CRYPTO_NOLOAD			0x00008000

/*
 * The algorithm may allocate memory during request processing, i.e. during
 * encryption, decryption, or hashing. Users can request an algorithm with this
 * flag unset if they can't handle memory allocation failures.
 *
 * This flag is currently only implemented for algorithms of type "skcipher",
 * "aead", "ahash", "shash", and "cipher". Algorithms of other types might not
 * have this flag set even if they allocate memory.
 *
 * In some edge cases, algorithms can allocate memory regardless of this flag.
 * To avoid these cases, users must obey the following usage constraints:
 *    skcipher:
 *	- The IV buffer and all scatterlist elements must be aligned to the
 *	  algorithm's alignmask.
 *	- If the data were to be divided into chunks of size
 *	  crypto_skcipher_walksize() (with any remainder going at the end), no
 *	  chunk can cross a page boundary or a scatterlist element boundary.
 *    aead:
 *	- The IV buffer and all scatterlist elements must be aligned to the
 *	  algorithm's alignmask.
 *	- The first scatterlist element must contain all the associated data,
 *	  and its pages must be !PageHighMem.
 *	- If the plaintext/ciphertext were to be divided into chunks of size
 *	  crypto_aead_walksize() (with the remainder going at the end), no chunk
 *	  can cross a page boundary or a scatterlist element boundary.
 *    ahash:
 *	- crypto_ahash_finup() must not be used unless the algorithm implements
 *	  ->finup() natively.
 */
#define CRYPTO_ALG_ALLOCATES_MEMORY	0x00010000
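
/*
 * Example: a minimal sketch (not part of the original header) of how a user
 * who cannot tolerate allocation failures might request an skcipher with
 * this flag clear, by putting it in the allocation mask. Assumes
 * <crypto/skcipher.h> for crypto_alloc_skcipher(); the algorithm name is
 * illustrative only.
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0,
 *				    CRYPTO_ALG_ALLOCATES_MEMORY);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */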

/*
 * Mark an algorithm as a service implementation only usable by a
 * template and never by a normal user of the kernel crypto API.
 * This is intended to be used by algorithms that are themselves
 * not FIPS-approved but may instead be used to implement parts of
 * a FIPS-approved algorithm (e.g., dh vs. ffdhe2048(dh)).
 */
#define CRYPTO_ALG_FIPS_INTERNAL	0x00020000

/* Set if the algorithm supports virtual addresses. */
#define CRYPTO_ALG_REQ_VIRT		0x00040000

/* The high bits 0xff000000 are reserved for type-specific flags. */

/*
 * Transform masks and values (for crt_flags).
 */
#define CRYPTO_TFM_NEED_KEY		0x00000001

#define CRYPTO_TFM_REQ_MASK		0x000fff00
#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS	0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400
#define CRYPTO_TFM_REQ_ON_STACK		0x00000800

/*
 * Miscellaneous stuff.
 */
#define CRYPTO_MAX_ALG_NAME		128

/*
 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
 * declaration) is used to ensure that the crypto_tfm context structure is
 * aligned correctly for the given architecture so that there are no alignment
 * faults for C data types. On architectures that support non-cache coherent
 * DMA, such as ARM or arm64, it also takes into account the minimal alignment
 * that is required to ensure that the context struct member does not share any
 * cachelines with the rest of the struct. This is needed to ensure that cache
 * maintenance for non-coherent DMA (cache invalidation in particular) does not
 * affect data that may be accessed by the CPU concurrently.
 */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))

struct crypto_tfm;
struct crypto_type;
struct module;

typedef void (*crypto_completion_t)(void *req, int err);

/**
 * DOC: Block Cipher Context Data Structures
 *
 * These data structures define the operating context for each block cipher
 * type.
 */

struct crypto_async_request {
	struct list_head list;
	crypto_completion_t complete;
	void *data;
	struct crypto_tfm *tfm;

	u32 flags;
};

/**
 * DOC: Block Cipher Algorithm Definitions
 *
 * These data structures define modular crypto algorithm implementations,
 * managed via crypto_register_alg() and crypto_unregister_alg().
 */

/**
 * struct cipher_alg - single-block symmetric ciphers definition
 * @cia_min_keysize: Minimum key size supported by the transformation. This is
 *		     the smallest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MIN_KEY_SIZE"
 *		     include/crypto/
 * @cia_max_keysize: Maximum key size supported by the transformation. This is
 *		     the largest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MAX_KEY_SIZE"
 *		     include/crypto/
 * @cia_setkey: Set key for the transformation. This function is used to either
 *		program a supplied key into the hardware or store the key in the
 *		transformation context for programming it later. Note that this
 *		function does modify the transformation context. This function
 *		can be called multiple times during the existence of the
 *		transformation object, so one must make sure the key is properly
 *		reprogrammed into the hardware. This function is also
 *		responsible for checking the key length for validity.
 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
 *		 single block of data, which must be @cra_blocksize big. This
 *		 always operates on a full @cra_blocksize and it is not possible
 *		 to encrypt a block of smaller size. The supplied buffers must
 *		 therefore also be at least of @cra_blocksize size. Both the
 *		 input and output buffers are always aligned to @cra_alignmask.
 *		 In case either of the input or output buffer supplied by user
 *		 of the crypto API is not aligned to @cra_alignmask, the crypto
 *		 API will re-align the buffers. The re-alignment means that a
 *		 new buffer will be allocated, the data will be copied into the
 *		 new buffer, then the processing will happen on the new buffer,
 *		 then the data will be copied back into the original buffer and
 *		 finally the new buffer will be freed. In case a software
 *		 fallback was put in place in the @cra_init call, this function
 *		 might need to use the fallback if the algorithm doesn't support
 *		 all of the key sizes. In case the key was stored in
 *		 transformation context, the key might need to be re-programmed
 *		 into the hardware in this function. This function shall not
 *		 modify the transformation context, as this function may be
 *		 called in parallel with the same transformation object.
 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
 *		 @cia_encrypt, and the conditions are exactly the same.
 *
 * All fields are mandatory and must be filled.
 */
struct cipher_alg {
	unsigned int cia_min_keysize;
	unsigned int cia_max_keysize;
	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen);
	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

#define cra_cipher	cra_u.cipher

/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
 *	       CRYPTO_ALG_* flags for the flags which go in here. Those are
 *	       used for fine-tuning the description of the transformation
 *	       algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *		   of the smallest possible unit which can be transformed with
 *		   this algorithm. The users must respect this value.
 *		   In case of a HASH transformation, it is possible for a
 *		   smaller block than @cra_blocksize to be passed to the crypto
 *		   API for transformation; for any other transformation type,
 *		   an error will be returned upon any attempt to transform
 *		   smaller than @cra_blocksize chunks.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *		 value informs the kernel crypto API about the memory size
 *		 needed to be allocated for the transformation context.
 * @cra_alignmask: For cipher, skcipher, lskcipher, and aead algorithms this is
 *		   1 less than the alignment, in bytes, that the algorithm
 *		   implementation requires for input and output buffers. When
 *		   the crypto API is invoked with buffers that are not aligned
 *		   to this alignment, the crypto API automatically utilizes
 *		   appropriately aligned temporary buffers to comply with what
 *		   the algorithm needs. (For scatterlists this happens only if
 *		   the algorithm uses the skcipher_walk helper functions.) This
 *		   misalignment handling carries a performance penalty, so it is
 *		   preferred that algorithms do not set a nonzero alignmask.
 *		   Also, crypto API users may wish to allocate buffers aligned
 *		   to the alignmask of the algorithm being used, in order to
 *		   avoid the API having to realign them. Note: the alignmask is
 *		   not supported for hash algorithms and is always 0 for them.
 * @cra_reqsize: Size of the request context for this algorithm.
 * @cra_priority: Priority of this transformation implementation. In case
 *		  multiple transformations with the same @cra_name are available
 *		  to the Crypto API, the kernel will use the one with the
 *		  highest @cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 *	      transformation algorithm. This is the name of the transformation
 *	      itself. This field is used by the kernel when looking up the
 *	      providers of a particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 *		     name of the provider of the transformation. This can be
 *		     any arbitrary value, but in the usual case, this contains
 *		     the name of the chip or provider and the name of the
 *		     transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 *	      struct crypto_type, which implements callbacks common for all
 *	      transformation types. There are multiple options, such as
 *	      &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type.
 *	      This field might be empty. In that case, there are no common
 *	      callbacks. This is the case for: cipher.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 *	   multiple structures. Depending on the type of transformation selected
 *	   by @cra_type and @cra_flags above, the associated structure must be
 *	   filled with callbacks. This field might be empty. This is the case
 *	   for ahash, shash.
 * @cra_init: Deprecated, do not use.
 * @cra_exit: Deprecated, do not use.
 * @cra_u.cipher: Union member which contains a single-block symmetric cipher
 *		  definition. See struct cipher_alg.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE.
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
 */
struct crypto_alg {
	struct list_head cra_list;
	struct list_head cra_users;

	u32 cra_flags;
	unsigned int cra_blocksize;
	unsigned int cra_ctxsize;
	unsigned int cra_alignmask;
	unsigned int cra_reqsize;

	int cra_priority;
	refcount_t cra_refcnt;

	char cra_name[CRYPTO_MAX_ALG_NAME];
	char cra_driver_name[CRYPTO_MAX_ALG_NAME];

	const struct crypto_type *cra_type;

	union {
		struct cipher_alg cipher;
	} cra_u;

	int (*cra_init)(struct crypto_tfm *tfm);
	void (*cra_exit)(struct crypto_tfm *tfm);
	void (*cra_destroy)(struct crypto_alg *alg);

	struct module *cra_module;
} CRYPTO_MINALIGN_ATTR;
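
/*
 * Example: a hypothetical single-block cipher definition, sketched to show
 * how the fields above fit together. The "dummy" names, context struct, and
 * callbacks are assumptions for illustration only; crypto_register_alg()
 * lives outside this header (in <crypto/algapi.h> in current trees).
 *
 *	static struct crypto_alg dummy_alg = {
 *		.cra_name	 = "dummy",
 *		.cra_driver_name = "dummy-generic",
 *		.cra_priority	 = 100,
 *		.cra_flags	 = CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize	 = 16,
 *		.cra_ctxsize	 = sizeof(struct dummy_ctx),
 *		.cra_module	 = THIS_MODULE,
 *		.cra_u.cipher	 = {
 *			.cia_min_keysize = 16,
 *			.cia_max_keysize = 32,
 *			.cia_setkey	 = dummy_setkey,
 *			.cia_encrypt	 = dummy_encrypt,
 *			.cia_decrypt	 = dummy_decrypt,
 *		},
 *	};
 *
 *	err = crypto_register_alg(&dummy_alg);
 */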

/*
 * A helper struct for waiting for completion of async crypto ops
 */
struct crypto_wait {
	struct completion completion;
	int err;
};

/*
 * Macro for declaring a crypto op async wait object on stack
 */
#define DECLARE_CRYPTO_WAIT(_wait) \
	struct crypto_wait _wait = { \
		COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }

/*
 * Async ops completion helper functions
 */
void crypto_req_done(void *req, int err);

static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
	switch (err) {
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&wait->completion);
		reinit_completion(&wait->completion);
		err = wait->err;
		break;
	}

	return err;
}

static inline void crypto_init_wait(struct crypto_wait *wait)
{
	init_completion(&wait->completion);
}
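
/*
 * Example: the usual pattern for driving an asynchronous operation to
 * completion synchronously. A sketch assuming an skcipher request built
 * with the <crypto/skcipher.h> API; crypto_req_done() above is the
 * completion callback.
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */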

/*
 * Algorithm query interface.
 */
int crypto_has_alg(const char *name, u32 type, u32 mask);
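
/*
 * Example: probing for an implementation before committing to it. Type and
 * mask select on the CRYPTO_ALG_* bits above; 0/0 accepts any
 * implementation. The algorithm name is illustrative.
 *
 *	if (!crypto_has_alg("sha256", 0, 0))
 *		return -ENOENT;
 */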

/*
 * Transforms: user-instantiated objects which encapsulate algorithms
 * and core processing logic. Managed via crypto_alloc_*() and
 * crypto_free_*(), as well as the various helpers below.
 */

struct crypto_tfm {
	refcount_t refcnt;

	u32 crt_flags;

	int node;

	struct crypto_tfm *fb;

	void (*exit)(struct crypto_tfm *tfm);

	struct crypto_alg *__crt_alg;

	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

/*
 * Transform user interface.
 */

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
	return crypto_destroy_tfm(tfm, tfm);
}
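
/*
 * Example: allocating and freeing a bare transform. A sketch only; most
 * users should prefer the type-specific allocators such as
 * crypto_alloc_skcipher() from <crypto/skcipher.h>. The algorithm name is
 * illustrative.
 *
 *	struct crypto_tfm *tfm;
 *
 *	tfm = crypto_alloc_base("sha256", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_tfm(tfm);
 */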

/*
 * Transform helpers which query the underlying algorithm.
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_driver_name;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_alignmask;
}

static inline unsigned int crypto_tfm_alg_reqsize(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_reqsize;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
	return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags &= ~flags;
}
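
/*
 * Example: using the flag helpers above, e.g. to reject weak keys on a
 * transform before its setkey operation runs. Assumes tfm was allocated
 * earlier; type-specific wrappers (crypto_skcipher_set_flags() and
 * friends) exist in the per-type headers.
 *
 *	crypto_tfm_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
 *	...
 *	crypto_tfm_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
 */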

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
	struct crypto_tfm *tfm;
	return __alignof__(tfm->__crt_ctx);
}

static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
}

static inline bool crypto_req_on_stack(struct crypto_async_request *req)
{
	return req->flags & CRYPTO_TFM_REQ_ON_STACK;
}

static inline void crypto_request_set_callback(
	struct crypto_async_request *req, u32 flags,
	crypto_completion_t compl, void *data)
{
	u32 keep = CRYPTO_TFM_REQ_ON_STACK;

	req->complete = compl;
	req->data = data;
	req->flags &= keep;
	req->flags |= flags & ~keep;
}

static inline void crypto_request_set_tfm(struct crypto_async_request *req,
					  struct crypto_tfm *tfm)
{
	req->tfm = tfm;
	req->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
}

struct crypto_async_request *crypto_request_clone(
	struct crypto_async_request *req, size_t total, gfp_t gfp);

static inline void crypto_stack_request_init(struct crypto_async_request *req,
					     struct crypto_tfm *tfm)
{
	req->flags = 0;
	crypto_request_set_tfm(req, tfm);
	req->flags |= CRYPTO_TFM_REQ_ON_STACK;
}
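
/*
 * Example: initializing an on-stack request. A sketch only; in practice the
 * per-type wrappers (e.g. the *_REQUEST_ON_STACK macros in the type-specific
 * headers) are used rather than touching struct crypto_async_request
 * directly. tfm and wait are assumed to exist already.
 *
 *	struct crypto_async_request req;
 *
 *	crypto_stack_request_init(&req, tfm);
 *	crypto_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				    crypto_req_done, &wait);
 */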

#endif	/* _LINUX_CRYPTO_H */