// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "skcipher.h"

#define CRYPTO_ALG_TYPE_SKCIPHER_MASK	0x0000000e

enum {
	SKCIPHER_WALK_SLOW = 1 << 0,
	SKCIPHER_WALK_COPY = 1 << 1,
	SKCIPHER_WALK_DIFF = 1 << 2,
	SKCIPHER_WALK_SLEEP = 1 << 3,
};

static const struct crypto_type crypto_skcipher_type;

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

static inline struct skcipher_alg *__crypto_skcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct skcipher_alg, base);
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr = PTR_ALIGN(walk->buffer, walk->alignmask + 1);

	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return 0;
}

/**
 * skcipher_walk_done() - finish one step of a skcipher_walk
 * @walk: the skcipher_walk
 * @res: number of bytes *not* processed (>= 0) from walk->nbytes,
 *	 or a -errno value to terminate the walk due to an error
 *
 * This function cleans up after one step of walking through the source and
 * destination scatterlists, and advances to the next step if applicable.
 * walk->nbytes is set to the number of bytes available in the next step,
 * walk->total is set to the new total number of bytes remaining, and
 * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there
 * is no more data, or if an error occurred (i.e. -errno return), then
 * walk->nbytes and walk->total are set to 0 and all resources owned by the
 * skcipher_walk are freed.
 *
 * Return: 0 or a -errno value. If @res was a -errno value then it will be
 *	   returned, but other errors may occur too.
 */
int skcipher_walk_done(struct skcipher_walk *walk, int res)
{
	unsigned int n = walk->nbytes; /* num bytes processed this step */
	unsigned int total = 0; /* new total remaining */

	if (!n)
		goto finish;

	if (likely(res >= 0)) {
		n -= res; /* subtract num bytes *not* processed */
		total = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else { /* SKCIPHER_WALK_SLOW */
		if (res > 0) {
			/*
			 * Didn't process all bytes. Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			res = -EINVAL;
			total = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (res > 0)
		res = 0;

	walk->total = total;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, total);
	scatterwalk_done(&walk->out, 1, total);

	if (total) {
		if (walk->flags & SKCIPHER_WALK_SLEEP)
			cond_resched();
		walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
				 SKCIPHER_WALK_DIFF);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return res;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

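/*
 * Usage sketch (not part of the original file): a driver-side routine
 * typically drives the walk API above as shown below. The xxx_crypt() name
 * and the "process" step are placeholders for whatever cipher the driver
 * implements; real users (e.g. the arch ChaCha glue code) follow this shape:
 *
 *	static int xxx_crypt(struct skcipher_request *req)
 *	{
 *		struct skcipher_walk walk;
 *		int err;
 *
 *		err = skcipher_walk_virt(&walk, req, false);
 *
 *		while (walk.nbytes) {
 *			unsigned int nbytes = walk.nbytes;
 *
 *			// All but the final step must be a multiple of the
 *			// walk stride.
 *			if (nbytes < walk.total)
 *				nbytes = round_down(nbytes, walk.stride);
 *
 *			// ... process nbytes bytes from walk.src.virt.addr
 *			// into walk.dst.virt.addr ...
 *
 *			err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 *		}
 *
 *		return err;
 *	}
 */
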
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	unsigned alignmask = walk->alignmask;
	unsigned n;
	u8 *buffer;

	if (!walk->buffer)
		walk->buffer = walk->page;
	buffer = walk->buffer;
	if (!buffer) {
		/* Min size for a buffer of bsize bytes aligned to alignmask */
		n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));

		buffer = kzalloc(n, skcipher_walk_gfp(walk));
		if (!buffer)
			return skcipher_walk_done(walk, -ENOMEM);
		walk->buffer = buffer;
	}
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;
	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	diff = offset_in_page(walk->in.offset) -
	       offset_in_page(walk->out.offset);
	diff |= (u8 *)scatterwalk_page(&walk->in) -
		(u8 *)scatterwalk_page(&walk->out);

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		return skcipher_next_slow(walk, bsize);
	}
	walk->nbytes = n;

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}
		walk->flags |= SKCIPHER_WALK_COPY;
		return skcipher_next_copy(walk);
	}

	return skcipher_next_fast(walk);
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
	unsigned size;
	u8 *iv;

	/* Min size for a buffer of stride + ivsize, aligned to alignmask */
	size = aligned_stride + ivsize +
	       (alignmask & ~(crypto_tfm_ctx_alignment() - 1));

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	const struct skcipher_alg *alg =
		crypto_skcipher_alg(crypto_skcipher_reqtfm(req));

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;
	if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
		walk->flags = SKCIPHER_WALK_SLEEP;
	else
		walk->flags = 0;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	/*
	 * Accessing 'alg' directly generates better code than using the
	 * crypto_skcipher_blocksize() and similar helper functions here, as it
	 * prevents the algorithm pointer from being repeatedly reloaded.
	 */
	walk->blocksize = alg->base.cra_blocksize;
	walk->ivsize = alg->co.ivsize;
	walk->alignmask = alg->base.cra_alignmask;

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		walk->stride = alg->co.chunksize;
	else
		walk->stride = alg->walksize;

	return skcipher_walk_first(walk);
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	const struct aead_alg *alg = crypto_aead_alg(crypto_aead_reqtfm(req));

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;
	if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
		walk->flags = SKCIPHER_WALK_SLEEP;
	else
		walk->flags = 0;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	/*
	 * Accessing 'alg' directly generates better code than using the
	 * crypto_aead_blocksize() and similar helper functions here, as it
	 * prevents the algorithm pointer from being repeatedly reloaded.
	 */
	walk->blocksize = alg->base.cra_blocksize;
	walk->stride = alg->chunksize;
	walk->ivsize = alg->ivsize;
	walk->alignmask = alg->base.cra_alignmask;

	return skcipher_walk_first(walk);
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

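/*
 * Usage sketch (not part of the original file): AEAD implementations use the
 * same walk loop as plain skciphers, except that the walk only covers the
 * plaintext/ciphertext; the associated data is skipped by
 * skcipher_walk_aead_common() and, on decryption, walk->total excludes the
 * authentication tag. The names below are placeholders:
 *
 *	static int xxx_aead_encrypt(struct aead_request *req)
 *	{
 *		struct skcipher_walk walk;
 *		int err;
 *
 *		err = skcipher_walk_aead_encrypt(&walk, req, false);
 *		while (walk.nbytes) {
 *			// ... encrypt walk.nbytes bytes in walk.src/dst ...
 *			err = skcipher_walk_done(&walk, 0);
 *		}
 *		// ... then compute and store the authentication tag ...
 *		return err;
 *	}
 */
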
static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (cipher->co.base.cra_type != &crypto_skcipher_type) {
		struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);

		crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
		crypto_lskcipher_set_flags(*ctx,
					   crypto_skcipher_get_flags(tfm) &
					   CRYPTO_TFM_REQ_MASK);
		err = crypto_lskcipher_setkey(*ctx, key, keylen);
		goto out;
	}

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

out:
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_encrypt_sg(req);
	return alg->encrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_decrypt_sg(req);
	return alg->decrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);

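/*
 * Usage sketch (not part of the original file): a typical user of the setkey
 * and encrypt/decrypt entry points above. "cbc(aes)" and the data/key/iv
 * names are illustrative only; see Documentation/crypto/ for the full API
 * description.
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	if (err)
 *		goto out_free_tfm;
 *
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	if (!req) {
 *		err = -ENOMEM;
 *		goto out_free_tfm;
 *	}
 *
 *	sg_init_one(&sg, data, datalen);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, &sg, &sg, datalen, iv);
 *
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 *	skcipher_request_free(req);
 * out_free_tfm:
 *	crypto_free_skcipher(tfm);
 */
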
static int crypto_lskcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(out, ivs + crypto_skcipher_ivsize(tfm),
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int crypto_lskcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(ivs + crypto_skcipher_ivsize(tfm), in,
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int skcipher_noexport(struct skcipher_request *req, void *out)
{
	return 0;
}

static int skcipher_noimport(struct skcipher_request *req, const void *in)
{
	return 0;
}

int crypto_skcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_export(req, out);
	return alg->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_export);

int crypto_skcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_import(req, in);
	return alg->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_import);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
		unsigned am = crypto_skcipher_alignmask(skcipher);
		unsigned reqsize;

		reqsize = am & ~(crypto_tfm_ctx_alignment() - 1);
		reqsize += crypto_skcipher_ivsize(skcipher);
		reqsize += crypto_skcipher_statesize(skcipher);
		crypto_skcipher_set_reqsize(skcipher, reqsize);

		return crypto_init_lskcipher_ops_sg(tfm);
	}

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_skcipher_type)
		return sizeof(struct crypto_lskcipher *);

	return crypto_alg_extsize(alg);
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
	seq_printf(m, "statesize    : %u\n", skcipher->statesize);
}

static int __maybe_unused crypto_skcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_skcipher_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);

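/*
 * Usage sketch (not part of the original file): sync skciphers exist so that
 * the request can live on the caller's stack, which the reqsize check above
 * guarantees is safe. The xxx_crypt_one() name is a placeholder:
 *
 *	static int xxx_crypt_one(struct crypto_sync_skcipher *tfm,
 *				 struct scatterlist *src,
 *				 struct scatterlist *dst,
 *				 unsigned int len, u8 *iv)
 *	{
 *		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *		int err;
 *
 *		skcipher_request_set_sync_tfm(req, tfm);
 *		skcipher_request_set_callback(req, 0, NULL, NULL);
 *		skcipher_request_set_crypt(req, src, dst, len, iv);
 *
 *		err = crypto_skcipher_encrypt(req);
 *		skcipher_request_zero(req);
 *		return err;
 *	}
 */
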
int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->statesize > PAGE_SIZE / 2 ||
	    (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;

	return 0;
}

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg_common(&alg->co);
	if (err)
		return err;

	if (alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	if (!alg->statesize) {
		alg->import = skcipher_noimport;
		alg->export = skcipher_noexport;
	} else if (!(alg->import && alg->export))
		return -EINVAL;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

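/*
 * Usage sketch (not part of the original file): algorithm drivers usually
 * register an array of skcipher_alg structures from their module init hook
 * and unregister it on exit. The my_algs[] and my_mod_*() names are
 * placeholders:
 *
 *	static struct skcipher_alg my_algs[] = {
 *		{ ... }, { ... },
 *	};
 *
 *	static int __init my_mod_init(void)
 *	{
 *		return crypto_register_skciphers(my_algs, ARRAY_SIZE(my_algs));
 *	}
 *
 *	static void __exit my_mod_exit(void)
 *	{
 *		crypto_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs));
 *	}
 */
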
int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb. The instance context will have just a single crypto_spawn,
 * that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed. The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR(). The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);

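/*
 * Usage sketch (not part of the original file): a template such as "ecb"
 * builds on the helper above roughly as follows, then registers the result
 * with skcipher_register_instance(). crypto_xxx_create(), crypto_xxx_encrypt()
 * and crypto_xxx_decrypt() are placeholders:
 *
 *	static int crypto_xxx_create(struct crypto_template *tmpl,
 *				     struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = crypto_xxx_encrypt;
 *		inst->alg.decrypt = crypto_xxx_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *
 *		return err;
 *	}
 */
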
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");