Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: rsassa-pkcs1 - Migrate to sig_alg backend

A sig_alg backend has just been introduced with the intent of moving all
asymmetric sign/verify algorithms to it one by one.

Migrate the sign/verify operations from rsa-pkcs1pad.c to a separate
rsassa-pkcs1.c which uses the new backend.

Consequently there are now two templates which build on the "rsa"
akcipher_alg:

* The existing "pkcs1pad" template, which is instantiated as an
akcipher_instance and retains the encrypt/decrypt operations of
RSAES-PKCS1-v1_5 (RFC 8017 sec 7.2).

* The new "pkcs1" template, which is instantiated as a sig_instance
and contains the sign/verify operations of RSASSA-PKCS1-v1_5
(RFC 8017 sec 8.2).

In a separate step, rsa-pkcs1pad.c could optionally be renamed to
rsaes-pkcs1.c for clarity. Additional "oaep" and "pss" templates
could be added for RSAES-OAEP and RSASSA-PSS.

Note that it's currently allowed to allocate a "pkcs1pad(rsa)" transform
without specifying a hash algorithm. That makes sense if the transform
is only used for encrypt/decrypt and continues to be supported. But for
sign/verify, such transforms previously did not insert the Full Hash
Prefix into the padding. The resulting message encoding was noncompliant
with EMSA-PKCS1-v1_5 (RFC 8017 sec 9.2) and therefore nonsensical.

From here on in, it is no longer allowed to allocate a transform without
specifying a hash algorithm if the transform is used for sign/verify
operations. This simplifies the code because the insertion of the Full
Hash Prefix is no longer optional, so various "if (digest_info)" clauses
can be removed.

There has been a previous attempt to forbid transform allocation without
specifying a hash algorithm, namely by commit c0d20d22e0ad ("crypto:
rsa-pkcs1pad - Require hash to be present"). It had to be rolled back
with commit b3a8c8a5ebb5 ("crypto: rsa-pkcs1pad: Allow hash to be
optional [ver #2]"), presumably because it broke allocation of a
transform which was solely used for encrypt/decrypt, not sign/verify.
Avoid such breakage by allowing transform allocation for encrypt/decrypt
with and without specifying a hash algorithm (and simply ignoring the
hash algorithm in the former case).

So again, specifying a hash algorithm is now mandatory for sign/verify,
but optional and ignored for encrypt/decrypt.

The new sig_alg API uses kernel buffers instead of sglists, which
avoids the overhead of copying signature and digest from sglists back
into kernel buffers. rsassa-pkcs1.c is thus simplified quite a bit.

sig_alg is always synchronous, whereas the underlying "rsa" akcipher_alg
may be asynchronous. So await the result of the akcipher_alg, similar
to crypto_akcipher_sync_{en,de}crypt().

As part of the migration, rename "rsa_digest_info" to "hash_prefix" to
adhere to the spec language in RFC 9580. Otherwise keep the code
unmodified wherever possible to ease reviewing and bisecting. Leave
several simplification and hardening opportunities to separate commits.

rsassa-pkcs1.c uses modern __free() syntax for allocation of buffers
which need to be freed by kfree_sensitive(), hence a DEFINE_FREE()
clause for kfree_sensitive() is introduced herein as a byproduct.

Signed-off-by: Lukas Wunner <lukas@wunner.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Lukas Wunner and committed by
Herbert Xu
1e562dea 7964b0d4

+479 -344
+1
crypto/Kconfig
··· 250 250 tristate "RSA (Rivest-Shamir-Adleman)" 251 251 select CRYPTO_AKCIPHER 252 252 select CRYPTO_MANAGER 253 + select CRYPTO_SIG 253 254 select MPILIB 254 255 select ASN1 255 256 help
+1
crypto/Makefile
··· 48 48 rsa_generic-y += rsa.o 49 49 rsa_generic-y += rsa_helper.o 50 50 rsa_generic-y += rsa-pkcs1pad.o 51 + rsa_generic-y += rsassa-pkcs1.o 51 52 obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o 52 53 53 54 $(obj)/ecdsasignature.asn1.o: $(obj)/ecdsasignature.asn1.c $(obj)/ecdsasignature.asn1.h
+8 -2
crypto/asymmetric_keys/public_key.c
··· 83 83 if (strcmp(encoding, "pkcs1") == 0) { 84 84 *sig = op == kernel_pkey_sign || 85 85 op == kernel_pkey_verify; 86 - if (!hash_algo) { 86 + if (!*sig) { 87 + /* 88 + * For encrypt/decrypt, hash_algo is not used 89 + * but allowed to be set for historic reasons. 90 + */ 87 91 n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, 88 92 "pkcs1pad(%s)", 89 93 pkey->pkey_algo); 90 94 } else { 95 + if (!hash_algo) 96 + return -EINVAL; 91 97 n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME, 92 - "pkcs1pad(%s,%s)", 98 + "pkcs1(%s,%s)", 93 99 pkey->pkey_algo, hash_algo); 94 100 } 95 101 return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0;
+15 -324
crypto/rsa-pkcs1pad.c
··· 16 16 #include <linux/random.h> 17 17 #include <linux/scatterlist.h> 18 18 19 - /* 20 - * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2]. 21 - */ 22 - static const u8 rsa_digest_info_md5[] = { 23 - 0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, 24 - 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */ 25 - 0x05, 0x00, 0x04, 0x10 26 - }; 27 - 28 - static const u8 rsa_digest_info_sha1[] = { 29 - 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 30 - 0x2b, 0x0e, 0x03, 0x02, 0x1a, 31 - 0x05, 0x00, 0x04, 0x14 32 - }; 33 - 34 - static const u8 rsa_digest_info_rmd160[] = { 35 - 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 36 - 0x2b, 0x24, 0x03, 0x02, 0x01, 37 - 0x05, 0x00, 0x04, 0x14 38 - }; 39 - 40 - static const u8 rsa_digest_info_sha224[] = { 41 - 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 42 - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 43 - 0x05, 0x00, 0x04, 0x1c 44 - }; 45 - 46 - static const u8 rsa_digest_info_sha256[] = { 47 - 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 48 - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 49 - 0x05, 0x00, 0x04, 0x20 50 - }; 51 - 52 - static const u8 rsa_digest_info_sha384[] = { 53 - 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 54 - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 55 - 0x05, 0x00, 0x04, 0x30 56 - }; 57 - 58 - static const u8 rsa_digest_info_sha512[] = { 59 - 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 60 - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 61 - 0x05, 0x00, 0x04, 0x40 62 - }; 63 - 64 - static const u8 rsa_digest_info_sha3_256[] = { 65 - 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 66 - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x08, 67 - 0x05, 0x00, 0x04, 0x20 68 - }; 69 - 70 - static const u8 rsa_digest_info_sha3_384[] = { 71 - 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 72 - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x09, 73 - 0x05, 0x00, 0x04, 0x30 74 - }; 75 - 76 - static const u8 rsa_digest_info_sha3_512[] = { 77 - 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 78 - 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x0A, 79 - 
0x05, 0x00, 0x04, 0x40 80 - }; 81 - 82 - static const struct rsa_asn1_template { 83 - const char *name; 84 - const u8 *data; 85 - size_t size; 86 - } rsa_asn1_templates[] = { 87 - #define _(X) { #X, rsa_digest_info_##X, sizeof(rsa_digest_info_##X) } 88 - _(md5), 89 - _(sha1), 90 - _(rmd160), 91 - _(sha256), 92 - _(sha384), 93 - _(sha512), 94 - _(sha224), 95 - #undef _ 96 - #define _(X) { "sha3-" #X, rsa_digest_info_sha3_##X, sizeof(rsa_digest_info_sha3_##X) } 97 - _(256), 98 - _(384), 99 - _(512), 100 - #undef _ 101 - { NULL } 102 - }; 103 - 104 - static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name) 105 - { 106 - const struct rsa_asn1_template *p; 107 - 108 - for (p = rsa_asn1_templates; p->name; p++) 109 - if (strcmp(name, p->name) == 0) 110 - return p; 111 - return NULL; 112 - } 113 - 114 19 struct pkcs1pad_ctx { 115 20 struct crypto_akcipher *child; 116 21 unsigned int key_size; ··· 23 118 24 119 struct pkcs1pad_inst_ctx { 25 120 struct crypto_akcipher_spawn spawn; 26 - const struct rsa_asn1_template *digest_info; 27 121 }; 28 122 29 123 struct pkcs1pad_request { ··· 52 148 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 53 149 54 150 /* 55 - * The maximum destination buffer size for the encrypt/sign operations 151 + * The maximum destination buffer size for the encrypt operation 56 152 * will be the same as for RSA, even though it's smaller for 57 - * decrypt/verify. 153 + * decrypt. 
58 154 */ 59 155 60 156 return ctx->key_size; ··· 72 168 sg_chain(sg, nsegs, next); 73 169 } 74 170 75 - static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err) 171 + static int pkcs1pad_encrypt_complete(struct akcipher_request *req, int err) 76 172 { 77 173 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 78 174 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); ··· 111 207 return err; 112 208 } 113 209 114 - static void pkcs1pad_encrypt_sign_complete_cb(void *data, int err) 210 + static void pkcs1pad_encrypt_complete_cb(void *data, int err) 115 211 { 116 212 struct akcipher_request *req = data; 117 213 118 214 if (err == -EINPROGRESS) 119 215 goto out; 120 216 121 - err = pkcs1pad_encrypt_sign_complete(req, err); 217 + err = pkcs1pad_encrypt_complete(req, err); 122 218 123 219 out: 124 220 akcipher_request_complete(req, err); ··· 159 255 160 256 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); 161 257 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, 162 - pkcs1pad_encrypt_sign_complete_cb, req); 258 + pkcs1pad_encrypt_complete_cb, req); 163 259 164 260 /* Reuse output buffer */ 165 261 akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg, ··· 167 263 168 264 err = crypto_akcipher_encrypt(&req_ctx->child_req); 169 265 if (err != -EINPROGRESS && err != -EBUSY) 170 - return pkcs1pad_encrypt_sign_complete(req, err); 266 + return pkcs1pad_encrypt_complete(req, err); 171 267 172 268 return err; 173 269 } ··· 272 368 return err; 273 369 } 274 370 275 - static int pkcs1pad_sign(struct akcipher_request *req) 276 - { 277 - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 278 - struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 279 - struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); 280 - struct akcipher_instance *inst = akcipher_alg_instance(tfm); 281 - struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst); 282 - const struct rsa_asn1_template *digest_info = 
ictx->digest_info; 283 - int err; 284 - unsigned int ps_end, digest_info_size = 0; 285 - 286 - if (!ctx->key_size) 287 - return -EINVAL; 288 - 289 - if (digest_info) 290 - digest_info_size = digest_info->size; 291 - 292 - if (req->src_len + digest_info_size > ctx->key_size - 11) 293 - return -EOVERFLOW; 294 - 295 - if (req->dst_len < ctx->key_size) { 296 - req->dst_len = ctx->key_size; 297 - return -EOVERFLOW; 298 - } 299 - 300 - req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len, 301 - GFP_KERNEL); 302 - if (!req_ctx->in_buf) 303 - return -ENOMEM; 304 - 305 - ps_end = ctx->key_size - digest_info_size - req->src_len - 2; 306 - req_ctx->in_buf[0] = 0x01; 307 - memset(req_ctx->in_buf + 1, 0xff, ps_end - 1); 308 - req_ctx->in_buf[ps_end] = 0x00; 309 - 310 - if (digest_info) 311 - memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data, 312 - digest_info->size); 313 - 314 - pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf, 315 - ctx->key_size - 1 - req->src_len, req->src); 316 - 317 - akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); 318 - akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, 319 - pkcs1pad_encrypt_sign_complete_cb, req); 320 - 321 - /* Reuse output buffer */ 322 - akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg, 323 - req->dst, ctx->key_size - 1, req->dst_len); 324 - 325 - err = crypto_akcipher_decrypt(&req_ctx->child_req); 326 - if (err != -EINPROGRESS && err != -EBUSY) 327 - return pkcs1pad_encrypt_sign_complete(req, err); 328 - 329 - return err; 330 - } 331 - 332 - static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) 333 - { 334 - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 335 - struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 336 - struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); 337 - struct akcipher_instance *inst = akcipher_alg_instance(tfm); 338 - struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst); 339 - const struct rsa_asn1_template 
*digest_info = ictx->digest_info; 340 - const unsigned int sig_size = req->src_len; 341 - const unsigned int digest_size = req->dst_len; 342 - unsigned int dst_len; 343 - unsigned int pos; 344 - u8 *out_buf; 345 - 346 - if (err) 347 - goto done; 348 - 349 - err = -EINVAL; 350 - dst_len = req_ctx->child_req.dst_len; 351 - if (dst_len < ctx->key_size - 1) 352 - goto done; 353 - 354 - out_buf = req_ctx->out_buf; 355 - if (dst_len == ctx->key_size) { 356 - if (out_buf[0] != 0x00) 357 - /* Decrypted value had no leading 0 byte */ 358 - goto done; 359 - 360 - dst_len--; 361 - out_buf++; 362 - } 363 - 364 - err = -EBADMSG; 365 - if (out_buf[0] != 0x01) 366 - goto done; 367 - 368 - for (pos = 1; pos < dst_len; pos++) 369 - if (out_buf[pos] != 0xff) 370 - break; 371 - 372 - if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00) 373 - goto done; 374 - pos++; 375 - 376 - if (digest_info) { 377 - if (digest_info->size > dst_len - pos) 378 - goto done; 379 - if (crypto_memneq(out_buf + pos, digest_info->data, 380 - digest_info->size)) 381 - goto done; 382 - 383 - pos += digest_info->size; 384 - } 385 - 386 - err = 0; 387 - 388 - if (digest_size != dst_len - pos) { 389 - err = -EKEYREJECTED; 390 - req->dst_len = dst_len - pos; 391 - goto done; 392 - } 393 - /* Extract appended digest. */ 394 - sg_pcopy_to_buffer(req->src, 395 - sg_nents_for_len(req->src, sig_size + digest_size), 396 - req_ctx->out_buf + ctx->key_size, 397 - digest_size, sig_size); 398 - /* Do the actual verification step. 
*/ 399 - if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos, 400 - digest_size) != 0) 401 - err = -EKEYREJECTED; 402 - done: 403 - kfree_sensitive(req_ctx->out_buf); 404 - 405 - return err; 406 - } 407 - 408 - static void pkcs1pad_verify_complete_cb(void *data, int err) 409 - { 410 - struct akcipher_request *req = data; 411 - 412 - if (err == -EINPROGRESS) 413 - goto out; 414 - 415 - err = pkcs1pad_verify_complete(req, err); 416 - 417 - out: 418 - akcipher_request_complete(req, err); 419 - } 420 - 421 - /* 422 - * The verify operation is here for completeness similar to the verification 423 - * defined in RFC2313 section 10.2 except that block type 0 is not accepted, 424 - * as in RFC2437. RFC2437 section 9.2 doesn't define any operation to 425 - * retrieve the DigestInfo from a signature, instead the user is expected 426 - * to call the sign operation to generate the expected signature and compare 427 - * signatures instead of the message-digests. 428 - */ 429 - static int pkcs1pad_verify(struct akcipher_request *req) 430 - { 431 - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 432 - struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 433 - struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); 434 - const unsigned int sig_size = req->src_len; 435 - const unsigned int digest_size = req->dst_len; 436 - int err; 437 - 438 - if (WARN_ON(req->dst) || WARN_ON(!digest_size) || 439 - !ctx->key_size || sig_size != ctx->key_size) 440 - return -EINVAL; 441 - 442 - req_ctx->out_buf = kmalloc(ctx->key_size + digest_size, GFP_KERNEL); 443 - if (!req_ctx->out_buf) 444 - return -ENOMEM; 445 - 446 - pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, 447 - ctx->key_size, NULL); 448 - 449 - akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); 450 - akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, 451 - pkcs1pad_verify_complete_cb, req); 452 - 453 - /* Reuse input buffer, output to a new buffer */ 454 - 
akcipher_request_set_crypt(&req_ctx->child_req, req->src, 455 - req_ctx->out_sg, sig_size, ctx->key_size); 456 - 457 - err = crypto_akcipher_encrypt(&req_ctx->child_req); 458 - if (err != -EINPROGRESS && err != -EBUSY) 459 - return pkcs1pad_verify_complete(req, err); 460 - 461 - return err; 462 - } 463 - 464 371 static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm) 465 372 { 466 373 struct akcipher_instance *inst = akcipher_alg_instance(tfm); ··· 313 598 struct akcipher_instance *inst; 314 599 struct pkcs1pad_inst_ctx *ctx; 315 600 struct akcipher_alg *rsa_alg; 316 - const char *hash_name; 317 601 int err; 318 602 319 603 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER, &mask); ··· 338 624 } 339 625 340 626 err = -ENAMETOOLONG; 341 - hash_name = crypto_attr_alg_name(tb[2]); 342 - if (IS_ERR(hash_name)) { 343 - if (snprintf(inst->alg.base.cra_name, 344 - CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", 345 - rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME) 346 - goto err_free_inst; 627 + if (snprintf(inst->alg.base.cra_name, 628 + CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", 629 + rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME) 630 + goto err_free_inst; 347 631 348 - if (snprintf(inst->alg.base.cra_driver_name, 349 - CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)", 350 - rsa_alg->base.cra_driver_name) >= 351 - CRYPTO_MAX_ALG_NAME) 352 - goto err_free_inst; 353 - } else { 354 - ctx->digest_info = rsa_lookup_asn1(hash_name); 355 - if (!ctx->digest_info) { 356 - err = -EINVAL; 357 - goto err_free_inst; 358 - } 359 - 360 - if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, 361 - "pkcs1pad(%s,%s)", rsa_alg->base.cra_name, 362 - hash_name) >= CRYPTO_MAX_ALG_NAME) 363 - goto err_free_inst; 364 - 365 - if (snprintf(inst->alg.base.cra_driver_name, 366 - CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)", 367 - rsa_alg->base.cra_driver_name, 368 - hash_name) >= CRYPTO_MAX_ALG_NAME) 369 - goto err_free_inst; 370 - } 632 + if (snprintf(inst->alg.base.cra_driver_name, 633 + CRYPTO_MAX_ALG_NAME, 
"pkcs1pad(%s)", 634 + rsa_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) 635 + goto err_free_inst; 371 636 372 637 inst->alg.base.cra_priority = rsa_alg->base.cra_priority; 373 638 inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx); ··· 356 663 357 664 inst->alg.encrypt = pkcs1pad_encrypt; 358 665 inst->alg.decrypt = pkcs1pad_decrypt; 359 - inst->alg.sign = pkcs1pad_sign; 360 - inst->alg.verify = pkcs1pad_verify; 361 666 inst->alg.set_pub_key = pkcs1pad_set_pub_key; 362 667 inst->alg.set_priv_key = pkcs1pad_set_priv_key; 363 668 inst->alg.max_size = pkcs1pad_get_max_size;
+13 -4
crypto/rsa.c
··· 407 407 return err; 408 408 409 409 err = crypto_register_template(&rsa_pkcs1pad_tmpl); 410 - if (err) { 411 - crypto_unregister_akcipher(&rsa); 412 - return err; 413 - } 410 + if (err) 411 + goto err_unregister_rsa; 412 + 413 + err = crypto_register_template(&rsassa_pkcs1_tmpl); 414 + if (err) 415 + goto err_unregister_rsa_pkcs1pad; 414 416 415 417 return 0; 418 + 419 + err_unregister_rsa_pkcs1pad: 420 + crypto_unregister_template(&rsa_pkcs1pad_tmpl); 421 + err_unregister_rsa: 422 + crypto_unregister_akcipher(&rsa); 423 + return err; 416 424 } 417 425 418 426 static void __exit rsa_exit(void) 419 427 { 428 + crypto_unregister_template(&rsassa_pkcs1_tmpl); 420 429 crypto_unregister_template(&rsa_pkcs1pad_tmpl); 421 430 crypto_unregister_akcipher(&rsa); 422 431 }
+422
crypto/rsassa-pkcs1.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + /* 3 + * RSA Signature Scheme with Appendix - PKCS #1 v1.5 (RFC 8017 sec 8.2) 4 + * 5 + * https://www.rfc-editor.org/rfc/rfc8017#section-8.2 6 + * 7 + * Copyright (c) 2015 - 2024 Intel Corporation 8 + */ 9 + 10 + #include <linux/module.h> 11 + #include <linux/scatterlist.h> 12 + #include <crypto/akcipher.h> 13 + #include <crypto/algapi.h> 14 + #include <crypto/sig.h> 15 + #include <crypto/internal/akcipher.h> 16 + #include <crypto/internal/rsa.h> 17 + #include <crypto/internal/sig.h> 18 + 19 + /* 20 + * Full Hash Prefix for EMSA-PKCS1-v1_5 encoding method (RFC 9580 table 24) 21 + * 22 + * RSA keys are usually much larger than the hash of the message to be signed. 23 + * The hash is therefore prepended by the Full Hash Prefix and a 0xff padding. 24 + * The Full Hash Prefix is an ASN.1 SEQUENCE containing the hash algorithm OID. 25 + * 26 + * https://www.rfc-editor.org/rfc/rfc9580#table-24 27 + */ 28 + 29 + static const u8 hash_prefix_md5[] = { 30 + 0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, /* SEQUENCE (SEQUENCE (OID */ 31 + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* <algorithm>, */ 32 + 0x05, 0x00, 0x04, 0x10 /* NULL), OCTET STRING <hash>) */ 33 + }; 34 + 35 + static const u8 hash_prefix_sha1[] = { 36 + 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 37 + 0x2b, 0x0e, 0x03, 0x02, 0x1a, 38 + 0x05, 0x00, 0x04, 0x14 39 + }; 40 + 41 + static const u8 hash_prefix_rmd160[] = { 42 + 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 43 + 0x2b, 0x24, 0x03, 0x02, 0x01, 44 + 0x05, 0x00, 0x04, 0x14 45 + }; 46 + 47 + static const u8 hash_prefix_sha224[] = { 48 + 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 49 + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 50 + 0x05, 0x00, 0x04, 0x1c 51 + }; 52 + 53 + static const u8 hash_prefix_sha256[] = { 54 + 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 55 + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 56 + 0x05, 0x00, 0x04, 0x20 57 + }; 58 + 59 + static const u8 hash_prefix_sha384[] = { 60 + 0x30, 0x41, 0x30, 
0x0d, 0x06, 0x09, 61 + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 62 + 0x05, 0x00, 0x04, 0x30 63 + }; 64 + 65 + static const u8 hash_prefix_sha512[] = { 66 + 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 67 + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 68 + 0x05, 0x00, 0x04, 0x40 69 + }; 70 + 71 + static const u8 hash_prefix_sha3_256[] = { 72 + 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 73 + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x08, 74 + 0x05, 0x00, 0x04, 0x20 75 + }; 76 + 77 + static const u8 hash_prefix_sha3_384[] = { 78 + 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 79 + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x09, 80 + 0x05, 0x00, 0x04, 0x30 81 + }; 82 + 83 + static const u8 hash_prefix_sha3_512[] = { 84 + 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 85 + 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x0a, 86 + 0x05, 0x00, 0x04, 0x40 87 + }; 88 + 89 + static const struct hash_prefix { 90 + const char *name; 91 + const u8 *data; 92 + size_t size; 93 + } hash_prefixes[] = { 94 + #define _(X) { #X, hash_prefix_##X, sizeof(hash_prefix_##X) } 95 + _(md5), 96 + _(sha1), 97 + _(rmd160), 98 + _(sha256), 99 + _(sha384), 100 + _(sha512), 101 + _(sha224), 102 + #undef _ 103 + #define _(X) { "sha3-" #X, hash_prefix_sha3_##X, sizeof(hash_prefix_sha3_##X) } 104 + _(256), 105 + _(384), 106 + _(512), 107 + #undef _ 108 + { NULL } 109 + }; 110 + 111 + static const struct hash_prefix *rsassa_pkcs1_find_hash_prefix(const char *name) 112 + { 113 + const struct hash_prefix *p; 114 + 115 + for (p = hash_prefixes; p->name; p++) 116 + if (strcmp(name, p->name) == 0) 117 + return p; 118 + return NULL; 119 + } 120 + 121 + struct rsassa_pkcs1_ctx { 122 + struct crypto_akcipher *child; 123 + unsigned int key_size; 124 + }; 125 + 126 + struct rsassa_pkcs1_inst_ctx { 127 + struct crypto_akcipher_spawn spawn; 128 + const struct hash_prefix *hash_prefix; 129 + }; 130 + 131 + static int rsassa_pkcs1_sign(struct crypto_sig *tfm, 132 + const void *src, unsigned int slen, 133 + void 
*dst, unsigned int dlen) 134 + { 135 + struct sig_instance *inst = sig_alg_instance(tfm); 136 + struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst); 137 + const struct hash_prefix *hash_prefix = ictx->hash_prefix; 138 + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); 139 + unsigned int child_reqsize = crypto_akcipher_reqsize(ctx->child); 140 + struct akcipher_request *child_req __free(kfree_sensitive) = NULL; 141 + struct scatterlist in_sg[2], out_sg; 142 + struct crypto_wait cwait; 143 + unsigned int pad_len; 144 + unsigned int ps_end; 145 + unsigned int len; 146 + u8 *in_buf; 147 + int err; 148 + 149 + if (!ctx->key_size) 150 + return -EINVAL; 151 + 152 + if (dlen < ctx->key_size) 153 + return -EOVERFLOW; 154 + 155 + if (slen + hash_prefix->size > ctx->key_size - 11) 156 + return -EOVERFLOW; 157 + 158 + child_req = kmalloc(sizeof(*child_req) + child_reqsize + 159 + ctx->key_size - 1 - slen, GFP_KERNEL); 160 + if (!child_req) 161 + return -ENOMEM; 162 + 163 + /* RFC 8017 sec 8.2.1 step 1 - EMSA-PKCS1-v1_5 encoding generation */ 164 + in_buf = (u8 *)(child_req + 1) + child_reqsize; 165 + ps_end = ctx->key_size - hash_prefix->size - slen - 2; 166 + in_buf[0] = 0x01; 167 + memset(in_buf + 1, 0xff, ps_end - 1); 168 + in_buf[ps_end] = 0x00; 169 + memcpy(in_buf + ps_end + 1, hash_prefix->data, hash_prefix->size); 170 + 171 + /* RFC 8017 sec 8.2.1 step 2 - RSA signature */ 172 + crypto_init_wait(&cwait); 173 + sg_init_table(in_sg, 2); 174 + sg_set_buf(&in_sg[0], in_buf, ctx->key_size - 1 - slen); 175 + sg_set_buf(&in_sg[1], src, slen); 176 + sg_init_one(&out_sg, dst, dlen); 177 + akcipher_request_set_tfm(child_req, ctx->child); 178 + akcipher_request_set_crypt(child_req, in_sg, &out_sg, 179 + ctx->key_size - 1, dlen); 180 + akcipher_request_set_callback(child_req, CRYPTO_TFM_REQ_MAY_SLEEP, 181 + crypto_req_done, &cwait); 182 + 183 + err = crypto_akcipher_decrypt(child_req); 184 + err = crypto_wait_req(err, &cwait); 185 + if (err) 186 + return err; 187 + 188 + 
len = child_req->dst_len; 189 + pad_len = ctx->key_size - len; 190 + 191 + /* Four billion to one */ 192 + if (unlikely(pad_len)) { 193 + memmove(dst + pad_len, dst, len); 194 + memset(dst, 0, pad_len); 195 + } 196 + 197 + return 0; 198 + } 199 + 200 + static int rsassa_pkcs1_verify(struct crypto_sig *tfm, 201 + const void *src, unsigned int slen, 202 + const void *digest, unsigned int dlen) 203 + { 204 + struct sig_instance *inst = sig_alg_instance(tfm); 205 + struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst); 206 + const struct hash_prefix *hash_prefix = ictx->hash_prefix; 207 + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); 208 + unsigned int child_reqsize = crypto_akcipher_reqsize(ctx->child); 209 + struct akcipher_request *child_req __free(kfree_sensitive) = NULL; 210 + struct scatterlist in_sg, out_sg; 211 + struct crypto_wait cwait; 212 + unsigned int dst_len; 213 + unsigned int pos; 214 + u8 *out_buf; 215 + int err; 216 + 217 + /* RFC 8017 sec 8.2.2 step 1 - length checking */ 218 + if (!ctx->key_size || 219 + slen != ctx->key_size || 220 + !dlen) 221 + return -EINVAL; 222 + 223 + /* RFC 8017 sec 8.2.2 step 2 - RSA verification */ 224 + child_req = kmalloc(sizeof(*child_req) + child_reqsize + ctx->key_size, 225 + GFP_KERNEL); 226 + if (!child_req) 227 + return -ENOMEM; 228 + 229 + out_buf = (u8 *)(child_req + 1) + child_reqsize; 230 + 231 + crypto_init_wait(&cwait); 232 + sg_init_one(&in_sg, src, slen); 233 + sg_init_one(&out_sg, out_buf, ctx->key_size); 234 + akcipher_request_set_tfm(child_req, ctx->child); 235 + akcipher_request_set_crypt(child_req, &in_sg, &out_sg, 236 + slen, ctx->key_size); 237 + akcipher_request_set_callback(child_req, CRYPTO_TFM_REQ_MAY_SLEEP, 238 + crypto_req_done, &cwait); 239 + 240 + err = crypto_akcipher_encrypt(child_req); 241 + err = crypto_wait_req(err, &cwait); 242 + if (err) 243 + return err; 244 + 245 + /* RFC 8017 sec 8.2.2 step 3 - EMSA-PKCS1-v1_5 encoding verification */ 246 + dst_len = child_req->dst_len; 
247 + if (dst_len < ctx->key_size - 1) 248 + return -EINVAL; 249 + 250 + if (dst_len == ctx->key_size) { 251 + if (out_buf[0] != 0x00) 252 + /* Encrypted value had no leading 0 byte */ 253 + return -EINVAL; 254 + 255 + dst_len--; 256 + out_buf++; 257 + } 258 + 259 + if (out_buf[0] != 0x01) 260 + return -EBADMSG; 261 + 262 + for (pos = 1; pos < dst_len; pos++) 263 + if (out_buf[pos] != 0xff) 264 + break; 265 + 266 + if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00) 267 + return -EBADMSG; 268 + pos++; 269 + 270 + if (hash_prefix->size > dst_len - pos) 271 + return -EBADMSG; 272 + if (crypto_memneq(out_buf + pos, hash_prefix->data, hash_prefix->size)) 273 + return -EBADMSG; 274 + pos += hash_prefix->size; 275 + 276 + /* RFC 8017 sec 8.2.2 step 4 - comparison of digest with out_buf */ 277 + if (dlen != dst_len - pos) 278 + return -EKEYREJECTED; 279 + if (memcmp(digest, out_buf + pos, dlen) != 0) 280 + return -EKEYREJECTED; 281 + 282 + return 0; 283 + } 284 + 285 + static unsigned int rsassa_pkcs1_max_size(struct crypto_sig *tfm) 286 + { 287 + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); 288 + 289 + return ctx->key_size; 290 + } 291 + 292 + static int rsassa_pkcs1_set_pub_key(struct crypto_sig *tfm, 293 + const void *key, unsigned int keylen) 294 + { 295 + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); 296 + 297 + return rsa_set_key(ctx->child, &ctx->key_size, RSA_PUB, key, keylen); 298 + } 299 + 300 + static int rsassa_pkcs1_set_priv_key(struct crypto_sig *tfm, 301 + const void *key, unsigned int keylen) 302 + { 303 + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); 304 + 305 + return rsa_set_key(ctx->child, &ctx->key_size, RSA_PRIV, key, keylen); 306 + } 307 + 308 + static int rsassa_pkcs1_init_tfm(struct crypto_sig *tfm) 309 + { 310 + struct sig_instance *inst = sig_alg_instance(tfm); 311 + struct rsassa_pkcs1_inst_ctx *ictx = sig_instance_ctx(inst); 312 + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); 313 + struct crypto_akcipher *child_tfm; 
314 + 315 + child_tfm = crypto_spawn_akcipher(&ictx->spawn); 316 + if (IS_ERR(child_tfm)) 317 + return PTR_ERR(child_tfm); 318 + 319 + ctx->child = child_tfm; 320 + 321 + return 0; 322 + } 323 + 324 + static void rsassa_pkcs1_exit_tfm(struct crypto_sig *tfm) 325 + { 326 + struct rsassa_pkcs1_ctx *ctx = crypto_sig_ctx(tfm); 327 + 328 + crypto_free_akcipher(ctx->child); 329 + } 330 + 331 + static void rsassa_pkcs1_free(struct sig_instance *inst) 332 + { 333 + struct rsassa_pkcs1_inst_ctx *ctx = sig_instance_ctx(inst); 334 + struct crypto_akcipher_spawn *spawn = &ctx->spawn; 335 + 336 + crypto_drop_akcipher(spawn); 337 + kfree(inst); 338 + } 339 + 340 + static int rsassa_pkcs1_create(struct crypto_template *tmpl, struct rtattr **tb) 341 + { 342 + struct rsassa_pkcs1_inst_ctx *ctx; 343 + struct akcipher_alg *rsa_alg; 344 + struct sig_instance *inst; 345 + const char *hash_name; 346 + u32 mask; 347 + int err; 348 + 349 + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SIG, &mask); 350 + if (err) 351 + return err; 352 + 353 + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); 354 + if (!inst) 355 + return -ENOMEM; 356 + 357 + ctx = sig_instance_ctx(inst); 358 + 359 + err = crypto_grab_akcipher(&ctx->spawn, sig_crypto_instance(inst), 360 + crypto_attr_alg_name(tb[1]), 0, mask); 361 + if (err) 362 + goto err_free_inst; 363 + 364 + rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn); 365 + 366 + if (strcmp(rsa_alg->base.cra_name, "rsa") != 0) { 367 + err = -EINVAL; 368 + goto err_free_inst; 369 + } 370 + 371 + hash_name = crypto_attr_alg_name(tb[2]); 372 + if (IS_ERR(hash_name)) { 373 + err = PTR_ERR(hash_name); 374 + goto err_free_inst; 375 + } 376 + 377 + ctx->hash_prefix = rsassa_pkcs1_find_hash_prefix(hash_name); 378 + if (!ctx->hash_prefix) { 379 + err = -EINVAL; 380 + goto err_free_inst; 381 + } 382 + 383 + err = -ENAMETOOLONG; 384 + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, 385 + "pkcs1(%s,%s)", rsa_alg->base.cra_name, 386 + hash_name) >= 
CRYPTO_MAX_ALG_NAME) 387 + goto err_free_inst; 388 + 389 + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, 390 + "pkcs1(%s,%s)", rsa_alg->base.cra_driver_name, 391 + hash_name) >= CRYPTO_MAX_ALG_NAME) 392 + goto err_free_inst; 393 + 394 + inst->alg.base.cra_priority = rsa_alg->base.cra_priority; 395 + inst->alg.base.cra_ctxsize = sizeof(struct rsassa_pkcs1_ctx); 396 + 397 + inst->alg.init = rsassa_pkcs1_init_tfm; 398 + inst->alg.exit = rsassa_pkcs1_exit_tfm; 399 + 400 + inst->alg.sign = rsassa_pkcs1_sign; 401 + inst->alg.verify = rsassa_pkcs1_verify; 402 + inst->alg.max_size = rsassa_pkcs1_max_size; 403 + inst->alg.set_pub_key = rsassa_pkcs1_set_pub_key; 404 + inst->alg.set_priv_key = rsassa_pkcs1_set_priv_key; 405 + 406 + inst->free = rsassa_pkcs1_free; 407 + 408 + err = sig_register_instance(tmpl, inst); 409 + if (err) { 410 + err_free_inst: 411 + rsassa_pkcs1_free(inst); 412 + } 413 + return err; 414 + } 415 + 416 + struct crypto_template rsassa_pkcs1_tmpl = { 417 + .name = "pkcs1", 418 + .create = rsassa_pkcs1_create, 419 + .module = THIS_MODULE, 420 + }; 421 + 422 + MODULE_ALIAS_CRYPTO("pkcs1");
+13 -9
crypto/testmgr.c
··· 5569 5569 .cipher = __VECS(fcrypt_pcbc_tv_template) 5570 5570 } 5571 5571 }, { 5572 - .alg = "pkcs1pad(rsa,sha224)", 5572 + .alg = "pkcs1(rsa,sha224)", 5573 5573 .test = alg_test_null, 5574 5574 .fips_allowed = 1, 5575 5575 }, { 5576 - .alg = "pkcs1pad(rsa,sha256)", 5577 - .test = alg_test_akcipher, 5576 + .alg = "pkcs1(rsa,sha256)", 5577 + .test = alg_test_sig, 5578 5578 .fips_allowed = 1, 5579 5579 .suite = { 5580 - .akcipher = __VECS(pkcs1pad_rsa_tv_template) 5580 + .sig = __VECS(pkcs1_rsa_tv_template) 5581 5581 } 5582 5582 }, { 5583 - .alg = "pkcs1pad(rsa,sha3-256)", 5583 + .alg = "pkcs1(rsa,sha3-256)", 5584 5584 .test = alg_test_null, 5585 5585 .fips_allowed = 1, 5586 5586 }, { 5587 - .alg = "pkcs1pad(rsa,sha3-384)", 5587 + .alg = "pkcs1(rsa,sha3-384)", 5588 5588 .test = alg_test_null, 5589 5589 .fips_allowed = 1, 5590 5590 }, { 5591 - .alg = "pkcs1pad(rsa,sha3-512)", 5591 + .alg = "pkcs1(rsa,sha3-512)", 5592 5592 .test = alg_test_null, 5593 5593 .fips_allowed = 1, 5594 5594 }, { 5595 - .alg = "pkcs1pad(rsa,sha384)", 5595 + .alg = "pkcs1(rsa,sha384)", 5596 5596 .test = alg_test_null, 5597 5597 .fips_allowed = 1, 5598 5598 }, { 5599 - .alg = "pkcs1pad(rsa,sha512)", 5599 + .alg = "pkcs1(rsa,sha512)", 5600 + .test = alg_test_null, 5601 + .fips_allowed = 1, 5602 + }, { 5603 + .alg = "pkcs1pad(rsa)", 5600 5604 .test = alg_test_null, 5601 5605 .fips_allowed = 1, 5602 5606 }, {
+1 -2
crypto/testmgr.h
··· 1268 1268 /* 1269 1269 * PKCS#1 RSA test vectors. Obtained from CAVS testing. 1270 1270 */ 1271 - static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = { 1271 + static const struct sig_testvec pkcs1_rsa_tv_template[] = { 1272 1272 { 1273 1273 .key = 1274 1274 "\x30\x82\x04\xa5\x02\x01\x00\x02\x82\x01\x01\x00\xd7\x1e\x77\x82" ··· 1380 1380 "\xda\x62\x8d\xe1\x2a\x71\x91\x43\x40\x61\x3c\x5a\xbe\x86\xfc\x5b" 1381 1381 "\xe6\xf9\xa9\x16\x31\x1f\xaf\x25\x6d\xc2\x4a\x23\x6e\x63\x02\xa2", 1382 1382 .c_size = 256, 1383 - .siggen_sigver_test = true, 1384 1383 } 1385 1384 }; 1386 1385
+1
include/crypto/internal/rsa.h
··· 82 82 } 83 83 84 84 extern struct crypto_template rsa_pkcs1pad_tmpl; 85 + extern struct crypto_template rsassa_pkcs1_tmpl; 85 86 #endif
+1
include/linux/slab.h
··· 448 448 size_t __ksize(const void *objp); 449 449 450 450 DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T)) 451 + DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T)) 451 452 452 453 /** 453 454 * ksize - Report actual allocation size of associated object
+3 -3
security/integrity/ima/ima_main.c
··· 1114 1114 #ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS 1115 1115 1116 1116 /** 1117 - * ima_kernel_module_request - Prevent crypto-pkcs1pad(rsa,*) requests 1117 + * ima_kernel_module_request - Prevent crypto-pkcs1(rsa,*) requests 1118 1118 * @kmod_name: kernel module name 1119 1119 * 1120 1120 * Avoid a verification loop where verifying the signature of the modprobe ··· 1128 1128 * algorithm on the fly, but crypto_larval_lookup() will try to use alg_name 1129 1129 * in order to load a kernel module with same name. 1130 1130 * 1131 - * Since we don't have any real "crypto-pkcs1pad(rsa,*)" kernel modules, 1131 + * Since we don't have any real "crypto-pkcs1(rsa,*)" kernel modules, 1132 1132 * we are safe to fail such module request from crypto_larval_lookup(), and 1133 1133 * avoid the verification loop. 1134 1134 * ··· 1136 1136 */ 1137 1137 static int ima_kernel_module_request(char *kmod_name) 1138 1138 { 1139 - if (strncmp(kmod_name, "crypto-pkcs1pad(rsa,", 20) == 0) 1139 + if (strncmp(kmod_name, "crypto-pkcs1(rsa,", 17) == 0) 1140 1140 return -EINVAL; 1141 1141 1142 1142 return 0;