Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Revert "crypto: remove CONFIG_CRYPTO_STATS"

This reverts commit 2beb81fbf0c01a62515a1bcef326168494ee2bd0.

While removing CONFIG_CRYPTO_STATS is a worthy goal, this also
removed unrelated infrastructure such as crypto_comp_alg_common.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

+1140 -78
+1
arch/s390/configs/debug_defconfig
··· 766 766 CONFIG_CRYPTO_USER_API_SKCIPHER=m 767 767 CONFIG_CRYPTO_USER_API_RNG=m 768 768 CONFIG_CRYPTO_USER_API_AEAD=m 769 + CONFIG_CRYPTO_STATS=y 769 770 CONFIG_CRYPTO_CRC32_S390=y 770 771 CONFIG_CRYPTO_SHA512_S390=m 771 772 CONFIG_CRYPTO_SHA1_S390=m
+1
arch/s390/configs/defconfig
··· 752 752 CONFIG_CRYPTO_USER_API_SKCIPHER=m 753 753 CONFIG_CRYPTO_USER_API_RNG=m 754 754 CONFIG_CRYPTO_USER_API_AEAD=m 755 + CONFIG_CRYPTO_STATS=y 755 756 CONFIG_CRYPTO_CRC32_S390=y 756 757 CONFIG_CRYPTO_SHA512_S390=m 757 758 CONFIG_CRYPTO_SHA1_S390=m
+20
crypto/Kconfig
··· 1456 1456 already been phased out from internal use by the kernel, and are 1457 1457 only useful for userspace clients that still rely on them. 1458 1458 1459 + config CRYPTO_STATS 1460 + bool "Crypto usage statistics" 1461 + depends on CRYPTO_USER 1462 + help 1463 + Enable the gathering of crypto stats. 1464 + 1465 + Enabling this option reduces the performance of the crypto API. It 1466 + should only be enabled when there is actually a use case for it. 1467 + 1468 + This collects data sizes, numbers of requests, and numbers 1469 + of errors processed by: 1470 + - AEAD ciphers (encrypt, decrypt) 1471 + - asymmetric key ciphers (encrypt, decrypt, verify, sign) 1472 + - symmetric key ciphers (encrypt, decrypt) 1473 + - compression algorithms (compress, decompress) 1474 + - hash algorithms (hash) 1475 + - key-agreement protocol primitives (setsecret, generate 1476 + public key, compute shared secret) 1477 + - RNG (generate, seed) 1478 + 1459 1479 endmenu 1460 1480 1461 1481 config CRYPTO_HASH_INFO
+2
crypto/Makefile
··· 69 69 70 70 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o 71 71 obj-$(CONFIG_CRYPTO_USER) += crypto_user.o 72 + crypto_user-y := crypto_user_base.o 73 + crypto_user-$(CONFIG_CRYPTO_STATS) += crypto_user_stat.o 72 74 obj-$(CONFIG_CRYPTO_CMAC) += cmac.o 73 75 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o 74 76 obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
+44 -3
crypto/acompress.c
··· 25 25 26 26 static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg) 27 27 { 28 - return container_of(alg, struct acomp_alg, base); 28 + return container_of(alg, struct acomp_alg, calg.base); 29 29 } 30 30 31 31 static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm) ··· 93 93 return extsize; 94 94 } 95 95 96 + static inline int __crypto_acomp_report_stat(struct sk_buff *skb, 97 + struct crypto_alg *alg) 98 + { 99 + struct comp_alg_common *calg = __crypto_comp_alg_common(alg); 100 + struct crypto_istat_compress *istat = comp_get_stat(calg); 101 + struct crypto_stat_compress racomp; 102 + 103 + memset(&racomp, 0, sizeof(racomp)); 104 + 105 + strscpy(racomp.type, "acomp", sizeof(racomp.type)); 106 + racomp.stat_compress_cnt = atomic64_read(&istat->compress_cnt); 107 + racomp.stat_compress_tlen = atomic64_read(&istat->compress_tlen); 108 + racomp.stat_decompress_cnt = atomic64_read(&istat->decompress_cnt); 109 + racomp.stat_decompress_tlen = atomic64_read(&istat->decompress_tlen); 110 + racomp.stat_err_cnt = atomic64_read(&istat->err_cnt); 111 + 112 + return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp); 113 + } 114 + 115 + #ifdef CONFIG_CRYPTO_STATS 116 + int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg) 117 + { 118 + return __crypto_acomp_report_stat(skb, alg); 119 + } 120 + #endif 121 + 96 122 static const struct crypto_type crypto_acomp_type = { 97 123 .extsize = crypto_acomp_extsize, 98 124 .init_tfm = crypto_acomp_init_tfm, ··· 127 101 #endif 128 102 #if IS_ENABLED(CONFIG_CRYPTO_USER) 129 103 .report = crypto_acomp_report, 104 + #endif 105 + #ifdef CONFIG_CRYPTO_STATS 106 + .report_stat = crypto_acomp_report_stat, 130 107 #endif 131 108 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 132 109 .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK, ··· 182 153 } 183 154 EXPORT_SYMBOL_GPL(acomp_request_free); 184 155 185 - int crypto_register_acomp(struct acomp_alg *alg) 156 + void comp_prepare_alg(struct 
comp_alg_common *alg) 186 157 { 158 + struct crypto_istat_compress *istat = comp_get_stat(alg); 187 159 struct crypto_alg *base = &alg->base; 188 160 189 - base->cra_type = &crypto_acomp_type; 190 161 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; 162 + 163 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 164 + memset(istat, 0, sizeof(*istat)); 165 + } 166 + 167 + int crypto_register_acomp(struct acomp_alg *alg) 168 + { 169 + struct crypto_alg *base = &alg->calg.base; 170 + 171 + comp_prepare_alg(&alg->calg); 172 + 173 + base->cra_type = &crypto_acomp_type; 191 174 base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS; 192 175 193 176 return crypto_register_alg(base);
+77 -7
crypto/aead.c
··· 20 20 21 21 #include "internal.h" 22 22 23 + static inline struct crypto_istat_aead *aead_get_stat(struct aead_alg *alg) 24 + { 25 + #ifdef CONFIG_CRYPTO_STATS 26 + return &alg->stat; 27 + #else 28 + return NULL; 29 + #endif 30 + } 31 + 23 32 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, 24 33 unsigned int keylen) 25 34 { ··· 90 81 } 91 82 EXPORT_SYMBOL_GPL(crypto_aead_setauthsize); 92 83 84 + static inline int crypto_aead_errstat(struct crypto_istat_aead *istat, int err) 85 + { 86 + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) 87 + return err; 88 + 89 + if (err && err != -EINPROGRESS && err != -EBUSY) 90 + atomic64_inc(&istat->err_cnt); 91 + 92 + return err; 93 + } 94 + 93 95 int crypto_aead_encrypt(struct aead_request *req) 94 96 { 95 97 struct crypto_aead *aead = crypto_aead_reqtfm(req); 98 + struct aead_alg *alg = crypto_aead_alg(aead); 99 + struct crypto_istat_aead *istat; 100 + int ret; 101 + 102 + istat = aead_get_stat(alg); 103 + 104 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { 105 + atomic64_inc(&istat->encrypt_cnt); 106 + atomic64_add(req->cryptlen, &istat->encrypt_tlen); 107 + } 96 108 97 109 if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) 98 - return -ENOKEY; 110 + ret = -ENOKEY; 111 + else 112 + ret = alg->encrypt(req); 99 113 100 - return crypto_aead_alg(aead)->encrypt(req); 114 + return crypto_aead_errstat(istat, ret); 101 115 } 102 116 EXPORT_SYMBOL_GPL(crypto_aead_encrypt); 103 117 104 118 int crypto_aead_decrypt(struct aead_request *req) 105 119 { 106 120 struct crypto_aead *aead = crypto_aead_reqtfm(req); 121 + struct aead_alg *alg = crypto_aead_alg(aead); 122 + struct crypto_istat_aead *istat; 123 + int ret; 124 + 125 + istat = aead_get_stat(alg); 126 + 127 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { 128 + atomic64_inc(&istat->decrypt_cnt); 129 + atomic64_add(req->cryptlen, &istat->decrypt_tlen); 130 + } 107 131 108 132 if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) 109 - return -ENOKEY; 133 + ret = -ENOKEY; 134 + else 
if (req->cryptlen < crypto_aead_authsize(aead)) 135 + ret = -EINVAL; 136 + else 137 + ret = alg->decrypt(req); 110 138 111 - if (req->cryptlen < crypto_aead_authsize(aead)) 112 - return -EINVAL; 113 - 114 - return crypto_aead_alg(aead)->decrypt(req); 139 + return crypto_aead_errstat(istat, ret); 115 140 } 116 141 EXPORT_SYMBOL_GPL(crypto_aead_decrypt); 117 142 ··· 215 172 aead->free(aead); 216 173 } 217 174 175 + static int __maybe_unused crypto_aead_report_stat( 176 + struct sk_buff *skb, struct crypto_alg *alg) 177 + { 178 + struct aead_alg *aead = container_of(alg, struct aead_alg, base); 179 + struct crypto_istat_aead *istat = aead_get_stat(aead); 180 + struct crypto_stat_aead raead; 181 + 182 + memset(&raead, 0, sizeof(raead)); 183 + 184 + strscpy(raead.type, "aead", sizeof(raead.type)); 185 + 186 + raead.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt); 187 + raead.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen); 188 + raead.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt); 189 + raead.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen); 190 + raead.stat_err_cnt = atomic64_read(&istat->err_cnt); 191 + 192 + return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead); 193 + } 194 + 218 195 static const struct crypto_type crypto_aead_type = { 219 196 .extsize = crypto_alg_extsize, 220 197 .init_tfm = crypto_aead_init_tfm, ··· 244 181 #endif 245 182 #if IS_ENABLED(CONFIG_CRYPTO_USER) 246 183 .report = crypto_aead_report, 184 + #endif 185 + #ifdef CONFIG_CRYPTO_STATS 186 + .report_stat = crypto_aead_report_stat, 247 187 #endif 248 188 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 249 189 .maskset = CRYPTO_ALG_TYPE_MASK, ··· 277 211 278 212 static int aead_prepare_alg(struct aead_alg *alg) 279 213 { 214 + struct crypto_istat_aead *istat = aead_get_stat(alg); 280 215 struct crypto_alg *base = &alg->base; 281 216 282 217 if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) > ··· 290 223 base->cra_type = &crypto_aead_type; 291 224 base->cra_flags 
&= ~CRYPTO_ALG_TYPE_MASK; 292 225 base->cra_flags |= CRYPTO_ALG_TYPE_AEAD; 226 + 227 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 228 + memset(istat, 0, sizeof(*istat)); 293 229 294 230 return 0; 295 231 }
+59 -6
crypto/ahash.c
··· 27 27 28 28 #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e 29 29 30 + static inline struct crypto_istat_hash *ahash_get_stat(struct ahash_alg *alg) 31 + { 32 + return hash_get_stat(&alg->halg); 33 + } 34 + 35 + static inline int crypto_ahash_errstat(struct ahash_alg *alg, int err) 36 + { 37 + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) 38 + return err; 39 + 40 + if (err && err != -EINPROGRESS && err != -EBUSY) 41 + atomic64_inc(&ahash_get_stat(alg)->err_cnt); 42 + 43 + return err; 44 + } 45 + 30 46 /* 31 47 * For an ahash tfm that is using an shash algorithm (instead of an ahash 32 48 * algorithm), this returns the underlying shash tfm. ··· 344 328 int crypto_ahash_update(struct ahash_request *req) 345 329 { 346 330 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 331 + struct ahash_alg *alg; 347 332 348 333 if (likely(tfm->using_shash)) 349 334 return shash_ahash_update(req, ahash_request_ctx(req)); 350 335 351 - return crypto_ahash_alg(tfm)->update(req); 336 + alg = crypto_ahash_alg(tfm); 337 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 338 + atomic64_add(req->nbytes, &ahash_get_stat(alg)->hash_tlen); 339 + return crypto_ahash_errstat(alg, alg->update(req)); 352 340 } 353 341 EXPORT_SYMBOL_GPL(crypto_ahash_update); 354 342 355 343 int crypto_ahash_final(struct ahash_request *req) 356 344 { 357 345 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 346 + struct ahash_alg *alg; 358 347 359 348 if (likely(tfm->using_shash)) 360 349 return crypto_shash_final(ahash_request_ctx(req), req->result); 361 350 362 - return crypto_ahash_alg(tfm)->final(req); 351 + alg = crypto_ahash_alg(tfm); 352 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 353 + atomic64_inc(&ahash_get_stat(alg)->hash_cnt); 354 + return crypto_ahash_errstat(alg, alg->final(req)); 363 355 } 364 356 EXPORT_SYMBOL_GPL(crypto_ahash_final); 365 357 366 358 int crypto_ahash_finup(struct ahash_request *req) 367 359 { 368 360 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 361 + struct ahash_alg *alg; 369 362 370 363 
if (likely(tfm->using_shash)) 371 364 return shash_ahash_finup(req, ahash_request_ctx(req)); 372 365 373 - return crypto_ahash_alg(tfm)->finup(req); 366 + alg = crypto_ahash_alg(tfm); 367 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { 368 + struct crypto_istat_hash *istat = ahash_get_stat(alg); 369 + 370 + atomic64_inc(&istat->hash_cnt); 371 + atomic64_add(req->nbytes, &istat->hash_tlen); 372 + } 373 + return crypto_ahash_errstat(alg, alg->finup(req)); 374 374 } 375 375 EXPORT_SYMBOL_GPL(crypto_ahash_finup); 376 376 377 377 int crypto_ahash_digest(struct ahash_request *req) 378 378 { 379 379 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 380 + struct ahash_alg *alg; 381 + int err; 380 382 381 383 if (likely(tfm->using_shash)) 382 384 return shash_ahash_digest(req, prepare_shash_desc(req, tfm)); 383 385 384 - if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 385 - return -ENOKEY; 386 + alg = crypto_ahash_alg(tfm); 387 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { 388 + struct crypto_istat_hash *istat = ahash_get_stat(alg); 386 389 387 - return crypto_ahash_alg(tfm)->digest(req); 390 + atomic64_inc(&istat->hash_cnt); 391 + atomic64_add(req->nbytes, &istat->hash_tlen); 392 + } 393 + 394 + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 395 + err = -ENOKEY; 396 + else 397 + err = alg->digest(req); 398 + 399 + return crypto_ahash_errstat(alg, err); 388 400 } 389 401 EXPORT_SYMBOL_GPL(crypto_ahash_digest); 390 402 ··· 571 527 __crypto_hash_alg_common(alg)->digestsize); 572 528 } 573 529 530 + static int __maybe_unused crypto_ahash_report_stat( 531 + struct sk_buff *skb, struct crypto_alg *alg) 532 + { 533 + return crypto_hash_report_stat(skb, alg, "ahash"); 534 + } 535 + 574 536 static const struct crypto_type crypto_ahash_type = { 575 537 .extsize = crypto_ahash_extsize, 576 538 .init_tfm = crypto_ahash_init_tfm, ··· 586 536 #endif 587 537 #if IS_ENABLED(CONFIG_CRYPTO_USER) 588 538 .report = crypto_ahash_report, 539 + #endif 540 + #ifdef CONFIG_CRYPTO_STATS 541 + 
.report_stat = crypto_ahash_report_stat, 589 542 #endif 590 543 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 591 544 .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
+31
crypto/akcipher.c
··· 70 70 akcipher->free(akcipher); 71 71 } 72 72 73 + static int __maybe_unused crypto_akcipher_report_stat( 74 + struct sk_buff *skb, struct crypto_alg *alg) 75 + { 76 + struct akcipher_alg *akcipher = __crypto_akcipher_alg(alg); 77 + struct crypto_istat_akcipher *istat; 78 + struct crypto_stat_akcipher rakcipher; 79 + 80 + istat = akcipher_get_stat(akcipher); 81 + 82 + memset(&rakcipher, 0, sizeof(rakcipher)); 83 + 84 + strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); 85 + rakcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt); 86 + rakcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen); 87 + rakcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt); 88 + rakcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen); 89 + rakcipher.stat_sign_cnt = atomic64_read(&istat->sign_cnt); 90 + rakcipher.stat_verify_cnt = atomic64_read(&istat->verify_cnt); 91 + rakcipher.stat_err_cnt = atomic64_read(&istat->err_cnt); 92 + 93 + return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, 94 + sizeof(rakcipher), &rakcipher); 95 + } 96 + 73 97 static const struct crypto_type crypto_akcipher_type = { 74 98 .extsize = crypto_alg_extsize, 75 99 .init_tfm = crypto_akcipher_init_tfm, ··· 103 79 #endif 104 80 #if IS_ENABLED(CONFIG_CRYPTO_USER) 105 81 .report = crypto_akcipher_report, 82 + #endif 83 + #ifdef CONFIG_CRYPTO_STATS 84 + .report_stat = crypto_akcipher_report_stat, 106 85 #endif 107 86 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 108 87 .maskset = CRYPTO_ALG_TYPE_AHASH_MASK, ··· 131 104 132 105 static void akcipher_prepare_alg(struct akcipher_alg *alg) 133 106 { 107 + struct crypto_istat_akcipher *istat = akcipher_get_stat(alg); 134 108 struct crypto_alg *base = &alg->base; 135 109 136 110 base->cra_type = &crypto_akcipher_type; 137 111 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; 138 112 base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER; 113 + 114 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 115 + memset(istat, 0, sizeof(*istat)); 139 116 } 140 117 141 118 static int 
akcipher_default_op(struct akcipher_request *req)
+5
crypto/compress.h
··· 12 12 #include "internal.h" 13 13 14 14 struct acomp_req; 15 + struct comp_alg_common; 15 16 struct sk_buff; 16 17 17 18 int crypto_init_scomp_ops_async(struct crypto_tfm *tfm); 18 19 struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req); 19 20 void crypto_acomp_scomp_free_ctx(struct acomp_req *req); 21 + 22 + int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg); 23 + 24 + void comp_prepare_alg(struct comp_alg_common *alg); 20 25 21 26 #endif /* _LOCAL_CRYPTO_COMPRESS_H */
+2 -8
crypto/crypto_user.c crypto/crypto_user_base.c
··· 18 18 #include <crypto/internal/rng.h> 19 19 #include <crypto/akcipher.h> 20 20 #include <crypto/kpp.h> 21 + #include <crypto/internal/cryptouser.h> 21 22 22 23 #include "internal.h" 23 24 ··· 33 32 u16 nlmsg_flags; 34 33 }; 35 34 36 - static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact) 35 + struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact) 37 36 { 38 37 struct crypto_alg *q, *alg = NULL; 39 38 ··· 385 384 if (!netlink_capable(skb, CAP_NET_ADMIN)) 386 385 return -EPERM; 387 386 return crypto_del_default_rng(); 388 - } 389 - 390 - static int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, 391 - struct nlattr **attrs) 392 - { 393 - /* No longer supported */ 394 - return -ENOTSUPP; 395 387 } 396 388 397 389 #define MSGSIZE(type) sizeof(struct type)
+176
crypto/crypto_user_stat.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Crypto user configuration API. 4 + * 5 + * Copyright (C) 2017-2018 Corentin Labbe <clabbe@baylibre.com> 6 + * 7 + */ 8 + 9 + #include <crypto/algapi.h> 10 + #include <crypto/internal/cryptouser.h> 11 + #include <linux/errno.h> 12 + #include <linux/kernel.h> 13 + #include <linux/module.h> 14 + #include <linux/string.h> 15 + #include <net/netlink.h> 16 + #include <net/sock.h> 17 + 18 + #define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x)) 19 + 20 + struct crypto_dump_info { 21 + struct sk_buff *in_skb; 22 + struct sk_buff *out_skb; 23 + u32 nlmsg_seq; 24 + u16 nlmsg_flags; 25 + }; 26 + 27 + static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) 28 + { 29 + struct crypto_stat_cipher rcipher; 30 + 31 + memset(&rcipher, 0, sizeof(rcipher)); 32 + 33 + strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); 34 + 35 + return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); 36 + } 37 + 38 + static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) 39 + { 40 + struct crypto_stat_compress rcomp; 41 + 42 + memset(&rcomp, 0, sizeof(rcomp)); 43 + 44 + strscpy(rcomp.type, "compression", sizeof(rcomp.type)); 45 + 46 + return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp); 47 + } 48 + 49 + static int crypto_reportstat_one(struct crypto_alg *alg, 50 + struct crypto_user_alg *ualg, 51 + struct sk_buff *skb) 52 + { 53 + memset(ualg, 0, sizeof(*ualg)); 54 + 55 + strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); 56 + strscpy(ualg->cru_driver_name, alg->cra_driver_name, 57 + sizeof(ualg->cru_driver_name)); 58 + strscpy(ualg->cru_module_name, module_name(alg->cra_module), 59 + sizeof(ualg->cru_module_name)); 60 + 61 + ualg->cru_type = 0; 62 + ualg->cru_mask = 0; 63 + ualg->cru_flags = alg->cra_flags; 64 + ualg->cru_refcnt = refcount_read(&alg->cra_refcnt); 65 + 66 + if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority)) 67 + goto 
nla_put_failure; 68 + if (alg->cra_flags & CRYPTO_ALG_LARVAL) { 69 + struct crypto_stat_larval rl; 70 + 71 + memset(&rl, 0, sizeof(rl)); 72 + strscpy(rl.type, "larval", sizeof(rl.type)); 73 + if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl)) 74 + goto nla_put_failure; 75 + goto out; 76 + } 77 + 78 + if (alg->cra_type && alg->cra_type->report_stat) { 79 + if (alg->cra_type->report_stat(skb, alg)) 80 + goto nla_put_failure; 81 + goto out; 82 + } 83 + 84 + switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) { 85 + case CRYPTO_ALG_TYPE_CIPHER: 86 + if (crypto_report_cipher(skb, alg)) 87 + goto nla_put_failure; 88 + break; 89 + case CRYPTO_ALG_TYPE_COMPRESS: 90 + if (crypto_report_comp(skb, alg)) 91 + goto nla_put_failure; 92 + break; 93 + default: 94 + pr_err("ERROR: Unhandled alg %d in %s\n", 95 + alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL), 96 + __func__); 97 + } 98 + 99 + out: 100 + return 0; 101 + 102 + nla_put_failure: 103 + return -EMSGSIZE; 104 + } 105 + 106 + static int crypto_reportstat_alg(struct crypto_alg *alg, 107 + struct crypto_dump_info *info) 108 + { 109 + struct sk_buff *in_skb = info->in_skb; 110 + struct sk_buff *skb = info->out_skb; 111 + struct nlmsghdr *nlh; 112 + struct crypto_user_alg *ualg; 113 + int err = 0; 114 + 115 + nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq, 116 + CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags); 117 + if (!nlh) { 118 + err = -EMSGSIZE; 119 + goto out; 120 + } 121 + 122 + ualg = nlmsg_data(nlh); 123 + 124 + err = crypto_reportstat_one(alg, ualg, skb); 125 + if (err) { 126 + nlmsg_cancel(skb, nlh); 127 + goto out; 128 + } 129 + 130 + nlmsg_end(skb, nlh); 131 + 132 + out: 133 + return err; 134 + } 135 + 136 + int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, 137 + struct nlattr **attrs) 138 + { 139 + struct net *net = sock_net(in_skb->sk); 140 + struct crypto_user_alg *p = nlmsg_data(in_nlh); 141 + struct crypto_alg *alg; 142 + struct 
sk_buff *skb; 143 + struct crypto_dump_info info; 144 + int err; 145 + 146 + if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) 147 + return -EINVAL; 148 + 149 + alg = crypto_alg_match(p, 0); 150 + if (!alg) 151 + return -ENOENT; 152 + 153 + err = -ENOMEM; 154 + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); 155 + if (!skb) 156 + goto drop_alg; 157 + 158 + info.in_skb = in_skb; 159 + info.out_skb = skb; 160 + info.nlmsg_seq = in_nlh->nlmsg_seq; 161 + info.nlmsg_flags = 0; 162 + 163 + err = crypto_reportstat_alg(alg, &info); 164 + 165 + drop_alg: 166 + crypto_mod_put(alg); 167 + 168 + if (err) { 169 + kfree_skb(skb); 170 + return err; 171 + } 172 + 173 + return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid); 174 + } 175 + 176 + MODULE_LICENSE("GPL");
+30
crypto/hash.h
··· 8 8 #define _LOCAL_CRYPTO_HASH_H 9 9 10 10 #include <crypto/internal/hash.h> 11 + #include <linux/cryptouser.h> 11 12 12 13 #include "internal.h" 14 + 15 + static inline struct crypto_istat_hash *hash_get_stat( 16 + struct hash_alg_common *alg) 17 + { 18 + #ifdef CONFIG_CRYPTO_STATS 19 + return &alg->stat; 20 + #else 21 + return NULL; 22 + #endif 23 + } 24 + 25 + static inline int crypto_hash_report_stat(struct sk_buff *skb, 26 + struct crypto_alg *alg, 27 + const char *type) 28 + { 29 + struct hash_alg_common *halg = __crypto_hash_alg_common(alg); 30 + struct crypto_istat_hash *istat = hash_get_stat(halg); 31 + struct crypto_stat_hash rhash; 32 + 33 + memset(&rhash, 0, sizeof(rhash)); 34 + 35 + strscpy(rhash.type, type, sizeof(rhash.type)); 36 + 37 + rhash.stat_hash_cnt = atomic64_read(&istat->hash_cnt); 38 + rhash.stat_hash_tlen = atomic64_read(&istat->hash_tlen); 39 + rhash.stat_err_cnt = atomic64_read(&istat->err_cnt); 40 + 41 + return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash); 42 + } 13 43 14 44 extern const struct crypto_type crypto_shash_type; 15 45
+30
crypto/kpp.c
··· 66 66 kpp->free(kpp); 67 67 } 68 68 69 + static int __maybe_unused crypto_kpp_report_stat( 70 + struct sk_buff *skb, struct crypto_alg *alg) 71 + { 72 + struct kpp_alg *kpp = __crypto_kpp_alg(alg); 73 + struct crypto_istat_kpp *istat; 74 + struct crypto_stat_kpp rkpp; 75 + 76 + istat = kpp_get_stat(kpp); 77 + 78 + memset(&rkpp, 0, sizeof(rkpp)); 79 + 80 + strscpy(rkpp.type, "kpp", sizeof(rkpp.type)); 81 + 82 + rkpp.stat_setsecret_cnt = atomic64_read(&istat->setsecret_cnt); 83 + rkpp.stat_generate_public_key_cnt = 84 + atomic64_read(&istat->generate_public_key_cnt); 85 + rkpp.stat_compute_shared_secret_cnt = 86 + atomic64_read(&istat->compute_shared_secret_cnt); 87 + rkpp.stat_err_cnt = atomic64_read(&istat->err_cnt); 88 + 89 + return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp); 90 + } 91 + 69 92 static const struct crypto_type crypto_kpp_type = { 70 93 .extsize = crypto_alg_extsize, 71 94 .init_tfm = crypto_kpp_init_tfm, ··· 98 75 #endif 99 76 #if IS_ENABLED(CONFIG_CRYPTO_USER) 100 77 .report = crypto_kpp_report, 78 + #endif 79 + #ifdef CONFIG_CRYPTO_STATS 80 + .report_stat = crypto_kpp_report_stat, 101 81 #endif 102 82 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 103 83 .maskset = CRYPTO_ALG_TYPE_MASK, ··· 131 105 132 106 static void kpp_prepare_alg(struct kpp_alg *alg) 133 107 { 108 + struct crypto_istat_kpp *istat = kpp_get_stat(alg); 134 109 struct crypto_alg *base = &alg->base; 135 110 136 111 base->cra_type = &crypto_kpp_type; 137 112 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; 138 113 base->cra_flags |= CRYPTO_ALG_TYPE_KPP; 114 + 115 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 116 + memset(istat, 0, sizeof(*istat)); 139 117 } 140 118 141 119 int crypto_register_kpp(struct kpp_alg *alg)
+69 -4
crypto/lskcipher.c
··· 29 29 return container_of(alg, struct lskcipher_alg, co.base); 30 30 } 31 31 32 + static inline struct crypto_istat_cipher *lskcipher_get_stat( 33 + struct lskcipher_alg *alg) 34 + { 35 + return skcipher_get_stat_common(&alg->co); 36 + } 37 + 38 + static inline int crypto_lskcipher_errstat(struct lskcipher_alg *alg, int err) 39 + { 40 + struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); 41 + 42 + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) 43 + return err; 44 + 45 + if (err) 46 + atomic64_inc(&istat->err_cnt); 47 + 48 + return err; 49 + } 50 + 32 51 static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm, 33 52 const u8 *key, unsigned int keylen) 34 53 { ··· 147 128 u32 flags)) 148 129 { 149 130 unsigned long alignmask = crypto_lskcipher_alignmask(tfm); 131 + struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); 132 + int ret; 150 133 151 134 if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) & 152 - alignmask) 153 - return crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv, 154 - crypt); 135 + alignmask) { 136 + ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv, 137 + crypt); 138 + goto out; 139 + } 155 140 156 - return crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL); 141 + ret = crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL); 142 + 143 + out: 144 + return crypto_lskcipher_errstat(alg, ret); 157 145 } 158 146 159 147 int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src, 160 148 u8 *dst, unsigned len, u8 *iv) 161 149 { 162 150 struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm); 151 + 152 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { 153 + struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); 154 + 155 + atomic64_inc(&istat->encrypt_cnt); 156 + atomic64_add(len, &istat->encrypt_tlen); 157 + } 163 158 164 159 return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt); 165 160 } ··· 183 150 u8 *dst, unsigned len, u8 *iv) 184 151 { 185 152 struct lskcipher_alg 
*alg = crypto_lskcipher_alg(tfm); 153 + 154 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { 155 + struct crypto_istat_cipher *istat = lskcipher_get_stat(alg); 156 + 157 + atomic64_inc(&istat->decrypt_cnt); 158 + atomic64_add(len, &istat->decrypt_tlen); 159 + } 186 160 187 161 return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt); 188 162 } ··· 322 282 sizeof(rblkcipher), &rblkcipher); 323 283 } 324 284 285 + static int __maybe_unused crypto_lskcipher_report_stat( 286 + struct sk_buff *skb, struct crypto_alg *alg) 287 + { 288 + struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg); 289 + struct crypto_istat_cipher *istat; 290 + struct crypto_stat_cipher rcipher; 291 + 292 + istat = lskcipher_get_stat(skcipher); 293 + 294 + memset(&rcipher, 0, sizeof(rcipher)); 295 + 296 + strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); 297 + 298 + rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt); 299 + rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen); 300 + rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt); 301 + rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen); 302 + rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt); 303 + 304 + return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); 305 + } 306 + 325 307 static const struct crypto_type crypto_lskcipher_type = { 326 308 .extsize = crypto_alg_extsize, 327 309 .init_tfm = crypto_lskcipher_init_tfm, ··· 353 291 #endif 354 292 #if IS_ENABLED(CONFIG_CRYPTO_USER) 355 293 .report = crypto_lskcipher_report, 294 + #endif 295 + #ifdef CONFIG_CRYPTO_STATS 296 + .report_stat = crypto_lskcipher_report_stat, 356 297 #endif 357 298 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 358 299 .maskset = CRYPTO_ALG_TYPE_MASK,
+39 -5
crypto/rng.c
··· 30 30 31 31 int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) 32 32 { 33 + struct rng_alg *alg = crypto_rng_alg(tfm); 33 34 u8 *buf = NULL; 34 35 int err; 35 36 37 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 38 + atomic64_inc(&rng_get_stat(alg)->seed_cnt); 39 + 36 40 if (!seed && slen) { 37 41 buf = kmalloc(slen, GFP_KERNEL); 42 + err = -ENOMEM; 38 43 if (!buf) 39 - return -ENOMEM; 44 + goto out; 40 45 41 46 err = get_random_bytes_wait(buf, slen); 42 47 if (err) 43 - goto out; 48 + goto free_buf; 44 49 seed = buf; 45 50 } 46 51 47 - err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); 48 - out: 52 + err = alg->seed(tfm, seed, slen); 53 + free_buf: 49 54 kfree_sensitive(buf); 50 - return err; 55 + out: 56 + return crypto_rng_errstat(alg, err); 51 57 } 52 58 EXPORT_SYMBOL_GPL(crypto_rng_reset); 53 59 ··· 91 85 seq_printf(m, "seedsize : %u\n", seedsize(alg)); 92 86 } 93 87 88 + static int __maybe_unused crypto_rng_report_stat( 89 + struct sk_buff *skb, struct crypto_alg *alg) 90 + { 91 + struct rng_alg *rng = __crypto_rng_alg(alg); 92 + struct crypto_istat_rng *istat; 93 + struct crypto_stat_rng rrng; 94 + 95 + istat = rng_get_stat(rng); 96 + 97 + memset(&rrng, 0, sizeof(rrng)); 98 + 99 + strscpy(rrng.type, "rng", sizeof(rrng.type)); 100 + 101 + rrng.stat_generate_cnt = atomic64_read(&istat->generate_cnt); 102 + rrng.stat_generate_tlen = atomic64_read(&istat->generate_tlen); 103 + rrng.stat_seed_cnt = atomic64_read(&istat->seed_cnt); 104 + rrng.stat_err_cnt = atomic64_read(&istat->err_cnt); 105 + 106 + return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng); 107 + } 108 + 94 109 static const struct crypto_type crypto_rng_type = { 95 110 .extsize = crypto_alg_extsize, 96 111 .init_tfm = crypto_rng_init_tfm, ··· 120 93 #endif 121 94 #if IS_ENABLED(CONFIG_CRYPTO_USER) 122 95 .report = crypto_rng_report, 96 + #endif 97 + #ifdef CONFIG_CRYPTO_STATS 98 + .report_stat = crypto_rng_report_stat, 123 99 #endif 124 100 .maskclear = 
~CRYPTO_ALG_TYPE_MASK, 125 101 .maskset = CRYPTO_ALG_TYPE_MASK, ··· 199 169 200 170 int crypto_register_rng(struct rng_alg *alg) 201 171 { 172 + struct crypto_istat_rng *istat = rng_get_stat(alg); 202 173 struct crypto_alg *base = &alg->base; 203 174 204 175 if (alg->seedsize > PAGE_SIZE / 8) ··· 208 177 base->cra_type = &crypto_rng_type; 209 178 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; 210 179 base->cra_flags |= CRYPTO_ALG_TYPE_RNG; 180 + 181 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 182 + memset(istat, 0, sizeof(*istat)); 211 183 212 184 return crypto_register_alg(base); 213 185 }
+6 -2
crypto/scompress.c
··· 271 271 #if IS_ENABLED(CONFIG_CRYPTO_USER) 272 272 .report = crypto_scomp_report, 273 273 #endif 274 + #ifdef CONFIG_CRYPTO_STATS 275 + .report_stat = crypto_acomp_report_stat, 276 + #endif 274 277 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 275 278 .maskset = CRYPTO_ALG_TYPE_MASK, 276 279 .type = CRYPTO_ALG_TYPE_SCOMPRESS, ··· 282 279 283 280 int crypto_register_scomp(struct scomp_alg *alg) 284 281 { 285 - struct crypto_alg *base = &alg->base; 282 + struct crypto_alg *base = &alg->calg.base; 283 + 284 + comp_prepare_alg(&alg->calg); 286 285 287 286 base->cra_type = &crypto_scomp_type; 288 - base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; 289 287 base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS; 290 288 291 289 return crypto_register_alg(base);
+70 -5
crypto/shash.c
··· 16 16 17 17 #include "hash.h" 18 18 19 + static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg) 20 + { 21 + return hash_get_stat(&alg->halg); 22 + } 23 + 24 + static inline int crypto_shash_errstat(struct shash_alg *alg, int err) 25 + { 26 + if (IS_ENABLED(CONFIG_CRYPTO_STATS) && err) 27 + atomic64_inc(&shash_get_stat(alg)->err_cnt); 28 + return err; 29 + } 30 + 19 31 int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, 20 32 unsigned int keylen) 21 33 { ··· 61 49 int crypto_shash_update(struct shash_desc *desc, const u8 *data, 62 50 unsigned int len) 63 51 { 64 - return crypto_shash_alg(desc->tfm)->update(desc, data, len); 52 + struct shash_alg *shash = crypto_shash_alg(desc->tfm); 53 + int err; 54 + 55 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 56 + atomic64_add(len, &shash_get_stat(shash)->hash_tlen); 57 + 58 + err = shash->update(desc, data, len); 59 + 60 + return crypto_shash_errstat(shash, err); 65 61 } 66 62 EXPORT_SYMBOL_GPL(crypto_shash_update); 67 63 68 64 int crypto_shash_final(struct shash_desc *desc, u8 *out) 69 65 { 70 - return crypto_shash_alg(desc->tfm)->final(desc, out); 66 + struct shash_alg *shash = crypto_shash_alg(desc->tfm); 67 + int err; 68 + 69 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 70 + atomic64_inc(&shash_get_stat(shash)->hash_cnt); 71 + 72 + err = shash->final(desc, out); 73 + 74 + return crypto_shash_errstat(shash, err); 71 75 } 72 76 EXPORT_SYMBOL_GPL(crypto_shash_final); 73 77 ··· 99 71 int crypto_shash_finup(struct shash_desc *desc, const u8 *data, 100 72 unsigned int len, u8 *out) 101 73 { 102 - return crypto_shash_alg(desc->tfm)->finup(desc, data, len, out); 74 + struct crypto_shash *tfm = desc->tfm; 75 + struct shash_alg *shash = crypto_shash_alg(tfm); 76 + int err; 77 + 78 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { 79 + struct crypto_istat_hash *istat = shash_get_stat(shash); 80 + 81 + atomic64_inc(&istat->hash_cnt); 82 + atomic64_add(len, &istat->hash_tlen); 83 + } 84 + 85 + err = shash->finup(desc, 
data, len, out); 86 + 87 + return crypto_shash_errstat(shash, err); 103 88 } 104 89 EXPORT_SYMBOL_GPL(crypto_shash_finup); 105 90 ··· 129 88 unsigned int len, u8 *out) 130 89 { 131 90 struct crypto_shash *tfm = desc->tfm; 91 + struct shash_alg *shash = crypto_shash_alg(tfm); 92 + int err; 93 + 94 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { 95 + struct crypto_istat_hash *istat = shash_get_stat(shash); 96 + 97 + atomic64_inc(&istat->hash_cnt); 98 + atomic64_add(len, &istat->hash_tlen); 99 + } 132 100 133 101 if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 134 - return -ENOKEY; 102 + err = -ENOKEY; 103 + else 104 + err = shash->digest(desc, data, len, out); 135 105 136 - return crypto_shash_alg(desc->tfm)->digest(desc, data, len, out); 106 + return crypto_shash_errstat(shash, err); 137 107 } 138 108 EXPORT_SYMBOL_GPL(crypto_shash_digest); 139 109 ··· 265 213 seq_printf(m, "digestsize : %u\n", salg->digestsize); 266 214 } 267 215 216 + static int __maybe_unused crypto_shash_report_stat( 217 + struct sk_buff *skb, struct crypto_alg *alg) 218 + { 219 + return crypto_hash_report_stat(skb, alg, "shash"); 220 + } 221 + 268 222 const struct crypto_type crypto_shash_type = { 269 223 .extsize = crypto_alg_extsize, 270 224 .init_tfm = crypto_shash_init_tfm, ··· 280 222 #endif 281 223 #if IS_ENABLED(CONFIG_CRYPTO_USER) 282 224 .report = crypto_shash_report, 225 + #endif 226 + #ifdef CONFIG_CRYPTO_STATS 227 + .report_stat = crypto_shash_report_stat, 283 228 #endif 284 229 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 285 230 .maskset = CRYPTO_ALG_TYPE_MASK, ··· 350 289 351 290 int hash_prepare_alg(struct hash_alg_common *alg) 352 291 { 292 + struct crypto_istat_hash *istat = hash_get_stat(alg); 353 293 struct crypto_alg *base = &alg->base; 354 294 355 295 if (alg->digestsize > HASH_MAX_DIGESTSIZE) ··· 361 299 return -EINVAL; 362 300 363 301 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; 302 + 303 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 304 + memset(istat, 0, sizeof(*istat)); 364 305 365 306 
return 0; 366 307 }
+13
crypto/sig.c
··· 45 45 return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, sizeof(rsig), &rsig); 46 46 } 47 47 48 + static int __maybe_unused crypto_sig_report_stat(struct sk_buff *skb, 49 + struct crypto_alg *alg) 50 + { 51 + struct crypto_stat_akcipher rsig = {}; 52 + 53 + strscpy(rsig.type, "sig", sizeof(rsig.type)); 54 + 55 + return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, sizeof(rsig), &rsig); 56 + } 57 + 48 58 static const struct crypto_type crypto_sig_type = { 49 59 .extsize = crypto_alg_extsize, 50 60 .init_tfm = crypto_sig_init_tfm, ··· 63 53 #endif 64 54 #if IS_ENABLED(CONFIG_CRYPTO_USER) 65 55 .report = crypto_sig_report, 56 + #endif 57 + #ifdef CONFIG_CRYPTO_STATS 58 + .report_stat = crypto_sig_report_stat, 66 59 #endif 67 60 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 68 61 .maskset = CRYPTO_ALG_TYPE_SIG_MASK,
+78 -8
crypto/skcipher.c
··· 89 89 return container_of(alg, struct skcipher_alg, base); 90 90 } 91 91 92 + static inline struct crypto_istat_cipher *skcipher_get_stat( 93 + struct skcipher_alg *alg) 94 + { 95 + return skcipher_get_stat_common(&alg->co); 96 + } 97 + 98 + static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err) 99 + { 100 + struct crypto_istat_cipher *istat = skcipher_get_stat(alg); 101 + 102 + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) 103 + return err; 104 + 105 + if (err && err != -EINPROGRESS && err != -EBUSY) 106 + atomic64_inc(&istat->err_cnt); 107 + 108 + return err; 109 + } 110 + 92 111 static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) 93 112 { 94 113 u8 *addr; ··· 654 635 { 655 636 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 656 637 struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 638 + int ret; 639 + 640 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { 641 + struct crypto_istat_cipher *istat = skcipher_get_stat(alg); 642 + 643 + atomic64_inc(&istat->encrypt_cnt); 644 + atomic64_add(req->cryptlen, &istat->encrypt_tlen); 645 + } 657 646 658 647 if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 659 - return -ENOKEY; 660 - if (alg->co.base.cra_type != &crypto_skcipher_type) 661 - return crypto_lskcipher_encrypt_sg(req); 662 - return alg->encrypt(req); 648 + ret = -ENOKEY; 649 + else if (alg->co.base.cra_type != &crypto_skcipher_type) 650 + ret = crypto_lskcipher_encrypt_sg(req); 651 + else 652 + ret = alg->encrypt(req); 653 + 654 + return crypto_skcipher_errstat(alg, ret); 663 655 } 664 656 EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt); 665 657 ··· 678 648 { 679 649 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 680 650 struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 651 + int ret; 652 + 653 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { 654 + struct crypto_istat_cipher *istat = skcipher_get_stat(alg); 655 + 656 + atomic64_inc(&istat->decrypt_cnt); 657 + atomic64_add(req->cryptlen, 
&istat->decrypt_tlen); 658 + } 681 659 682 660 if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 683 - return -ENOKEY; 684 - if (alg->co.base.cra_type != &crypto_skcipher_type) 685 - return crypto_lskcipher_decrypt_sg(req); 686 - return alg->decrypt(req); 661 + ret = -ENOKEY; 662 + else if (alg->co.base.cra_type != &crypto_skcipher_type) 663 + ret = crypto_lskcipher_decrypt_sg(req); 664 + else 665 + ret = alg->decrypt(req); 666 + 667 + return crypto_skcipher_errstat(alg, ret); 687 668 } 688 669 EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt); 689 670 ··· 846 805 sizeof(rblkcipher), &rblkcipher); 847 806 } 848 807 808 + static int __maybe_unused crypto_skcipher_report_stat( 809 + struct sk_buff *skb, struct crypto_alg *alg) 810 + { 811 + struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg); 812 + struct crypto_istat_cipher *istat; 813 + struct crypto_stat_cipher rcipher; 814 + 815 + istat = skcipher_get_stat(skcipher); 816 + 817 + memset(&rcipher, 0, sizeof(rcipher)); 818 + 819 + strscpy(rcipher.type, "cipher", sizeof(rcipher.type)); 820 + 821 + rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt); 822 + rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen); 823 + rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt); 824 + rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen); 825 + rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt); 826 + 827 + return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher); 828 + } 829 + 849 830 static const struct crypto_type crypto_skcipher_type = { 850 831 .extsize = crypto_skcipher_extsize, 851 832 .init_tfm = crypto_skcipher_init_tfm, ··· 877 814 #endif 878 815 #if IS_ENABLED(CONFIG_CRYPTO_USER) 879 816 .report = crypto_skcipher_report, 817 + #endif 818 + #ifdef CONFIG_CRYPTO_STATS 819 + .report_stat = crypto_skcipher_report_stat, 880 820 #endif 881 821 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 882 822 .maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK, ··· 935 869 936 870 int 
skcipher_prepare_alg_common(struct skcipher_alg_common *alg) 937 871 { 872 + struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg); 938 873 struct crypto_alg *base = &alg->base; 939 874 940 875 if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 || ··· 947 880 alg->chunksize = base->cra_blocksize; 948 881 949 882 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; 883 + 884 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 885 + memset(istat, 0, sizeof(*istat)); 950 886 951 887 return 0; 952 888 }
+10
crypto/skcipher.h
··· 10 10 #include <crypto/internal/skcipher.h> 11 11 #include "internal.h" 12 12 13 + static inline struct crypto_istat_cipher *skcipher_get_stat_common( 14 + struct skcipher_alg_common *alg) 15 + { 16 + #ifdef CONFIG_CRYPTO_STATS 17 + return &alg->stat; 18 + #else 19 + return NULL; 20 + #endif 21 + } 22 + 13 23 int crypto_lskcipher_encrypt_sg(struct skcipher_request *req); 14 24 int crypto_lskcipher_decrypt_sg(struct skcipher_request *req); 15 25 int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm);
+88 -2
include/crypto/acompress.h
··· 56 56 struct crypto_tfm base; 57 57 }; 58 58 59 + /* 60 + * struct crypto_istat_compress - statistics for compress algorithm 61 + * @compress_cnt: number of compress requests 62 + * @compress_tlen: total data size handled by compress requests 63 + * @decompress_cnt: number of decompress requests 64 + * @decompress_tlen: total data size handled by decompress requests 65 + * @err_cnt: number of error for compress requests 66 + */ 67 + struct crypto_istat_compress { 68 + atomic64_t compress_cnt; 69 + atomic64_t compress_tlen; 70 + atomic64_t decompress_cnt; 71 + atomic64_t decompress_tlen; 72 + atomic64_t err_cnt; 73 + }; 74 + 75 + #ifdef CONFIG_CRYPTO_STATS 76 + #define COMP_ALG_COMMON_STATS struct crypto_istat_compress stat; 77 + #else 78 + #define COMP_ALG_COMMON_STATS 79 + #endif 80 + 81 + #define COMP_ALG_COMMON { \ 82 + COMP_ALG_COMMON_STATS \ 83 + \ 84 + struct crypto_alg base; \ 85 + } 86 + struct comp_alg_common COMP_ALG_COMMON; 87 + 59 88 /** 60 89 * DOC: Asynchronous Compression API 61 90 * ··· 132 103 return &tfm->base; 133 104 } 134 105 106 + static inline struct comp_alg_common *__crypto_comp_alg_common( 107 + struct crypto_alg *alg) 108 + { 109 + return container_of(alg, struct comp_alg_common, base); 110 + } 111 + 135 112 static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm) 136 113 { 137 114 return container_of(tfm, struct crypto_acomp, base); 115 + } 116 + 117 + static inline struct comp_alg_common *crypto_comp_alg_common( 118 + struct crypto_acomp *tfm) 119 + { 120 + return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg); 138 121 } 139 122 140 123 static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm) ··· 255 214 req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; 256 215 } 257 216 217 + static inline struct crypto_istat_compress *comp_get_stat( 218 + struct comp_alg_common *alg) 219 + { 220 + #ifdef CONFIG_CRYPTO_STATS 221 + return &alg->stat; 222 + #else 223 + return NULL; 224 + #endif 225 + } 226 + 
227 + static inline int crypto_comp_errstat(struct comp_alg_common *alg, int err) 228 + { 229 + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) 230 + return err; 231 + 232 + if (err && err != -EINPROGRESS && err != -EBUSY) 233 + atomic64_inc(&comp_get_stat(alg)->err_cnt); 234 + 235 + return err; 236 + } 237 + 258 238 /** 259 239 * crypto_acomp_compress() -- Invoke asynchronous compress operation 260 240 * ··· 287 225 */ 288 226 static inline int crypto_acomp_compress(struct acomp_req *req) 289 227 { 290 - return crypto_acomp_reqtfm(req)->compress(req); 228 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); 229 + struct comp_alg_common *alg; 230 + 231 + alg = crypto_comp_alg_common(tfm); 232 + 233 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { 234 + struct crypto_istat_compress *istat = comp_get_stat(alg); 235 + 236 + atomic64_inc(&istat->compress_cnt); 237 + atomic64_add(req->slen, &istat->compress_tlen); 238 + } 239 + 240 + return crypto_comp_errstat(alg, tfm->compress(req)); 291 241 } 292 242 293 243 /** ··· 313 239 */ 314 240 static inline int crypto_acomp_decompress(struct acomp_req *req) 315 241 { 316 - return crypto_acomp_reqtfm(req)->decompress(req); 242 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); 243 + struct comp_alg_common *alg; 244 + 245 + alg = crypto_comp_alg_common(tfm); 246 + 247 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { 248 + struct crypto_istat_compress *istat = comp_get_stat(alg); 249 + 250 + atomic64_inc(&istat->decompress_cnt); 251 + atomic64_add(req->slen, &istat->decompress_tlen); 252 + } 253 + 254 + return crypto_comp_errstat(alg, tfm->decompress(req)); 317 255 } 318 256 319 257 #endif
+21
include/crypto/aead.h
··· 101 101 void *__ctx[] CRYPTO_MINALIGN_ATTR; 102 102 }; 103 103 104 + /* 105 + * struct crypto_istat_aead - statistics for AEAD algorithm 106 + * @encrypt_cnt: number of encrypt requests 107 + * @encrypt_tlen: total data size handled by encrypt requests 108 + * @decrypt_cnt: number of decrypt requests 109 + * @decrypt_tlen: total data size handled by decrypt requests 110 + * @err_cnt: number of error for AEAD requests 111 + */ 112 + struct crypto_istat_aead { 113 + atomic64_t encrypt_cnt; 114 + atomic64_t encrypt_tlen; 115 + atomic64_t decrypt_cnt; 116 + atomic64_t decrypt_tlen; 117 + atomic64_t err_cnt; 118 + }; 119 + 104 120 /** 105 121 * struct aead_alg - AEAD cipher definition 106 122 * @maxauthsize: Set the maximum authentication tag size supported by the ··· 135 119 * @setkey: see struct skcipher_alg 136 120 * @encrypt: see struct skcipher_alg 137 121 * @decrypt: see struct skcipher_alg 122 + * @stat: statistics for AEAD algorithm 138 123 * @ivsize: see struct skcipher_alg 139 124 * @chunksize: see struct skcipher_alg 140 125 * @init: Initialize the cryptographic transformation object. This function ··· 161 144 int (*decrypt)(struct aead_request *req); 162 145 int (*init)(struct crypto_aead *tfm); 163 146 void (*exit)(struct crypto_aead *tfm); 147 + 148 + #ifdef CONFIG_CRYPTO_STATS 149 + struct crypto_istat_aead stat; 150 + #endif 164 151 165 152 unsigned int ivsize; 166 153 unsigned int maxauthsize;
+74 -4
include/crypto/akcipher.h
··· 54 54 struct crypto_tfm base; 55 55 }; 56 56 57 + /* 58 + * struct crypto_istat_akcipher - statistics for akcipher algorithm 59 + * @encrypt_cnt: number of encrypt requests 60 + * @encrypt_tlen: total data size handled by encrypt requests 61 + * @decrypt_cnt: number of decrypt requests 62 + * @decrypt_tlen: total data size handled by decrypt requests 63 + * @verify_cnt: number of verify operation 64 + * @sign_cnt: number of sign requests 65 + * @err_cnt: number of error for akcipher requests 66 + */ 67 + struct crypto_istat_akcipher { 68 + atomic64_t encrypt_cnt; 69 + atomic64_t encrypt_tlen; 70 + atomic64_t decrypt_cnt; 71 + atomic64_t decrypt_tlen; 72 + atomic64_t verify_cnt; 73 + atomic64_t sign_cnt; 74 + atomic64_t err_cnt; 75 + }; 76 + 57 77 /** 58 78 * struct akcipher_alg - generic public key algorithm 59 79 * ··· 110 90 * @exit: Deinitialize the cryptographic transformation object. This is a 111 91 * counterpart to @init, used to remove various changes set in 112 92 * @init. 93 + * @stat: Statistics for akcipher algorithm 113 94 * 114 95 * @base: Common crypto API algorithm data structure 115 96 */ ··· 126 105 unsigned int (*max_size)(struct crypto_akcipher *tfm); 127 106 int (*init)(struct crypto_akcipher *tfm); 128 107 void (*exit)(struct crypto_akcipher *tfm); 108 + 109 + #ifdef CONFIG_CRYPTO_STATS 110 + struct crypto_istat_akcipher stat; 111 + #endif 129 112 130 113 struct crypto_alg base; 131 114 }; ··· 302 277 return alg->max_size(tfm); 303 278 } 304 279 280 + static inline struct crypto_istat_akcipher *akcipher_get_stat( 281 + struct akcipher_alg *alg) 282 + { 283 + #ifdef CONFIG_CRYPTO_STATS 284 + return &alg->stat; 285 + #else 286 + return NULL; 287 + #endif 288 + } 289 + 290 + static inline int crypto_akcipher_errstat(struct akcipher_alg *alg, int err) 291 + { 292 + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) 293 + return err; 294 + 295 + if (err && err != -EINPROGRESS && err != -EBUSY) 296 + atomic64_inc(&akcipher_get_stat(alg)->err_cnt); 297 + 298 
+ return err; 299 + } 300 + 305 301 /** 306 302 * crypto_akcipher_encrypt() - Invoke public key encrypt operation 307 303 * ··· 336 290 static inline int crypto_akcipher_encrypt(struct akcipher_request *req) 337 291 { 338 292 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 293 + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); 339 294 340 - return crypto_akcipher_alg(tfm)->encrypt(req); 295 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { 296 + struct crypto_istat_akcipher *istat = akcipher_get_stat(alg); 297 + 298 + atomic64_inc(&istat->encrypt_cnt); 299 + atomic64_add(req->src_len, &istat->encrypt_tlen); 300 + } 301 + 302 + return crypto_akcipher_errstat(alg, alg->encrypt(req)); 341 303 } 342 304 343 305 /** ··· 361 307 static inline int crypto_akcipher_decrypt(struct akcipher_request *req) 362 308 { 363 309 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 310 + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); 364 311 365 - return crypto_akcipher_alg(tfm)->decrypt(req); 312 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { 313 + struct crypto_istat_akcipher *istat = akcipher_get_stat(alg); 314 + 315 + atomic64_inc(&istat->decrypt_cnt); 316 + atomic64_add(req->src_len, &istat->decrypt_tlen); 317 + } 318 + 319 + return crypto_akcipher_errstat(alg, alg->decrypt(req)); 366 320 } 367 321 368 322 /** ··· 422 360 static inline int crypto_akcipher_sign(struct akcipher_request *req) 423 361 { 424 362 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 363 + struct akcipher_alg *alg = crypto_akcipher_alg(tfm); 425 364 426 - return crypto_akcipher_alg(tfm)->sign(req); 365 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 366 + atomic64_inc(&akcipher_get_stat(alg)->sign_cnt); 367 + 368 + return crypto_akcipher_errstat(alg, alg->sign(req)); 427 369 } 428 370 429 371 /** ··· 447 381 static inline int crypto_akcipher_verify(struct akcipher_request *req) 448 382 { 449 383 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 384 + struct akcipher_alg *alg = 
crypto_akcipher_alg(tfm); 450 385 451 - return crypto_akcipher_alg(tfm)->verify(req); 386 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 387 + atomic64_inc(&akcipher_get_stat(alg)->verify_cnt); 388 + 389 + return crypto_akcipher_errstat(alg, alg->verify(req)); 452 390 } 453 391 454 392 /**
+3
include/crypto/algapi.h
··· 61 61 void (*show)(struct seq_file *m, struct crypto_alg *alg); 62 62 int (*report)(struct sk_buff *skb, struct crypto_alg *alg); 63 63 void (*free)(struct crypto_instance *inst); 64 + #ifdef CONFIG_CRYPTO_STATS 65 + int (*report_stat)(struct sk_buff *skb, struct crypto_alg *alg); 66 + #endif 64 67 65 68 unsigned int type; 66 69 unsigned int maskclear;
+22
include/crypto/hash.h
··· 24 24 */ 25 25 26 26 /* 27 + * struct crypto_istat_hash - statistics for has algorithm 28 + * @hash_cnt: number of hash requests 29 + * @hash_tlen: total data size hashed 30 + * @err_cnt: number of error for hash requests 31 + */ 32 + struct crypto_istat_hash { 33 + atomic64_t hash_cnt; 34 + atomic64_t hash_tlen; 35 + atomic64_t err_cnt; 36 + }; 37 + 38 + #ifdef CONFIG_CRYPTO_STATS 39 + #define HASH_ALG_COMMON_STAT struct crypto_istat_hash stat; 40 + #else 41 + #define HASH_ALG_COMMON_STAT 42 + #endif 43 + 44 + /* 27 45 * struct hash_alg_common - define properties of message digest 46 + * @stat: Statistics for hash algorithm. 28 47 * @digestsize: Size of the result of the transformation. A buffer of this size 29 48 * must be available to the @final and @finup calls, so they can 30 49 * store the resulting hash into it. For various predefined sizes, ··· 60 41 * information. 61 42 */ 62 43 #define HASH_ALG_COMMON { \ 44 + HASH_ALG_COMMON_STAT \ 45 + \ 63 46 unsigned int digestsize; \ 64 47 unsigned int statesize; \ 65 48 \ ··· 243 222 }; 244 223 }; 245 224 #undef HASH_ALG_COMMON 225 + #undef HASH_ALG_COMMON_STAT 246 226 247 227 struct crypto_ahash { 248 228 bool using_shash; /* Underlying algorithm is shash, not ahash */
+6 -1
include/crypto/internal/acompress.h
··· 31 31 * @init. 32 32 * 33 33 * @reqsize: Context size for (de)compression requests 34 + * @stat: Statistics for compress algorithm 34 35 * @base: Common crypto API algorithm data structure 36 + * @calg: Cmonn algorithm data structure shared with scomp 35 37 */ 36 38 struct acomp_alg { 37 39 int (*compress)(struct acomp_req *req); ··· 44 42 45 43 unsigned int reqsize; 46 44 47 - struct crypto_alg base; 45 + union { 46 + struct COMP_ALG_COMMON; 47 + struct comp_alg_common calg; 48 + }; 48 49 }; 49 50 50 51 /*
+16
include/crypto/internal/cryptouser.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #include <linux/cryptouser.h> 3 + #include <net/netlink.h> 4 + 5 + struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact); 6 + 7 + #ifdef CONFIG_CRYPTO_STATS 8 + int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs); 9 + #else 10 + static inline int crypto_reportstat(struct sk_buff *in_skb, 11 + struct nlmsghdr *in_nlh, 12 + struct nlattr **attrs) 13 + { 14 + return -ENOTSUPP; 15 + } 16 + #endif
+7 -1
include/crypto/internal/scompress.h
··· 27 27 * @free_ctx: Function frees context allocated with alloc_ctx 28 28 * @compress: Function performs a compress operation 29 29 * @decompress: Function performs a de-compress operation 30 + * @stat: Statistics for compress algorithm 30 31 * @base: Common crypto API algorithm data structure 32 + * @calg: Cmonn algorithm data structure shared with acomp 31 33 */ 32 34 struct scomp_alg { 33 35 void *(*alloc_ctx)(struct crypto_scomp *tfm); ··· 40 38 int (*decompress)(struct crypto_scomp *tfm, const u8 *src, 41 39 unsigned int slen, u8 *dst, unsigned int *dlen, 42 40 void *ctx); 43 - struct crypto_alg base; 41 + 42 + union { 43 + struct COMP_ALG_COMMON; 44 + struct comp_alg_common calg; 45 + }; 44 46 }; 45 47 46 48 static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
+55 -3
include/crypto/kpp.h
··· 51 51 struct crypto_tfm base; 52 52 }; 53 53 54 + /* 55 + * struct crypto_istat_kpp - statistics for KPP algorithm 56 + * @setsecret_cnt: number of setsecrey operation 57 + * @generate_public_key_cnt: number of generate_public_key operation 58 + * @compute_shared_secret_cnt: number of compute_shared_secret operation 59 + * @err_cnt: number of error for KPP requests 60 + */ 61 + struct crypto_istat_kpp { 62 + atomic64_t setsecret_cnt; 63 + atomic64_t generate_public_key_cnt; 64 + atomic64_t compute_shared_secret_cnt; 65 + atomic64_t err_cnt; 66 + }; 67 + 54 68 /** 55 69 * struct kpp_alg - generic key-agreement protocol primitives 56 70 * ··· 87 73 * @exit: Undo everything @init did. 88 74 * 89 75 * @base: Common crypto API algorithm data structure 76 + * @stat: Statistics for KPP algorithm 90 77 */ 91 78 struct kpp_alg { 92 79 int (*set_secret)(struct crypto_kpp *tfm, const void *buffer, ··· 99 84 100 85 int (*init)(struct crypto_kpp *tfm); 101 86 void (*exit)(struct crypto_kpp *tfm); 87 + 88 + #ifdef CONFIG_CRYPTO_STATS 89 + struct crypto_istat_kpp stat; 90 + #endif 102 91 103 92 struct crypto_alg base; 104 93 }; ··· 291 272 unsigned short len; 292 273 }; 293 274 275 + static inline struct crypto_istat_kpp *kpp_get_stat(struct kpp_alg *alg) 276 + { 277 + #ifdef CONFIG_CRYPTO_STATS 278 + return &alg->stat; 279 + #else 280 + return NULL; 281 + #endif 282 + } 283 + 284 + static inline int crypto_kpp_errstat(struct kpp_alg *alg, int err) 285 + { 286 + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) 287 + return err; 288 + 289 + if (err && err != -EINPROGRESS && err != -EBUSY) 290 + atomic64_inc(&kpp_get_stat(alg)->err_cnt); 291 + 292 + return err; 293 + } 294 + 294 295 /** 295 296 * crypto_kpp_set_secret() - Invoke kpp operation 296 297 * ··· 329 290 static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, 330 291 const void *buffer, unsigned int len) 331 292 { 332 - return crypto_kpp_alg(tfm)->set_secret(tfm, buffer, len); 293 + struct kpp_alg *alg = 
crypto_kpp_alg(tfm); 294 + 295 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 296 + atomic64_inc(&kpp_get_stat(alg)->setsecret_cnt); 297 + 298 + return crypto_kpp_errstat(alg, alg->set_secret(tfm, buffer, len)); 333 299 } 334 300 335 301 /** ··· 353 309 static inline int crypto_kpp_generate_public_key(struct kpp_request *req) 354 310 { 355 311 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); 312 + struct kpp_alg *alg = crypto_kpp_alg(tfm); 356 313 357 - return crypto_kpp_alg(tfm)->generate_public_key(req); 314 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 315 + atomic64_inc(&kpp_get_stat(alg)->generate_public_key_cnt); 316 + 317 + return crypto_kpp_errstat(alg, alg->generate_public_key(req)); 358 318 } 359 319 360 320 /** ··· 374 326 static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req) 375 327 { 376 328 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); 329 + struct kpp_alg *alg = crypto_kpp_alg(tfm); 377 330 378 - return crypto_kpp_alg(tfm)->compute_shared_secret(req); 331 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) 332 + atomic64_inc(&kpp_get_stat(alg)->compute_shared_secret_cnt); 333 + 334 + return crypto_kpp_errstat(alg, alg->compute_shared_secret(req)); 379 335 } 380 336 381 337 /**
+50 -1
include/crypto/rng.h
··· 15 15 16 16 struct crypto_rng; 17 17 18 + /* 19 + * struct crypto_istat_rng: statistics for RNG algorithm 20 + * @generate_cnt: number of RNG generate requests 21 + * @generate_tlen: total data size of generated data by the RNG 22 + * @seed_cnt: number of times the RNG was seeded 23 + * @err_cnt: number of error for RNG requests 24 + */ 25 + struct crypto_istat_rng { 26 + atomic64_t generate_cnt; 27 + atomic64_t generate_tlen; 28 + atomic64_t seed_cnt; 29 + atomic64_t err_cnt; 30 + }; 31 + 18 32 /** 19 33 * struct rng_alg - random number generator definition 20 34 * ··· 46 32 * size of the seed is defined with @seedsize . 47 33 * @set_ent: Set entropy that would otherwise be obtained from 48 34 * entropy source. Internal use only. 35 + * @stat: Statistics for rng algorithm 49 36 * @seedsize: The seed size required for a random number generator 50 37 * initialization defined with this variable. Some 51 38 * random number generators does not require a seed ··· 62 47 int (*seed)(struct crypto_rng *tfm, const u8 *seed, unsigned int slen); 63 48 void (*set_ent)(struct crypto_rng *tfm, const u8 *data, 64 49 unsigned int len); 50 + 51 + #ifdef CONFIG_CRYPTO_STATS 52 + struct crypto_istat_rng stat; 53 + #endif 65 54 66 55 unsigned int seedsize; 67 56 ··· 144 125 crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm)); 145 126 } 146 127 128 + static inline struct crypto_istat_rng *rng_get_stat(struct rng_alg *alg) 129 + { 130 + #ifdef CONFIG_CRYPTO_STATS 131 + return &alg->stat; 132 + #else 133 + return NULL; 134 + #endif 135 + } 136 + 137 + static inline int crypto_rng_errstat(struct rng_alg *alg, int err) 138 + { 139 + if (!IS_ENABLED(CONFIG_CRYPTO_STATS)) 140 + return err; 141 + 142 + if (err && err != -EINPROGRESS && err != -EBUSY) 143 + atomic64_inc(&rng_get_stat(alg)->err_cnt); 144 + 145 + return err; 146 + } 147 + 147 148 /** 148 149 * crypto_rng_generate() - get random number 149 150 * @tfm: cipher handle ··· 182 143 const u8 *src, unsigned int slen, 183 144 u8 *dst, 
unsigned int dlen) 184 145 { 185 - return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen); 146 + struct rng_alg *alg = crypto_rng_alg(tfm); 147 + 148 + if (IS_ENABLED(CONFIG_CRYPTO_STATS)) { 149 + struct crypto_istat_rng *istat = rng_get_stat(alg); 150 + 151 + atomic64_inc(&istat->generate_cnt); 152 + atomic64_add(dlen, &istat->generate_tlen); 153 + } 154 + 155 + return crypto_rng_errstat(alg, 156 + alg->generate(tfm, src, slen, dst, dlen)); 186 157 } 187 158 188 159 /**
+25
include/crypto/skcipher.h
··· 65 65 }; 66 66 67 67 /* 68 + * struct crypto_istat_cipher - statistics for cipher algorithm 69 + * @encrypt_cnt: number of encrypt requests 70 + * @encrypt_tlen: total data size handled by encrypt requests 71 + * @decrypt_cnt: number of decrypt requests 72 + * @decrypt_tlen: total data size handled by decrypt requests 73 + * @err_cnt: number of error for cipher requests 74 + */ 75 + struct crypto_istat_cipher { 76 + atomic64_t encrypt_cnt; 77 + atomic64_t encrypt_tlen; 78 + atomic64_t decrypt_cnt; 79 + atomic64_t decrypt_tlen; 80 + atomic64_t err_cnt; 81 + }; 82 + 83 + #ifdef CONFIG_CRYPTO_STATS 84 + #define SKCIPHER_ALG_COMMON_STAT struct crypto_istat_cipher stat; 85 + #else 86 + #define SKCIPHER_ALG_COMMON_STAT 87 + #endif 88 + 89 + /* 68 90 * struct skcipher_alg_common - common properties of skcipher_alg 69 91 * @min_keysize: Minimum key size supported by the transformation. This is the 70 92 * smallest key length supported by this transformation algorithm. ··· 103 81 * @chunksize: Equal to the block size except for stream ciphers such as 104 82 * CTR where it is set to the underlying block size. 105 83 * @statesize: Size of the internal state for the algorithm. 84 + * @stat: Statistics for cipher algorithm 106 85 * @base: Definition of a generic crypto algorithm. 107 86 */ 108 87 #define SKCIPHER_ALG_COMMON { \ ··· 112 89 unsigned int ivsize; \ 113 90 unsigned int chunksize; \ 114 91 unsigned int statesize; \ 92 + \ 93 + SKCIPHER_ALG_COMMON_STAT \ 115 94 \ 116 95 struct crypto_alg base; \ 117 96 }
+10 -18
include/uapi/linux/cryptouser.h
··· 54 54 CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */ 55 55 CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */ 56 56 CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */ 57 - CRYPTOCFGA_STAT_LARVAL, /* No longer supported */ 58 - CRYPTOCFGA_STAT_HASH, /* No longer supported */ 59 - CRYPTOCFGA_STAT_BLKCIPHER, /* No longer supported */ 60 - CRYPTOCFGA_STAT_AEAD, /* No longer supported */ 61 - CRYPTOCFGA_STAT_COMPRESS, /* No longer supported */ 62 - CRYPTOCFGA_STAT_RNG, /* No longer supported */ 63 - CRYPTOCFGA_STAT_CIPHER, /* No longer supported */ 64 - CRYPTOCFGA_STAT_AKCIPHER, /* No longer supported */ 65 - CRYPTOCFGA_STAT_KPP, /* No longer supported */ 66 - CRYPTOCFGA_STAT_ACOMP, /* No longer supported */ 57 + CRYPTOCFGA_STAT_LARVAL, /* struct crypto_stat */ 58 + CRYPTOCFGA_STAT_HASH, /* struct crypto_stat */ 59 + CRYPTOCFGA_STAT_BLKCIPHER, /* struct crypto_stat */ 60 + CRYPTOCFGA_STAT_AEAD, /* struct crypto_stat */ 61 + CRYPTOCFGA_STAT_COMPRESS, /* struct crypto_stat */ 62 + CRYPTOCFGA_STAT_RNG, /* struct crypto_stat */ 63 + CRYPTOCFGA_STAT_CIPHER, /* struct crypto_stat */ 64 + CRYPTOCFGA_STAT_AKCIPHER, /* struct crypto_stat */ 65 + CRYPTOCFGA_STAT_KPP, /* struct crypto_stat */ 66 + CRYPTOCFGA_STAT_ACOMP, /* struct crypto_stat */ 67 67 __CRYPTOCFGA_MAX 68 68 69 69 #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) ··· 79 79 __u32 cru_flags; 80 80 }; 81 81 82 - /* No longer supported, do not use. */ 83 82 struct crypto_stat_aead { 84 83 char type[CRYPTO_MAX_NAME]; 85 84 __u64 stat_encrypt_cnt; ··· 88 89 __u64 stat_err_cnt; 89 90 }; 90 91 91 - /* No longer supported, do not use. */ 92 92 struct crypto_stat_akcipher { 93 93 char type[CRYPTO_MAX_NAME]; 94 94 __u64 stat_encrypt_cnt; ··· 99 101 __u64 stat_err_cnt; 100 102 }; 101 103 102 - /* No longer supported, do not use. 
*/ 103 104 struct crypto_stat_cipher { 104 105 char type[CRYPTO_MAX_NAME]; 105 106 __u64 stat_encrypt_cnt; ··· 108 111 __u64 stat_err_cnt; 109 112 }; 110 113 111 - /* No longer supported, do not use. */ 112 114 struct crypto_stat_compress { 113 115 char type[CRYPTO_MAX_NAME]; 114 116 __u64 stat_compress_cnt; ··· 117 121 __u64 stat_err_cnt; 118 122 }; 119 123 120 - /* No longer supported, do not use. */ 121 124 struct crypto_stat_hash { 122 125 char type[CRYPTO_MAX_NAME]; 123 126 __u64 stat_hash_cnt; ··· 124 129 __u64 stat_err_cnt; 125 130 }; 126 131 127 - /* No longer supported, do not use. */ 128 132 struct crypto_stat_kpp { 129 133 char type[CRYPTO_MAX_NAME]; 130 134 __u64 stat_setsecret_cnt; ··· 132 138 __u64 stat_err_cnt; 133 139 }; 134 140 135 - /* No longer supported, do not use. */ 136 141 struct crypto_stat_rng { 137 142 char type[CRYPTO_MAX_NAME]; 138 143 __u64 stat_generate_cnt; ··· 140 147 __u64 stat_err_cnt; 141 148 }; 142 149 143 - /* No longer supported, do not use. */ 144 150 struct crypto_stat_larval { 145 151 char type[CRYPTO_MAX_NAME]; 146 152 };