Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
"Here is the crypto update for 4.15:

API:

- Disambiguate EBUSY when queueing crypto request by adding ENOSPC.
This change touches code outside the crypto API.
- Reset settings when empty string is written to rng_current.

Algorithms:

- Add OSCCA SM3 secure hash.

Drivers:

- Remove old mv_cesa driver (replaced by marvell/cesa).
- Enable rfc3686/ecb/cfb/ofb AES in crypto4xx.
- Add ccm/gcm AES in crypto4xx.
- Add support for BCM7278 in iproc-rng200.
- Add hash support on Exynos in s5p-sss.
- Fix fallback-induced error in vmx.
- Fix output IV in atmel-aes.
- Fix empty GCM hash in mediatek.

Others:

- Fix DoS potential in lib/mpi.
- Fix potential out-of-order issues with padata"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (162 commits)
lib/mpi: call cond_resched() from mpi_powm() loop
crypto: stm32/hash - Fix return issue on update
crypto: dh - Remove pointless checks for NULL 'p' and 'g'
crypto: qat - Clean up error handling in qat_dh_set_secret()
crypto: dh - Don't permit 'key' or 'g' size longer than 'p'
crypto: dh - Don't permit 'p' to be 0
crypto: dh - Fix double free of ctx->p
hwrng: iproc-rng200 - Add support for BCM7278
dt-bindings: rng: Document BCM7278 RNG200 compatible
crypto: chcr - Replace _manual_ swap with swap macro
crypto: marvell - Add a NULL entry at the end of mv_cesa_plat_id_table[]
hwrng: virtio - Virtio RNG devices need to be re-registered after suspend/resume
crypto: atmel - remove empty functions
crypto: ecdh - remove empty exit()
MAINTAINERS: update maintainer for qat
crypto: caam - remove unused param of ctx_map_to_sec4_sg()
crypto: caam - remove unneeded edesc zeroization
crypto: atmel-aes - Reset the controller before each use
crypto: atmel-aes - properly set IV after {en,de}crypt
hwrng: core - Reset user selected rng by writing "" to rng_current
...

+5695 -4642
+9 -41
Documentation/crypto/api-samples.rst
··· 7 7 :: 8 8 9 9 10 - struct tcrypt_result { 11 - struct completion completion; 12 - int err; 13 - }; 14 - 15 10 /* tie all data structures together */ 16 11 struct skcipher_def { 17 12 struct scatterlist sg; 18 13 struct crypto_skcipher *tfm; 19 14 struct skcipher_request *req; 20 - struct tcrypt_result result; 15 + struct crypto_wait wait; 21 16 }; 22 - 23 - /* Callback function */ 24 - static void test_skcipher_cb(struct crypto_async_request *req, int error) 25 - { 26 - struct tcrypt_result *result = req->data; 27 - 28 - if (error == -EINPROGRESS) 29 - return; 30 - result->err = error; 31 - complete(&result->completion); 32 - pr_info("Encryption finished successfully\n"); 33 - } 34 17 35 18 /* Perform cipher operation */ 36 19 static unsigned int test_skcipher_encdec(struct skcipher_def *sk, 37 20 int enc) 38 21 { 39 - int rc = 0; 22 + int rc; 40 23 41 24 if (enc) 42 - rc = crypto_skcipher_encrypt(sk->req); 25 + rc = crypto_wait_req(crypto_skcipher_encrypt(sk->req), &sk->wait); 43 26 else 44 - rc = crypto_skcipher_decrypt(sk->req); 27 + rc = crypto_wait_req(crypto_skcipher_decrypt(sk->req), &sk->wait); 45 28 46 - switch (rc) { 47 - case 0: 48 - break; 49 - case -EINPROGRESS: 50 - case -EBUSY: 51 - rc = wait_for_completion_interruptible( 52 - &sk->result.completion); 53 - if (!rc && !sk->result.err) { 54 - reinit_completion(&sk->result.completion); 55 - break; 56 - } 57 - default: 58 - pr_info("skcipher encrypt returned with %d result %d\n", 59 - rc, sk->result.err); 60 - break; 61 - } 62 - init_completion(&sk->result.completion); 29 + if (rc) 30 + pr_info("skcipher encrypt returned with result %d\n", rc); 63 31 64 32 return rc; 65 33 } ··· 57 89 } 58 90 59 91 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 60 - test_skcipher_cb, 61 - &sk.result); 92 + crypto_req_done, 93 + &sk.wait); 62 94 63 95 /* AES 256 with random key */ 64 96 get_random_bytes(&key, 32); ··· 90 122 /* We encrypt one block */ 91 123 sg_init_one(&sk.sg, scratchpad, 16); 92 124 
skcipher_request_set_crypt(req, &sk.sg, &sk.sg, 16, ivdata); 93 - init_completion(&sk.result.completion); 125 + crypto_init_wait(&sk.wait); 94 126 95 127 /* encrypt data */ 96 128 ret = test_skcipher_encdec(&sk, 1);
+3 -1
Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
··· 1 1 HWRNG support for the iproc-rng200 driver 2 2 3 3 Required properties: 4 - - compatible : "brcm,iproc-rng200" 4 + - compatible : Must be one of: 5 + "brcm,bcm7278-rng200" 6 + "brcm,iproc-rng200" 5 7 - reg : base address and size of control register block 6 8 7 9 Example:
Documentation/devicetree/bindings/rng/samsung,exynos-rng4.txt → Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
+2 -3
MAINTAINERS
··· 5484 5484 5485 5485 FREESCALE CAAM (Cryptographic Acceleration and Assurance Module) DRIVER 5486 5486 M: Horia Geantă <horia.geanta@nxp.com> 5487 - M: Dan Douglass <dan.douglass@nxp.com> 5487 + M: Aymen Sghaier <aymen.sghaier@nxp.com> 5488 5488 L: linux-crypto@vger.kernel.org 5489 5489 S: Maintained 5490 5490 F: drivers/crypto/caam/ ··· 11060 11060 11061 11061 QAT DRIVER 11062 11062 M: Giovanni Cabiddu <giovanni.cabiddu@intel.com> 11063 - M: Salvatore Benedetto <salvatore.benedetto@intel.com> 11064 11063 L: qat-linux@intel.com 11065 11064 S: Supported 11066 11065 F: drivers/crypto/qat/ ··· 11792 11793 L: linux-samsung-soc@vger.kernel.org 11793 11794 S: Maintained 11794 11795 F: drivers/crypto/exynos-rng.c 11795 - F: Documentation/devicetree/bindings/rng/samsung,exynos-rng4.txt 11796 + F: Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt 11796 11797 11797 11798 SAMSUNG FRAMEBUFFER DRIVER 11798 11799 M: Jingoo Han <jingoohan1@gmail.com>
+1 -1
arch/arm/configs/dove_defconfig
··· 140 140 CONFIG_CRYPTO_DEFLATE=y 141 141 CONFIG_CRYPTO_LZO=y 142 142 # CONFIG_CRYPTO_ANSI_CPRNG is not set 143 - CONFIG_CRYPTO_DEV_MV_CESA=y 143 + CONFIG_CRYPTO_DEV_MARVELL_CESA=y 144 144 CONFIG_CRC_CCITT=y 145 145 CONFIG_LIBCRC32C=y
+1 -1
arch/arm/configs/multi_v5_defconfig
··· 279 279 CONFIG_DEBUG_USER=y 280 280 CONFIG_CRYPTO_CBC=m 281 281 CONFIG_CRYPTO_PCBC=m 282 - CONFIG_CRYPTO_DEV_MV_CESA=y 282 + CONFIG_CRYPTO_DEV_MARVELL_CESA=y 283 283 CONFIG_CRC_CCITT=y 284 284 CONFIG_LIBCRC32C=y
+1 -1
arch/arm/configs/orion5x_defconfig
··· 163 163 CONFIG_CRYPTO_ECB=m 164 164 CONFIG_CRYPTO_PCBC=m 165 165 # CONFIG_CRYPTO_ANSI_CPRNG is not set 166 - CONFIG_CRYPTO_DEV_MV_CESA=y 166 + CONFIG_CRYPTO_DEV_MARVELL_CESA=y 167 167 CONFIG_CRC_T10DIF=y
+6 -4
arch/x86/crypto/aesni-intel_glue.c
··· 28 28 #include <crypto/cryptd.h> 29 29 #include <crypto/ctr.h> 30 30 #include <crypto/b128ops.h> 31 + #include <crypto/gcm.h> 31 32 #include <crypto/xts.h> 32 33 #include <asm/cpu_device_id.h> 33 34 #include <asm/fpu/api.h> ··· 1068 1067 } 1069 1068 }; 1070 1069 1070 + static 1071 1071 struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)]; 1072 1072 1073 - struct { 1073 + static struct { 1074 1074 const char *algname; 1075 1075 const char *drvname; 1076 1076 const char *basename; ··· 1133 1131 .setauthsize = common_rfc4106_set_authsize, 1134 1132 .encrypt = helper_rfc4106_encrypt, 1135 1133 .decrypt = helper_rfc4106_decrypt, 1136 - .ivsize = 8, 1134 + .ivsize = GCM_RFC4106_IV_SIZE, 1137 1135 .maxauthsize = 16, 1138 1136 .base = { 1139 1137 .cra_name = "__gcm-aes-aesni", ··· 1151 1149 .setauthsize = rfc4106_set_authsize, 1152 1150 .encrypt = rfc4106_encrypt, 1153 1151 .decrypt = rfc4106_decrypt, 1154 - .ivsize = 8, 1152 + .ivsize = GCM_RFC4106_IV_SIZE, 1155 1153 .maxauthsize = 16, 1156 1154 .base = { 1157 1155 .cra_name = "rfc4106(gcm(aes))", ··· 1167 1165 .setauthsize = generic_gcmaes_set_authsize, 1168 1166 .encrypt = generic_gcmaes_encrypt, 1169 1167 .decrypt = generic_gcmaes_decrypt, 1170 - .ivsize = 12, 1168 + .ivsize = GCM_AES_IV_SIZE, 1171 1169 .maxauthsize = 16, 1172 1170 .base = { 1173 1171 .cra_name = "gcm(aes)",
+6 -11
arch/x86/crypto/crc32-pclmul_asm.S
··· 41 41 #include <asm/inst.h> 42 42 43 43 44 + .section .rodata 44 45 .align 16 45 46 /* 46 47 * [x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4 ··· 112 111 pxor CONSTANT, %xmm1 113 112 sub $0x40, LEN 114 113 add $0x40, BUF 115 - #ifndef __x86_64__ 116 - /* This is for position independent code(-fPIC) support for 32bit */ 117 - call delta 118 - delta: 119 - pop %ecx 120 - #endif 121 114 cmp $0x40, LEN 122 115 jb less_64 123 116 124 117 #ifdef __x86_64__ 125 118 movdqa .Lconstant_R2R1(%rip), CONSTANT 126 119 #else 127 - movdqa .Lconstant_R2R1 - delta(%ecx), CONSTANT 120 + movdqa .Lconstant_R2R1, CONSTANT 128 121 #endif 129 122 130 123 loop_64:/* 64 bytes Full cache line folding */ ··· 167 172 #ifdef __x86_64__ 168 173 movdqa .Lconstant_R4R3(%rip), CONSTANT 169 174 #else 170 - movdqa .Lconstant_R4R3 - delta(%ecx), CONSTANT 175 + movdqa .Lconstant_R4R3, CONSTANT 171 176 #endif 172 177 prefetchnta (BUF) 173 178 ··· 215 220 movdqa .Lconstant_R5(%rip), CONSTANT 216 221 movdqa .Lconstant_mask32(%rip), %xmm3 217 222 #else 218 - movdqa .Lconstant_R5 - delta(%ecx), CONSTANT 219 - movdqa .Lconstant_mask32 - delta(%ecx), %xmm3 223 + movdqa .Lconstant_R5, CONSTANT 224 + movdqa .Lconstant_mask32, %xmm3 220 225 #endif 221 226 psrldq $0x04, %xmm2 222 227 pand %xmm3, %xmm1 ··· 227 232 #ifdef __x86_64__ 228 233 movdqa .Lconstant_RUpoly(%rip), CONSTANT 229 234 #else 230 - movdqa .Lconstant_RUpoly - delta(%ecx), CONSTANT 235 + movdqa .Lconstant_RUpoly, CONSTANT 231 236 #endif 232 237 movdqa %xmm1, %xmm2 233 238 pand %xmm3, %xmm1
+11
crypto/Kconfig
··· 860 860 References: 861 861 http://keccak.noekeon.org/ 862 862 863 + config CRYPTO_SM3 864 + tristate "SM3 digest algorithm" 865 + select CRYPTO_HASH 866 + help 867 + SM3 secure hash function as defined by OSCCA (GM/T 0004-2012 SM3). 868 + It is part of the Chinese Commercial Cryptography suite. 869 + 870 + References: 871 + http://www.oscca.gov.cn/UpFile/20101222141857786.pdf 872 + https://datatracker.ietf.org/doc/html/draft-shen-sm3-hash 873 + 863 874 config CRYPTO_TGR192 864 875 tristate "Tiger digest algorithms" 865 876 select CRYPTO_HASH
+1
crypto/Makefile
··· 71 71 obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o 72 72 obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o 73 73 obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o 74 + obj-$(CONFIG_CRYPTO_SM3) += sm3_generic.o 74 75 obj-$(CONFIG_CRYPTO_WP512) += wp512.o 75 76 CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 76 77 obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
-27
crypto/af_alg.c
··· 481 481 } 482 482 EXPORT_SYMBOL_GPL(af_alg_cmsg_send); 483 483 484 - int af_alg_wait_for_completion(int err, struct af_alg_completion *completion) 485 - { 486 - switch (err) { 487 - case -EINPROGRESS: 488 - case -EBUSY: 489 - wait_for_completion(&completion->completion); 490 - reinit_completion(&completion->completion); 491 - err = completion->err; 492 - break; 493 - }; 494 - 495 - return err; 496 - } 497 - EXPORT_SYMBOL_GPL(af_alg_wait_for_completion); 498 - 499 - void af_alg_complete(struct crypto_async_request *req, int err) 500 - { 501 - struct af_alg_completion *completion = req->data; 502 - 503 - if (err == -EINPROGRESS) 504 - return; 505 - 506 - completion->err = err; 507 - complete(&completion->completion); 508 - } 509 - EXPORT_SYMBOL_GPL(af_alg_complete); 510 - 511 484 /** 512 485 * af_alg_alloc_tsgl - allocate the TX SGL 513 486 *
+3 -9
crypto/ahash.c
··· 334 334 return err; 335 335 336 336 err = op(req); 337 - if (err == -EINPROGRESS || 338 - (err == -EBUSY && (ahash_request_flags(req) & 339 - CRYPTO_TFM_REQ_MAY_BACKLOG))) 337 + if (err == -EINPROGRESS || err == -EBUSY) 340 338 return err; 341 339 342 340 ahash_restore_req(req, err); ··· 392 394 req->base.complete = ahash_def_finup_done2; 393 395 394 396 err = crypto_ahash_reqtfm(req)->final(req); 395 - if (err == -EINPROGRESS || 396 - (err == -EBUSY && (ahash_request_flags(req) & 397 - CRYPTO_TFM_REQ_MAY_BACKLOG))) 397 + if (err == -EINPROGRESS || err == -EBUSY) 398 398 return err; 399 399 400 400 out: ··· 428 432 return err; 429 433 430 434 err = tfm->update(req); 431 - if (err == -EINPROGRESS || 432 - (err == -EBUSY && (ahash_request_flags(req) & 433 - CRYPTO_TFM_REQ_MAY_BACKLOG))) 435 + if (err == -EINPROGRESS || err == -EBUSY) 434 436 return err; 435 437 436 438 return ahash_def_finup_finish1(req, err);
+4 -2
crypto/algapi.c
··· 897 897 int err = -EINPROGRESS; 898 898 899 899 if (unlikely(queue->qlen >= queue->max_qlen)) { 900 - err = -EBUSY; 901 - if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 900 + if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { 901 + err = -ENOSPC; 902 902 goto out; 903 + } 904 + err = -EBUSY; 903 905 if (queue->backlog == &queue->list) 904 906 queue->backlog = &request->list; 905 907 }
-1
crypto/algboss.c
··· 122 122 int notnum = 0; 123 123 124 124 name = ++p; 125 - len = 0; 126 125 127 126 for (; isalnum(*p) || *p == '-' || *p == '_'; p++) 128 127 notnum |= !isdigit(*p);
+4 -4
crypto/algif_aead.c
··· 278 278 /* Synchronous operation */ 279 279 aead_request_set_callback(&areq->cra_u.aead_req, 280 280 CRYPTO_TFM_REQ_MAY_BACKLOG, 281 - af_alg_complete, &ctx->completion); 282 - err = af_alg_wait_for_completion(ctx->enc ? 281 + crypto_req_done, &ctx->wait); 282 + err = crypto_wait_req(ctx->enc ? 283 283 crypto_aead_encrypt(&areq->cra_u.aead_req) : 284 284 crypto_aead_decrypt(&areq->cra_u.aead_req), 285 - &ctx->completion); 285 + &ctx->wait); 286 286 } 287 287 288 288 /* AIO operation in progress */ ··· 554 554 ctx->merge = 0; 555 555 ctx->enc = 0; 556 556 ctx->aead_assoclen = 0; 557 - af_alg_init_completion(&ctx->completion); 557 + crypto_init_wait(&ctx->wait); 558 558 559 559 ask->private = ctx; 560 560
+14 -16
crypto/algif_hash.c
··· 26 26 27 27 u8 *result; 28 28 29 - struct af_alg_completion completion; 29 + struct crypto_wait wait; 30 30 31 31 unsigned int len; 32 32 bool more; ··· 88 88 if ((msg->msg_flags & MSG_MORE)) 89 89 hash_free_result(sk, ctx); 90 90 91 - err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req), 92 - &ctx->completion); 91 + err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait); 93 92 if (err) 94 93 goto unlock; 95 94 } ··· 109 110 110 111 ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len); 111 112 112 - err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req), 113 - &ctx->completion); 113 + err = crypto_wait_req(crypto_ahash_update(&ctx->req), 114 + &ctx->wait); 114 115 af_alg_free_sg(&ctx->sgl); 115 116 if (err) 116 117 goto unlock; ··· 128 129 goto unlock; 129 130 130 131 ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); 131 - err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), 132 - &ctx->completion); 132 + err = crypto_wait_req(crypto_ahash_final(&ctx->req), 133 + &ctx->wait); 133 134 } 134 135 135 136 unlock: ··· 170 171 } else { 171 172 if (!ctx->more) { 172 173 err = crypto_ahash_init(&ctx->req); 173 - err = af_alg_wait_for_completion(err, &ctx->completion); 174 + err = crypto_wait_req(err, &ctx->wait); 174 175 if (err) 175 176 goto unlock; 176 177 } ··· 178 179 err = crypto_ahash_update(&ctx->req); 179 180 } 180 181 181 - err = af_alg_wait_for_completion(err, &ctx->completion); 182 + err = crypto_wait_req(err, &ctx->wait); 182 183 if (err) 183 184 goto unlock; 184 185 ··· 214 215 ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); 215 216 216 217 if (!result && !ctx->more) { 217 - err = af_alg_wait_for_completion( 218 - crypto_ahash_init(&ctx->req), 219 - &ctx->completion); 218 + err = crypto_wait_req(crypto_ahash_init(&ctx->req), 219 + &ctx->wait); 220 220 if (err) 221 221 goto unlock; 222 222 } 223 223 224 224 if (!result || ctx->more) { 225 225 ctx->more = 0; 226 - err = 
af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), 227 - &ctx->completion); 226 + err = crypto_wait_req(crypto_ahash_final(&ctx->req), 227 + &ctx->wait); 228 228 if (err) 229 229 goto unlock; 230 230 } ··· 474 476 ctx->result = NULL; 475 477 ctx->len = len; 476 478 ctx->more = 0; 477 - af_alg_init_completion(&ctx->completion); 479 + crypto_init_wait(&ctx->wait); 478 480 479 481 ask->private = ctx; 480 482 481 483 ahash_request_set_tfm(&ctx->req, hash); 482 484 ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, 483 - af_alg_complete, &ctx->completion); 485 + crypto_req_done, &ctx->wait); 484 486 485 487 sk->sk_destruct = hash_sock_destruct; 486 488
+4 -5
crypto/algif_skcipher.c
··· 129 129 skcipher_request_set_callback(&areq->cra_u.skcipher_req, 130 130 CRYPTO_TFM_REQ_MAY_SLEEP | 131 131 CRYPTO_TFM_REQ_MAY_BACKLOG, 132 - af_alg_complete, 133 - &ctx->completion); 134 - err = af_alg_wait_for_completion(ctx->enc ? 132 + crypto_req_done, &ctx->wait); 133 + err = crypto_wait_req(ctx->enc ? 135 134 crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) : 136 135 crypto_skcipher_decrypt(&areq->cra_u.skcipher_req), 137 - &ctx->completion); 136 + &ctx->wait); 138 137 } 139 138 140 139 /* AIO operation in progress */ ··· 387 388 ctx->more = 0; 388 389 ctx->merge = 0; 389 390 ctx->enc = 0; 390 - af_alg_init_completion(&ctx->completion); 391 + crypto_init_wait(&ctx->wait); 391 392 392 393 ask->private = ctx; 393 394
+13
crypto/api.c
··· 24 24 #include <linux/sched/signal.h> 25 25 #include <linux/slab.h> 26 26 #include <linux/string.h> 27 + #include <linux/completion.h> 27 28 #include "internal.h" 28 29 29 30 LIST_HEAD(crypto_alg_list); ··· 595 594 return ret; 596 595 } 597 596 EXPORT_SYMBOL_GPL(crypto_has_alg); 597 + 598 + void crypto_req_done(struct crypto_async_request *req, int err) 599 + { 600 + struct crypto_wait *wait = req->data; 601 + 602 + if (err == -EINPROGRESS) 603 + return; 604 + 605 + wait->err = err; 606 + complete(&wait->completion); 607 + } 608 + EXPORT_SYMBOL_GPL(crypto_req_done); 598 609 599 610 MODULE_DESCRIPTION("Cryptographic core API"); 600 611 MODULE_LICENSE("GPL");
+4 -24
crypto/asymmetric_keys/public_key.c
··· 57 57 public_key_signature_free(payload3); 58 58 } 59 59 60 - struct public_key_completion { 61 - struct completion completion; 62 - int err; 63 - }; 64 - 65 - static void public_key_verify_done(struct crypto_async_request *req, int err) 66 - { 67 - struct public_key_completion *compl = req->data; 68 - 69 - if (err == -EINPROGRESS) 70 - return; 71 - 72 - compl->err = err; 73 - complete(&compl->completion); 74 - } 75 - 76 60 /* 77 61 * Verify a signature using a public key. 78 62 */ 79 63 int public_key_verify_signature(const struct public_key *pkey, 80 64 const struct public_key_signature *sig) 81 65 { 82 - struct public_key_completion compl; 66 + struct crypto_wait cwait; 83 67 struct crypto_akcipher *tfm; 84 68 struct akcipher_request *req; 85 69 struct scatterlist sig_sg, digest_sg; ··· 115 131 sg_init_one(&digest_sg, output, outlen); 116 132 akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size, 117 133 outlen); 118 - init_completion(&compl.completion); 134 + crypto_init_wait(&cwait); 119 135 akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | 120 136 CRYPTO_TFM_REQ_MAY_SLEEP, 121 - public_key_verify_done, &compl); 137 + crypto_req_done, &cwait); 122 138 123 139 /* Perform the verification calculation. This doesn't actually do the 124 140 * verification, but rather calculates the hash expected by the 125 141 * signature and returns that to us. 126 142 */ 127 - ret = crypto_akcipher_verify(req); 128 - if ((ret == -EINPROGRESS) || (ret == -EBUSY)) { 129 - wait_for_completion(&compl.completion); 130 - ret = compl.err; 131 - } 143 + ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); 132 144 if (ret < 0) 133 145 goto out_free_output; 134 146
+1 -3
crypto/cryptd.c
··· 137 137 int cpu, err; 138 138 struct cryptd_cpu_queue *cpu_queue; 139 139 atomic_t *refcnt; 140 - bool may_backlog; 141 140 142 141 cpu = get_cpu(); 143 142 cpu_queue = this_cpu_ptr(queue->cpu_queue); 144 143 err = crypto_enqueue_request(&cpu_queue->queue, request); 145 144 146 145 refcnt = crypto_tfm_ctx(request->tfm); 147 - may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG; 148 146 149 - if (err == -EBUSY && !may_backlog) 147 + if (err == -ENOSPC) 150 148 goto out_put_cpu; 151 149 152 150 queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+2 -4
crypto/cts.c
··· 136 136 goto out; 137 137 138 138 err = cts_cbc_encrypt(req); 139 - if (err == -EINPROGRESS || 140 - (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 139 + if (err == -EINPROGRESS || err == -EBUSY) 141 140 return; 142 141 143 142 out: ··· 228 229 goto out; 229 230 230 231 err = cts_cbc_decrypt(req); 231 - if (err == -EINPROGRESS || 232 - (err == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 232 + if (err == -EINPROGRESS || err == -EBUSY) 233 233 return; 234 234 235 235 out:
+13 -23
crypto/dh.c
··· 21 21 MPI xa; 22 22 }; 23 23 24 - static inline void dh_clear_params(struct dh_ctx *ctx) 24 + static void dh_clear_ctx(struct dh_ctx *ctx) 25 25 { 26 26 mpi_free(ctx->p); 27 27 mpi_free(ctx->g); 28 - ctx->p = NULL; 29 - ctx->g = NULL; 30 - } 31 - 32 - static void dh_free_ctx(struct dh_ctx *ctx) 33 - { 34 - dh_clear_params(ctx); 35 28 mpi_free(ctx->xa); 36 - ctx->xa = NULL; 29 + memset(ctx, 0, sizeof(*ctx)); 37 30 } 38 31 39 32 /* ··· 53 60 54 61 static int dh_set_params(struct dh_ctx *ctx, struct dh *params) 55 62 { 56 - if (unlikely(!params->p || !params->g)) 57 - return -EINVAL; 58 - 59 63 if (dh_check_params_length(params->p_size << 3)) 60 64 return -EINVAL; 61 65 ··· 61 71 return -EINVAL; 62 72 63 73 ctx->g = mpi_read_raw_data(params->g, params->g_size); 64 - if (!ctx->g) { 65 - mpi_free(ctx->p); 74 + if (!ctx->g) 66 75 return -EINVAL; 67 - } 68 76 69 77 return 0; 70 78 } ··· 74 86 struct dh params; 75 87 76 88 /* Free the old MPI key if any */ 77 - dh_free_ctx(ctx); 89 + dh_clear_ctx(ctx); 78 90 79 91 if (crypto_dh_decode_key(buf, len, &params) < 0) 80 - return -EINVAL; 92 + goto err_clear_ctx; 81 93 82 94 if (dh_set_params(ctx, &params) < 0) 83 - return -EINVAL; 95 + goto err_clear_ctx; 84 96 85 97 ctx->xa = mpi_read_raw_data(params.key, params.key_size); 86 - if (!ctx->xa) { 87 - dh_clear_params(ctx); 88 - return -EINVAL; 89 - } 98 + if (!ctx->xa) 99 + goto err_clear_ctx; 90 100 91 101 return 0; 102 + 103 + err_clear_ctx: 104 + dh_clear_ctx(ctx); 105 + return -EINVAL; 92 106 } 93 107 94 108 static int dh_compute_value(struct kpp_request *req) ··· 148 158 { 149 159 struct dh_ctx *ctx = dh_get_ctx(tfm); 150 160 151 - dh_free_ctx(ctx); 161 + dh_clear_ctx(ctx); 152 162 } 153 163 154 164 static struct kpp_alg dh = {
+18 -2
crypto/dh_helper.c
··· 28 28 return src + size; 29 29 } 30 30 31 - static inline int dh_data_size(const struct dh *p) 31 + static inline unsigned int dh_data_size(const struct dh *p) 32 32 { 33 33 return p->key_size + p->p_size + p->g_size; 34 34 } 35 35 36 - int crypto_dh_key_len(const struct dh *p) 36 + unsigned int crypto_dh_key_len(const struct dh *p) 37 37 { 38 38 return DH_KPP_SECRET_MIN_SIZE + dh_data_size(p); 39 39 } ··· 83 83 if (secret.len != crypto_dh_key_len(params)) 84 84 return -EINVAL; 85 85 86 + /* 87 + * Don't permit the buffer for 'key' or 'g' to be larger than 'p', since 88 + * some drivers assume otherwise. 89 + */ 90 + if (params->key_size > params->p_size || 91 + params->g_size > params->p_size) 92 + return -EINVAL; 93 + 86 94 /* Don't allocate memory. Set pointers to data within 87 95 * the given buffer 88 96 */ 89 97 params->key = (void *)ptr; 90 98 params->p = (void *)(ptr + params->key_size); 91 99 params->g = (void *)(ptr + params->key_size + params->p_size); 100 + 101 + /* 102 + * Don't permit 'p' to be 0. It's not a prime number, and it's subject 103 + * to corner cases such as 'mod 0' being undefined or 104 + * crypto_kpp_maxsize() returning 0. 105 + */ 106 + if (memchr_inv(params->p, 0, params->p_size) == NULL) 107 + return -EINVAL; 92 108 93 109 return 0; 94 110 }
+9 -27
crypto/drbg.c
··· 1651 1651 return 0; 1652 1652 } 1653 1653 1654 - static void drbg_skcipher_cb(struct crypto_async_request *req, int error) 1655 - { 1656 - struct drbg_state *drbg = req->data; 1657 - 1658 - if (error == -EINPROGRESS) 1659 - return; 1660 - drbg->ctr_async_err = error; 1661 - complete(&drbg->ctr_completion); 1662 - } 1663 - 1664 1654 static int drbg_init_sym_kernel(struct drbg_state *drbg) 1665 1655 { 1666 1656 struct crypto_cipher *tfm; ··· 1681 1691 return PTR_ERR(sk_tfm); 1682 1692 } 1683 1693 drbg->ctr_handle = sk_tfm; 1684 - init_completion(&drbg->ctr_completion); 1694 + crypto_init_wait(&drbg->ctr_wait); 1685 1695 1686 1696 req = skcipher_request_alloc(sk_tfm, GFP_KERNEL); 1687 1697 if (!req) { ··· 1690 1700 return -ENOMEM; 1691 1701 } 1692 1702 drbg->ctr_req = req; 1693 - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 1694 - drbg_skcipher_cb, drbg); 1703 + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | 1704 + CRYPTO_TFM_REQ_MAY_SLEEP, 1705 + crypto_req_done, &drbg->ctr_wait); 1695 1706 1696 1707 alignmask = crypto_skcipher_alignmask(sk_tfm); 1697 1708 drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask, ··· 1753 1762 /* Output buffer may not be valid for SGL, use scratchpad */ 1754 1763 skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out, 1755 1764 cryptlen, drbg->V); 1756 - ret = crypto_skcipher_encrypt(drbg->ctr_req); 1757 - switch (ret) { 1758 - case 0: 1759 - break; 1760 - case -EINPROGRESS: 1761 - case -EBUSY: 1762 - wait_for_completion(&drbg->ctr_completion); 1763 - if (!drbg->ctr_async_err) { 1764 - reinit_completion(&drbg->ctr_completion); 1765 - break; 1766 - } 1767 - default: 1765 + ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req), 1766 + &drbg->ctr_wait); 1767 + if (ret) 1768 1768 goto out; 1769 - } 1770 - init_completion(&drbg->ctr_completion); 1769 + 1770 + crypto_init_wait(&drbg->ctr_wait); 1771 1771 1772 1772 memcpy(outbuf, drbg->outscratchpad, cryptlen); 1773 1773
-6
crypto/ecdh.c
··· 131 131 return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1); 132 132 } 133 133 134 - static void no_exit_tfm(struct crypto_kpp *tfm) 135 - { 136 - return; 137 - } 138 - 139 134 static struct kpp_alg ecdh = { 140 135 .set_secret = ecdh_set_secret, 141 136 .generate_public_key = ecdh_compute_value, 142 137 .compute_shared_secret = ecdh_compute_value, 143 138 .max_size = ecdh_max_size, 144 - .exit = no_exit_tfm, 145 139 .base = { 146 140 .cra_name = "ecdh", 147 141 .cra_driver_name = "ecdh-generic",
+1 -1
crypto/ecdh_helper.c
··· 28 28 return src + sz; 29 29 } 30 30 31 - int crypto_ecdh_key_len(const struct ecdh *params) 31 + unsigned int crypto_ecdh_key_len(const struct ecdh *params) 32 32 { 33 33 return ECDH_KPP_SECRET_MIN_SIZE + params->key_size; 34 34 }
+18 -37
crypto/gcm.c
··· 14 14 #include <crypto/internal/hash.h> 15 15 #include <crypto/null.h> 16 16 #include <crypto/scatterwalk.h> 17 + #include <crypto/gcm.h> 17 18 #include <crypto/hash.h> 18 19 #include "internal.h" 19 - #include <linux/completion.h> 20 20 #include <linux/err.h> 21 21 #include <linux/init.h> 22 22 #include <linux/kernel.h> ··· 78 78 } u; 79 79 }; 80 80 81 - struct crypto_gcm_setkey_result { 82 - int err; 83 - struct completion completion; 84 - }; 85 - 86 81 static struct { 87 82 u8 buf[16]; 88 83 struct scatterlist sg; ··· 93 98 return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); 94 99 } 95 100 96 - static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) 97 - { 98 - struct crypto_gcm_setkey_result *result = req->data; 99 - 100 - if (err == -EINPROGRESS) 101 - return; 102 - 103 - result->err = err; 104 - complete(&result->completion); 105 - } 106 - 107 101 static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, 108 102 unsigned int keylen) 109 103 { ··· 103 119 be128 hash; 104 120 u8 iv[16]; 105 121 106 - struct crypto_gcm_setkey_result result; 122 + struct crypto_wait wait; 107 123 108 124 struct scatterlist sg[1]; 109 125 struct skcipher_request req; ··· 124 140 if (!data) 125 141 return -ENOMEM; 126 142 127 - init_completion(&data->result.completion); 143 + crypto_init_wait(&data->wait); 128 144 sg_init_one(data->sg, &data->hash, sizeof(data->hash)); 129 145 skcipher_request_set_tfm(&data->req, ctr); 130 146 skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | 131 147 CRYPTO_TFM_REQ_MAY_BACKLOG, 132 - crypto_gcm_setkey_done, 133 - &data->result); 148 + crypto_req_done, 149 + &data->wait); 134 150 skcipher_request_set_crypt(&data->req, data->sg, data->sg, 135 151 sizeof(data->hash), data->iv); 136 152 137 - err = crypto_skcipher_encrypt(&data->req); 138 - if (err == -EINPROGRESS || err == -EBUSY) { 139 - wait_for_completion(&data->result.completion); 140 - err = data->result.err; 141 - } 153 + err 
= crypto_wait_req(crypto_skcipher_encrypt(&data->req), 154 + &data->wait); 142 155 143 156 if (err) 144 157 goto out; ··· 178 197 struct scatterlist *sg; 179 198 180 199 memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag)); 181 - memcpy(pctx->iv, req->iv, 12); 182 - memcpy(pctx->iv + 12, &counter, 4); 200 + memcpy(pctx->iv, req->iv, GCM_AES_IV_SIZE); 201 + memcpy(pctx->iv + GCM_AES_IV_SIZE, &counter, 4); 183 202 184 203 sg_init_table(pctx->src, 3); 185 204 sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag)); ··· 676 695 inst->alg.base.cra_alignmask = ghash->base.cra_alignmask | 677 696 ctr->base.cra_alignmask; 678 697 inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx); 679 - inst->alg.ivsize = 12; 698 + inst->alg.ivsize = GCM_AES_IV_SIZE; 680 699 inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr); 681 700 inst->alg.maxauthsize = 16; 682 701 inst->alg.init = crypto_gcm_init_tfm; ··· 813 832 u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child), 814 833 crypto_aead_alignmask(child) + 1); 815 834 816 - scatterwalk_map_and_copy(iv + 12, req->src, 0, req->assoclen - 8, 0); 835 + scatterwalk_map_and_copy(iv + GCM_AES_IV_SIZE, req->src, 0, req->assoclen - 8, 0); 817 836 818 837 memcpy(iv, ctx->nonce, 4); 819 838 memcpy(iv + 4, req->iv, 8); 820 839 821 840 sg_init_table(rctx->src, 3); 822 - sg_set_buf(rctx->src, iv + 12, req->assoclen - 8); 841 + sg_set_buf(rctx->src, iv + GCM_AES_IV_SIZE, req->assoclen - 8); 823 842 sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen); 824 843 if (sg != rctx->src + 1) 825 844 sg_chain(rctx->src, 2, sg); 826 845 827 846 if (req->src != req->dst) { 828 847 sg_init_table(rctx->dst, 3); 829 - sg_set_buf(rctx->dst, iv + 12, req->assoclen - 8); 848 + sg_set_buf(rctx->dst, iv + GCM_AES_IV_SIZE, req->assoclen - 8); 830 849 sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen); 831 850 if (sg != rctx->dst + 1) 832 851 sg_chain(rctx->dst, 2, sg); ··· 938 957 err = -EINVAL; 939 958 940 959 /* 
Underlying IV size must be 12. */ 941 - if (crypto_aead_alg_ivsize(alg) != 12) 960 + if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE) 942 961 goto out_drop_alg; 943 962 944 963 /* Not a stream cipher? */ ··· 961 980 962 981 inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx); 963 982 964 - inst->alg.ivsize = 8; 983 + inst->alg.ivsize = GCM_RFC4106_IV_SIZE; 965 984 inst->alg.chunksize = crypto_aead_alg_chunksize(alg); 966 985 inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); 967 986 ··· 1115 1134 tfm, 1116 1135 sizeof(struct crypto_rfc4543_req_ctx) + 1117 1136 ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + 1118 - align + 12); 1137 + align + GCM_AES_IV_SIZE); 1119 1138 1120 1139 return 0; 1121 1140 ··· 1180 1199 err = -EINVAL; 1181 1200 1182 1201 /* Underlying IV size must be 12. */ 1183 - if (crypto_aead_alg_ivsize(alg) != 12) 1202 + if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE) 1184 1203 goto out_drop_alg; 1185 1204 1186 1205 /* Not a stream cipher? */ ··· 1203 1222 1204 1223 inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx); 1205 1224 1206 - inst->alg.ivsize = 8; 1225 + inst->alg.ivsize = GCM_RFC4543_IV_SIZE; 1207 1226 inst->alg.chunksize = crypto_aead_alg_chunksize(alg); 1208 1227 inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); 1209 1228
+13
crypto/gf128mul.c
··· 156 156 x->b = cpu_to_be64((b << 8) ^ _tt); 157 157 } 158 158 159 + void gf128mul_x8_ble(le128 *r, const le128 *x) 160 + { 161 + u64 a = le64_to_cpu(x->a); 162 + u64 b = le64_to_cpu(x->b); 163 + 164 + /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */ 165 + u64 _tt = gf128mul_table_be[a >> 56]; 166 + 167 + r->a = cpu_to_le64((a << 8) | (b >> 56)); 168 + r->b = cpu_to_le64((b << 8) ^ _tt); 169 + } 170 + EXPORT_SYMBOL(gf128mul_x8_ble); 171 + 159 172 void gf128mul_lle(be128 *r, const be128 *b) 160 173 { 161 174 be128 p[8];
+26 -58
crypto/keywrap.c
··· 93 93 94 94 struct crypto_kw_block { 95 95 #define SEMIBSIZE 8 96 - u8 A[SEMIBSIZE]; 97 - u8 R[SEMIBSIZE]; 96 + __be64 A; 97 + __be64 R; 98 98 }; 99 - 100 - /* convert 64 bit integer into its string representation */ 101 - static inline void crypto_kw_cpu_to_be64(u64 val, u8 *buf) 102 - { 103 - __be64 *a = (__be64 *)buf; 104 - 105 - *a = cpu_to_be64(val); 106 - } 107 99 108 100 /* 109 101 * Fast forward the SGL to the "end" length minus SEMIBSIZE. ··· 131 139 struct crypto_blkcipher *tfm = desc->tfm; 132 140 struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm); 133 141 struct crypto_cipher *child = ctx->child; 134 - 135 - unsigned long alignmask = max_t(unsigned long, SEMIBSIZE, 136 - crypto_cipher_alignmask(child)); 137 - unsigned int i; 138 - 139 - u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask]; 140 - struct crypto_kw_block *block = (struct crypto_kw_block *) 141 - PTR_ALIGN(blockbuf + 0, alignmask + 1); 142 - 143 - u64 t = 6 * ((nbytes) >> 3); 142 + struct crypto_kw_block block; 144 143 struct scatterlist *lsrc, *ldst; 144 + u64 t = 6 * ((nbytes) >> 3); 145 + unsigned int i; 145 146 int ret = 0; 146 147 147 148 /* ··· 145 160 return -EINVAL; 146 161 147 162 /* Place the IV into block A */ 148 - memcpy(block->A, desc->info, SEMIBSIZE); 163 + memcpy(&block.A, desc->info, SEMIBSIZE); 149 164 150 165 /* 151 166 * src scatterlist is read-only. dst scatterlist is r/w. 
During the ··· 156 171 ldst = dst; 157 172 158 173 for (i = 0; i < 6; i++) { 159 - u8 tbe_buffer[SEMIBSIZE + alignmask]; 160 - /* alignment for the crypto_xor and the _to_be64 operation */ 161 - u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1); 162 - unsigned int tmp_nbytes = nbytes; 163 174 struct scatter_walk src_walk, dst_walk; 175 + unsigned int tmp_nbytes = nbytes; 164 176 165 177 while (tmp_nbytes) { 166 178 /* move pointer by tmp_nbytes in the SGL */ 167 179 crypto_kw_scatterlist_ff(&src_walk, lsrc, tmp_nbytes); 168 180 /* get the source block */ 169 - scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE, 181 + scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE, 170 182 false); 171 183 172 - /* perform KW operation: get counter as byte string */ 173 - crypto_kw_cpu_to_be64(t, tbe); 174 184 /* perform KW operation: modify IV with counter */ 175 - crypto_xor(block->A, tbe, SEMIBSIZE); 185 + block.A ^= cpu_to_be64(t); 176 186 t--; 177 187 /* perform KW operation: decrypt block */ 178 - crypto_cipher_decrypt_one(child, (u8*)block, 179 - (u8*)block); 188 + crypto_cipher_decrypt_one(child, (u8*)&block, 189 + (u8*)&block); 180 190 181 191 /* move pointer by tmp_nbytes in the SGL */ 182 192 crypto_kw_scatterlist_ff(&dst_walk, ldst, tmp_nbytes); 183 193 /* Copy block->R into place */ 184 - scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE, 194 + scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE, 185 195 true); 186 196 187 197 tmp_nbytes -= SEMIBSIZE; ··· 188 208 } 189 209 190 210 /* Perform authentication check */ 191 - if (crypto_memneq("\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", block->A, 192 - SEMIBSIZE)) 211 + if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6)) 193 212 ret = -EBADMSG; 194 213 195 - memzero_explicit(block, sizeof(struct crypto_kw_block)); 214 + memzero_explicit(&block, sizeof(struct crypto_kw_block)); 196 215 197 216 return ret; 198 217 } ··· 203 224 struct crypto_blkcipher *tfm = desc->tfm; 204 225 struct crypto_kw_ctx *ctx = 
crypto_blkcipher_ctx(tfm); 205 226 struct crypto_cipher *child = ctx->child; 206 - 207 - unsigned long alignmask = max_t(unsigned long, SEMIBSIZE, 208 - crypto_cipher_alignmask(child)); 209 - unsigned int i; 210 - 211 - u8 blockbuf[sizeof(struct crypto_kw_block) + alignmask]; 212 - struct crypto_kw_block *block = (struct crypto_kw_block *) 213 - PTR_ALIGN(blockbuf + 0, alignmask + 1); 214 - 215 - u64 t = 1; 227 + struct crypto_kw_block block; 216 228 struct scatterlist *lsrc, *ldst; 229 + u64 t = 1; 230 + unsigned int i; 217 231 218 232 /* 219 233 * Require at least 2 semiblocks (note, the 3rd semiblock that is ··· 221 249 * Place the predefined IV into block A -- for encrypt, the caller 222 250 * does not need to provide an IV, but he needs to fetch the final IV. 223 251 */ 224 - memcpy(block->A, "\xA6\xA6\xA6\xA6\xA6\xA6\xA6\xA6", SEMIBSIZE); 252 + block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6); 225 253 226 254 /* 227 255 * src scatterlist is read-only. dst scatterlist is r/w. During the ··· 232 260 ldst = dst; 233 261 234 262 for (i = 0; i < 6; i++) { 235 - u8 tbe_buffer[SEMIBSIZE + alignmask]; 236 - u8 *tbe = PTR_ALIGN(tbe_buffer + 0, alignmask + 1); 237 - unsigned int tmp_nbytes = nbytes; 238 263 struct scatter_walk src_walk, dst_walk; 264 + unsigned int tmp_nbytes = nbytes; 239 265 240 266 scatterwalk_start(&src_walk, lsrc); 241 267 scatterwalk_start(&dst_walk, ldst); 242 268 243 269 while (tmp_nbytes) { 244 270 /* get the source block */ 245 - scatterwalk_copychunks(block->R, &src_walk, SEMIBSIZE, 271 + scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE, 246 272 false); 247 273 248 274 /* perform KW operation: encrypt block */ 249 - crypto_cipher_encrypt_one(child, (u8 *)block, 250 - (u8 *)block); 251 - /* perform KW operation: get counter as byte string */ 252 - crypto_kw_cpu_to_be64(t, tbe); 275 + crypto_cipher_encrypt_one(child, (u8 *)&block, 276 + (u8 *)&block); 253 277 /* perform KW operation: modify IV with counter */ 254 - crypto_xor(block->A, tbe, 
SEMIBSIZE); 278 + block.A ^= cpu_to_be64(t); 255 279 t++; 256 280 257 281 /* Copy block->R into place */ 258 - scatterwalk_copychunks(block->R, &dst_walk, SEMIBSIZE, 282 + scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE, 259 283 true); 260 284 261 285 tmp_nbytes -= SEMIBSIZE; ··· 263 295 } 264 296 265 297 /* establish the IV for the caller to pick up */ 266 - memcpy(desc->info, block->A, SEMIBSIZE); 298 + memcpy(desc->info, &block.A, SEMIBSIZE); 267 299 268 - memzero_explicit(block, sizeof(struct crypto_kw_block)); 300 + memzero_explicit(&block, sizeof(struct crypto_kw_block)); 269 301 270 302 return 0; 271 303 }
+8 -9
crypto/lrw.c
··· 328 328 crypto_skcipher_encrypt(subreq) ?: 329 329 post_crypt(req); 330 330 331 - if (err == -EINPROGRESS || 332 - (err == -EBUSY && 333 - req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 331 + if (err == -EINPROGRESS || err == -EBUSY) 334 332 return err; 335 333 } 336 334 ··· 378 380 crypto_skcipher_decrypt(subreq) ?: 379 381 post_crypt(req); 380 382 381 - if (err == -EINPROGRESS || 382 - (err == -EBUSY && 383 - req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 383 + if (err == -EINPROGRESS || err == -EBUSY) 384 384 return err; 385 385 } 386 386 ··· 606 610 ecb_name[len - 1] = 0; 607 611 608 612 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, 609 - "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) 610 - return -ENAMETOOLONG; 611 - } 613 + "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) { 614 + err = -ENAMETOOLONG; 615 + goto err_drop_spawn; 616 + } 617 + } else 618 + goto err_drop_spawn; 612 619 613 620 inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC; 614 621 inst->alg.base.cra_priority = alg->base.cra_priority;
-2
crypto/rmd128.c
··· 213 213 state[2] = state[3] + aa + bbb; 214 214 state[3] = state[0] + bb + ccc; 215 215 state[0] = ddd; 216 - 217 - return; 218 216 } 219 217 220 218 static int rmd128_init(struct shash_desc *desc)
-2
crypto/rmd160.c
··· 256 256 state[3] = state[4] + aa + bbb; 257 257 state[4] = state[0] + bb + ccc; 258 258 state[0] = ddd; 259 - 260 - return; 261 259 } 262 260 263 261 static int rmd160_init(struct shash_desc *desc)
-2
crypto/rmd256.c
··· 228 228 state[5] += bbb; 229 229 state[6] += ccc; 230 230 state[7] += ddd; 231 - 232 - return; 233 231 } 234 232 235 233 static int rmd256_init(struct shash_desc *desc)
-2
crypto/rmd320.c
··· 275 275 state[7] += ccc; 276 276 state[8] += ddd; 277 277 state[9] += eee; 278 - 279 - return; 280 278 } 281 279 282 280 static int rmd320_init(struct shash_desc *desc)
+4 -12
crypto/rsa-pkcs1pad.c
··· 279 279 req->dst, ctx->key_size - 1, req->dst_len); 280 280 281 281 err = crypto_akcipher_encrypt(&req_ctx->child_req); 282 - if (err != -EINPROGRESS && 283 - (err != -EBUSY || 284 - !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) 282 + if (err != -EINPROGRESS && err != -EBUSY) 285 283 return pkcs1pad_encrypt_sign_complete(req, err); 286 284 287 285 return err; ··· 381 383 ctx->key_size); 382 384 383 385 err = crypto_akcipher_decrypt(&req_ctx->child_req); 384 - if (err != -EINPROGRESS && 385 - (err != -EBUSY || 386 - !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) 386 + if (err != -EINPROGRESS && err != -EBUSY) 387 387 return pkcs1pad_decrypt_complete(req, err); 388 388 389 389 return err; ··· 436 440 req->dst, ctx->key_size - 1, req->dst_len); 437 441 438 442 err = crypto_akcipher_sign(&req_ctx->child_req); 439 - if (err != -EINPROGRESS && 440 - (err != -EBUSY || 441 - !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) 443 + if (err != -EINPROGRESS && err != -EBUSY) 442 444 return pkcs1pad_encrypt_sign_complete(req, err); 443 445 444 446 return err; ··· 555 561 ctx->key_size); 556 562 557 563 err = crypto_akcipher_verify(&req_ctx->child_req); 558 - if (err != -EINPROGRESS && 559 - (err != -EBUSY || 560 - !(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) 564 + if (err != -EINPROGRESS && err != -EBUSY) 561 565 return pkcs1pad_verify_complete(req, err); 562 566 563 567 return err;
+210
crypto/sm3_generic.c
··· 1 + /* 2 + * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and 3 + * described at https://tools.ietf.org/html/draft-shen-sm3-hash-01 4 + * 5 + * Copyright (C) 2017 ARM Limited or its affiliates. 6 + * Written by Gilad Ben-Yossef <gilad@benyossef.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + * You should have received a copy of the GNU General Public License 18 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 19 + */ 20 + 21 + #include <crypto/internal/hash.h> 22 + #include <linux/init.h> 23 + #include <linux/module.h> 24 + #include <linux/mm.h> 25 + #include <linux/types.h> 26 + #include <crypto/sm3.h> 27 + #include <crypto/sm3_base.h> 28 + #include <linux/bitops.h> 29 + #include <asm/byteorder.h> 30 + #include <asm/unaligned.h> 31 + 32 + const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = { 33 + 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, 34 + 0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F, 35 + 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74, 36 + 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B 37 + }; 38 + EXPORT_SYMBOL_GPL(sm3_zero_message_hash); 39 + 40 + static inline u32 p0(u32 x) 41 + { 42 + return x ^ rol32(x, 9) ^ rol32(x, 17); 43 + } 44 + 45 + static inline u32 p1(u32 x) 46 + { 47 + return x ^ rol32(x, 15) ^ rol32(x, 23); 48 + } 49 + 50 + static inline u32 ff(unsigned int n, u32 a, u32 b, u32 c) 51 + { 52 + return (n < 16) ? (a ^ b ^ c) : ((a & b) | (a & c) | (b & c)); 53 + } 54 + 55 + static inline u32 gg(unsigned int n, u32 e, u32 f, u32 g) 56 + { 57 + return (n < 16) ? 
(e ^ f ^ g) : ((e & f) | ((~e) & g)); 58 + } 59 + 60 + static inline u32 t(unsigned int n) 61 + { 62 + return (n < 16) ? SM3_T1 : SM3_T2; 63 + } 64 + 65 + static void sm3_expand(u32 *t, u32 *w, u32 *wt) 66 + { 67 + int i; 68 + unsigned int tmp; 69 + 70 + /* load the input */ 71 + for (i = 0; i <= 15; i++) 72 + w[i] = get_unaligned_be32((__u32 *)t + i); 73 + 74 + for (i = 16; i <= 67; i++) { 75 + tmp = w[i - 16] ^ w[i - 9] ^ rol32(w[i - 3], 15); 76 + w[i] = p1(tmp) ^ (rol32(w[i - 13], 7)) ^ w[i - 6]; 77 + } 78 + 79 + for (i = 0; i <= 63; i++) 80 + wt[i] = w[i] ^ w[i + 4]; 81 + } 82 + 83 + static void sm3_compress(u32 *w, u32 *wt, u32 *m) 84 + { 85 + u32 ss1; 86 + u32 ss2; 87 + u32 tt1; 88 + u32 tt2; 89 + u32 a, b, c, d, e, f, g, h; 90 + int i; 91 + 92 + a = m[0]; 93 + b = m[1]; 94 + c = m[2]; 95 + d = m[3]; 96 + e = m[4]; 97 + f = m[5]; 98 + g = m[6]; 99 + h = m[7]; 100 + 101 + for (i = 0; i <= 63; i++) { 102 + 103 + ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7); 104 + 105 + ss2 = ss1 ^ rol32(a, 12); 106 + 107 + tt1 = ff(i, a, b, c) + d + ss2 + *wt; 108 + wt++; 109 + 110 + tt2 = gg(i, e, f, g) + h + ss1 + *w; 111 + w++; 112 + 113 + d = c; 114 + c = rol32(b, 9); 115 + b = a; 116 + a = tt1; 117 + h = g; 118 + g = rol32(f, 19); 119 + f = e; 120 + e = p0(tt2); 121 + } 122 + 123 + m[0] = a ^ m[0]; 124 + m[1] = b ^ m[1]; 125 + m[2] = c ^ m[2]; 126 + m[3] = d ^ m[3]; 127 + m[4] = e ^ m[4]; 128 + m[5] = f ^ m[5]; 129 + m[6] = g ^ m[6]; 130 + m[7] = h ^ m[7]; 131 + 132 + a = b = c = d = e = f = g = h = ss1 = ss2 = tt1 = tt2 = 0; 133 + } 134 + 135 + static void sm3_transform(struct sm3_state *sst, u8 const *src) 136 + { 137 + unsigned int w[68]; 138 + unsigned int wt[64]; 139 + 140 + sm3_expand((u32 *)src, w, wt); 141 + sm3_compress(w, wt, sst->state); 142 + 143 + memzero_explicit(w, sizeof(w)); 144 + memzero_explicit(wt, sizeof(wt)); 145 + } 146 + 147 + static void sm3_generic_block_fn(struct sm3_state *sst, u8 const *src, 148 + int blocks) 149 + { 150 + while 
(blocks--) { 151 + sm3_transform(sst, src); 152 + src += SM3_BLOCK_SIZE; 153 + } 154 + } 155 + 156 + int crypto_sm3_update(struct shash_desc *desc, const u8 *data, 157 + unsigned int len) 158 + { 159 + return sm3_base_do_update(desc, data, len, sm3_generic_block_fn); 160 + } 161 + EXPORT_SYMBOL(crypto_sm3_update); 162 + 163 + static int sm3_final(struct shash_desc *desc, u8 *out) 164 + { 165 + sm3_base_do_finalize(desc, sm3_generic_block_fn); 166 + return sm3_base_finish(desc, out); 167 + } 168 + 169 + int crypto_sm3_finup(struct shash_desc *desc, const u8 *data, 170 + unsigned int len, u8 *hash) 171 + { 172 + sm3_base_do_update(desc, data, len, sm3_generic_block_fn); 173 + return sm3_final(desc, hash); 174 + } 175 + EXPORT_SYMBOL(crypto_sm3_finup); 176 + 177 + static struct shash_alg sm3_alg = { 178 + .digestsize = SM3_DIGEST_SIZE, 179 + .init = sm3_base_init, 180 + .update = crypto_sm3_update, 181 + .final = sm3_final, 182 + .finup = crypto_sm3_finup, 183 + .descsize = sizeof(struct sm3_state), 184 + .base = { 185 + .cra_name = "sm3", 186 + .cra_driver_name = "sm3-generic", 187 + .cra_flags = CRYPTO_ALG_TYPE_SHASH, 188 + .cra_blocksize = SM3_BLOCK_SIZE, 189 + .cra_module = THIS_MODULE, 190 + } 191 + }; 192 + 193 + static int __init sm3_generic_mod_init(void) 194 + { 195 + return crypto_register_shash(&sm3_alg); 196 + } 197 + 198 + static void __exit sm3_generic_mod_fini(void) 199 + { 200 + crypto_unregister_shash(&sm3_alg); 201 + } 202 + 203 + module_init(sm3_generic_mod_init); 204 + module_exit(sm3_generic_mod_fini); 205 + 206 + MODULE_LICENSE("GPL v2"); 207 + MODULE_DESCRIPTION("SM3 Secure Hash Algorithm"); 208 + 209 + MODULE_ALIAS_CRYPTO("sm3"); 210 + MODULE_ALIAS_CRYPTO("sm3-generic");
+91 -118
crypto/tcrypt.c
··· 70 70 static char *tvmem[TVMEMSIZE]; 71 71 72 72 static char *check[] = { 73 - "des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", 73 + "des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3", 74 74 "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", 75 75 "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", 76 76 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", ··· 79 79 NULL 80 80 }; 81 81 82 - struct tcrypt_result { 83 - struct completion completion; 84 - int err; 85 - }; 86 - 87 - static void tcrypt_complete(struct crypto_async_request *req, int err) 88 - { 89 - struct tcrypt_result *res = req->data; 90 - 91 - if (err == -EINPROGRESS) 92 - return; 93 - 94 - res->err = err; 95 - complete(&res->completion); 96 - } 97 - 98 82 static inline int do_one_aead_op(struct aead_request *req, int ret) 99 83 { 100 - if (ret == -EINPROGRESS || ret == -EBUSY) { 101 - struct tcrypt_result *tr = req->base.data; 84 + struct crypto_wait *wait = req->base.data; 102 85 103 - ret = wait_for_completion_interruptible(&tr->completion); 104 - if (!ret) 105 - ret = tr->err; 106 - reinit_completion(&tr->completion); 107 - } 108 - 109 - return ret; 86 + return crypto_wait_req(ret, wait); 110 87 } 111 88 112 89 static int test_aead_jiffies(struct aead_request *req, int enc, ··· 225 248 char *axbuf[XBUFSIZE]; 226 249 unsigned int *b_size; 227 250 unsigned int iv_len; 228 - struct tcrypt_result result; 251 + struct crypto_wait wait; 229 252 230 253 iv = kzalloc(MAX_IVLEN, GFP_KERNEL); 231 254 if (!iv) ··· 261 284 goto out_notfm; 262 285 } 263 286 264 - init_completion(&result.completion); 287 + crypto_init_wait(&wait); 265 288 printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo, 266 289 get_driver_name(crypto_aead, tfm), e); 267 290 ··· 273 296 } 274 297 275 298 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 276 - tcrypt_complete, &result); 299 + crypto_req_done, &wait); 277 300 278 301 i = 0; 279 302 
do { ··· 317 340 } 318 341 319 342 sg_init_aead(sg, xbuf, 320 - *b_size + (enc ? authsize : 0)); 343 + *b_size + (enc ? 0 : authsize)); 321 344 322 345 sg_init_aead(sgout, xoutbuf, 323 346 *b_size + (enc ? authsize : 0)); ··· 325 348 sg_set_buf(&sg[0], assoc, aad_size); 326 349 sg_set_buf(&sgout[0], assoc, aad_size); 327 350 328 - aead_request_set_crypt(req, sg, sgout, *b_size, iv); 351 + aead_request_set_crypt(req, sg, sgout, 352 + *b_size + (enc ? 0 : authsize), 353 + iv); 329 354 aead_request_set_ad(req, aad_size); 330 355 331 356 if (secs) ··· 360 381 testmgr_free_buf(xbuf); 361 382 out_noxbuf: 362 383 kfree(iv); 363 - return; 364 384 } 365 385 366 386 static void test_hash_sg_init(struct scatterlist *sg) ··· 375 397 376 398 static inline int do_one_ahash_op(struct ahash_request *req, int ret) 377 399 { 378 - if (ret == -EINPROGRESS || ret == -EBUSY) { 379 - struct tcrypt_result *tr = req->base.data; 400 + struct crypto_wait *wait = req->base.data; 380 401 381 - wait_for_completion(&tr->completion); 382 - reinit_completion(&tr->completion); 383 - ret = tr->err; 384 - } 385 - return ret; 402 + return crypto_wait_req(ret, wait); 386 403 } 387 404 388 405 struct test_mb_ahash_data { 389 406 struct scatterlist sg[TVMEMSIZE]; 390 407 char result[64]; 391 408 struct ahash_request *req; 392 - struct tcrypt_result tresult; 409 + struct crypto_wait wait; 393 410 char *xbuf[XBUFSIZE]; 394 411 }; 395 412 ··· 413 440 if (testmgr_alloc_buf(data[i].xbuf)) 414 441 goto out; 415 442 416 - init_completion(&data[i].tresult.completion); 443 + crypto_init_wait(&data[i].wait); 417 444 418 445 data[i].req = ahash_request_alloc(tfm, GFP_KERNEL); 419 446 if (!data[i].req) { ··· 422 449 goto out; 423 450 } 424 451 425 - ahash_request_set_callback(data[i].req, 0, 426 - tcrypt_complete, &data[i].tresult); 452 + ahash_request_set_callback(data[i].req, 0, crypto_req_done, 453 + &data[i].wait); 427 454 test_hash_sg_init(data[i].sg); 428 455 } 429 456 ··· 465 492 if (ret) 466 493 break; 467 
494 468 - complete(&data[k].tresult.completion); 469 - data[k].tresult.err = 0; 495 + crypto_req_done(&data[k].req->base, 0); 470 496 } 471 497 472 498 for (j = 0; j < k; j++) { 473 - struct tcrypt_result *tr = &data[j].tresult; 499 + struct crypto_wait *wait = &data[j].wait; 500 + int wait_ret; 474 501 475 - wait_for_completion(&tr->completion); 476 - if (tr->err) 477 - ret = tr->err; 502 + wait_ret = crypto_wait_req(-EINPROGRESS, wait); 503 + if (wait_ret) 504 + ret = wait_ret; 478 505 } 479 506 480 507 end = get_cycles(); ··· 652 679 struct hash_speed *speed, unsigned mask) 653 680 { 654 681 struct scatterlist sg[TVMEMSIZE]; 655 - struct tcrypt_result tresult; 682 + struct crypto_wait wait; 656 683 struct ahash_request *req; 657 684 struct crypto_ahash *tfm; 658 685 char *output; ··· 681 708 goto out; 682 709 } 683 710 684 - init_completion(&tresult.completion); 711 + crypto_init_wait(&wait); 685 712 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 686 - tcrypt_complete, &tresult); 713 + crypto_req_done, &wait); 687 714 688 715 output = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL); 689 716 if (!output) ··· 738 765 739 766 static inline int do_one_acipher_op(struct skcipher_request *req, int ret) 740 767 { 741 - if (ret == -EINPROGRESS || ret == -EBUSY) { 742 - struct tcrypt_result *tr = req->base.data; 768 + struct crypto_wait *wait = req->base.data; 743 769 744 - wait_for_completion(&tr->completion); 745 - reinit_completion(&tr->completion); 746 - ret = tr->err; 747 - } 748 - 749 - return ret; 770 + return crypto_wait_req(ret, wait); 750 771 } 751 772 752 773 static int test_acipher_jiffies(struct skcipher_request *req, int enc, ··· 820 853 unsigned int tcount, u8 *keysize, bool async) 821 854 { 822 855 unsigned int ret, i, j, k, iv_len; 823 - struct tcrypt_result tresult; 856 + struct crypto_wait wait; 824 857 const char *key; 825 858 char iv[128]; 826 859 struct skcipher_request *req; ··· 833 866 else 834 867 e = "decryption"; 835 868 836 - 
init_completion(&tresult.completion); 869 + crypto_init_wait(&wait); 837 870 838 871 tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC); 839 872 ··· 854 887 } 855 888 856 889 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 857 - tcrypt_complete, &tresult); 890 + crypto_req_done, &wait); 858 891 859 892 i = 0; 860 893 do { ··· 1236 1269 ret += tcrypt_test("sha3-512"); 1237 1270 break; 1238 1271 1272 + case 52: 1273 + ret += tcrypt_test("sm3"); 1274 + break; 1275 + 1239 1276 case 100: 1240 1277 ret += tcrypt_test("hmac(md5)"); 1241 1278 break; ··· 1574 1603 speed_template_32); 1575 1604 break; 1576 1605 1577 - 1578 1606 case 300: 1579 1607 if (alg) { 1580 1608 test_hash_speed(alg, sec, generic_hash_speed_template); 1581 1609 break; 1582 1610 } 1583 - 1584 1611 /* fall through */ 1585 - 1586 1612 case 301: 1587 1613 test_hash_speed("md4", sec, generic_hash_speed_template); 1588 1614 if (mode > 300 && mode < 400) break; 1589 - 1615 + /* fall through */ 1590 1616 case 302: 1591 1617 test_hash_speed("md5", sec, generic_hash_speed_template); 1592 1618 if (mode > 300 && mode < 400) break; 1593 - 1619 + /* fall through */ 1594 1620 case 303: 1595 1621 test_hash_speed("sha1", sec, generic_hash_speed_template); 1596 1622 if (mode > 300 && mode < 400) break; 1597 - 1623 + /* fall through */ 1598 1624 case 304: 1599 1625 test_hash_speed("sha256", sec, generic_hash_speed_template); 1600 1626 if (mode > 300 && mode < 400) break; 1601 - 1627 + /* fall through */ 1602 1628 case 305: 1603 1629 test_hash_speed("sha384", sec, generic_hash_speed_template); 1604 1630 if (mode > 300 && mode < 400) break; 1605 - 1631 + /* fall through */ 1606 1632 case 306: 1607 1633 test_hash_speed("sha512", sec, generic_hash_speed_template); 1608 1634 if (mode > 300 && mode < 400) break; 1609 - 1635 + /* fall through */ 1610 1636 case 307: 1611 1637 test_hash_speed("wp256", sec, generic_hash_speed_template); 1612 1638 if (mode > 300 && mode < 400) break; 1613 - 1639 + /* 
fall through */ 1614 1640 case 308: 1615 1641 test_hash_speed("wp384", sec, generic_hash_speed_template); 1616 1642 if (mode > 300 && mode < 400) break; 1617 - 1643 + /* fall through */ 1618 1644 case 309: 1619 1645 test_hash_speed("wp512", sec, generic_hash_speed_template); 1620 1646 if (mode > 300 && mode < 400) break; 1621 - 1647 + /* fall through */ 1622 1648 case 310: 1623 1649 test_hash_speed("tgr128", sec, generic_hash_speed_template); 1624 1650 if (mode > 300 && mode < 400) break; 1625 - 1651 + /* fall through */ 1626 1652 case 311: 1627 1653 test_hash_speed("tgr160", sec, generic_hash_speed_template); 1628 1654 if (mode > 300 && mode < 400) break; 1629 - 1655 + /* fall through */ 1630 1656 case 312: 1631 1657 test_hash_speed("tgr192", sec, generic_hash_speed_template); 1632 1658 if (mode > 300 && mode < 400) break; 1633 - 1659 + /* fall through */ 1634 1660 case 313: 1635 1661 test_hash_speed("sha224", sec, generic_hash_speed_template); 1636 1662 if (mode > 300 && mode < 400) break; 1637 - 1663 + /* fall through */ 1638 1664 case 314: 1639 1665 test_hash_speed("rmd128", sec, generic_hash_speed_template); 1640 1666 if (mode > 300 && mode < 400) break; 1641 - 1667 + /* fall through */ 1642 1668 case 315: 1643 1669 test_hash_speed("rmd160", sec, generic_hash_speed_template); 1644 1670 if (mode > 300 && mode < 400) break; 1645 - 1671 + /* fall through */ 1646 1672 case 316: 1647 1673 test_hash_speed("rmd256", sec, generic_hash_speed_template); 1648 1674 if (mode > 300 && mode < 400) break; 1649 - 1675 + /* fall through */ 1650 1676 case 317: 1651 1677 test_hash_speed("rmd320", sec, generic_hash_speed_template); 1652 1678 if (mode > 300 && mode < 400) break; 1653 - 1679 + /* fall through */ 1654 1680 case 318: 1655 1681 test_hash_speed("ghash-generic", sec, hash_speed_template_16); 1656 1682 if (mode > 300 && mode < 400) break; 1657 - 1683 + /* fall through */ 1658 1684 case 319: 1659 1685 test_hash_speed("crc32c", sec, generic_hash_speed_template); 1660 1686 
if (mode > 300 && mode < 400) break; 1661 - 1687 + /* fall through */ 1662 1688 case 320: 1663 1689 test_hash_speed("crct10dif", sec, generic_hash_speed_template); 1664 1690 if (mode > 300 && mode < 400) break; 1665 - 1691 + /* fall through */ 1666 1692 case 321: 1667 1693 test_hash_speed("poly1305", sec, poly1305_speed_template); 1668 1694 if (mode > 300 && mode < 400) break; 1669 - 1695 + /* fall through */ 1670 1696 case 322: 1671 1697 test_hash_speed("sha3-224", sec, generic_hash_speed_template); 1672 1698 if (mode > 300 && mode < 400) break; 1673 - 1699 + /* fall through */ 1674 1700 case 323: 1675 1701 test_hash_speed("sha3-256", sec, generic_hash_speed_template); 1676 1702 if (mode > 300 && mode < 400) break; 1677 - 1703 + /* fall through */ 1678 1704 case 324: 1679 1705 test_hash_speed("sha3-384", sec, generic_hash_speed_template); 1680 1706 if (mode > 300 && mode < 400) break; 1681 - 1707 + /* fall through */ 1682 1708 case 325: 1683 1709 test_hash_speed("sha3-512", sec, generic_hash_speed_template); 1684 1710 if (mode > 300 && mode < 400) break; 1685 - 1711 + /* fall through */ 1712 + case 326: 1713 + test_hash_speed("sm3", sec, generic_hash_speed_template); 1714 + if (mode > 300 && mode < 400) break; 1715 + /* fall through */ 1686 1716 case 399: 1687 1717 break; 1688 1718 ··· 1692 1720 test_ahash_speed(alg, sec, generic_hash_speed_template); 1693 1721 break; 1694 1722 } 1695 - 1696 1723 /* fall through */ 1697 - 1698 1724 case 401: 1699 1725 test_ahash_speed("md4", sec, generic_hash_speed_template); 1700 1726 if (mode > 400 && mode < 500) break; 1701 - 1727 + /* fall through */ 1702 1728 case 402: 1703 1729 test_ahash_speed("md5", sec, generic_hash_speed_template); 1704 1730 if (mode > 400 && mode < 500) break; 1705 - 1731 + /* fall through */ 1706 1732 case 403: 1707 1733 test_ahash_speed("sha1", sec, generic_hash_speed_template); 1708 1734 if (mode > 400 && mode < 500) break; 1709 - 1735 + /* fall through */ 1710 1736 case 404: 1711 1737 
test_ahash_speed("sha256", sec, generic_hash_speed_template); 1712 1738 if (mode > 400 && mode < 500) break; 1713 - 1739 + /* fall through */ 1714 1740 case 405: 1715 1741 test_ahash_speed("sha384", sec, generic_hash_speed_template); 1716 1742 if (mode > 400 && mode < 500) break; 1717 - 1743 + /* fall through */ 1718 1744 case 406: 1719 1745 test_ahash_speed("sha512", sec, generic_hash_speed_template); 1720 1746 if (mode > 400 && mode < 500) break; 1721 - 1747 + /* fall through */ 1722 1748 case 407: 1723 1749 test_ahash_speed("wp256", sec, generic_hash_speed_template); 1724 1750 if (mode > 400 && mode < 500) break; 1725 - 1751 + /* fall through */ 1726 1752 case 408: 1727 1753 test_ahash_speed("wp384", sec, generic_hash_speed_template); 1728 1754 if (mode > 400 && mode < 500) break; 1729 - 1755 + /* fall through */ 1730 1756 case 409: 1731 1757 test_ahash_speed("wp512", sec, generic_hash_speed_template); 1732 1758 if (mode > 400 && mode < 500) break; 1733 - 1759 + /* fall through */ 1734 1760 case 410: 1735 1761 test_ahash_speed("tgr128", sec, generic_hash_speed_template); 1736 1762 if (mode > 400 && mode < 500) break; 1737 - 1763 + /* fall through */ 1738 1764 case 411: 1739 1765 test_ahash_speed("tgr160", sec, generic_hash_speed_template); 1740 1766 if (mode > 400 && mode < 500) break; 1741 - 1767 + /* fall through */ 1742 1768 case 412: 1743 1769 test_ahash_speed("tgr192", sec, generic_hash_speed_template); 1744 1770 if (mode > 400 && mode < 500) break; 1745 - 1771 + /* fall through */ 1746 1772 case 413: 1747 1773 test_ahash_speed("sha224", sec, generic_hash_speed_template); 1748 1774 if (mode > 400 && mode < 500) break; 1749 - 1775 + /* fall through */ 1750 1776 case 414: 1751 1777 test_ahash_speed("rmd128", sec, generic_hash_speed_template); 1752 1778 if (mode > 400 && mode < 500) break; 1753 - 1779 + /* fall through */ 1754 1780 case 415: 1755 1781 test_ahash_speed("rmd160", sec, generic_hash_speed_template); 1756 1782 if (mode > 400 && mode < 500) break; 
1757 - 1783 + /* fall through */ 1758 1784 case 416: 1759 1785 test_ahash_speed("rmd256", sec, generic_hash_speed_template); 1760 1786 if (mode > 400 && mode < 500) break; 1761 - 1787 + /* fall through */ 1762 1788 case 417: 1763 1789 test_ahash_speed("rmd320", sec, generic_hash_speed_template); 1764 1790 if (mode > 400 && mode < 500) break; 1765 - 1791 + /* fall through */ 1766 1792 case 418: 1767 1793 test_ahash_speed("sha3-224", sec, generic_hash_speed_template); 1768 1794 if (mode > 400 && mode < 500) break; 1769 - 1795 + /* fall through */ 1770 1796 case 419: 1771 1797 test_ahash_speed("sha3-256", sec, generic_hash_speed_template); 1772 1798 if (mode > 400 && mode < 500) break; 1773 - 1799 + /* fall through */ 1774 1800 case 420: 1775 1801 test_ahash_speed("sha3-384", sec, generic_hash_speed_template); 1776 1802 if (mode > 400 && mode < 500) break; 1777 - 1778 - 1803 + /* fall through */ 1779 1804 case 421: 1780 1805 test_ahash_speed("sha3-512", sec, generic_hash_speed_template); 1781 1806 if (mode > 400 && mode < 500) break; 1782 - 1807 + /* fall through */ 1783 1808 case 422: 1784 1809 test_mb_ahash_speed("sha1", sec, generic_hash_speed_template); 1785 1810 if (mode > 400 && mode < 500) break; 1786 - 1811 + /* fall through */ 1787 1812 case 423: 1788 1813 test_mb_ahash_speed("sha256", sec, generic_hash_speed_template); 1789 1814 if (mode > 400 && mode < 500) break; 1790 - 1815 + /* fall through */ 1791 1816 case 424: 1792 1817 test_mb_ahash_speed("sha512", sec, generic_hash_speed_template); 1793 1818 if (mode > 400 && mode < 500) break; 1794 - 1819 + /* fall through */ 1820 + case 425: 1821 + test_mb_ahash_speed("sm3", sec, generic_hash_speed_template); 1822 + if (mode > 400 && mode < 500) break; 1823 + /* fall through */ 1795 1824 case 499: 1796 1825 break; 1797 1826
+72 -138
crypto/testmgr.c
··· 76 76 #define ENCRYPT 1 77 77 #define DECRYPT 0 78 78 79 - struct tcrypt_result { 80 - struct completion completion; 81 - int err; 82 - }; 83 - 84 79 struct aead_test_suite { 85 80 struct { 86 81 const struct aead_testvec *vecs; ··· 150 155 buf, len, false); 151 156 } 152 157 153 - static void tcrypt_complete(struct crypto_async_request *req, int err) 154 - { 155 - struct tcrypt_result *res = req->data; 156 - 157 - if (err == -EINPROGRESS) 158 - return; 159 - 160 - res->err = err; 161 - complete(&res->completion); 162 - } 163 - 164 158 static int testmgr_alloc_buf(char *buf[XBUFSIZE]) 165 159 { 166 160 int i; ··· 177 193 free_page((unsigned long)buf[i]); 178 194 } 179 195 180 - static int wait_async_op(struct tcrypt_result *tr, int ret) 181 - { 182 - if (ret == -EINPROGRESS || ret == -EBUSY) { 183 - wait_for_completion(&tr->completion); 184 - reinit_completion(&tr->completion); 185 - ret = tr->err; 186 - } 187 - return ret; 188 - } 189 - 190 196 static int ahash_partial_update(struct ahash_request **preq, 191 197 struct crypto_ahash *tfm, const struct hash_testvec *template, 192 198 void *hash_buff, int k, int temp, struct scatterlist *sg, 193 - const char *algo, char *result, struct tcrypt_result *tresult) 199 + const char *algo, char *result, struct crypto_wait *wait) 194 200 { 195 201 char *state; 196 202 struct ahash_request *req; ··· 210 236 } 211 237 ahash_request_set_callback(req, 212 238 CRYPTO_TFM_REQ_MAY_BACKLOG, 213 - tcrypt_complete, tresult); 239 + crypto_req_done, wait); 214 240 215 241 memcpy(hash_buff, template->plaintext + temp, 216 242 template->tap[k]); ··· 221 247 pr_err("alg: hash: Failed to import() for %s\n", algo); 222 248 goto out; 223 249 } 224 - ret = wait_async_op(tresult, crypto_ahash_update(req)); 250 + ret = crypto_wait_req(crypto_ahash_update(req), wait); 225 251 if (ret) 226 252 goto out; 227 253 *preq = req; ··· 246 272 char *result; 247 273 char *key; 248 274 struct ahash_request *req; 249 - struct tcrypt_result tresult; 275 + 
struct crypto_wait wait; 250 276 void *hash_buff; 251 277 char *xbuf[XBUFSIZE]; 252 278 int ret = -ENOMEM; ··· 260 286 if (testmgr_alloc_buf(xbuf)) 261 287 goto out_nobuf; 262 288 263 - init_completion(&tresult.completion); 289 + crypto_init_wait(&wait); 264 290 265 291 req = ahash_request_alloc(tfm, GFP_KERNEL); 266 292 if (!req) { ··· 269 295 goto out_noreq; 270 296 } 271 297 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 272 - tcrypt_complete, &tresult); 298 + crypto_req_done, &wait); 273 299 274 300 j = 0; 275 301 for (i = 0; i < tcount; i++) { ··· 309 335 310 336 ahash_request_set_crypt(req, sg, result, template[i].psize); 311 337 if (use_digest) { 312 - ret = wait_async_op(&tresult, crypto_ahash_digest(req)); 338 + ret = crypto_wait_req(crypto_ahash_digest(req), &wait); 313 339 if (ret) { 314 340 pr_err("alg: hash: digest failed on test %d " 315 341 "for %s: ret=%d\n", j, algo, -ret); 316 342 goto out; 317 343 } 318 344 } else { 319 - ret = wait_async_op(&tresult, crypto_ahash_init(req)); 345 + ret = crypto_wait_req(crypto_ahash_init(req), &wait); 320 346 if (ret) { 321 347 pr_err("alg: hash: init failed on test %d " 322 348 "for %s: ret=%d\n", j, algo, -ret); 323 349 goto out; 324 350 } 325 - ret = wait_async_op(&tresult, crypto_ahash_update(req)); 351 + ret = crypto_wait_req(crypto_ahash_update(req), &wait); 326 352 if (ret) { 327 353 pr_err("alg: hash: update failed on test %d " 328 354 "for %s: ret=%d\n", j, algo, -ret); 329 355 goto out; 330 356 } 331 - ret = wait_async_op(&tresult, crypto_ahash_final(req)); 357 + ret = crypto_wait_req(crypto_ahash_final(req), &wait); 332 358 if (ret) { 333 359 pr_err("alg: hash: final failed on test %d " 334 360 "for %s: ret=%d\n", j, algo, -ret); ··· 394 420 } 395 421 396 422 ahash_request_set_crypt(req, sg, result, template[i].psize); 397 - ret = crypto_ahash_digest(req); 398 - switch (ret) { 399 - case 0: 400 - break; 401 - case -EINPROGRESS: 402 - case -EBUSY: 403 - 
wait_for_completion(&tresult.completion); 404 - reinit_completion(&tresult.completion); 405 - ret = tresult.err; 406 - if (!ret) 407 - break; 408 - /* fall through */ 409 - default: 410 - printk(KERN_ERR "alg: hash: digest failed " 411 - "on chunking test %d for %s: " 412 - "ret=%d\n", j, algo, -ret); 423 + ret = crypto_wait_req(crypto_ahash_digest(req), &wait); 424 + if (ret) { 425 + pr_err("alg: hash: digest failed on chunking test %d for %s: ret=%d\n", 426 + j, algo, -ret); 413 427 goto out; 414 428 } 415 429 ··· 448 486 } 449 487 450 488 ahash_request_set_crypt(req, sg, result, template[i].tap[0]); 451 - ret = wait_async_op(&tresult, crypto_ahash_init(req)); 489 + ret = crypto_wait_req(crypto_ahash_init(req), &wait); 452 490 if (ret) { 453 491 pr_err("alg: hash: init failed on test %d for %s: ret=%d\n", 454 492 j, algo, -ret); 455 493 goto out; 456 494 } 457 - ret = wait_async_op(&tresult, crypto_ahash_update(req)); 495 + ret = crypto_wait_req(crypto_ahash_update(req), &wait); 458 496 if (ret) { 459 497 pr_err("alg: hash: update failed on test %d for %s: ret=%d\n", 460 498 j, algo, -ret); ··· 465 503 for (k = 1; k < template[i].np; k++) { 466 504 ret = ahash_partial_update(&req, tfm, &template[i], 467 505 hash_buff, k, temp, &sg[0], algo, result, 468 - &tresult); 506 + &wait); 469 507 if (ret) { 470 508 pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n", 471 509 j, algo, -ret); ··· 473 511 } 474 512 temp += template[i].tap[k]; 475 513 } 476 - ret = wait_async_op(&tresult, crypto_ahash_final(req)); 514 + ret = crypto_wait_req(crypto_ahash_final(req), &wait); 477 515 if (ret) { 478 516 pr_err("alg: hash: final failed on test %d for %s: ret=%d\n", 479 517 j, algo, -ret); ··· 542 580 struct scatterlist *sg; 543 581 struct scatterlist *sgout; 544 582 const char *e, *d; 545 - struct tcrypt_result result; 583 + struct crypto_wait wait; 546 584 unsigned int authsize, iv_len; 547 585 void *input; 548 586 void *output; ··· 581 619 else 582 620 e = 
"decryption"; 583 621 584 - init_completion(&result.completion); 622 + crypto_init_wait(&wait); 585 623 586 624 req = aead_request_alloc(tfm, GFP_KERNEL); 587 625 if (!req) { ··· 591 629 } 592 630 593 631 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 594 - tcrypt_complete, &result); 632 + crypto_req_done, &wait); 595 633 596 634 iv_len = crypto_aead_ivsize(tfm); 597 635 ··· 671 709 672 710 aead_request_set_ad(req, template[i].alen); 673 711 674 - ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); 712 + ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) 713 + : crypto_aead_decrypt(req), &wait); 675 714 676 715 switch (ret) { 677 716 case 0: ··· 685 722 goto out; 686 723 } 687 724 break; 688 - case -EINPROGRESS: 689 - case -EBUSY: 690 - wait_for_completion(&result.completion); 691 - reinit_completion(&result.completion); 692 - ret = result.err; 693 - if (!ret) 694 - break; 695 725 case -EBADMSG: 696 726 if (template[i].novrfy) 697 727 /* verification failure was expected */ ··· 822 866 823 867 aead_request_set_ad(req, template[i].alen); 824 868 825 - ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); 869 + ret = crypto_wait_req(enc ? 
crypto_aead_encrypt(req) 870 + : crypto_aead_decrypt(req), &wait); 826 871 827 872 switch (ret) { 828 873 case 0: ··· 836 879 goto out; 837 880 } 838 881 break; 839 - case -EINPROGRESS: 840 - case -EBUSY: 841 - wait_for_completion(&result.completion); 842 - reinit_completion(&result.completion); 843 - ret = result.err; 844 - if (!ret) 845 - break; 846 882 case -EBADMSG: 847 883 if (template[i].novrfy) 848 884 /* verification failure was expected */ ··· 1033 1083 struct scatterlist sg[8]; 1034 1084 struct scatterlist sgout[8]; 1035 1085 const char *e, *d; 1036 - struct tcrypt_result result; 1086 + struct crypto_wait wait; 1037 1087 void *data; 1038 1088 char iv[MAX_IVLEN]; 1039 1089 char *xbuf[XBUFSIZE]; ··· 1057 1107 else 1058 1108 e = "decryption"; 1059 1109 1060 - init_completion(&result.completion); 1110 + crypto_init_wait(&wait); 1061 1111 1062 1112 req = skcipher_request_alloc(tfm, GFP_KERNEL); 1063 1113 if (!req) { ··· 1067 1117 } 1068 1118 1069 1119 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 1070 - tcrypt_complete, &result); 1120 + crypto_req_done, &wait); 1071 1121 1072 1122 j = 0; 1073 1123 for (i = 0; i < tcount; i++) { ··· 1114 1164 1115 1165 skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, 1116 1166 template[i].ilen, iv); 1117 - ret = enc ? crypto_skcipher_encrypt(req) : 1118 - crypto_skcipher_decrypt(req); 1167 + ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) : 1168 + crypto_skcipher_decrypt(req), &wait); 1119 1169 1120 - switch (ret) { 1121 - case 0: 1122 - break; 1123 - case -EINPROGRESS: 1124 - case -EBUSY: 1125 - wait_for_completion(&result.completion); 1126 - reinit_completion(&result.completion); 1127 - ret = result.err; 1128 - if (!ret) 1129 - break; 1130 - /* fall through */ 1131 - default: 1170 + if (ret) { 1132 1171 pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n", 1133 1172 d, e, j, algo, -ret); 1134 1173 goto out; ··· 1211 1272 skcipher_request_set_crypt(req, sg, (diff_dst) ? 
sgout : sg, 1212 1273 template[i].ilen, iv); 1213 1274 1214 - ret = enc ? crypto_skcipher_encrypt(req) : 1215 - crypto_skcipher_decrypt(req); 1275 + ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) : 1276 + crypto_skcipher_decrypt(req), &wait); 1216 1277 1217 - switch (ret) { 1218 - case 0: 1219 - break; 1220 - case -EINPROGRESS: 1221 - case -EBUSY: 1222 - wait_for_completion(&result.completion); 1223 - reinit_completion(&result.completion); 1224 - ret = result.err; 1225 - if (!ret) 1226 - break; 1227 - /* fall through */ 1228 - default: 1278 + if (ret) { 1229 1279 pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n", 1230 1280 d, e, j, algo, -ret); 1231 1281 goto out; ··· 1390 1462 int ret; 1391 1463 struct scatterlist src, dst; 1392 1464 struct acomp_req *req; 1393 - struct tcrypt_result result; 1465 + struct crypto_wait wait; 1394 1466 1395 1467 output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); 1396 1468 if (!output) ··· 1414 1486 } 1415 1487 1416 1488 memset(output, 0, dlen); 1417 - init_completion(&result.completion); 1489 + crypto_init_wait(&wait); 1418 1490 sg_init_one(&src, input_vec, ilen); 1419 1491 sg_init_one(&dst, output, dlen); 1420 1492 ··· 1429 1501 1430 1502 acomp_request_set_params(req, &src, &dst, ilen, dlen); 1431 1503 acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 1432 - tcrypt_complete, &result); 1504 + crypto_req_done, &wait); 1433 1505 1434 - ret = wait_async_op(&result, crypto_acomp_compress(req)); 1506 + ret = crypto_wait_req(crypto_acomp_compress(req), &wait); 1435 1507 if (ret) { 1436 1508 pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", 1437 1509 i + 1, algo, -ret); ··· 1444 1516 dlen = COMP_BUF_SIZE; 1445 1517 sg_init_one(&src, output, ilen); 1446 1518 sg_init_one(&dst, decomp_out, dlen); 1447 - init_completion(&result.completion); 1519 + crypto_init_wait(&wait); 1448 1520 acomp_request_set_params(req, &src, &dst, ilen, dlen); 1449 1521 1450 - ret = wait_async_op(&result, 
crypto_acomp_decompress(req)); 1522 + ret = crypto_wait_req(crypto_acomp_decompress(req), &wait); 1451 1523 if (ret) { 1452 1524 pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", 1453 1525 i + 1, algo, -ret); ··· 1491 1563 } 1492 1564 1493 1565 memset(output, 0, dlen); 1494 - init_completion(&result.completion); 1566 + crypto_init_wait(&wait); 1495 1567 sg_init_one(&src, input_vec, ilen); 1496 1568 sg_init_one(&dst, output, dlen); 1497 1569 ··· 1506 1578 1507 1579 acomp_request_set_params(req, &src, &dst, ilen, dlen); 1508 1580 acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 1509 - tcrypt_complete, &result); 1581 + crypto_req_done, &wait); 1510 1582 1511 - ret = wait_async_op(&result, crypto_acomp_decompress(req)); 1583 + ret = crypto_wait_req(crypto_acomp_decompress(req), &wait); 1512 1584 if (ret) { 1513 1585 pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n", 1514 1586 i + 1, algo, -ret); ··· 1928 2000 void *a_public = NULL; 1929 2001 void *a_ss = NULL; 1930 2002 void *shared_secret = NULL; 1931 - struct tcrypt_result result; 2003 + struct crypto_wait wait; 1932 2004 unsigned int out_len_max; 1933 2005 int err = -ENOMEM; 1934 2006 struct scatterlist src, dst; ··· 1937 2009 if (!req) 1938 2010 return err; 1939 2011 1940 - init_completion(&result.completion); 2012 + crypto_init_wait(&wait); 1941 2013 1942 2014 err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size); 1943 2015 if (err < 0) ··· 1955 2027 sg_init_one(&dst, output_buf, out_len_max); 1956 2028 kpp_request_set_output(req, &dst, out_len_max); 1957 2029 kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 1958 - tcrypt_complete, &result); 2030 + crypto_req_done, &wait); 1959 2031 1960 2032 /* Compute party A's public key */ 1961 - err = wait_async_op(&result, crypto_kpp_generate_public_key(req)); 2033 + err = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait); 1962 2034 if (err) { 1963 2035 pr_err("alg: %s: Party A: generate public 
key test failed. err %d\n", 1964 2036 alg, err); ··· 1997 2069 kpp_request_set_input(req, &src, vec->b_public_size); 1998 2070 kpp_request_set_output(req, &dst, out_len_max); 1999 2071 kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 2000 - tcrypt_complete, &result); 2001 - err = wait_async_op(&result, crypto_kpp_compute_shared_secret(req)); 2072 + crypto_req_done, &wait); 2073 + err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait); 2002 2074 if (err) { 2003 2075 pr_err("alg: %s: Party A: compute shared secret test failed. err %d\n", 2004 2076 alg, err); ··· 2028 2100 kpp_request_set_input(req, &src, vec->expected_a_public_size); 2029 2101 kpp_request_set_output(req, &dst, out_len_max); 2030 2102 kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 2031 - tcrypt_complete, &result); 2032 - err = wait_async_op(&result, 2033 - crypto_kpp_compute_shared_secret(req)); 2103 + crypto_req_done, &wait); 2104 + err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), 2105 + &wait); 2034 2106 if (err) { 2035 2107 pr_err("alg: %s: Party B: compute shared secret failed. 
err %d\n", 2036 2108 alg, err); ··· 2107 2179 struct akcipher_request *req; 2108 2180 void *outbuf_enc = NULL; 2109 2181 void *outbuf_dec = NULL; 2110 - struct tcrypt_result result; 2182 + struct crypto_wait wait; 2111 2183 unsigned int out_len_max, out_len = 0; 2112 2184 int err = -ENOMEM; 2113 2185 struct scatterlist src, dst, src_tab[2]; ··· 2119 2191 if (!req) 2120 2192 goto free_xbuf; 2121 2193 2122 - init_completion(&result.completion); 2194 + crypto_init_wait(&wait); 2123 2195 2124 2196 if (vecs->public_key_vec) 2125 2197 err = crypto_akcipher_set_pub_key(tfm, vecs->key, ··· 2148 2220 akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size, 2149 2221 out_len_max); 2150 2222 akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 2151 - tcrypt_complete, &result); 2223 + crypto_req_done, &wait); 2152 2224 2153 - err = wait_async_op(&result, vecs->siggen_sigver_test ? 2154 - /* Run asymmetric signature generation */ 2155 - crypto_akcipher_sign(req) : 2156 - /* Run asymmetric encrypt */ 2157 - crypto_akcipher_encrypt(req)); 2225 + err = crypto_wait_req(vecs->siggen_sigver_test ? 2226 + /* Run asymmetric signature generation */ 2227 + crypto_akcipher_sign(req) : 2228 + /* Run asymmetric encrypt */ 2229 + crypto_akcipher_encrypt(req), &wait); 2158 2230 if (err) { 2159 2231 pr_err("alg: akcipher: encrypt test failed. err %d\n", err); 2160 2232 goto free_all; ··· 2189 2261 2190 2262 sg_init_one(&src, xbuf[0], vecs->c_size); 2191 2263 sg_init_one(&dst, outbuf_dec, out_len_max); 2192 - init_completion(&result.completion); 2264 + crypto_init_wait(&wait); 2193 2265 akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max); 2194 2266 2195 - err = wait_async_op(&result, vecs->siggen_sigver_test ? 2196 - /* Run asymmetric signature verification */ 2197 - crypto_akcipher_verify(req) : 2198 - /* Run asymmetric decrypt */ 2199 - crypto_akcipher_decrypt(req)); 2267 + err = crypto_wait_req(vecs->siggen_sigver_test ? 
2268 + /* Run asymmetric signature verification */ 2269 + crypto_akcipher_verify(req) : 2270 + /* Run asymmetric decrypt */ 2271 + crypto_akcipher_decrypt(req), &wait); 2200 2272 if (err) { 2201 2273 pr_err("alg: akcipher: decrypt test failed. err %d\n", err); 2202 2274 goto free_all; ··· 3426 3498 .fips_allowed = 1, 3427 3499 .suite = { 3428 3500 .hash = __VECS(sha512_tv_template) 3501 + } 3502 + }, { 3503 + .alg = "sm3", 3504 + .test = alg_test_hash, 3505 + .suite = { 3506 + .hash = __VECS(sm3_tv_template) 3429 3507 } 3430 3508 }, { 3431 3509 .alg = "tgr128",
+67
crypto/testmgr.h
··· 1497 1497 } 1498 1498 }; 1499 1499 1500 + /* Example vectors below taken from 1501 + * http://www.oscca.gov.cn/UpFile/20101222141857786.pdf 1502 + * 1503 + * The rest taken from 1504 + * https://github.com/adamws/oscca-sm3 1505 + */ 1506 + static const struct hash_testvec sm3_tv_template[] = { 1507 + { 1508 + .plaintext = "", 1509 + .psize = 0, 1510 + .digest = (u8 *)(u8 []) { 1511 + 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, 1512 + 0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F, 1513 + 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74, 1514 + 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B } 1515 + }, { 1516 + .plaintext = "a", 1517 + .psize = 1, 1518 + .digest = (u8 *)(u8 []) { 1519 + 0x62, 0x34, 0x76, 0xAC, 0x18, 0xF6, 0x5A, 0x29, 1520 + 0x09, 0xE4, 0x3C, 0x7F, 0xEC, 0x61, 0xB4, 0x9C, 1521 + 0x7E, 0x76, 0x4A, 0x91, 0xA1, 0x8C, 0xCB, 0x82, 1522 + 0xF1, 0x91, 0x7A, 0x29, 0xC8, 0x6C, 0x5E, 0x88 } 1523 + }, { 1524 + /* A.1. Example 1 */ 1525 + .plaintext = "abc", 1526 + .psize = 3, 1527 + .digest = (u8 *)(u8 []) { 1528 + 0x66, 0xC7, 0xF0, 0xF4, 0x62, 0xEE, 0xED, 0xD9, 1529 + 0xD1, 0xF2, 0xD4, 0x6B, 0xDC, 0x10, 0xE4, 0xE2, 1530 + 0x41, 0x67, 0xC4, 0x87, 0x5C, 0xF2, 0xF7, 0xA2, 1531 + 0x29, 0x7D, 0xA0, 0x2B, 0x8F, 0x4B, 0xA8, 0xE0 } 1532 + }, { 1533 + .plaintext = "abcdefghijklmnopqrstuvwxyz", 1534 + .psize = 26, 1535 + .digest = (u8 *)(u8 []) { 1536 + 0xB8, 0x0F, 0xE9, 0x7A, 0x4D, 0xA2, 0x4A, 0xFC, 1537 + 0x27, 0x75, 0x64, 0xF6, 0x6A, 0x35, 0x9E, 0xF4, 1538 + 0x40, 0x46, 0x2A, 0xD2, 0x8D, 0xCC, 0x6D, 0x63, 1539 + 0xAD, 0xB2, 0x4D, 0x5C, 0x20, 0xA6, 0x15, 0x95 } 1540 + }, { 1541 + /* A.1. 
Example 2 */ 1542 + .plaintext = "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab" 1543 + "cdabcdabcdabcdabcd", 1544 + .psize = 64, 1545 + .digest = (u8 *)(u8 []) { 1546 + 0xDE, 0xBE, 0x9F, 0xF9, 0x22, 0x75, 0xB8, 0xA1, 1547 + 0x38, 0x60, 0x48, 0x89, 0xC1, 0x8E, 0x5A, 0x4D, 1548 + 0x6F, 0xDB, 0x70, 0xE5, 0x38, 0x7E, 0x57, 0x65, 1549 + 0x29, 0x3D, 0xCB, 0xA3, 0x9C, 0x0C, 0x57, 0x32 } 1550 + }, { 1551 + .plaintext = "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd" 1552 + "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd" 1553 + "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd" 1554 + "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd" 1555 + "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd" 1556 + "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd" 1557 + "abcdabcdabcdabcdabcdabcdabcdabcd", 1558 + .psize = 256, 1559 + .digest = (u8 *)(u8 []) { 1560 + 0xB9, 0x65, 0x76, 0x4C, 0x8B, 0xEB, 0xB0, 0x91, 1561 + 0xC7, 0x60, 0x2B, 0x74, 0xAF, 0xD3, 0x4E, 0xEF, 1562 + 0xB5, 0x31, 0xDC, 0xCB, 0x4E, 0x00, 0x76, 0xD9, 1563 + 0xB7, 0xCD, 0x81, 0x31, 0x99, 0xB4, 0x59, 0x71 } 1564 + } 1565 + }; 1566 + 1500 1567 /* 1501 1568 * SHA1 test vectors from from FIPS PUB 180-1 1502 1569 * Long vector from CAVS 5.0
+2 -6
crypto/xts.c
··· 269 269 crypto_skcipher_encrypt(subreq) ?: 270 270 post_crypt(req); 271 271 272 - if (err == -EINPROGRESS || 273 - (err == -EBUSY && 274 - req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 272 + if (err == -EINPROGRESS || err == -EBUSY) 275 273 return err; 276 274 } 277 275 ··· 319 321 crypto_skcipher_decrypt(subreq) ?: 320 322 post_crypt(req); 321 323 322 - if (err == -EINPROGRESS || 323 - (err == -EBUSY && 324 - req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 324 + if (err == -EINPROGRESS || err == -EBUSY) 325 325 return err; 326 326 } 327 327
+3 -3
drivers/char/hw_random/Kconfig
··· 100 100 If unsure, say Y. 101 101 102 102 config HW_RANDOM_IPROC_RNG200 103 - tristate "Broadcom iProc RNG200 support" 104 - depends on ARCH_BCM_IPROC 103 + tristate "Broadcom iProc/STB RNG200 support" 104 + depends on ARCH_BCM_IPROC || ARCH_BRCMSTB 105 105 default HW_RANDOM 106 106 ---help--- 107 107 This driver provides kernel-side support for the RNG200 108 - hardware found on the Broadcom iProc SoCs. 108 + hardware found on the Broadcom iProc and STB SoCs. 109 109 110 110 To compile this driver as a module, choose M here: the 111 111 module will be called iproc-rng200
+33 -20
drivers/char/hw_random/core.c
··· 292 292 .groups = rng_dev_groups, 293 293 }; 294 294 295 + static int enable_best_rng(void) 296 + { 297 + int ret = -ENODEV; 298 + 299 + BUG_ON(!mutex_is_locked(&rng_mutex)); 300 + 301 + /* rng_list is sorted by quality, use the best (=first) one */ 302 + if (!list_empty(&rng_list)) { 303 + struct hwrng *new_rng; 304 + 305 + new_rng = list_entry(rng_list.next, struct hwrng, list); 306 + ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng)); 307 + if (!ret) 308 + cur_rng_set_by_user = 0; 309 + } 310 + 311 + return ret; 312 + } 313 + 295 314 static ssize_t hwrng_attr_current_store(struct device *dev, 296 315 struct device_attribute *attr, 297 316 const char *buf, size_t len) 298 317 { 299 - int err; 318 + int err = -ENODEV; 300 319 struct hwrng *rng; 301 320 302 321 err = mutex_lock_interruptible(&rng_mutex); 303 322 if (err) 304 323 return -ERESTARTSYS; 305 - err = -ENODEV; 306 - list_for_each_entry(rng, &rng_list, list) { 307 - if (sysfs_streq(rng->name, buf)) { 308 - err = 0; 309 - cur_rng_set_by_user = 1; 310 - if (rng != current_rng) 324 + 325 + if (sysfs_streq(buf, "")) { 326 + err = enable_best_rng(); 327 + } else { 328 + list_for_each_entry(rng, &rng_list, list) { 329 + if (sysfs_streq(rng->name, buf)) { 330 + cur_rng_set_by_user = 1; 311 331 err = set_current_rng(rng); 312 - break; 332 + break; 333 + } 313 334 } 314 335 } 336 + 315 337 mutex_unlock(&rng_mutex); 316 338 317 339 return err ? 
: len; ··· 445 423 { 446 424 hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); 447 425 if (IS_ERR(hwrng_fill)) { 448 - pr_err("hwrng_fill thread creation failed"); 426 + pr_err("hwrng_fill thread creation failed\n"); 449 427 hwrng_fill = NULL; 450 428 } 451 429 } ··· 515 493 mutex_lock(&rng_mutex); 516 494 517 495 list_del(&rng->list); 518 - if (current_rng == rng) { 519 - drop_current_rng(); 520 - cur_rng_set_by_user = 0; 521 - /* rng_list is sorted by quality, use the best (=first) one */ 522 - if (!list_empty(&rng_list)) { 523 - struct hwrng *new_rng; 524 - 525 - new_rng = list_entry(rng_list.next, struct hwrng, list); 526 - set_current_rng(new_rng); 527 - } 528 - } 496 + if (current_rng == rng) 497 + enable_best_rng(); 529 498 530 499 if (list_empty(&rng_list)) { 531 500 mutex_unlock(&rng_mutex);
+1
drivers/char/hw_random/iproc-rng200.c
··· 220 220 } 221 221 222 222 static const struct of_device_id iproc_rng200_of_match[] = { 223 + { .compatible = "brcm,bcm7278-rng200", }, 223 224 { .compatible = "brcm,iproc-rng200", }, 224 225 {}, 225 226 };
+1 -1
drivers/char/hw_random/pseries-rng.c
··· 72 72 return 0; 73 73 } 74 74 75 - static struct vio_device_id pseries_rng_driver_ids[] = { 75 + static const struct vio_device_id pseries_rng_driver_ids[] = { 76 76 { "ibm,random-v1", "ibm,random"}, 77 77 { "", "" } 78 78 };
-7
drivers/char/hw_random/timeriomem-rng.c
··· 53 53 int period_us = ktime_to_us(priv->period); 54 54 55 55 /* 56 - * The RNG provides 32-bits per read. Ensure there is enough space for 57 - * at minimum one read. 58 - */ 59 - if (max < sizeof(u32)) 60 - return 0; 61 - 62 - /* 63 56 * There may not have been enough time for new data to be generated 64 57 * since the last request. If the caller doesn't want to wait, let them 65 58 * bail out. Otherwise, wait for the completion. If the new data has
+20 -1
drivers/char/hw_random/virtio-rng.c
··· 184 184 185 185 static int virtrng_restore(struct virtio_device *vdev) 186 186 { 187 - return probe_common(vdev); 187 + int err; 188 + 189 + err = probe_common(vdev); 190 + if (!err) { 191 + struct virtrng_info *vi = vdev->priv; 192 + 193 + /* 194 + * Set hwrng_removed to ensure that virtio_read() 195 + * does not block waiting for data before the 196 + * registration is complete. 197 + */ 198 + vi->hwrng_removed = true; 199 + err = hwrng_register(&vi->hwrng); 200 + if (!err) { 201 + vi->hwrng_register_done = true; 202 + vi->hwrng_removed = false; 203 + } 204 + } 205 + 206 + return err; 188 207 } 189 208 #endif 190 209
+21 -19
drivers/crypto/Kconfig
··· 199 199 200 200 It is available with IBM z13 or later. 201 201 202 - config CRYPTO_DEV_MV_CESA 203 - tristate "Marvell's Cryptographic Engine" 204 - depends on PLAT_ORION 205 - select CRYPTO_AES 206 - select CRYPTO_BLKCIPHER 207 - select CRYPTO_HASH 208 - select SRAM 209 - help 210 - This driver allows you to utilize the Cryptographic Engines and 211 - Security Accelerator (CESA) which can be found on the Marvell Orion 212 - and Kirkwood SoCs, such as QNAP's TS-209. 213 - 214 - Currently the driver supports AES in ECB and CBC mode without DMA. 215 - 216 202 config CRYPTO_DEV_MARVELL_CESA 217 - tristate "New Marvell's Cryptographic Engine driver" 203 + tristate "Marvell's Cryptographic Engine driver" 218 204 depends on PLAT_ORION || ARCH_MVEBU 219 205 select CRYPTO_AES 220 206 select CRYPTO_DES ··· 209 223 select SRAM 210 224 help 211 225 This driver allows you to utilize the Cryptographic Engines and 212 - Security Accelerator (CESA) which can be found on the Armada 370. 226 + Security Accelerator (CESA) which can be found on MVEBU and ORION 227 + platforms. 213 228 This driver supports CPU offload through DMA transfers. 214 - 215 - This driver is aimed at replacing the mv_cesa driver. This will only 216 - happen once it has received proper testing. 217 229 218 230 config CRYPTO_DEV_NIAGARA2 219 231 tristate "Niagara2 Stream Processing Unit driver" ··· 299 315 tristate "Driver AMCC PPC4xx crypto accelerator" 300 316 depends on PPC && 4xx 301 317 select CRYPTO_HASH 318 + select CRYPTO_AEAD 319 + select CRYPTO_AES 320 + select CRYPTO_CCM 321 + select CRYPTO_GCM 302 322 select CRYPTO_BLKCIPHER 303 323 help 304 324 This option allows you to have support for AMCC crypto acceleration. ··· 426 438 This option allows you to have support for S5P crypto acceleration. 427 439 Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES 428 440 algorithms execution. 
441 + 442 + config CRYPTO_DEV_EXYNOS_HASH 443 + bool "Support for Samsung Exynos HASH accelerator" 444 + depends on CRYPTO_DEV_S5P 445 + depends on !CRYPTO_DEV_EXYNOS_RNG && CRYPTO_DEV_EXYNOS_RNG!=m 446 + select CRYPTO_SHA1 447 + select CRYPTO_MD5 448 + select CRYPTO_SHA256 449 + help 450 + Select this to offload Exynos from HASH MD5/SHA1/SHA256. 451 + This will select software SHA1, MD5 and SHA256 as they are 452 + needed for small and zero-size messages. 453 + HASH algorithms will be disabled if EXYNOS_RNG 454 + is enabled due to hw conflict. 429 455 430 456 config CRYPTO_DEV_NX 431 457 bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration"
-1
drivers/crypto/Makefile
··· 15 15 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o 16 16 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o 17 17 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o 18 - obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o 19 18 obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/ 20 19 obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/ 21 20 obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
+1 -1
drivers/crypto/amcc/Makefile
··· 1 1 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o 2 - crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o 2 + crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o 3 3 crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o
+438 -74
drivers/crypto/amcc/crypto4xx_alg.c
··· 26 26 #include <crypto/internal/hash.h> 27 27 #include <linux/dma-mapping.h> 28 28 #include <crypto/algapi.h> 29 + #include <crypto/aead.h> 29 30 #include <crypto/aes.h> 31 + #include <crypto/gcm.h> 30 32 #include <crypto/sha.h> 33 + #include <crypto/ctr.h> 31 34 #include "crypto4xx_reg_def.h" 32 - #include "crypto4xx_sa.h" 33 35 #include "crypto4xx_core.h" 36 + #include "crypto4xx_sa.h" 34 37 35 38 static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h, 36 39 u32 save_iv, u32 ld_h, u32 ld_iv, ··· 65 62 sa->sa_command_1.bf.crypto_mode9_8 = cm & 3; 66 63 sa->sa_command_1.bf.feedback_mode = cfb, 67 64 sa->sa_command_1.bf.sa_rev = 1; 65 + sa->sa_command_1.bf.hmac_muting = hmac_mc; 68 66 sa->sa_command_1.bf.extended_seq_num = esn; 69 67 sa->sa_command_1.bf.seq_num_mask = sn_mask; 70 68 sa->sa_command_1.bf.mutable_bit_proc = mute; ··· 77 73 int crypto4xx_encrypt(struct ablkcipher_request *req) 78 74 { 79 75 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 76 + unsigned int ivlen = crypto_ablkcipher_ivsize( 77 + crypto_ablkcipher_reqtfm(req)); 78 + __le32 iv[ivlen]; 80 79 81 - ctx->direction = DIR_OUTBOUND; 82 - ctx->hash_final = 0; 83 - ctx->is_hash = 0; 84 - ctx->pd_ctl = 0x1; 80 + if (ivlen) 81 + crypto4xx_memcpy_to_le32(iv, req->info, ivlen); 85 82 86 83 return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, 87 - req->nbytes, req->info, 88 - get_dynamic_sa_iv_size(ctx)); 84 + req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len, 0); 89 85 } 90 86 91 87 int crypto4xx_decrypt(struct ablkcipher_request *req) 92 88 { 93 89 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 90 + unsigned int ivlen = crypto_ablkcipher_ivsize( 91 + crypto_ablkcipher_reqtfm(req)); 92 + __le32 iv[ivlen]; 94 93 95 - ctx->direction = DIR_INBOUND; 96 - ctx->hash_final = 0; 97 - ctx->is_hash = 0; 98 - ctx->pd_ctl = 1; 94 + if (ivlen) 95 + crypto4xx_memcpy_to_le32(iv, req->info, ivlen); 99 96 100 97 return crypto4xx_build_pd(&req->base, ctx, req->src, 
req->dst, 101 - req->nbytes, req->info, 102 - get_dynamic_sa_iv_size(ctx)); 98 + req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len, 0); 103 99 } 104 100 105 101 /** ··· 124 120 } 125 121 126 122 /* Create SA */ 127 - if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr) 123 + if (ctx->sa_in || ctx->sa_out) 128 124 crypto4xx_free_sa(ctx); 129 125 130 126 rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4); 131 127 if (rc) 132 128 return rc; 133 129 134 - if (ctx->state_record_dma_addr == 0) { 135 - rc = crypto4xx_alloc_state_record(ctx); 136 - if (rc) { 137 - crypto4xx_free_sa(ctx); 138 - return rc; 139 - } 140 - } 141 130 /* Setup SA */ 142 - sa = (struct dynamic_sa_ctl *) ctx->sa_in; 143 - ctx->hash_final = 0; 131 + sa = ctx->sa_in; 144 132 145 133 set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV, 146 134 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, ··· 146 150 SA_SEQ_MASK_OFF, SA_MC_ENABLE, 147 151 SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD, 148 152 SA_NOT_COPY_HDR); 149 - crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx), 150 - key, keylen); 151 - sa->sa_contents = SA_AES_CONTENTS | (keylen << 2); 153 + crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), 154 + key, keylen); 155 + sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2); 152 156 sa->sa_command_1.bf.key_len = keylen >> 3; 153 - ctx->is_hash = 0; 154 - ctx->direction = DIR_INBOUND; 155 - memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx), 156 - (void *)&ctx->state_record_dma_addr, 4); 157 - ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx); 158 157 159 158 memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4); 160 - sa = (struct dynamic_sa_ctl *) ctx->sa_out; 159 + sa = ctx->sa_out; 161 160 sa->sa_command_0.bf.dir = DIR_OUTBOUND; 162 161 163 162 return 0; ··· 165 174 CRYPTO_FEEDBACK_MODE_NO_FB); 166 175 } 167 176 177 + int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher, 178 + const u8 *key, unsigned int keylen) 179 + { 180 + return 
crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB, 181 + CRYPTO_FEEDBACK_MODE_128BIT_CFB); 182 + } 183 + 184 + int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher, 185 + const u8 *key, unsigned int keylen) 186 + { 187 + return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB, 188 + CRYPTO_FEEDBACK_MODE_NO_FB); 189 + } 190 + 191 + int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher, 192 + const u8 *key, unsigned int keylen) 193 + { 194 + return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB, 195 + CRYPTO_FEEDBACK_MODE_64BIT_OFB); 196 + } 197 + 198 + int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher, 199 + const u8 *key, unsigned int keylen) 200 + { 201 + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 202 + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); 203 + int rc; 204 + 205 + rc = crypto4xx_setkey_aes(cipher, key, keylen - CTR_RFC3686_NONCE_SIZE, 206 + CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB); 207 + if (rc) 208 + return rc; 209 + 210 + ctx->iv_nonce = cpu_to_le32p((u32 *)&key[keylen - 211 + CTR_RFC3686_NONCE_SIZE]); 212 + 213 + return 0; 214 + } 215 + 216 + int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req) 217 + { 218 + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 219 + __le32 iv[AES_IV_SIZE / 4] = { 220 + ctx->iv_nonce, 221 + cpu_to_le32p((u32 *) req->info), 222 + cpu_to_le32p((u32 *) (req->info + 4)), 223 + cpu_to_le32(1) }; 224 + 225 + return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, 226 + req->nbytes, iv, AES_IV_SIZE, 227 + ctx->sa_out, ctx->sa_len, 0); 228 + } 229 + 230 + int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req) 231 + { 232 + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 233 + __le32 iv[AES_IV_SIZE / 4] = { 234 + ctx->iv_nonce, 235 + cpu_to_le32p((u32 *) req->info), 236 + cpu_to_le32p((u32 *) (req->info + 4)), 237 + cpu_to_le32(1) }; 238 + 239 + return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, 240 + 
req->nbytes, iv, AES_IV_SIZE, 241 + ctx->sa_out, ctx->sa_len, 0); 242 + } 243 + 244 + static inline bool crypto4xx_aead_need_fallback(struct aead_request *req, 245 + bool is_ccm, bool decrypt) 246 + { 247 + struct crypto_aead *aead = crypto_aead_reqtfm(req); 248 + 249 + /* authsize has to be a multiple of 4 */ 250 + if (aead->authsize & 3) 251 + return true; 252 + 253 + /* 254 + * hardware does not handle cases where cryptlen 255 + * is less than a block 256 + */ 257 + if (req->cryptlen < AES_BLOCK_SIZE) 258 + return true; 259 + 260 + /* assoc len needs to be a multiple of 4 */ 261 + if (req->assoclen & 0x3) 262 + return true; 263 + 264 + /* CCM supports only counter field length of 2 and 4 bytes */ 265 + if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3)) 266 + return true; 267 + 268 + /* CCM - fix CBC MAC mismatch in special case */ 269 + if (is_ccm && decrypt && !req->assoclen) 270 + return true; 271 + 272 + return false; 273 + } 274 + 275 + static int crypto4xx_aead_fallback(struct aead_request *req, 276 + struct crypto4xx_ctx *ctx, bool do_decrypt) 277 + { 278 + char aead_req_data[sizeof(struct aead_request) + 279 + crypto_aead_reqsize(ctx->sw_cipher.aead)] 280 + __aligned(__alignof__(struct aead_request)); 281 + 282 + struct aead_request *subreq = (void *) aead_req_data; 283 + 284 + memset(subreq, 0, sizeof(aead_req_data)); 285 + 286 + aead_request_set_tfm(subreq, ctx->sw_cipher.aead); 287 + aead_request_set_callback(subreq, req->base.flags, 288 + req->base.complete, req->base.data); 289 + aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, 290 + req->iv); 291 + aead_request_set_ad(subreq, req->assoclen); 292 + return do_decrypt ? 
crypto_aead_decrypt(subreq) : 293 + crypto_aead_encrypt(subreq); 294 + } 295 + 296 + static int crypto4xx_setup_fallback(struct crypto4xx_ctx *ctx, 297 + struct crypto_aead *cipher, 298 + const u8 *key, 299 + unsigned int keylen) 300 + { 301 + int rc; 302 + 303 + crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK); 304 + crypto_aead_set_flags(ctx->sw_cipher.aead, 305 + crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK); 306 + rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen); 307 + crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK); 308 + crypto_aead_set_flags(cipher, 309 + crypto_aead_get_flags(ctx->sw_cipher.aead) & 310 + CRYPTO_TFM_RES_MASK); 311 + 312 + return rc; 313 + } 314 + 315 + /** 316 + * AES-CCM Functions 317 + */ 318 + 319 + int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key, 320 + unsigned int keylen) 321 + { 322 + struct crypto_tfm *tfm = crypto_aead_tfm(cipher); 323 + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); 324 + struct dynamic_sa_ctl *sa; 325 + int rc = 0; 326 + 327 + rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen); 328 + if (rc) 329 + return rc; 330 + 331 + if (ctx->sa_in || ctx->sa_out) 332 + crypto4xx_free_sa(ctx); 333 + 334 + rc = crypto4xx_alloc_sa(ctx, SA_AES128_CCM_LEN + (keylen - 16) / 4); 335 + if (rc) 336 + return rc; 337 + 338 + /* Setup SA */ 339 + sa = (struct dynamic_sa_ctl *) ctx->sa_in; 340 + sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2); 341 + 342 + set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV, 343 + SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, 344 + SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC, 345 + SA_CIPHER_ALG_AES, 346 + SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC, 347 + SA_OPCODE_HASH_DECRYPT, DIR_INBOUND); 348 + 349 + set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH, 350 + CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF, 351 + SA_SEQ_MASK_OFF, SA_MC_ENABLE, 352 + SA_NOT_COPY_PAD, SA_COPY_PAYLOAD, 353 + SA_NOT_COPY_HDR); 354 + 355 + 
sa->sa_command_1.bf.key_len = keylen >> 3; 356 + 357 + crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), key, keylen); 358 + 359 + memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4); 360 + sa = (struct dynamic_sa_ctl *) ctx->sa_out; 361 + 362 + set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV, 363 + SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, 364 + SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC, 365 + SA_CIPHER_ALG_AES, 366 + SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC, 367 + SA_OPCODE_ENCRYPT_HASH, DIR_OUTBOUND); 368 + 369 + set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH, 370 + CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF, 371 + SA_SEQ_MASK_OFF, SA_MC_ENABLE, 372 + SA_COPY_PAD, SA_COPY_PAYLOAD, 373 + SA_NOT_COPY_HDR); 374 + 375 + sa->sa_command_1.bf.key_len = keylen >> 3; 376 + return 0; 377 + } 378 + 379 + static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt) 380 + { 381 + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 382 + struct crypto_aead *aead = crypto_aead_reqtfm(req); 383 + unsigned int len = req->cryptlen; 384 + __le32 iv[16]; 385 + u32 tmp_sa[ctx->sa_len * 4]; 386 + struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa; 387 + 388 + if (crypto4xx_aead_need_fallback(req, true, decrypt)) 389 + return crypto4xx_aead_fallback(req, ctx, decrypt); 390 + 391 + if (decrypt) 392 + len -= crypto_aead_authsize(aead); 393 + 394 + memcpy(tmp_sa, decrypt ? 
ctx->sa_in : ctx->sa_out, sizeof(tmp_sa)); 395 + sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2; 396 + 397 + if (req->iv[0] == 1) { 398 + /* CRYPTO_MODE_AES_ICM */ 399 + sa->sa_command_1.bf.crypto_mode9_8 = 1; 400 + } 401 + 402 + iv[3] = cpu_to_le32(0); 403 + crypto4xx_memcpy_to_le32(iv, req->iv, 16 - (req->iv[0] + 1)); 404 + 405 + return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, 406 + len, iv, sizeof(iv), 407 + sa, ctx->sa_len, req->assoclen); 408 + } 409 + 410 + int crypto4xx_encrypt_aes_ccm(struct aead_request *req) 411 + { 412 + return crypto4xx_crypt_aes_ccm(req, false); 413 + } 414 + 415 + int crypto4xx_decrypt_aes_ccm(struct aead_request *req) 416 + { 417 + return crypto4xx_crypt_aes_ccm(req, true); 418 + } 419 + 420 + int crypto4xx_setauthsize_aead(struct crypto_aead *cipher, 421 + unsigned int authsize) 422 + { 423 + struct crypto_tfm *tfm = crypto_aead_tfm(cipher); 424 + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); 425 + 426 + return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize); 427 + } 428 + 429 + /** 430 + * AES-GCM Functions 431 + */ 432 + 433 + static int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen) 434 + { 435 + switch (keylen) { 436 + case 16: 437 + case 24: 438 + case 32: 439 + return 0; 440 + default: 441 + return -EINVAL; 442 + } 443 + } 444 + 445 + static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key, 446 + unsigned int keylen) 447 + { 448 + struct crypto_cipher *aes_tfm = NULL; 449 + uint8_t src[16] = { 0 }; 450 + int rc = 0; 451 + 452 + aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC | 453 + CRYPTO_ALG_NEED_FALLBACK); 454 + if (IS_ERR(aes_tfm)) { 455 + rc = PTR_ERR(aes_tfm); 456 + pr_warn("could not load aes cipher driver: %d\n", rc); 457 + return rc; 458 + } 459 + 460 + rc = crypto_cipher_setkey(aes_tfm, key, keylen); 461 + if (rc) { 462 + pr_err("setkey() failed: %d\n", rc); 463 + goto out; 464 + } 465 + 466 + crypto_cipher_encrypt_one(aes_tfm, src, 
src); 467 + crypto4xx_memcpy_to_le32(hash_start, src, 16); 468 + out: 469 + crypto_free_cipher(aes_tfm); 470 + return rc; 471 + } 472 + 473 + int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher, 474 + const u8 *key, unsigned int keylen) 475 + { 476 + struct crypto_tfm *tfm = crypto_aead_tfm(cipher); 477 + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); 478 + struct dynamic_sa_ctl *sa; 479 + int rc = 0; 480 + 481 + if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) { 482 + crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 483 + return -EINVAL; 484 + } 485 + 486 + rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen); 487 + if (rc) 488 + return rc; 489 + 490 + if (ctx->sa_in || ctx->sa_out) 491 + crypto4xx_free_sa(ctx); 492 + 493 + rc = crypto4xx_alloc_sa(ctx, SA_AES128_GCM_LEN + (keylen - 16) / 4); 494 + if (rc) 495 + return rc; 496 + 497 + sa = (struct dynamic_sa_ctl *) ctx->sa_in; 498 + 499 + sa->sa_contents.w = SA_AES_GCM_CONTENTS | (keylen << 2); 500 + set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV, 501 + SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, 502 + SA_NO_HEADER_PROC, SA_HASH_ALG_GHASH, 503 + SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO, 504 + SA_OP_GROUP_BASIC, SA_OPCODE_HASH_DECRYPT, 505 + DIR_INBOUND); 506 + set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH, 507 + CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF, 508 + SA_SEQ_MASK_ON, SA_MC_DISABLE, 509 + SA_NOT_COPY_PAD, SA_COPY_PAYLOAD, 510 + SA_NOT_COPY_HDR); 511 + 512 + sa->sa_command_1.bf.key_len = keylen >> 3; 513 + 514 + crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), 515 + key, keylen); 516 + 517 + rc = crypto4xx_compute_gcm_hash_key_sw(get_dynamic_sa_inner_digest(sa), 518 + key, keylen); 519 + if (rc) { 520 + pr_err("GCM hash key setting failed = %d\n", rc); 521 + goto err; 522 + } 523 + 524 + memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4); 525 + sa = (struct dynamic_sa_ctl *) ctx->sa_out; 526 + sa->sa_command_0.bf.dir = DIR_OUTBOUND; 527 + 
sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT_HASH; 528 + 529 + return 0; 530 + err: 531 + crypto4xx_free_sa(ctx); 532 + return rc; 533 + } 534 + 535 + static inline int crypto4xx_crypt_aes_gcm(struct aead_request *req, 536 + bool decrypt) 537 + { 538 + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 539 + unsigned int len = req->cryptlen; 540 + __le32 iv[4]; 541 + 542 + if (crypto4xx_aead_need_fallback(req, false, decrypt)) 543 + return crypto4xx_aead_fallback(req, ctx, decrypt); 544 + 545 + crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE); 546 + iv[3] = cpu_to_le32(1); 547 + 548 + if (decrypt) 549 + len -= crypto_aead_authsize(crypto_aead_reqtfm(req)); 550 + 551 + return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, 552 + len, iv, sizeof(iv), 553 + decrypt ? ctx->sa_in : ctx->sa_out, 554 + ctx->sa_len, req->assoclen); 555 + } 556 + 557 + int crypto4xx_encrypt_aes_gcm(struct aead_request *req) 558 + { 559 + return crypto4xx_crypt_aes_gcm(req, false); 560 + } 561 + 562 + int crypto4xx_decrypt_aes_gcm(struct aead_request *req) 563 + { 564 + return crypto4xx_crypt_aes_gcm(req, true); 565 + } 566 + 168 567 /** 169 568 * HASH SHA1 Functions 170 569 */ ··· 564 183 unsigned char hm) 565 184 { 566 185 struct crypto_alg *alg = tfm->__crt_alg; 567 - struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg); 186 + struct crypto4xx_alg *my_alg; 568 187 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); 569 - struct dynamic_sa_ctl *sa; 570 - struct dynamic_sa_hash160 *sa_in; 188 + struct dynamic_sa_hash160 *sa; 571 189 int rc; 572 190 191 + my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg, 192 + alg.u.hash); 573 193 ctx->dev = my_alg->dev; 574 - ctx->is_hash = 1; 575 - ctx->hash_final = 0; 576 194 577 195 /* Create SA */ 578 - if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr) 196 + if (ctx->sa_in || ctx->sa_out) 579 197 crypto4xx_free_sa(ctx); 580 198 581 199 rc = crypto4xx_alloc_sa(ctx, sa_len); 582 200 if (rc) 583 201 return 
rc; 584 202 585 - if (ctx->state_record_dma_addr == 0) { 586 - crypto4xx_alloc_state_record(ctx); 587 - if (!ctx->state_record_dma_addr) { 588 - crypto4xx_free_sa(ctx); 589 - return -ENOMEM; 590 - } 591 - } 592 - 593 203 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 594 204 sizeof(struct crypto4xx_ctx)); 595 - sa = (struct dynamic_sa_ctl *) ctx->sa_in; 596 - set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV, 205 + sa = (struct dynamic_sa_hash160 *)ctx->sa_in; 206 + set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV, 597 207 SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA, 598 208 SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL, 599 209 SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC, 600 210 SA_OPCODE_HASH, DIR_INBOUND); 601 - set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH, 211 + set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH, 602 212 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF, 603 213 SA_SEQ_MASK_OFF, SA_MC_ENABLE, 604 214 SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD, 605 215 SA_NOT_COPY_HDR); 606 - ctx->direction = DIR_INBOUND; 607 - sa->sa_contents = SA_HASH160_CONTENTS; 608 - sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in; 609 216 /* Need to zero hash digest in SA */ 610 - memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest)); 611 - memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest)); 612 - sa_in->state_ptr = ctx->state_record_dma_addr; 613 - ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx); 217 + memset(sa->inner_digest, 0, sizeof(sa->inner_digest)); 218 + memset(sa->outer_digest, 0, sizeof(sa->outer_digest)); 614 219 615 220 return 0; 616 221 } ··· 607 240 int ds; 608 241 struct dynamic_sa_ctl *sa; 609 242 610 - sa = (struct dynamic_sa_ctl *) ctx->sa_in; 243 + sa = ctx->sa_in; 611 244 ds = crypto_ahash_digestsize( 612 245 __crypto_ahash_cast(req->base.tfm)); 613 246 sa->sa_command_0.bf.digest_len = ds >> 2; 614 247 sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA; 615 - ctx->is_hash = 1; 616 - ctx->direction 
= DIR_INBOUND; 617 248 618 249 return 0; 619 250 } 620 251 621 252 int crypto4xx_hash_update(struct ahash_request *req) 622 253 { 254 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 623 255 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 256 + struct scatterlist dst; 257 + unsigned int ds = crypto_ahash_digestsize(ahash); 624 258 625 - ctx->is_hash = 1; 626 - ctx->hash_final = 0; 627 - ctx->pd_ctl = 0x11; 628 - ctx->direction = DIR_INBOUND; 259 + sg_init_one(&dst, req->result, ds); 629 260 630 - return crypto4xx_build_pd(&req->base, ctx, req->src, 631 - (struct scatterlist *) req->result, 632 - req->nbytes, NULL, 0); 261 + return crypto4xx_build_pd(&req->base, ctx, req->src, &dst, 262 + req->nbytes, NULL, 0, ctx->sa_in, 263 + ctx->sa_len, 0); 633 264 } 634 265 635 266 int crypto4xx_hash_final(struct ahash_request *req) ··· 637 272 638 273 int crypto4xx_hash_digest(struct ahash_request *req) 639 274 { 275 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 640 276 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 277 + struct scatterlist dst; 278 + unsigned int ds = crypto_ahash_digestsize(ahash); 641 279 642 - ctx->hash_final = 1; 643 - ctx->pd_ctl = 0x11; 644 - ctx->direction = DIR_INBOUND; 280 + sg_init_one(&dst, req->result, ds); 645 281 646 - return crypto4xx_build_pd(&req->base, ctx, req->src, 647 - (struct scatterlist *) req->result, 648 - req->nbytes, NULL, 0); 282 + return crypto4xx_build_pd(&req->base, ctx, req->src, &dst, 283 + req->nbytes, NULL, 0, ctx->sa_in, 284 + ctx->sa_len, 0); 649 285 } 650 286 651 287 /** ··· 657 291 return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1, 658 292 SA_HASH_MODE_HASH); 659 293 } 660 - 661 -
+473 -352
drivers/crypto/amcc/crypto4xx_core.c
··· 35 35 #include <asm/dcr.h> 36 36 #include <asm/dcr-regs.h> 37 37 #include <asm/cacheflush.h> 38 + #include <crypto/aead.h> 38 39 #include <crypto/aes.h> 40 + #include <crypto/ctr.h> 41 + #include <crypto/gcm.h> 39 42 #include <crypto/sha.h> 43 + #include <crypto/scatterwalk.h> 44 + #include <crypto/internal/aead.h> 45 + #include <crypto/internal/skcipher.h> 40 46 #include "crypto4xx_reg_def.h" 41 47 #include "crypto4xx_core.h" 42 48 #include "crypto4xx_sa.h" ··· 133 127 134 128 int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size) 135 129 { 136 - ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4, 137 - &ctx->sa_in_dma_addr, GFP_ATOMIC); 130 + ctx->sa_in = kzalloc(size * 4, GFP_ATOMIC); 138 131 if (ctx->sa_in == NULL) 139 132 return -ENOMEM; 140 133 141 - ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4, 142 - &ctx->sa_out_dma_addr, GFP_ATOMIC); 134 + ctx->sa_out = kzalloc(size * 4, GFP_ATOMIC); 143 135 if (ctx->sa_out == NULL) { 144 - dma_free_coherent(ctx->dev->core_dev->device, size * 4, 145 - ctx->sa_in, ctx->sa_in_dma_addr); 136 + kfree(ctx->sa_in); 137 + ctx->sa_in = NULL; 146 138 return -ENOMEM; 147 139 } 148 140 149 - memset(ctx->sa_in, 0, size * 4); 150 - memset(ctx->sa_out, 0, size * 4); 151 141 ctx->sa_len = size; 152 142 153 143 return 0; ··· 151 149 152 150 void crypto4xx_free_sa(struct crypto4xx_ctx *ctx) 153 151 { 154 - if (ctx->sa_in != NULL) 155 - dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4, 156 - ctx->sa_in, ctx->sa_in_dma_addr); 157 - if (ctx->sa_out != NULL) 158 - dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4, 159 - ctx->sa_out, ctx->sa_out_dma_addr); 160 - 161 - ctx->sa_in_dma_addr = 0; 162 - ctx->sa_out_dma_addr = 0; 152 + kfree(ctx->sa_in); 153 + ctx->sa_in = NULL; 154 + kfree(ctx->sa_out); 155 + ctx->sa_out = NULL; 163 156 ctx->sa_len = 0; 164 - } 165 - 166 - u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx) 167 - { 168 - ctx->state_record = 
dma_alloc_coherent(ctx->dev->core_dev->device, 169 - sizeof(struct sa_state_record), 170 - &ctx->state_record_dma_addr, GFP_ATOMIC); 171 - if (!ctx->state_record_dma_addr) 172 - return -ENOMEM; 173 - memset(ctx->state_record, 0, sizeof(struct sa_state_record)); 174 - 175 - return 0; 176 - } 177 - 178 - void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx) 179 - { 180 - if (ctx->state_record != NULL) 181 - dma_free_coherent(ctx->dev->core_dev->device, 182 - sizeof(struct sa_state_record), 183 - ctx->state_record, 184 - ctx->state_record_dma_addr); 185 - ctx->state_record_dma_addr = 0; 186 157 } 187 158 188 159 /** ··· 166 191 static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev) 167 192 { 168 193 int i; 169 - struct pd_uinfo *pd_uinfo; 170 194 dev->pdr = dma_alloc_coherent(dev->core_dev->device, 171 195 sizeof(struct ce_pd) * PPC4XX_NUM_PD, 172 196 &dev->pdr_pa, GFP_ATOMIC); ··· 181 207 dev->pdr_pa); 182 208 return -ENOMEM; 183 209 } 184 - memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD); 210 + memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD); 185 211 dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device, 186 - 256 * PPC4XX_NUM_PD, 212 + sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD, 187 213 &dev->shadow_sa_pool_pa, 188 214 GFP_ATOMIC); 189 215 if (!dev->shadow_sa_pool) ··· 195 221 if (!dev->shadow_sr_pool) 196 222 return -ENOMEM; 197 223 for (i = 0; i < PPC4XX_NUM_PD; i++) { 198 - pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo + 199 - sizeof(struct pd_uinfo) * i); 224 + struct ce_pd *pd = &dev->pdr[i]; 225 + struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i]; 226 + 227 + pd->sa = dev->shadow_sa_pool_pa + 228 + sizeof(union shadow_sa_buf) * i; 200 229 201 230 /* alloc 256 bytes which is enough for any kind of dynamic sa */ 202 - pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i; 203 - pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i; 231 + pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa; 204 232 205 233 /* alloc state record */ 206 - 
pd_uinfo->sr_va = dev->shadow_sr_pool + 207 - sizeof(struct sa_state_record) * i; 234 + pd_uinfo->sr_va = &dev->shadow_sr_pool[i]; 208 235 pd_uinfo->sr_pa = dev->shadow_sr_pool_pa + 209 236 sizeof(struct sa_state_record) * i; 210 237 } ··· 215 240 216 241 static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev) 217 242 { 218 - if (dev->pdr != NULL) 243 + if (dev->pdr) 219 244 dma_free_coherent(dev->core_dev->device, 220 245 sizeof(struct ce_pd) * PPC4XX_NUM_PD, 221 246 dev->pdr, dev->pdr_pa); 247 + 222 248 if (dev->shadow_sa_pool) 223 - dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD, 224 - dev->shadow_sa_pool, dev->shadow_sa_pool_pa); 249 + dma_free_coherent(dev->core_dev->device, 250 + sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD, 251 + dev->shadow_sa_pool, dev->shadow_sa_pool_pa); 252 + 225 253 if (dev->shadow_sr_pool) 226 254 dma_free_coherent(dev->core_dev->device, 227 255 sizeof(struct sa_state_record) * PPC4XX_NUM_PD, ··· 251 273 252 274 static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx) 253 275 { 254 - struct pd_uinfo *pd_uinfo; 276 + struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx]; 277 + u32 tail; 255 278 unsigned long flags; 256 279 257 - pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo + 258 - sizeof(struct pd_uinfo) * idx); 259 280 spin_lock_irqsave(&dev->core_dev->lock, flags); 281 + pd_uinfo->state = PD_ENTRY_FREE; 282 + 260 283 if (dev->pdr_tail != PPC4XX_LAST_PD) 261 284 dev->pdr_tail++; 262 285 else 263 286 dev->pdr_tail = 0; 264 - pd_uinfo->state = PD_ENTRY_FREE; 287 + tail = dev->pdr_tail; 265 288 spin_unlock_irqrestore(&dev->core_dev->lock, flags); 266 289 267 - return 0; 268 - } 269 - 270 - static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev, 271 - dma_addr_t *pd_dma, u32 idx) 272 - { 273 - *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx; 274 - 275 - return dev->pdr + sizeof(struct ce_pd) * idx; 290 + return tail; 276 291 } 277 292 278 293 /** ··· 297 326 * when this function is called. 
298 327 * preemption or interrupt must be disabled 299 328 */ 300 - u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n) 329 + static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n) 301 330 { 302 331 u32 retval; 303 332 u32 tmp; 333 + 304 334 if (n >= PPC4XX_NUM_GD) 305 335 return ERING_WAS_FULL; 306 336 ··· 344 372 { 345 373 *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx; 346 374 347 - return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx); 375 + return &dev->gdr[idx]; 348 376 } 349 377 350 378 /** ··· 355 383 static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev) 356 384 { 357 385 int i; 358 - struct ce_sd *sd_array; 359 386 360 387 /* alloc memory for scatter descriptor ring */ 361 388 dev->sdr = dma_alloc_coherent(dev->core_dev->device, ··· 363 392 if (!dev->sdr) 364 393 return -ENOMEM; 365 394 366 - dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE; 367 395 dev->scatter_buffer_va = 368 396 dma_alloc_coherent(dev->core_dev->device, 369 - dev->scatter_buffer_size * PPC4XX_NUM_SD, 397 + PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD, 370 398 &dev->scatter_buffer_pa, GFP_ATOMIC); 371 399 if (!dev->scatter_buffer_va) { 372 400 dma_free_coherent(dev->core_dev->device, ··· 374 404 return -ENOMEM; 375 405 } 376 406 377 - sd_array = dev->sdr; 378 - 379 407 for (i = 0; i < PPC4XX_NUM_SD; i++) { 380 - sd_array[i].ptr = dev->scatter_buffer_pa + 381 - dev->scatter_buffer_size * i; 408 + dev->sdr[i].ptr = dev->scatter_buffer_pa + 409 + PPC4XX_SD_BUFFER_SIZE * i; 382 410 } 383 411 384 412 return 0; ··· 384 416 385 417 static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev) 386 418 { 387 - if (dev->sdr != NULL) 419 + if (dev->sdr) 388 420 dma_free_coherent(dev->core_dev->device, 389 421 sizeof(struct ce_sd) * PPC4XX_NUM_SD, 390 422 dev->sdr, dev->sdr_pa); 391 423 392 - if (dev->scatter_buffer_va != NULL) 424 + if (dev->scatter_buffer_va) 393 425 dma_free_coherent(dev->core_dev->device, 394 - dev->scatter_buffer_size * PPC4XX_NUM_SD, 426 + 
PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD, 395 427 dev->scatter_buffer_va, 396 428 dev->scatter_buffer_pa); 397 429 } ··· 445 477 { 446 478 *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx; 447 479 448 - return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx); 449 - } 450 - 451 - static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev, 452 - dma_addr_t *addr, u32 *length, 453 - u32 *idx, u32 *offset, u32 *nbytes) 454 - { 455 - u32 len; 456 - 457 - if (*length > dev->scatter_buffer_size) { 458 - memcpy(phys_to_virt(*addr), 459 - dev->scatter_buffer_va + 460 - *idx * dev->scatter_buffer_size + *offset, 461 - dev->scatter_buffer_size); 462 - *offset = 0; 463 - *length -= dev->scatter_buffer_size; 464 - *nbytes -= dev->scatter_buffer_size; 465 - if (*idx == PPC4XX_LAST_SD) 466 - *idx = 0; 467 - else 468 - (*idx)++; 469 - *addr = *addr + dev->scatter_buffer_size; 470 - return 1; 471 - } else if (*length < dev->scatter_buffer_size) { 472 - memcpy(phys_to_virt(*addr), 473 - dev->scatter_buffer_va + 474 - *idx * dev->scatter_buffer_size + *offset, *length); 475 - if ((*offset + *length) == dev->scatter_buffer_size) { 476 - if (*idx == PPC4XX_LAST_SD) 477 - *idx = 0; 478 - else 479 - (*idx)++; 480 - *nbytes -= *length; 481 - *offset = 0; 482 - } else { 483 - *nbytes -= *length; 484 - *offset += *length; 485 - } 486 - 487 - return 0; 488 - } else { 489 - len = (*nbytes <= dev->scatter_buffer_size) ? 
490 - (*nbytes) : dev->scatter_buffer_size; 491 - memcpy(phys_to_virt(*addr), 492 - dev->scatter_buffer_va + 493 - *idx * dev->scatter_buffer_size + *offset, 494 - len); 495 - *offset = 0; 496 - *nbytes -= len; 497 - 498 - if (*idx == PPC4XX_LAST_SD) 499 - *idx = 0; 500 - else 501 - (*idx)++; 502 - 503 - return 0; 504 - } 480 + return &dev->sdr[idx]; 505 481 } 506 482 507 483 static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev, ··· 454 542 u32 nbytes, 455 543 struct scatterlist *dst) 456 544 { 457 - dma_addr_t addr; 458 - u32 this_sd; 459 - u32 offset; 460 - u32 len; 461 - u32 i; 462 - u32 sg_len; 463 - struct scatterlist *sg; 545 + unsigned int first_sd = pd_uinfo->first_sd; 546 + unsigned int last_sd; 547 + unsigned int overflow = 0; 548 + unsigned int to_copy; 549 + unsigned int dst_start = 0; 464 550 465 - this_sd = pd_uinfo->first_sd; 466 - offset = 0; 467 - i = 0; 551 + /* 552 + * Because the scatter buffers are all neatly organized in one 553 + * big continuous ringbuffer; scatterwalk_map_and_copy() can 554 + * be instructed to copy a range of buffers in one go. 555 + */ 556 + 557 + last_sd = (first_sd + pd_uinfo->num_sd); 558 + if (last_sd > PPC4XX_LAST_SD) { 559 + last_sd = PPC4XX_LAST_SD; 560 + overflow = last_sd % PPC4XX_NUM_SD; 561 + } 468 562 469 563 while (nbytes) { 470 - sg = &dst[i]; 471 - sg_len = sg->length; 472 - addr = dma_map_page(dev->core_dev->device, sg_page(sg), 473 - sg->offset, sg->length, DMA_TO_DEVICE); 564 + void *buf = dev->scatter_buffer_va + 565 + first_sd * PPC4XX_SD_BUFFER_SIZE; 474 566 475 - if (offset == 0) { 476 - len = (nbytes <= sg->length) ? nbytes : sg->length; 477 - while (crypto4xx_fill_one_page(dev, &addr, &len, 478 - &this_sd, &offset, &nbytes)) 479 - ; 480 - if (!nbytes) 481 - return; 482 - i++; 483 - } else { 484 - len = (nbytes <= (dev->scatter_buffer_size - offset)) ? 485 - nbytes : (dev->scatter_buffer_size - offset); 486 - len = (sg->length < len) ? 
sg->length : len; 487 - while (crypto4xx_fill_one_page(dev, &addr, &len, 488 - &this_sd, &offset, &nbytes)) 489 - ; 490 - if (!nbytes) 491 - return; 492 - sg_len -= len; 493 - if (sg_len) { 494 - addr += len; 495 - while (crypto4xx_fill_one_page(dev, &addr, 496 - &sg_len, &this_sd, &offset, &nbytes)) 497 - ; 498 - } 499 - i++; 567 + to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE * 568 + (1 + last_sd - first_sd)); 569 + scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1); 570 + nbytes -= to_copy; 571 + 572 + if (overflow) { 573 + first_sd = 0; 574 + last_sd = overflow; 575 + dst_start += to_copy; 576 + overflow = 0; 500 577 } 501 578 } 502 579 } 503 580 504 - static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo, 581 + static void crypto4xx_copy_digest_to_dst(void *dst, 582 + struct pd_uinfo *pd_uinfo, 505 583 struct crypto4xx_ctx *ctx) 506 584 { 507 585 struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in; 508 - struct sa_state_record *state_record = 509 - (struct sa_state_record *) pd_uinfo->sr_va; 510 586 511 587 if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) { 512 - memcpy((void *) pd_uinfo->dest_va, state_record->save_digest, 588 + memcpy(dst, pd_uinfo->sr_va->save_digest, 513 589 SA_HASH_ALG_SHA1_DIGEST_SIZE); 514 590 } 515 - 516 - return 0; 517 591 } 518 592 519 593 static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev, ··· 521 623 } 522 624 } 523 625 524 - static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev, 626 + static void crypto4xx_ablkcipher_done(struct crypto4xx_device *dev, 525 627 struct pd_uinfo *pd_uinfo, 526 628 struct ce_pd *pd) 527 629 { ··· 542 644 dst->offset, dst->length, DMA_FROM_DEVICE); 543 645 } 544 646 crypto4xx_ret_sg_desc(dev, pd_uinfo); 545 - if (ablk_req->base.complete != NULL) 546 - ablk_req->base.complete(&ablk_req->base, 0); 547 647 548 - return 0; 648 + if (pd_uinfo->state & PD_ENTRY_BUSY) 649 + ablkcipher_request_complete(ablk_req, -EINPROGRESS); 650 + 
ablkcipher_request_complete(ablk_req, 0); 549 651 } 550 652 551 - static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev, 653 + static void crypto4xx_ahash_done(struct crypto4xx_device *dev, 552 654 struct pd_uinfo *pd_uinfo) 553 655 { 554 656 struct crypto4xx_ctx *ctx; ··· 557 659 ahash_req = ahash_request_cast(pd_uinfo->async_req); 558 660 ctx = crypto_tfm_ctx(ahash_req->base.tfm); 559 661 560 - crypto4xx_copy_digest_to_dst(pd_uinfo, 662 + crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, 561 663 crypto_tfm_ctx(ahash_req->base.tfm)); 562 664 crypto4xx_ret_sg_desc(dev, pd_uinfo); 563 - /* call user provided callback function x */ 564 - if (ahash_req->base.complete != NULL) 565 - ahash_req->base.complete(&ahash_req->base, 0); 566 665 567 - return 0; 666 + if (pd_uinfo->state & PD_ENTRY_BUSY) 667 + ahash_request_complete(ahash_req, -EINPROGRESS); 668 + ahash_request_complete(ahash_req, 0); 568 669 } 569 670 570 - static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx) 671 + static void crypto4xx_aead_done(struct crypto4xx_device *dev, 672 + struct pd_uinfo *pd_uinfo, 673 + struct ce_pd *pd) 571 674 { 572 - struct ce_pd *pd; 573 - struct pd_uinfo *pd_uinfo; 675 + struct aead_request *aead_req; 676 + struct crypto4xx_ctx *ctx; 677 + struct scatterlist *dst = pd_uinfo->dest_va; 678 + int err = 0; 574 679 575 - pd = dev->pdr + sizeof(struct ce_pd)*idx; 576 - pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx; 577 - if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) == 578 - CRYPTO_ALG_TYPE_ABLKCIPHER) 579 - return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd); 580 - else 581 - return crypto4xx_ahash_done(dev, pd_uinfo); 680 + aead_req = container_of(pd_uinfo->async_req, struct aead_request, 681 + base); 682 + ctx = crypto_tfm_ctx(aead_req->base.tfm); 683 + 684 + if (pd_uinfo->using_sd) { 685 + crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, 686 + pd->pd_ctl_len.bf.pkt_len, 687 + dst); 688 + } else { 689 + __dma_sync_page(sg_page(dst), 
dst->offset, dst->length, 690 + DMA_FROM_DEVICE); 691 + } 692 + 693 + if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) { 694 + /* append icv at the end */ 695 + size_t cp_len = crypto_aead_authsize( 696 + crypto_aead_reqtfm(aead_req)); 697 + u32 icv[cp_len]; 698 + 699 + crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest, 700 + cp_len); 701 + 702 + scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen, 703 + cp_len, 1); 704 + } 705 + 706 + crypto4xx_ret_sg_desc(dev, pd_uinfo); 707 + 708 + if (pd->pd_ctl.bf.status & 0xff) { 709 + if (pd->pd_ctl.bf.status & 0x1) { 710 + /* authentication error */ 711 + err = -EBADMSG; 712 + } else { 713 + if (!__ratelimit(&dev->aead_ratelimit)) { 714 + if (pd->pd_ctl.bf.status & 2) 715 + pr_err("pad fail error\n"); 716 + if (pd->pd_ctl.bf.status & 4) 717 + pr_err("seqnum fail\n"); 718 + if (pd->pd_ctl.bf.status & 8) 719 + pr_err("error _notify\n"); 720 + pr_err("aead return err status = 0x%02x\n", 721 + pd->pd_ctl.bf.status & 0xff); 722 + pr_err("pd pad_ctl = 0x%08x\n", 723 + pd->pd_ctl.bf.pd_pad_ctl); 724 + } 725 + err = -EINVAL; 726 + } 727 + } 728 + 729 + if (pd_uinfo->state & PD_ENTRY_BUSY) 730 + aead_request_complete(aead_req, -EINPROGRESS); 731 + 732 + aead_request_complete(aead_req, err); 582 733 } 583 734 584 - /** 585 - * Note: Only use this function to copy items that is word aligned. 
586 - */ 587 - void crypto4xx_memcpy_le(unsigned int *dst, 588 - const unsigned char *buf, 589 - int len) 735 + static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx) 590 736 { 591 - u8 *tmp; 592 - for (; len >= 4; buf += 4, len -= 4) 593 - *dst++ = cpu_to_le32(*(unsigned int *) buf); 737 + struct ce_pd *pd = &dev->pdr[idx]; 738 + struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx]; 594 739 595 - tmp = (u8 *)dst; 596 - switch (len) { 597 - case 3: 598 - *tmp++ = 0; 599 - *tmp++ = *(buf+2); 600 - *tmp++ = *(buf+1); 601 - *tmp++ = *buf; 740 + switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) { 741 + case CRYPTO_ALG_TYPE_ABLKCIPHER: 742 + crypto4xx_ablkcipher_done(dev, pd_uinfo, pd); 602 743 break; 603 - case 2: 604 - *tmp++ = 0; 605 - *tmp++ = 0; 606 - *tmp++ = *(buf+1); 607 - *tmp++ = *buf; 744 + case CRYPTO_ALG_TYPE_AEAD: 745 + crypto4xx_aead_done(dev, pd_uinfo, pd); 608 746 break; 609 - case 1: 610 - *tmp++ = 0; 611 - *tmp++ = 0; 612 - *tmp++ = 0; 613 - *tmp++ = *buf; 614 - break; 615 - default: 747 + case CRYPTO_ALG_TYPE_AHASH: 748 + crypto4xx_ahash_done(dev, pd_uinfo); 616 749 break; 617 750 } 618 751 } ··· 656 727 iounmap(core_dev->dev->ce_base); 657 728 kfree(core_dev->dev); 658 729 kfree(core_dev); 659 - } 660 - 661 - void crypto4xx_return_pd(struct crypto4xx_device *dev, 662 - u32 pd_entry, struct ce_pd *pd, 663 - struct pd_uinfo *pd_uinfo) 664 - { 665 - /* irq should be already disabled */ 666 - dev->pdr_head = pd_entry; 667 - pd->pd_ctl.w = 0; 668 - pd->pd_ctl_len.w = 0; 669 - pd_uinfo->state = PD_ENTRY_FREE; 670 730 } 671 731 672 732 static u32 get_next_gd(u32 current) ··· 674 756 return 0; 675 757 } 676 758 677 - u32 crypto4xx_build_pd(struct crypto_async_request *req, 759 + int crypto4xx_build_pd(struct crypto_async_request *req, 678 760 struct crypto4xx_ctx *ctx, 679 761 struct scatterlist *src, 680 762 struct scatterlist *dst, 681 - unsigned int datalen, 682 - void *iv, u32 iv_len) 763 + const unsigned int datalen, 764 + const __le32 
*iv, const u32 iv_len, 765 + const struct dynamic_sa_ctl *req_sa, 766 + const unsigned int sa_len, 767 + const unsigned int assoclen) 683 768 { 769 + struct scatterlist _dst[2]; 684 770 struct crypto4xx_device *dev = ctx->dev; 685 - dma_addr_t addr, pd_dma, sd_dma, gd_dma; 686 771 struct dynamic_sa_ctl *sa; 687 - struct scatterlist *sg; 688 772 struct ce_gd *gd; 689 773 struct ce_pd *pd; 690 774 u32 num_gd, num_sd; ··· 694 774 u32 fst_sd = 0xffffffff; 695 775 u32 pd_entry; 696 776 unsigned long flags; 697 - struct pd_uinfo *pd_uinfo = NULL; 698 - unsigned int nbytes = datalen, idx; 699 - unsigned int ivlen = 0; 777 + struct pd_uinfo *pd_uinfo; 778 + unsigned int nbytes = datalen; 779 + size_t offset_to_sr_ptr; 700 780 u32 gd_idx = 0; 781 + int tmp; 782 + bool is_busy; 701 783 702 - /* figure how many gd is needed */ 703 - num_gd = sg_nents_for_len(src, datalen); 704 - if ((int)num_gd < 0) { 784 + /* figure how many gd are needed */ 785 + tmp = sg_nents_for_len(src, assoclen + datalen); 786 + if (tmp < 0) { 705 787 dev_err(dev->core_dev->device, "Invalid number of src SG.\n"); 706 - return -EINVAL; 788 + return tmp; 707 789 } 708 - if (num_gd == 1) 709 - num_gd = 0; 790 + if (tmp == 1) 791 + tmp = 0; 792 + num_gd = tmp; 710 793 711 - /* figure how many sd is needed */ 712 - if (sg_is_last(dst) || ctx->is_hash) { 794 + if (assoclen) { 795 + nbytes += assoclen; 796 + dst = scatterwalk_ffwd(_dst, dst, assoclen); 797 + } 798 + 799 + /* figure how many sd are needed */ 800 + if (sg_is_last(dst)) { 713 801 num_sd = 0; 714 802 } else { 715 803 if (datalen > PPC4XX_SD_BUFFER_SIZE) { ··· 736 808 * already got must be return the original place. 737 809 */ 738 810 spin_lock_irqsave(&dev->core_dev->lock, flags); 811 + /* 812 + * Let the caller know to slow down, once more than 13/16ths = 81% 813 + * of the available data contexts are being used simultaneously. 814 + * 815 + * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" for 816 + * 31 more contexts. 
Before new requests have to be rejected. 817 + */ 818 + if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) { 819 + is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >= 820 + ((PPC4XX_NUM_PD * 13) / 16); 821 + } else { 822 + /* 823 + * To fix contention issues between ipsec (no blacklog) and 824 + * dm-crypto (backlog) reserve 32 entries for "no backlog" 825 + * data contexts. 826 + */ 827 + is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >= 828 + ((PPC4XX_NUM_PD * 15) / 16); 829 + 830 + if (is_busy) { 831 + spin_unlock_irqrestore(&dev->core_dev->lock, flags); 832 + return -EBUSY; 833 + } 834 + } 835 + 739 836 if (num_gd) { 740 837 fst_gd = crypto4xx_get_n_gd(dev, num_gd); 741 838 if (fst_gd == ERING_WAS_FULL) { ··· 788 835 } 789 836 spin_unlock_irqrestore(&dev->core_dev->lock, flags); 790 837 791 - pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo + 792 - sizeof(struct pd_uinfo) * pd_entry); 793 - pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry); 838 + pd = &dev->pdr[pd_entry]; 839 + pd->sa_len = sa_len; 840 + 841 + pd_uinfo = &dev->pdr_uinfo[pd_entry]; 794 842 pd_uinfo->async_req = req; 795 843 pd_uinfo->num_gd = num_gd; 796 844 pd_uinfo->num_sd = num_sd; 797 845 798 - if (iv_len || ctx->is_hash) { 799 - ivlen = iv_len; 800 - pd->sa = pd_uinfo->sa_pa; 801 - sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va; 802 - if (ctx->direction == DIR_INBOUND) 803 - memcpy(sa, ctx->sa_in, ctx->sa_len * 4); 804 - else 805 - memcpy(sa, ctx->sa_out, ctx->sa_len * 4); 846 + if (iv_len) 847 + memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len); 806 848 807 - memcpy((void *) sa + ctx->offset_to_sr_ptr, 808 - &pd_uinfo->sr_pa, 4); 849 + sa = pd_uinfo->sa_va; 850 + memcpy(sa, req_sa, sa_len * 4); 809 851 810 - if (iv_len) 811 - crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len); 812 - } else { 813 - if (ctx->direction == DIR_INBOUND) { 814 - pd->sa = ctx->sa_in_dma_addr; 815 - sa = (struct dynamic_sa_ctl *) ctx->sa_in; 816 - } else { 817 - pd->sa = ctx->sa_out_dma_addr; 818 - sa = 
(struct dynamic_sa_ctl *) ctx->sa_out; 819 - } 820 - } 821 - pd->sa_len = ctx->sa_len; 852 + sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2); 853 + offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa); 854 + *(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa; 855 + 822 856 if (num_gd) { 857 + dma_addr_t gd_dma; 858 + struct scatterlist *sg; 859 + 823 860 /* get first gd we are going to use */ 824 861 gd_idx = fst_gd; 825 862 pd_uinfo->first_gd = fst_gd; ··· 818 875 pd->src = gd_dma; 819 876 /* enable gather */ 820 877 sa->sa_command_0.bf.gather = 1; 821 - idx = 0; 822 - src = &src[0]; 823 878 /* walk the sg, and setup gather array */ 879 + 880 + sg = src; 824 881 while (nbytes) { 825 - sg = &src[idx]; 826 - addr = dma_map_page(dev->core_dev->device, sg_page(sg), 827 - sg->offset, sg->length, DMA_TO_DEVICE); 828 - gd->ptr = addr; 829 - gd->ctl_len.len = sg->length; 882 + size_t len; 883 + 884 + len = min(sg->length, nbytes); 885 + gd->ptr = dma_map_page(dev->core_dev->device, 886 + sg_page(sg), sg->offset, len, DMA_TO_DEVICE); 887 + gd->ctl_len.len = len; 830 888 gd->ctl_len.done = 0; 831 889 gd->ctl_len.ready = 1; 832 - if (sg->length >= nbytes) 890 + if (len >= nbytes) 833 891 break; 892 + 834 893 nbytes -= sg->length; 835 894 gd_idx = get_next_gd(gd_idx); 836 895 gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx); 837 - idx++; 896 + sg = sg_next(sg); 838 897 } 839 898 } else { 840 899 pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src), 841 - src->offset, src->length, DMA_TO_DEVICE); 900 + src->offset, min(nbytes, src->length), 901 + DMA_TO_DEVICE); 842 902 /* 843 903 * Disable gather in sa command 844 904 */ ··· 852 906 pd_uinfo->first_gd = 0xffffffff; 853 907 pd_uinfo->num_gd = 0; 854 908 } 855 - if (ctx->is_hash || sg_is_last(dst)) { 909 + if (sg_is_last(dst)) { 856 910 /* 857 911 * we know application give us dst a whole piece of memory 858 912 * no need to use scatter ring. 
859 - * In case of is_hash, the icv is always at end of src data. 860 913 */ 861 914 pd_uinfo->using_sd = 0; 862 915 pd_uinfo->first_sd = 0xffffffff; 863 916 pd_uinfo->num_sd = 0; 864 917 pd_uinfo->dest_va = dst; 865 918 sa->sa_command_0.bf.scatter = 0; 866 - if (ctx->is_hash) 867 - pd->dest = virt_to_phys((void *)dst); 868 - else 869 - pd->dest = (u32)dma_map_page(dev->core_dev->device, 870 - sg_page(dst), dst->offset, 871 - dst->length, DMA_TO_DEVICE); 919 + pd->dest = (u32)dma_map_page(dev->core_dev->device, 920 + sg_page(dst), dst->offset, 921 + min(datalen, dst->length), 922 + DMA_TO_DEVICE); 872 923 } else { 924 + dma_addr_t sd_dma; 873 925 struct ce_sd *sd = NULL; 926 + 874 927 u32 sd_idx = fst_sd; 875 928 nbytes = datalen; 876 929 sa->sa_command_0.bf.scatter = 1; ··· 883 938 sd->ctl.done = 0; 884 939 sd->ctl.rdy = 1; 885 940 /* sd->ptr should be setup by sd_init routine*/ 886 - idx = 0; 887 941 if (nbytes >= PPC4XX_SD_BUFFER_SIZE) 888 942 nbytes -= PPC4XX_SD_BUFFER_SIZE; 889 943 else ··· 893 949 /* setup scatter descriptor */ 894 950 sd->ctl.done = 0; 895 951 sd->ctl.rdy = 1; 896 - if (nbytes >= PPC4XX_SD_BUFFER_SIZE) 952 + if (nbytes >= PPC4XX_SD_BUFFER_SIZE) { 897 953 nbytes -= PPC4XX_SD_BUFFER_SIZE; 898 - else 954 + } else { 899 955 /* 900 956 * SD entry can hold PPC4XX_SD_BUFFER_SIZE, 901 957 * which is more than nbytes, so done. 902 958 */ 903 959 nbytes = 0; 960 + } 904 961 } 905 962 } 906 963 907 - sa->sa_command_1.bf.hash_crypto_offset = 0; 908 - pd->pd_ctl.w = ctx->pd_ctl; 909 - pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen; 910 - pd_uinfo->state = PD_ENTRY_INUSE; 964 + pd->pd_ctl.w = PD_CTL_HOST_READY | 965 + ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) | 966 + (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ? 967 + PD_CTL_HASH_FINAL : 0); 968 + pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen); 969 + pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? 
PD_ENTRY_BUSY : 0); 970 + 911 971 wmb(); 912 972 /* write any value to push engine to read a pd */ 973 + writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD); 913 974 writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD); 914 - return -EINPROGRESS; 975 + return is_busy ? -EBUSY : -EINPROGRESS; 915 976 } 916 977 917 978 /** 918 979 * Algorithm Registration Functions 919 980 */ 920 - static int crypto4xx_alg_init(struct crypto_tfm *tfm) 981 + static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg, 982 + struct crypto4xx_ctx *ctx) 921 983 { 922 - struct crypto_alg *alg = tfm->__crt_alg; 923 - struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg); 924 - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); 925 - 926 984 ctx->dev = amcc_alg->dev; 927 985 ctx->sa_in = NULL; 928 986 ctx->sa_out = NULL; 929 - ctx->sa_in_dma_addr = 0; 930 - ctx->sa_out_dma_addr = 0; 931 987 ctx->sa_len = 0; 988 + } 932 989 933 - switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { 934 - default: 935 - tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx); 936 - break; 937 - case CRYPTO_ALG_TYPE_AHASH: 938 - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 939 - sizeof(struct crypto4xx_ctx)); 940 - break; 941 - } 990 + static int crypto4xx_ablk_init(struct crypto_tfm *tfm) 991 + { 992 + struct crypto_alg *alg = tfm->__crt_alg; 993 + struct crypto4xx_alg *amcc_alg; 994 + struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); 942 995 996 + amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher); 997 + crypto4xx_ctx_init(amcc_alg, ctx); 998 + tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx); 943 999 return 0; 944 1000 } 945 1001 946 - static void crypto4xx_alg_exit(struct crypto_tfm *tfm) 1002 + static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx) 947 1003 { 948 - struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); 949 - 950 1004 crypto4xx_free_sa(ctx); 951 - crypto4xx_free_state_record(ctx); 952 1005 } 953 1006 954 - int crypto4xx_register_alg(struct crypto4xx_device 
*sec_dev, 955 - struct crypto4xx_alg_common *crypto_alg, 956 - int array_size) 1007 + static void crypto4xx_ablk_exit(struct crypto_tfm *tfm) 1008 + { 1009 + crypto4xx_common_exit(crypto_tfm_ctx(tfm)); 1010 + } 1011 + 1012 + static int crypto4xx_aead_init(struct crypto_aead *tfm) 1013 + { 1014 + struct aead_alg *alg = crypto_aead_alg(tfm); 1015 + struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm); 1016 + struct crypto4xx_alg *amcc_alg; 1017 + 1018 + ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0, 1019 + CRYPTO_ALG_NEED_FALLBACK | 1020 + CRYPTO_ALG_ASYNC); 1021 + if (IS_ERR(ctx->sw_cipher.aead)) 1022 + return PTR_ERR(ctx->sw_cipher.aead); 1023 + 1024 + amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead); 1025 + crypto4xx_ctx_init(amcc_alg, ctx); 1026 + crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) + 1027 + max(sizeof(struct crypto4xx_ctx), 32 + 1028 + crypto_aead_reqsize(ctx->sw_cipher.aead))); 1029 + return 0; 1030 + } 1031 + 1032 + static void crypto4xx_aead_exit(struct crypto_aead *tfm) 1033 + { 1034 + struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm); 1035 + 1036 + crypto4xx_common_exit(ctx); 1037 + crypto_free_aead(ctx->sw_cipher.aead); 1038 + } 1039 + 1040 + static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, 1041 + struct crypto4xx_alg_common *crypto_alg, 1042 + int array_size) 957 1043 { 958 1044 struct crypto4xx_alg *alg; 959 1045 int i; ··· 998 1024 alg->dev = sec_dev; 999 1025 1000 1026 switch (alg->alg.type) { 1027 + case CRYPTO_ALG_TYPE_AEAD: 1028 + rc = crypto_register_aead(&alg->alg.u.aead); 1029 + break; 1030 + 1001 1031 case CRYPTO_ALG_TYPE_AHASH: 1002 1032 rc = crypto_register_ahash(&alg->alg.u.hash); 1003 1033 break; ··· 1011 1033 break; 1012 1034 } 1013 1035 1014 - if (rc) { 1015 - list_del(&alg->entry); 1036 + if (rc) 1016 1037 kfree(alg); 1017 - } else { 1038 + else 1018 1039 list_add_tail(&alg->entry, &sec_dev->alg_list); 1019 - } 1020 1040 } 1021 1041 1022 1042 return 0; ··· 1031 1055 
crypto_unregister_ahash(&alg->alg.u.hash); 1032 1056 break; 1033 1057 1058 + case CRYPTO_ALG_TYPE_AEAD: 1059 + crypto_unregister_aead(&alg->alg.u.aead); 1060 + break; 1061 + 1034 1062 default: 1035 1063 crypto_unregister_alg(&alg->alg.u.cipher); 1036 1064 } ··· 1048 1068 struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev); 1049 1069 struct pd_uinfo *pd_uinfo; 1050 1070 struct ce_pd *pd; 1051 - u32 tail; 1071 + u32 tail = core_dev->dev->pdr_tail; 1072 + u32 head = core_dev->dev->pdr_head; 1052 1073 1053 - while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) { 1054 - tail = core_dev->dev->pdr_tail; 1055 - pd_uinfo = core_dev->dev->pdr_uinfo + 1056 - sizeof(struct pd_uinfo)*tail; 1057 - pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail; 1058 - if ((pd_uinfo->state == PD_ENTRY_INUSE) && 1059 - pd->pd_ctl.bf.pe_done && 1060 - !pd->pd_ctl.bf.host_ready) { 1061 - pd->pd_ctl.bf.pe_done = 0; 1074 + do { 1075 + pd_uinfo = &core_dev->dev->pdr_uinfo[tail]; 1076 + pd = &core_dev->dev->pdr[tail]; 1077 + if ((pd_uinfo->state & PD_ENTRY_INUSE) && 1078 + ((READ_ONCE(pd->pd_ctl.w) & 1079 + (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) == 1080 + PD_CTL_PE_DONE)) { 1062 1081 crypto4xx_pd_done(core_dev->dev, tail); 1063 - crypto4xx_put_pd_to_pdr(core_dev->dev, tail); 1064 - pd_uinfo->state = PD_ENTRY_FREE; 1082 + tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail); 1065 1083 } else { 1066 1084 /* if tail not done, break */ 1067 1085 break; 1068 1086 } 1069 - } 1087 + } while (head != tail); 1070 1088 } 1071 1089 1072 1090 /** ··· 1088 1110 /** 1089 1111 * Supported Crypto Algorithms 1090 1112 */ 1091 - struct crypto4xx_alg_common crypto4xx_alg[] = { 1113 + static struct crypto4xx_alg_common crypto4xx_alg[] = { 1092 1114 /* Crypto AES modes */ 1093 1115 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { 1094 1116 .cra_name = "cbc(aes)", 1095 1117 .cra_driver_name = "cbc-aes-ppc4xx", 1096 1118 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, 1097 - .cra_flags = 
CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 1119 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 1120 + CRYPTO_ALG_ASYNC | 1121 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1098 1122 .cra_blocksize = AES_BLOCK_SIZE, 1099 1123 .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1100 1124 .cra_type = &crypto_ablkcipher_type, 1101 - .cra_init = crypto4xx_alg_init, 1102 - .cra_exit = crypto4xx_alg_exit, 1125 + .cra_init = crypto4xx_ablk_init, 1126 + .cra_exit = crypto4xx_ablk_exit, 1103 1127 .cra_module = THIS_MODULE, 1104 1128 .cra_u = { 1105 1129 .ablkcipher = { ··· 1114 1134 } 1115 1135 } 1116 1136 }}, 1137 + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { 1138 + .cra_name = "cfb(aes)", 1139 + .cra_driver_name = "cfb-aes-ppc4xx", 1140 + .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, 1141 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 1142 + CRYPTO_ALG_ASYNC | 1143 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1144 + .cra_blocksize = AES_BLOCK_SIZE, 1145 + .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1146 + .cra_type = &crypto_ablkcipher_type, 1147 + .cra_init = crypto4xx_ablk_init, 1148 + .cra_exit = crypto4xx_ablk_exit, 1149 + .cra_module = THIS_MODULE, 1150 + .cra_u = { 1151 + .ablkcipher = { 1152 + .min_keysize = AES_MIN_KEY_SIZE, 1153 + .max_keysize = AES_MAX_KEY_SIZE, 1154 + .ivsize = AES_IV_SIZE, 1155 + .setkey = crypto4xx_setkey_aes_cfb, 1156 + .encrypt = crypto4xx_encrypt, 1157 + .decrypt = crypto4xx_decrypt, 1158 + } 1159 + } 1160 + } }, 1161 + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { 1162 + .cra_name = "rfc3686(ctr(aes))", 1163 + .cra_driver_name = "rfc3686-ctr-aes-ppc4xx", 1164 + .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, 1165 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 1166 + CRYPTO_ALG_ASYNC | 1167 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1168 + .cra_blocksize = AES_BLOCK_SIZE, 1169 + .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1170 + .cra_type = &crypto_ablkcipher_type, 1171 + .cra_init = crypto4xx_ablk_init, 1172 + .cra_exit = crypto4xx_ablk_exit, 1173 + .cra_module = THIS_MODULE, 1174 + 
.cra_u = { 1175 + .ablkcipher = { 1176 + .min_keysize = AES_MIN_KEY_SIZE + 1177 + CTR_RFC3686_NONCE_SIZE, 1178 + .max_keysize = AES_MAX_KEY_SIZE + 1179 + CTR_RFC3686_NONCE_SIZE, 1180 + .ivsize = CTR_RFC3686_IV_SIZE, 1181 + .setkey = crypto4xx_setkey_rfc3686, 1182 + .encrypt = crypto4xx_rfc3686_encrypt, 1183 + .decrypt = crypto4xx_rfc3686_decrypt, 1184 + } 1185 + } 1186 + } }, 1187 + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { 1188 + .cra_name = "ecb(aes)", 1189 + .cra_driver_name = "ecb-aes-ppc4xx", 1190 + .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, 1191 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 1192 + CRYPTO_ALG_ASYNC | 1193 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1194 + .cra_blocksize = AES_BLOCK_SIZE, 1195 + .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1196 + .cra_type = &crypto_ablkcipher_type, 1197 + .cra_init = crypto4xx_ablk_init, 1198 + .cra_exit = crypto4xx_ablk_exit, 1199 + .cra_module = THIS_MODULE, 1200 + .cra_u = { 1201 + .ablkcipher = { 1202 + .min_keysize = AES_MIN_KEY_SIZE, 1203 + .max_keysize = AES_MAX_KEY_SIZE, 1204 + .setkey = crypto4xx_setkey_aes_ecb, 1205 + .encrypt = crypto4xx_encrypt, 1206 + .decrypt = crypto4xx_decrypt, 1207 + } 1208 + } 1209 + } }, 1210 + { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = { 1211 + .cra_name = "ofb(aes)", 1212 + .cra_driver_name = "ofb-aes-ppc4xx", 1213 + .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, 1214 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 1215 + CRYPTO_ALG_ASYNC | 1216 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1217 + .cra_blocksize = AES_BLOCK_SIZE, 1218 + .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1219 + .cra_type = &crypto_ablkcipher_type, 1220 + .cra_init = crypto4xx_ablk_init, 1221 + .cra_exit = crypto4xx_ablk_exit, 1222 + .cra_module = THIS_MODULE, 1223 + .cra_u = { 1224 + .ablkcipher = { 1225 + .min_keysize = AES_MIN_KEY_SIZE, 1226 + .max_keysize = AES_MAX_KEY_SIZE, 1227 + .ivsize = AES_IV_SIZE, 1228 + .setkey = crypto4xx_setkey_aes_ofb, 1229 + .encrypt = crypto4xx_encrypt, 1230 + .decrypt = 
crypto4xx_decrypt, 1231 + } 1232 + } 1233 + } }, 1234 + 1235 + /* AEAD */ 1236 + { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = { 1237 + .setkey = crypto4xx_setkey_aes_ccm, 1238 + .setauthsize = crypto4xx_setauthsize_aead, 1239 + .encrypt = crypto4xx_encrypt_aes_ccm, 1240 + .decrypt = crypto4xx_decrypt_aes_ccm, 1241 + .init = crypto4xx_aead_init, 1242 + .exit = crypto4xx_aead_exit, 1243 + .ivsize = AES_BLOCK_SIZE, 1244 + .maxauthsize = 16, 1245 + .base = { 1246 + .cra_name = "ccm(aes)", 1247 + .cra_driver_name = "ccm-aes-ppc4xx", 1248 + .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, 1249 + .cra_flags = CRYPTO_ALG_ASYNC | 1250 + CRYPTO_ALG_NEED_FALLBACK | 1251 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1252 + .cra_blocksize = 1, 1253 + .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1254 + .cra_module = THIS_MODULE, 1255 + }, 1256 + } }, 1257 + { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = { 1258 + .setkey = crypto4xx_setkey_aes_gcm, 1259 + .setauthsize = crypto4xx_setauthsize_aead, 1260 + .encrypt = crypto4xx_encrypt_aes_gcm, 1261 + .decrypt = crypto4xx_decrypt_aes_gcm, 1262 + .init = crypto4xx_aead_init, 1263 + .exit = crypto4xx_aead_exit, 1264 + .ivsize = GCM_AES_IV_SIZE, 1265 + .maxauthsize = 16, 1266 + .base = { 1267 + .cra_name = "gcm(aes)", 1268 + .cra_driver_name = "gcm-aes-ppc4xx", 1269 + .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, 1270 + .cra_flags = CRYPTO_ALG_ASYNC | 1271 + CRYPTO_ALG_NEED_FALLBACK | 1272 + CRYPTO_ALG_KERN_DRIVER_ONLY, 1273 + .cra_blocksize = 1, 1274 + .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1275 + .cra_module = THIS_MODULE, 1276 + }, 1277 + } }, 1117 1278 }; 1118 1279 1119 1280 /** ··· 1308 1187 core_dev->device = dev; 1309 1188 spin_lock_init(&core_dev->lock); 1310 1189 INIT_LIST_HEAD(&core_dev->dev->alg_list); 1190 + ratelimit_default_init(&core_dev->dev->aead_ratelimit); 1311 1191 rc = crypto4xx_build_pdr(core_dev->dev); 1312 1192 if (rc) 1313 1193 goto err_build_pdr; 1314 1194 1315 1195 rc = crypto4xx_build_gdr(core_dev->dev); 1316 1196 if (rc) 1317 - goto 
err_build_gdr; 1197 + goto err_build_pdr; 1318 1198 1319 1199 rc = crypto4xx_build_sdr(core_dev->dev); 1320 1200 if (rc) ··· 1358 1236 err_request_irq: 1359 1237 irq_dispose_mapping(core_dev->irq); 1360 1238 tasklet_kill(&core_dev->tasklet); 1361 - crypto4xx_destroy_sdr(core_dev->dev); 1362 1239 err_build_sdr: 1240 + crypto4xx_destroy_sdr(core_dev->dev); 1363 1241 crypto4xx_destroy_gdr(core_dev->dev); 1364 - err_build_gdr: 1365 - crypto4xx_destroy_pdr(core_dev->dev); 1366 1242 err_build_pdr: 1243 + crypto4xx_destroy_pdr(core_dev->dev); 1367 1244 kfree(core_dev->dev); 1368 1245 err_alloc_dev: 1369 1246 kfree(core_dev);
+120 -83
drivers/crypto/amcc/crypto4xx_core.h
··· 22 22 #ifndef __CRYPTO4XX_CORE_H__ 23 23 #define __CRYPTO4XX_CORE_H__ 24 24 25 + #include <linux/ratelimit.h> 25 26 #include <crypto/internal/hash.h> 27 + #include <crypto/internal/aead.h> 28 + #include "crypto4xx_reg_def.h" 29 + #include "crypto4xx_sa.h" 26 30 27 31 #define MODULE_NAME "crypto4xx" 28 32 ··· 38 34 #define PPC405EX_CE_RESET 0x00000008 39 35 40 36 #define CRYPTO4XX_CRYPTO_PRIORITY 300 41 - #define PPC4XX_LAST_PD 63 42 - #define PPC4XX_NUM_PD 64 43 - #define PPC4XX_LAST_GD 1023 37 + #define PPC4XX_NUM_PD 256 38 + #define PPC4XX_LAST_PD (PPC4XX_NUM_PD - 1) 44 39 #define PPC4XX_NUM_GD 1024 45 - #define PPC4XX_LAST_SD 63 46 - #define PPC4XX_NUM_SD 64 40 + #define PPC4XX_LAST_GD (PPC4XX_NUM_GD - 1) 41 + #define PPC4XX_NUM_SD 256 42 + #define PPC4XX_LAST_SD (PPC4XX_NUM_SD - 1) 47 43 #define PPC4XX_SD_BUFFER_SIZE 2048 48 44 49 - #define PD_ENTRY_INUSE 1 45 + #define PD_ENTRY_BUSY BIT(1) 46 + #define PD_ENTRY_INUSE BIT(0) 50 47 #define PD_ENTRY_FREE 0 51 48 #define ERING_WAS_FULL 0xffffffff 52 49 53 50 struct crypto4xx_device; 51 + 52 + union shadow_sa_buf { 53 + struct dynamic_sa_ctl sa; 54 + 55 + /* alloc 256 bytes which is enough for any kind of dynamic sa */ 56 + u8 buf[256]; 57 + } __packed; 54 58 55 59 struct pd_uinfo { 56 60 struct crypto4xx_device *dev; ··· 72 60 used by this packet */ 73 61 u32 num_sd; /* number of scatter discriptors 74 62 used by this packet */ 75 - void *sa_va; /* shadow sa, when using cp from ctx->sa */ 76 - u32 sa_pa; 77 - void *sr_va; /* state record for shadow sa */ 63 + struct dynamic_sa_ctl *sa_va; /* shadow sa */ 64 + struct sa_state_record *sr_va; /* state record for shadow sa */ 78 65 u32 sr_pa; 79 66 struct scatterlist *dest_va; 80 67 struct crypto_async_request *async_req; /* base crypto request ··· 83 72 struct crypto4xx_device { 84 73 struct crypto4xx_core_device *core_dev; 85 74 char *name; 86 - u64 ce_phy_address; 87 75 void __iomem *ce_base; 88 76 void __iomem *trng_base; 89 77 90 - void *pdr; /* base address 
of packet 91 - descriptor ring */ 92 - dma_addr_t pdr_pa; /* physical address used to 93 - program ce pdr_base_register */ 94 - void *gdr; /* gather descriptor ring */ 95 - dma_addr_t gdr_pa; /* physical address used to 96 - program ce gdr_base_register */ 97 - void *sdr; /* scatter descriptor ring */ 98 - dma_addr_t sdr_pa; /* physical address used to 99 - program ce sdr_base_register */ 78 + struct ce_pd *pdr; /* base address of packet descriptor ring */ 79 + dma_addr_t pdr_pa; /* physical address of pdr_base_register */ 80 + struct ce_gd *gdr; /* gather descriptor ring */ 81 + dma_addr_t gdr_pa; /* physical address of gdr_base_register */ 82 + struct ce_sd *sdr; /* scatter descriptor ring */ 83 + dma_addr_t sdr_pa; /* physical address of sdr_base_register */ 100 84 void *scatter_buffer_va; 101 85 dma_addr_t scatter_buffer_pa; 102 - u32 scatter_buffer_size; 103 86 104 - void *shadow_sa_pool; /* pool of memory for sa in pd_uinfo */ 87 + union shadow_sa_buf *shadow_sa_pool; 105 88 dma_addr_t shadow_sa_pool_pa; 106 - void *shadow_sr_pool; /* pool of memory for sr in pd_uinfo */ 89 + struct sa_state_record *shadow_sr_pool; 107 90 dma_addr_t shadow_sr_pool_pa; 108 91 u32 pdr_tail; 109 92 u32 pdr_head; ··· 105 100 u32 gdr_head; 106 101 u32 sdr_tail; 107 102 u32 sdr_head; 108 - void *pdr_uinfo; 103 + struct pd_uinfo *pdr_uinfo; 109 104 struct list_head alg_list; /* List of algorithm supported 110 105 by this device */ 106 + struct ratelimit_state aead_ratelimit; 111 107 }; 112 108 113 109 struct crypto4xx_core_device { ··· 124 118 125 119 struct crypto4xx_ctx { 126 120 struct crypto4xx_device *dev; 127 - void *sa_in; 128 - dma_addr_t sa_in_dma_addr; 129 - void *sa_out; 130 - dma_addr_t sa_out_dma_addr; 131 - void *state_record; 132 - dma_addr_t state_record_dma_addr; 121 + struct dynamic_sa_ctl *sa_in; 122 + struct dynamic_sa_ctl *sa_out; 123 + __le32 iv_nonce; 133 124 u32 sa_len; 134 - u32 offset_to_sr_ptr; /* offset to state ptr, in dynamic sa */ 135 - u32 direction; 
136 - u32 next_hdr; 137 - u32 save_iv; 138 - u32 pd_ctl_len; 139 - u32 pd_ctl; 140 - u32 bypass; 141 - u32 is_hash; 142 - u32 hash_final; 143 - }; 144 - 145 - struct crypto4xx_req_ctx { 146 - struct crypto4xx_device *dev; /* Device in which 147 - operation to send to */ 148 - void *sa; 149 - u32 sa_dma_addr; 150 - u16 sa_len; 125 + union { 126 + struct crypto_aead *aead; 127 + } sw_cipher; 151 128 }; 152 129 153 130 struct crypto4xx_alg_common { ··· 138 149 union { 139 150 struct crypto_alg cipher; 140 151 struct ahash_alg hash; 152 + struct aead_alg aead; 141 153 } u; 142 154 }; 143 155 ··· 148 158 struct crypto4xx_device *dev; 149 159 }; 150 160 151 - static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg( 152 - struct crypto_alg *x) 153 - { 154 - switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) { 155 - case CRYPTO_ALG_TYPE_AHASH: 156 - return container_of(__crypto_ahash_alg(x), 157 - struct crypto4xx_alg, alg.u.hash); 158 - } 161 + int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size); 162 + void crypto4xx_free_sa(struct crypto4xx_ctx *ctx); 163 + void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx); 164 + int crypto4xx_build_pd(struct crypto_async_request *req, 165 + struct crypto4xx_ctx *ctx, 166 + struct scatterlist *src, 167 + struct scatterlist *dst, 168 + const unsigned int datalen, 169 + const __le32 *iv, const u32 iv_len, 170 + const struct dynamic_sa_ctl *sa, 171 + const unsigned int sa_len, 172 + const unsigned int assoclen); 173 + int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher, 174 + const u8 *key, unsigned int keylen); 175 + int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher, 176 + const u8 *key, unsigned int keylen); 177 + int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher, 178 + const u8 *key, unsigned int keylen); 179 + int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher, 180 + const u8 *key, unsigned int keylen); 181 + int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher, 182 + 
const u8 *key, unsigned int keylen); 183 + int crypto4xx_encrypt(struct ablkcipher_request *req); 184 + int crypto4xx_decrypt(struct ablkcipher_request *req); 185 + int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req); 186 + int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req); 187 + int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm); 188 + int crypto4xx_hash_digest(struct ahash_request *req); 189 + int crypto4xx_hash_final(struct ahash_request *req); 190 + int crypto4xx_hash_update(struct ahash_request *req); 191 + int crypto4xx_hash_init(struct ahash_request *req); 159 192 160 - return container_of(x, struct crypto4xx_alg, alg.u.cipher); 193 + /** 194 + * Note: Only use this function to copy items that is word aligned. 195 + */ 196 + static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf, 197 + size_t len) 198 + { 199 + for (; len >= 4; buf += 4, len -= 4) 200 + *dst++ = __swab32p((u32 *) buf); 201 + 202 + if (len) { 203 + const u8 *tmp = (u8 *)buf; 204 + 205 + switch (len) { 206 + case 3: 207 + *dst = (tmp[2] << 16) | 208 + (tmp[1] << 8) | 209 + tmp[0]; 210 + break; 211 + case 2: 212 + *dst = (tmp[1] << 8) | 213 + tmp[0]; 214 + break; 215 + case 1: 216 + *dst = tmp[0]; 217 + break; 218 + default: 219 + break; 220 + } 221 + } 161 222 } 162 223 163 - extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size); 164 - extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx); 165 - extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx, 166 - struct crypto4xx_ctx *rctx); 167 - extern void crypto4xx_free_sa_rctx(struct crypto4xx_ctx *rctx); 168 - extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx); 169 - extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx); 170 - extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx); 171 - extern u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx); 172 - extern u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx); 173 - extern void 
crypto4xx_memcpy_le(unsigned int *dst, 174 - const unsigned char *buf, int len); 175 - extern u32 crypto4xx_build_pd(struct crypto_async_request *req, 176 - struct crypto4xx_ctx *ctx, 177 - struct scatterlist *src, 178 - struct scatterlist *dst, 179 - unsigned int datalen, 180 - void *iv, u32 iv_len); 181 - extern int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher, 182 - const u8 *key, unsigned int keylen); 183 - extern int crypto4xx_encrypt(struct ablkcipher_request *req); 184 - extern int crypto4xx_decrypt(struct ablkcipher_request *req); 185 - extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm); 186 - extern int crypto4xx_hash_digest(struct ahash_request *req); 187 - extern int crypto4xx_hash_final(struct ahash_request *req); 188 - extern int crypto4xx_hash_update(struct ahash_request *req); 189 - extern int crypto4xx_hash_init(struct ahash_request *req); 224 + static inline void crypto4xx_memcpy_from_le32(u32 *dst, const void *buf, 225 + size_t len) 226 + { 227 + crypto4xx_memcpy_swab32(dst, buf, len); 228 + } 229 + 230 + static inline void crypto4xx_memcpy_to_le32(__le32 *dst, const void *buf, 231 + size_t len) 232 + { 233 + crypto4xx_memcpy_swab32((u32 *)dst, buf, len); 234 + } 235 + 236 + int crypto4xx_setauthsize_aead(struct crypto_aead *ciper, 237 + unsigned int authsize); 238 + int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, 239 + const u8 *key, unsigned int keylen); 240 + int crypto4xx_encrypt_aes_ccm(struct aead_request *req); 241 + int crypto4xx_decrypt_aes_ccm(struct aead_request *req); 242 + int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher, 243 + const u8 *key, unsigned int keylen); 244 + int crypto4xx_encrypt_aes_gcm(struct aead_request *req); 245 + int crypto4xx_decrypt_aes_gcm(struct aead_request *req); 246 + 190 247 #endif
+3
drivers/crypto/amcc/crypto4xx_reg_def.h
··· 261 261 } bf; 262 262 u32 w; 263 263 } __attribute__((packed)); 264 + #define PD_CTL_HASH_FINAL BIT(4) 265 + #define PD_CTL_PE_DONE BIT(1) 266 + #define PD_CTL_HOST_READY BIT(0) 264 267 265 268 union ce_pd_ctl_len { 266 269 struct {
-85
drivers/crypto/amcc/crypto4xx_sa.c
··· 1 - /** 2 - * AMCC SoC PPC4xx Crypto Driver 3 - * 4 - * Copyright (c) 2008 Applied Micro Circuits Corporation. 5 - * All rights reserved. James Hsiao <jhsiao@amcc.com> 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License as published by 9 - * the Free Software Foundation; either version 2 of the License, or 10 - * (at your option) any later version. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - * 17 - * @file crypto4xx_sa.c 18 - * 19 - * This file implements the security context 20 - * associate format. 21 - */ 22 - #include <linux/kernel.h> 23 - #include <linux/module.h> 24 - #include <linux/moduleparam.h> 25 - #include <linux/mod_devicetable.h> 26 - #include <linux/interrupt.h> 27 - #include <linux/spinlock_types.h> 28 - #include <linux/highmem.h> 29 - #include <linux/scatterlist.h> 30 - #include <linux/crypto.h> 31 - #include <crypto/algapi.h> 32 - #include <crypto/des.h> 33 - #include "crypto4xx_reg_def.h" 34 - #include "crypto4xx_sa.h" 35 - #include "crypto4xx_core.h" 36 - 37 - u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx) 38 - { 39 - u32 offset; 40 - union dynamic_sa_contents cts; 41 - 42 - if (ctx->direction == DIR_INBOUND) 43 - cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents; 44 - else 45 - cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents; 46 - offset = cts.bf.key_size 47 - + cts.bf.inner_size 48 - + cts.bf.outer_size 49 - + cts.bf.spi 50 - + cts.bf.seq_num0 51 - + cts.bf.seq_num1 52 - + cts.bf.seq_num_mask0 53 - + cts.bf.seq_num_mask1 54 - + cts.bf.seq_num_mask2 55 - + cts.bf.seq_num_mask3 56 - + cts.bf.iv0 57 - + cts.bf.iv1 58 - + cts.bf.iv2 59 - + cts.bf.iv3; 60 - 61 - return sizeof(struct 
dynamic_sa_ctl) + offset * 4; 62 - } 63 - 64 - u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx) 65 - { 66 - union dynamic_sa_contents cts; 67 - 68 - if (ctx->direction == DIR_INBOUND) 69 - cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents; 70 - else 71 - cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents; 72 - return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4; 73 - } 74 - 75 - u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx) 76 - { 77 - union dynamic_sa_contents cts; 78 - 79 - if (ctx->direction == DIR_INBOUND) 80 - cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents; 81 - else 82 - cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents; 83 - 84 - return sizeof(struct dynamic_sa_ctl); 85 - }
+87 -12
drivers/crypto/amcc/crypto4xx_sa.h
··· 55 55 #define SA_OP_GROUP_BASIC 0 56 56 #define SA_OPCODE_ENCRYPT 0 57 57 #define SA_OPCODE_DECRYPT 0 58 + #define SA_OPCODE_ENCRYPT_HASH 1 59 + #define SA_OPCODE_HASH_DECRYPT 1 58 60 #define SA_OPCODE_HASH 3 59 61 #define SA_CIPHER_ALG_DES 0 60 62 #define SA_CIPHER_ALG_3DES 1 ··· 67 65 68 66 #define SA_HASH_ALG_MD5 0 69 67 #define SA_HASH_ALG_SHA1 1 68 + #define SA_HASH_ALG_GHASH 12 69 + #define SA_HASH_ALG_CBC_MAC 14 70 70 #define SA_HASH_ALG_NULL 15 71 71 #define SA_HASH_ALG_SHA1_DIGEST_SIZE 20 72 72 ··· 116 112 117 113 #define CRYPTO_MODE_ECB 0 118 114 #define CRYPTO_MODE_CBC 1 115 + #define CRYPTO_MODE_OFB 2 116 + #define CRYPTO_MODE_CFB 3 117 + #define CRYPTO_MODE_CTR 4 119 118 120 119 #define CRYPTO_FEEDBACK_MODE_NO_FB 0 121 120 #define CRYPTO_FEEDBACK_MODE_64BIT_OFB 0 ··· 176 169 } __attribute__((packed)); 177 170 178 171 struct dynamic_sa_ctl { 179 - u32 sa_contents; 172 + union dynamic_sa_contents sa_contents; 180 173 union sa_command_0 sa_command_0; 181 174 union sa_command_1 sa_command_1; 182 175 } __attribute__((packed)); ··· 185 178 * State Record for Security Association (SA) 186 179 */ 187 180 struct sa_state_record { 188 - u32 save_iv[4]; 189 - u32 save_hash_byte_cnt[2]; 190 - u32 save_digest[16]; 181 + __le32 save_iv[4]; 182 + __le32 save_hash_byte_cnt[2]; 183 + union { 184 + u32 save_digest[16]; /* for MD5/SHA */ 185 + __le32 save_digest_le32[16]; /* GHASH / CBC */ 186 + }; 191 187 } __attribute__((packed)); 192 188 193 189 /** ··· 199 189 */ 200 190 struct dynamic_sa_aes128 { 201 191 struct dynamic_sa_ctl ctrl; 202 - u32 key[4]; 203 - u32 iv[4]; /* for CBC, OFC, and CFB mode */ 192 + __le32 key[4]; 193 + __le32 iv[4]; /* for CBC, OFC, and CFB mode */ 204 194 u32 state_ptr; 205 195 u32 reserved; 206 196 } __attribute__((packed)); ··· 213 203 */ 214 204 struct dynamic_sa_aes192 { 215 205 struct dynamic_sa_ctl ctrl; 216 - u32 key[6]; 217 - u32 iv[4]; /* for CBC, OFC, and CFB mode */ 206 + __le32 key[6]; 207 + __le32 iv[4]; /* for CBC, OFC, and 
CFB mode */ 218 208 u32 state_ptr; 219 209 u32 reserved; 220 210 } __attribute__((packed)); ··· 227 217 */ 228 218 struct dynamic_sa_aes256 { 229 219 struct dynamic_sa_ctl ctrl; 230 - u32 key[8]; 231 - u32 iv[4]; /* for CBC, OFC, and CFB mode */ 220 + __le32 key[8]; 221 + __le32 iv[4]; /* for CBC, OFC, and CFB mode */ 232 222 u32 state_ptr; 233 223 u32 reserved; 234 224 } __attribute__((packed)); ··· 238 228 #define SA_AES_CONTENTS 0x3e000002 239 229 240 230 /** 231 + * Security Association (SA) for AES128 CCM 232 + */ 233 + struct dynamic_sa_aes128_ccm { 234 + struct dynamic_sa_ctl ctrl; 235 + __le32 key[4]; 236 + __le32 iv[4]; 237 + u32 state_ptr; 238 + u32 reserved; 239 + } __packed; 240 + #define SA_AES128_CCM_LEN (sizeof(struct dynamic_sa_aes128_ccm)/4) 241 + #define SA_AES128_CCM_CONTENTS 0x3e000042 242 + #define SA_AES_CCM_CONTENTS 0x3e000002 243 + 244 + /** 245 + * Security Association (SA) for AES128_GCM 246 + */ 247 + struct dynamic_sa_aes128_gcm { 248 + struct dynamic_sa_ctl ctrl; 249 + __le32 key[4]; 250 + __le32 inner_digest[4]; 251 + __le32 iv[4]; 252 + u32 state_ptr; 253 + u32 reserved; 254 + } __packed; 255 + 256 + #define SA_AES128_GCM_LEN (sizeof(struct dynamic_sa_aes128_gcm)/4) 257 + #define SA_AES128_GCM_CONTENTS 0x3e000442 258 + #define SA_AES_GCM_CONTENTS 0x3e000402 259 + 260 + /** 241 261 * Security Association (SA) for HASH160: HMAC-SHA1 242 262 */ 243 263 struct dynamic_sa_hash160 { 244 264 struct dynamic_sa_ctl ctrl; 245 - u32 inner_digest[5]; 246 - u32 outer_digest[5]; 265 + __le32 inner_digest[5]; 266 + __le32 outer_digest[5]; 247 267 u32 state_ptr; 248 268 u32 reserved; 249 269 } __attribute__((packed)); 250 270 #define SA_HASH160_LEN (sizeof(struct dynamic_sa_hash160)/4) 251 271 #define SA_HASH160_CONTENTS 0x2000a502 272 + 273 + static inline u32 274 + get_dynamic_sa_offset_state_ptr_field(struct dynamic_sa_ctl *cts) 275 + { 276 + u32 offset; 277 + 278 + offset = cts->sa_contents.bf.key_size 279 + + cts->sa_contents.bf.inner_size 280 + 
+ cts->sa_contents.bf.outer_size 281 + + cts->sa_contents.bf.spi 282 + + cts->sa_contents.bf.seq_num0 283 + + cts->sa_contents.bf.seq_num1 284 + + cts->sa_contents.bf.seq_num_mask0 285 + + cts->sa_contents.bf.seq_num_mask1 286 + + cts->sa_contents.bf.seq_num_mask2 287 + + cts->sa_contents.bf.seq_num_mask3 288 + + cts->sa_contents.bf.iv0 289 + + cts->sa_contents.bf.iv1 290 + + cts->sa_contents.bf.iv2 291 + + cts->sa_contents.bf.iv3; 292 + 293 + return sizeof(struct dynamic_sa_ctl) + offset * 4; 294 + } 295 + 296 + static inline __le32 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts) 297 + { 298 + return (__le32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl)); 299 + } 300 + 301 + static inline __le32 *get_dynamic_sa_inner_digest(struct dynamic_sa_ctl *cts) 302 + { 303 + return (__le32 *) ((unsigned long)cts + 304 + sizeof(struct dynamic_sa_ctl) + 305 + cts->sa_contents.bf.key_size * 4); 306 + } 252 307 253 308 #endif
+44 -36
drivers/crypto/atmel-aes.c
··· 36 36 #include <crypto/scatterwalk.h> 37 37 #include <crypto/algapi.h> 38 38 #include <crypto/aes.h> 39 + #include <crypto/gcm.h> 39 40 #include <crypto/xts.h> 40 41 #include <crypto/internal/aead.h> 41 42 #include <linux/platform_data/crypto-atmel.h> ··· 77 76 AES_FLAGS_ENCRYPT | \ 78 77 AES_FLAGS_GTAGEN) 79 78 80 - #define AES_FLAGS_INIT BIT(2) 81 79 #define AES_FLAGS_BUSY BIT(3) 82 80 #define AES_FLAGS_DUMP_REG BIT(4) 83 81 #define AES_FLAGS_OWN_SHA BIT(5) 84 82 85 - #define AES_FLAGS_PERSISTENT (AES_FLAGS_INIT | AES_FLAGS_BUSY) 83 + #define AES_FLAGS_PERSISTENT AES_FLAGS_BUSY 86 84 87 85 #define ATMEL_AES_QUEUE_LENGTH 50 88 86 ··· 110 110 int keylen; 111 111 u32 key[AES_KEYSIZE_256 / sizeof(u32)]; 112 112 u16 block_size; 113 + bool is_aead; 113 114 }; 114 115 115 116 struct atmel_aes_ctx { ··· 157 156 158 157 struct atmel_aes_reqctx { 159 158 unsigned long mode; 159 + u32 lastc[AES_BLOCK_SIZE / sizeof(u32)]; 160 160 }; 161 161 162 162 #ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC ··· 450 448 if (err) 451 449 return err; 452 450 453 - if (!(dd->flags & AES_FLAGS_INIT)) { 454 - atmel_aes_write(dd, AES_CR, AES_CR_SWRST); 455 - atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET); 456 - dd->flags |= AES_FLAGS_INIT; 457 - } 451 + atmel_aes_write(dd, AES_CR, AES_CR_SWRST); 452 + atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET); 458 453 459 454 return 0; 460 455 } ··· 496 497 static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) 497 498 { 498 499 #ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC 499 - atmel_aes_authenc_complete(dd, err); 500 + if (dd->ctx->is_aead) 501 + atmel_aes_authenc_complete(dd, err); 500 502 #endif 501 503 502 504 clk_disable(dd->iclk); 503 505 dd->flags &= ~AES_FLAGS_BUSY; 506 + 507 + if (!dd->ctx->is_aead) { 508 + struct ablkcipher_request *req = 509 + ablkcipher_request_cast(dd->areq); 510 + struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req); 511 + struct crypto_ablkcipher *ablkcipher = 512 + 
crypto_ablkcipher_reqtfm(req); 513 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 514 + 515 + if (rctx->mode & AES_FLAGS_ENCRYPT) { 516 + scatterwalk_map_and_copy(req->info, req->dst, 517 + req->nbytes - ivsize, ivsize, 0); 518 + } else { 519 + if (req->src == req->dst) { 520 + memcpy(req->info, rctx->lastc, ivsize); 521 + } else { 522 + scatterwalk_map_and_copy(req->info, req->src, 523 + req->nbytes - ivsize, ivsize, 0); 524 + } 525 + } 526 + } 504 527 505 528 if (dd->is_async) 506 529 dd->areq->complete(dd->areq, err); ··· 1092 1071 1093 1072 static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode) 1094 1073 { 1095 - struct atmel_aes_base_ctx *ctx; 1074 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); 1075 + struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); 1096 1076 struct atmel_aes_reqctx *rctx; 1097 1077 struct atmel_aes_dev *dd; 1098 1078 1099 - ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); 1100 1079 switch (mode & AES_FLAGS_OPMODE_MASK) { 1101 1080 case AES_FLAGS_CFB8: 1102 1081 ctx->block_size = CFB8_BLOCK_SIZE; ··· 1118 1097 ctx->block_size = AES_BLOCK_SIZE; 1119 1098 break; 1120 1099 } 1100 + ctx->is_aead = false; 1121 1101 1122 1102 dd = atmel_aes_find_dev(ctx); 1123 1103 if (!dd) ··· 1126 1104 1127 1105 rctx = ablkcipher_request_ctx(req); 1128 1106 rctx->mode = mode; 1107 + 1108 + if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) { 1109 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1110 + 1111 + scatterwalk_map_and_copy(rctx->lastc, req->src, 1112 + (req->nbytes - ivsize), ivsize, 0); 1113 + } 1129 1114 1130 1115 return atmel_aes_handle_queue(dd, &req->base); 1131 1116 } ··· 1265 1236 return 0; 1266 1237 } 1267 1238 1268 - static void atmel_aes_cra_exit(struct crypto_tfm *tfm) 1269 - { 1270 - } 1271 - 1272 1239 static struct crypto_alg aes_algs[] = { 1273 1240 { 1274 1241 .cra_name = "ecb(aes)", ··· 1277 1252 .cra_type = &crypto_ablkcipher_type, 1278 1253 
.cra_module = THIS_MODULE, 1279 1254 .cra_init = atmel_aes_cra_init, 1280 - .cra_exit = atmel_aes_cra_exit, 1281 1255 .cra_u.ablkcipher = { 1282 1256 .min_keysize = AES_MIN_KEY_SIZE, 1283 1257 .max_keysize = AES_MAX_KEY_SIZE, ··· 1296 1272 .cra_type = &crypto_ablkcipher_type, 1297 1273 .cra_module = THIS_MODULE, 1298 1274 .cra_init = atmel_aes_cra_init, 1299 - .cra_exit = atmel_aes_cra_exit, 1300 1275 .cra_u.ablkcipher = { 1301 1276 .min_keysize = AES_MIN_KEY_SIZE, 1302 1277 .max_keysize = AES_MAX_KEY_SIZE, ··· 1316 1293 .cra_type = &crypto_ablkcipher_type, 1317 1294 .cra_module = THIS_MODULE, 1318 1295 .cra_init = atmel_aes_cra_init, 1319 - .cra_exit = atmel_aes_cra_exit, 1320 1296 .cra_u.ablkcipher = { 1321 1297 .min_keysize = AES_MIN_KEY_SIZE, 1322 1298 .max_keysize = AES_MAX_KEY_SIZE, ··· 1336 1314 .cra_type = &crypto_ablkcipher_type, 1337 1315 .cra_module = THIS_MODULE, 1338 1316 .cra_init = atmel_aes_cra_init, 1339 - .cra_exit = atmel_aes_cra_exit, 1340 1317 .cra_u.ablkcipher = { 1341 1318 .min_keysize = AES_MIN_KEY_SIZE, 1342 1319 .max_keysize = AES_MAX_KEY_SIZE, ··· 1356 1335 .cra_type = &crypto_ablkcipher_type, 1357 1336 .cra_module = THIS_MODULE, 1358 1337 .cra_init = atmel_aes_cra_init, 1359 - .cra_exit = atmel_aes_cra_exit, 1360 1338 .cra_u.ablkcipher = { 1361 1339 .min_keysize = AES_MIN_KEY_SIZE, 1362 1340 .max_keysize = AES_MAX_KEY_SIZE, ··· 1376 1356 .cra_type = &crypto_ablkcipher_type, 1377 1357 .cra_module = THIS_MODULE, 1378 1358 .cra_init = atmel_aes_cra_init, 1379 - .cra_exit = atmel_aes_cra_exit, 1380 1359 .cra_u.ablkcipher = { 1381 1360 .min_keysize = AES_MIN_KEY_SIZE, 1382 1361 .max_keysize = AES_MAX_KEY_SIZE, ··· 1396 1377 .cra_type = &crypto_ablkcipher_type, 1397 1378 .cra_module = THIS_MODULE, 1398 1379 .cra_init = atmel_aes_cra_init, 1399 - .cra_exit = atmel_aes_cra_exit, 1400 1380 .cra_u.ablkcipher = { 1401 1381 .min_keysize = AES_MIN_KEY_SIZE, 1402 1382 .max_keysize = AES_MAX_KEY_SIZE, ··· 1416 1398 .cra_type = &crypto_ablkcipher_type, 
1417 1399 .cra_module = THIS_MODULE, 1418 1400 .cra_init = atmel_aes_ctr_cra_init, 1419 - .cra_exit = atmel_aes_cra_exit, 1420 1401 .cra_u.ablkcipher = { 1421 1402 .min_keysize = AES_MIN_KEY_SIZE, 1422 1403 .max_keysize = AES_MAX_KEY_SIZE, ··· 1438 1421 .cra_type = &crypto_ablkcipher_type, 1439 1422 .cra_module = THIS_MODULE, 1440 1423 .cra_init = atmel_aes_cra_init, 1441 - .cra_exit = atmel_aes_cra_exit, 1442 1424 .cra_u.ablkcipher = { 1443 1425 .min_keysize = AES_MIN_KEY_SIZE, 1444 1426 .max_keysize = AES_MAX_KEY_SIZE, ··· 1548 1532 if (err) 1549 1533 return atmel_aes_complete(dd, err); 1550 1534 1551 - if (likely(ivsize == 12)) { 1535 + if (likely(ivsize == GCM_AES_IV_SIZE)) { 1552 1536 memcpy(ctx->j0, iv, ivsize); 1553 1537 ctx->j0[3] = cpu_to_be32(1); 1554 1538 return atmel_aes_gcm_process(dd); ··· 1755 1739 1756 1740 ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); 1757 1741 ctx->block_size = AES_BLOCK_SIZE; 1742 + ctx->is_aead = true; 1758 1743 1759 1744 dd = atmel_aes_find_dev(ctx); 1760 1745 if (!dd) ··· 1825 1808 return 0; 1826 1809 } 1827 1810 1828 - static void atmel_aes_gcm_exit(struct crypto_aead *tfm) 1829 - { 1830 - 1831 - } 1832 - 1833 1811 static struct aead_alg aes_gcm_alg = { 1834 1812 .setkey = atmel_aes_gcm_setkey, 1835 1813 .setauthsize = atmel_aes_gcm_setauthsize, 1836 1814 .encrypt = atmel_aes_gcm_encrypt, 1837 1815 .decrypt = atmel_aes_gcm_decrypt, 1838 1816 .init = atmel_aes_gcm_init, 1839 - .exit = atmel_aes_gcm_exit, 1840 - .ivsize = 12, 1817 + .ivsize = GCM_AES_IV_SIZE, 1841 1818 .maxauthsize = AES_BLOCK_SIZE, 1842 1819 1843 1820 .base = { ··· 1966 1955 .cra_type = &crypto_ablkcipher_type, 1967 1956 .cra_module = THIS_MODULE, 1968 1957 .cra_init = atmel_aes_xts_cra_init, 1969 - .cra_exit = atmel_aes_cra_exit, 1970 1958 .cra_u.ablkcipher = { 1971 1959 .min_keysize = 2 * AES_MIN_KEY_SIZE, 1972 1960 .max_keysize = 2 * AES_MAX_KEY_SIZE, ··· 2233 2223 2234 2224 rctx->base.mode = mode; 2235 2225 ctx->block_size = AES_BLOCK_SIZE; 2226 + 
ctx->is_aead = true; 2236 2227 2237 2228 dd = atmel_aes_find_dev(ctx); 2238 2229 if (!dd) ··· 2393 2382 struct crypto_platform_data *pdata) 2394 2383 { 2395 2384 struct at_dma_slave *slave; 2396 - int err = -ENOMEM; 2397 2385 dma_cap_mask_t mask; 2398 2386 2399 2387 dma_cap_zero(mask); ··· 2417 2407 dma_release_channel(dd->src.chan); 2418 2408 err_dma_in: 2419 2409 dev_warn(dd->dev, "no DMA channel available\n"); 2420 - return err; 2410 + return -ENODEV; 2421 2411 } 2422 2412 2423 2413 static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd) ··· 2667 2657 (unsigned long)aes_dd); 2668 2658 2669 2659 crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH); 2670 - 2671 - aes_dd->irq = -1; 2672 2660 2673 2661 /* Get the base address */ 2674 2662 aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+1 -4
drivers/crypto/atmel-sha.c
··· 2628 2628 static int atmel_sha_dma_init(struct atmel_sha_dev *dd, 2629 2629 struct crypto_platform_data *pdata) 2630 2630 { 2631 - int err = -ENOMEM; 2632 2631 dma_cap_mask_t mask_in; 2633 2632 2634 2633 /* Try to grab DMA channel */ ··· 2638 2639 atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx"); 2639 2640 if (!dd->dma_lch_in.chan) { 2640 2641 dev_warn(dd->dev, "no DMA channel available\n"); 2641 - return err; 2642 + return -ENODEV; 2642 2643 } 2643 2644 2644 2645 dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; ··· 2776 2777 (unsigned long)sha_dd); 2777 2778 2778 2779 crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH); 2779 - 2780 - sha_dd->irq = -1; 2781 2780 2782 2781 /* Get the base address */ 2783 2782 sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+1 -22
drivers/crypto/atmel-tdes.c
··· 720 720 static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd, 721 721 struct crypto_platform_data *pdata) 722 722 { 723 - int err = -ENOMEM; 724 723 dma_cap_mask_t mask; 725 724 726 725 dma_cap_zero(mask); ··· 764 765 dma_release_channel(dd->dma_lch_in.chan); 765 766 err_dma_in: 766 767 dev_warn(dd->dev, "no DMA channel available\n"); 767 - return err; 768 + return -ENODEV; 768 769 } 769 770 770 771 static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd) ··· 911 912 return 0; 912 913 } 913 914 914 - static void atmel_tdes_cra_exit(struct crypto_tfm *tfm) 915 - { 916 - } 917 - 918 915 static struct crypto_alg tdes_algs[] = { 919 916 { 920 917 .cra_name = "ecb(des)", ··· 923 928 .cra_type = &crypto_ablkcipher_type, 924 929 .cra_module = THIS_MODULE, 925 930 .cra_init = atmel_tdes_cra_init, 926 - .cra_exit = atmel_tdes_cra_exit, 927 931 .cra_u.ablkcipher = { 928 932 .min_keysize = DES_KEY_SIZE, 929 933 .max_keysize = DES_KEY_SIZE, ··· 942 948 .cra_type = &crypto_ablkcipher_type, 943 949 .cra_module = THIS_MODULE, 944 950 .cra_init = atmel_tdes_cra_init, 945 - .cra_exit = atmel_tdes_cra_exit, 946 951 .cra_u.ablkcipher = { 947 952 .min_keysize = DES_KEY_SIZE, 948 953 .max_keysize = DES_KEY_SIZE, ··· 962 969 .cra_type = &crypto_ablkcipher_type, 963 970 .cra_module = THIS_MODULE, 964 971 .cra_init = atmel_tdes_cra_init, 965 - .cra_exit = atmel_tdes_cra_exit, 966 972 .cra_u.ablkcipher = { 967 973 .min_keysize = DES_KEY_SIZE, 968 974 .max_keysize = DES_KEY_SIZE, ··· 982 990 .cra_type = &crypto_ablkcipher_type, 983 991 .cra_module = THIS_MODULE, 984 992 .cra_init = atmel_tdes_cra_init, 985 - .cra_exit = atmel_tdes_cra_exit, 986 993 .cra_u.ablkcipher = { 987 994 .min_keysize = DES_KEY_SIZE, 988 995 .max_keysize = DES_KEY_SIZE, ··· 1002 1011 .cra_type = &crypto_ablkcipher_type, 1003 1012 .cra_module = THIS_MODULE, 1004 1013 .cra_init = atmel_tdes_cra_init, 1005 - .cra_exit = atmel_tdes_cra_exit, 1006 1014 .cra_u.ablkcipher = { 1007 1015 .min_keysize = 
DES_KEY_SIZE, 1008 1016 .max_keysize = DES_KEY_SIZE, ··· 1022 1032 .cra_type = &crypto_ablkcipher_type, 1023 1033 .cra_module = THIS_MODULE, 1024 1034 .cra_init = atmel_tdes_cra_init, 1025 - .cra_exit = atmel_tdes_cra_exit, 1026 1035 .cra_u.ablkcipher = { 1027 1036 .min_keysize = DES_KEY_SIZE, 1028 1037 .max_keysize = DES_KEY_SIZE, ··· 1042 1053 .cra_type = &crypto_ablkcipher_type, 1043 1054 .cra_module = THIS_MODULE, 1044 1055 .cra_init = atmel_tdes_cra_init, 1045 - .cra_exit = atmel_tdes_cra_exit, 1046 1056 .cra_u.ablkcipher = { 1047 1057 .min_keysize = DES_KEY_SIZE, 1048 1058 .max_keysize = DES_KEY_SIZE, ··· 1062 1074 .cra_type = &crypto_ablkcipher_type, 1063 1075 .cra_module = THIS_MODULE, 1064 1076 .cra_init = atmel_tdes_cra_init, 1065 - .cra_exit = atmel_tdes_cra_exit, 1066 1077 .cra_u.ablkcipher = { 1067 1078 .min_keysize = 2 * DES_KEY_SIZE, 1068 1079 .max_keysize = 3 * DES_KEY_SIZE, ··· 1081 1094 .cra_type = &crypto_ablkcipher_type, 1082 1095 .cra_module = THIS_MODULE, 1083 1096 .cra_init = atmel_tdes_cra_init, 1084 - .cra_exit = atmel_tdes_cra_exit, 1085 1097 .cra_u.ablkcipher = { 1086 1098 .min_keysize = 2*DES_KEY_SIZE, 1087 1099 .max_keysize = 3*DES_KEY_SIZE, ··· 1101 1115 .cra_type = &crypto_ablkcipher_type, 1102 1116 .cra_module = THIS_MODULE, 1103 1117 .cra_init = atmel_tdes_cra_init, 1104 - .cra_exit = atmel_tdes_cra_exit, 1105 1118 .cra_u.ablkcipher = { 1106 1119 .min_keysize = 2*DES_KEY_SIZE, 1107 1120 .max_keysize = 2*DES_KEY_SIZE, ··· 1121 1136 .cra_type = &crypto_ablkcipher_type, 1122 1137 .cra_module = THIS_MODULE, 1123 1138 .cra_init = atmel_tdes_cra_init, 1124 - .cra_exit = atmel_tdes_cra_exit, 1125 1139 .cra_u.ablkcipher = { 1126 1140 .min_keysize = 2*DES_KEY_SIZE, 1127 1141 .max_keysize = 2*DES_KEY_SIZE, ··· 1141 1157 .cra_type = &crypto_ablkcipher_type, 1142 1158 .cra_module = THIS_MODULE, 1143 1159 .cra_init = atmel_tdes_cra_init, 1144 - .cra_exit = atmel_tdes_cra_exit, 1145 1160 .cra_u.ablkcipher = { 1146 1161 .min_keysize = 
2*DES_KEY_SIZE, 1147 1162 .max_keysize = 2*DES_KEY_SIZE, ··· 1161 1178 .cra_type = &crypto_ablkcipher_type, 1162 1179 .cra_module = THIS_MODULE, 1163 1180 .cra_init = atmel_tdes_cra_init, 1164 - .cra_exit = atmel_tdes_cra_exit, 1165 1181 .cra_u.ablkcipher = { 1166 1182 .min_keysize = 2*DES_KEY_SIZE, 1167 1183 .max_keysize = 2*DES_KEY_SIZE, ··· 1181 1199 .cra_type = &crypto_ablkcipher_type, 1182 1200 .cra_module = THIS_MODULE, 1183 1201 .cra_init = atmel_tdes_cra_init, 1184 - .cra_exit = atmel_tdes_cra_exit, 1185 1202 .cra_u.ablkcipher = { 1186 1203 .min_keysize = 2*DES_KEY_SIZE, 1187 1204 .max_keysize = 3*DES_KEY_SIZE, ··· 1362 1381 (unsigned long)tdes_dd); 1363 1382 1364 1383 crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH); 1365 - 1366 - tdes_dd->irq = -1; 1367 1384 1368 1385 /* Get the base address */ 1369 1386 tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+52 -64
drivers/crypto/bcm/cipher.c
··· 256 256 return 0; 257 257 } 258 258 259 + static int mailbox_send_message(struct brcm_message *mssg, u32 flags, 260 + u8 chan_idx) 261 + { 262 + int err; 263 + int retry_cnt = 0; 264 + struct device *dev = &(iproc_priv.pdev->dev); 265 + 266 + err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg); 267 + if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) { 268 + while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { 269 + /* 270 + * Mailbox queue is full. Since MAY_SLEEP is set, assume 271 + * not in atomic context and we can wait and try again. 272 + */ 273 + retry_cnt++; 274 + usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); 275 + err = mbox_send_message(iproc_priv.mbox[chan_idx], 276 + mssg); 277 + atomic_inc(&iproc_priv.mb_no_spc); 278 + } 279 + } 280 + if (err < 0) { 281 + atomic_inc(&iproc_priv.mb_send_fail); 282 + return err; 283 + } 284 + 285 + /* Check error returned by mailbox controller */ 286 + err = mssg->error; 287 + if (unlikely(err < 0)) { 288 + dev_err(dev, "message error %d", err); 289 + /* Signal txdone for mailbox channel */ 290 + } 291 + 292 + /* Signal txdone for mailbox channel */ 293 + mbox_client_txdone(iproc_priv.mbox[chan_idx], err); 294 + return err; 295 + } 296 + 259 297 /** 260 298 * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in 261 299 * a single SPU request message, starting at the current position in the request ··· 331 293 u32 pad_len; /* total length of all padding */ 332 294 bool update_key = false; 333 295 struct brcm_message *mssg; /* mailbox message */ 334 - int retry_cnt = 0; 335 296 336 297 /* number of entries in src and dst sg in mailbox message. */ 337 298 u8 rx_frag_num = 2; /* response header and STATUS */ ··· 499 462 if (err) 500 463 return err; 501 464 502 - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg); 503 - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) { 504 - while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { 505 - /* 506 - * Mailbox queue is full. 
Since MAY_SLEEP is set, assume 507 - * not in atomic context and we can wait and try again. 508 - */ 509 - retry_cnt++; 510 - usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); 511 - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], 512 - mssg); 513 - atomic_inc(&iproc_priv.mb_no_spc); 514 - } 515 - } 516 - if (unlikely(err < 0)) { 517 - atomic_inc(&iproc_priv.mb_send_fail); 465 + err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); 466 + if (unlikely(err < 0)) 518 467 return err; 519 - } 520 468 521 469 return -EINPROGRESS; 522 470 } ··· 732 710 u32 spu_hdr_len; 733 711 unsigned int digestsize; 734 712 u16 rem = 0; 735 - int retry_cnt = 0; 736 713 737 714 /* 738 715 * number of entries in src and dst sg. Always includes SPU msg header. ··· 925 904 if (err) 926 905 return err; 927 906 928 - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg); 929 - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) { 930 - while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { 931 - /* 932 - * Mailbox queue is full. Since MAY_SLEEP is set, assume 933 - * not in atomic context and we can wait and try again. 934 - */ 935 - retry_cnt++; 936 - usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); 937 - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], 938 - mssg); 939 - atomic_inc(&iproc_priv.mb_no_spc); 940 - } 941 - } 942 - if (err < 0) { 943 - atomic_inc(&iproc_priv.mb_send_fail); 907 + err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); 908 + if (unlikely(err < 0)) 944 909 return err; 945 - } 910 + 946 911 return -EINPROGRESS; 947 912 } 948 913 ··· 1327 1320 int assoc_nents = 0; 1328 1321 bool incl_icv = false; 1329 1322 unsigned int digestsize = ctx->digestsize; 1330 - int retry_cnt = 0; 1331 1323 1332 1324 /* number of entries in src and dst sg. Always includes SPU msg header. 1333 1325 */ ··· 1373 1367 * expects AAD to include just SPI and seqno. So 1374 1368 * subtract off the IV len. 
1375 1369 */ 1376 - aead_parms.assoc_size -= GCM_ESP_IV_SIZE; 1370 + aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE; 1377 1371 1378 1372 if (rctx->is_encrypt) { 1379 1373 aead_parms.return_iv = true; 1380 - aead_parms.ret_iv_len = GCM_ESP_IV_SIZE; 1374 + aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE; 1381 1375 aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE; 1382 1376 } 1383 1377 } else { ··· 1564 1558 if (err) 1565 1559 return err; 1566 1560 1567 - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg); 1568 - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) { 1569 - while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { 1570 - /* 1571 - * Mailbox queue is full. Since MAY_SLEEP is set, assume 1572 - * not in atomic context and we can wait and try again. 1573 - */ 1574 - retry_cnt++; 1575 - usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); 1576 - err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], 1577 - mssg); 1578 - atomic_inc(&iproc_priv.mb_no_spc); 1579 - } 1580 - } 1581 - if (err < 0) { 1582 - atomic_inc(&iproc_priv.mb_send_fail); 1561 + err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); 1562 + if (unlikely(err < 0)) 1583 1563 return err; 1584 - } 1585 1564 1586 1565 return -EINPROGRESS; 1587 1566 } ··· 3246 3255 .cra_flags = CRYPTO_ALG_NEED_FALLBACK 3247 3256 }, 3248 3257 .setkey = aead_gcm_esp_setkey, 3249 - .ivsize = GCM_ESP_IV_SIZE, 3258 + .ivsize = GCM_RFC4106_IV_SIZE, 3250 3259 .maxauthsize = AES_BLOCK_SIZE, 3251 3260 }, 3252 3261 .cipher_info = { ··· 3292 3301 .cra_flags = CRYPTO_ALG_NEED_FALLBACK 3293 3302 }, 3294 3303 .setkey = rfc4543_gcm_esp_setkey, 3295 - .ivsize = GCM_ESP_IV_SIZE, 3304 + .ivsize = GCM_RFC4106_IV_SIZE, 3296 3305 .maxauthsize = AES_BLOCK_SIZE, 3297 3306 }, 3298 3307 .cipher_info = { ··· 4528 4537 mcl->dev = dev; 4529 4538 mcl->tx_block = false; 4530 4539 mcl->tx_tout = 0; 4531 - mcl->knows_txdone = false; 4540 + mcl->knows_txdone = true; 4532 4541 mcl->rx_callback = spu_rx_callback; 4533 4542 mcl->tx_done = 
NULL; 4534 4543 ··· 4809 4818 struct device *dev = &pdev->dev; 4810 4819 struct spu_hw *spu = &iproc_priv.spu; 4811 4820 struct resource *spu_ctrl_regs; 4812 - const struct of_device_id *match; 4813 4821 const struct spu_type_subtype *matched_spu_type; 4814 4822 struct device_node *dn = pdev->dev.of_node; 4815 4823 int err, i; ··· 4816 4826 /* Count number of mailbox channels */ 4817 4827 spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells"); 4818 4828 4819 - match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev); 4820 - if (!match) { 4829 + matched_spu_type = of_device_get_match_data(dev); 4830 + if (!matched_spu_type) { 4821 4831 dev_err(&pdev->dev, "Failed to match device\n"); 4822 4832 return -ENODEV; 4823 4833 } 4824 - 4825 - matched_spu_type = match->data; 4826 4834 4827 4835 spu->spu_type = matched_spu_type->type; 4828 4836 spu->spu_subtype = matched_spu_type->subtype;
+1 -2
drivers/crypto/bcm/cipher.h
··· 23 23 #include <crypto/aes.h> 24 24 #include <crypto/internal/hash.h> 25 25 #include <crypto/aead.h> 26 + #include <crypto/gcm.h> 26 27 #include <crypto/sha.h> 27 28 #include <crypto/sha3.h> 28 29 ··· 40 39 #define ARC4_STATE_SIZE 4 41 40 42 41 #define CCM_AES_IV_SIZE 16 43 - #define GCM_AES_IV_SIZE 12 44 - #define GCM_ESP_IV_SIZE 8 45 42 #define CCM_ESP_IV_SIZE 8 46 43 #define RFC4543_ICV_SIZE 16 47 44
+7 -7
drivers/crypto/bcm/util.c
··· 271 271 hash = crypto_alloc_shash(name, 0, 0); 272 272 if (IS_ERR(hash)) { 273 273 rc = PTR_ERR(hash); 274 - pr_err("%s: Crypto %s allocation error %d", __func__, name, rc); 274 + pr_err("%s: Crypto %s allocation error %d\n", __func__, name, rc); 275 275 return rc; 276 276 } 277 277 ··· 279 279 sdesc = kmalloc(size, GFP_KERNEL); 280 280 if (!sdesc) { 281 281 rc = -ENOMEM; 282 - pr_err("%s: Memory allocation failure", __func__); 282 + pr_err("%s: Memory allocation failure\n", __func__); 283 283 goto do_shash_err; 284 284 } 285 285 sdesc->shash.tfm = hash; ··· 288 288 if (key_len > 0) { 289 289 rc = crypto_shash_setkey(hash, key, key_len); 290 290 if (rc) { 291 - pr_err("%s: Could not setkey %s shash", __func__, name); 291 + pr_err("%s: Could not setkey %s shash\n", __func__, name); 292 292 goto do_shash_err; 293 293 } 294 294 } 295 295 296 296 rc = crypto_shash_init(&sdesc->shash); 297 297 if (rc) { 298 - pr_err("%s: Could not init %s shash", __func__, name); 298 + pr_err("%s: Could not init %s shash\n", __func__, name); 299 299 goto do_shash_err; 300 300 } 301 301 rc = crypto_shash_update(&sdesc->shash, data1, data1_len); 302 302 if (rc) { 303 - pr_err("%s: Could not update1", __func__); 303 + pr_err("%s: Could not update1\n", __func__); 304 304 goto do_shash_err; 305 305 } 306 306 if (data2 && data2_len) { 307 307 rc = crypto_shash_update(&sdesc->shash, data2, data2_len); 308 308 if (rc) { 309 - pr_err("%s: Could not update2", __func__); 309 + pr_err("%s: Could not update2\n", __func__); 310 310 goto do_shash_err; 311 311 } 312 312 } 313 313 rc = crypto_shash_final(&sdesc->shash, result); 314 314 if (rc) 315 - pr_err("%s: Could not generate %s hash", __func__, name); 315 + pr_err("%s: Could not generate %s hash\n", __func__, name); 316 316 317 317 do_shash_err: 318 318 crypto_free_shash(hash);
+5 -5
drivers/crypto/caam/caamalg.c
··· 992 992 struct caam_ctx *ctx = crypto_aead_ctx(aead); 993 993 unsigned int ivsize = crypto_aead_ivsize(aead); 994 994 u32 *desc = edesc->hw_desc; 995 - bool generic_gcm = (ivsize == 12); 995 + bool generic_gcm = (ivsize == GCM_AES_IV_SIZE); 996 996 unsigned int last; 997 997 998 998 init_aead_job(req, edesc, all_contig, encrypt); ··· 1004 1004 1005 1005 /* Read GCM IV */ 1006 1006 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | 1007 - FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last); 1007 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last); 1008 1008 /* Append Salt */ 1009 1009 if (!generic_gcm) 1010 1010 append_data(desc, ctx->key + ctx->cdata.keylen, 4); ··· 1953 1953 .setauthsize = rfc4106_setauthsize, 1954 1954 .encrypt = ipsec_gcm_encrypt, 1955 1955 .decrypt = ipsec_gcm_decrypt, 1956 - .ivsize = 8, 1956 + .ivsize = GCM_RFC4106_IV_SIZE, 1957 1957 .maxauthsize = AES_BLOCK_SIZE, 1958 1958 }, 1959 1959 .caam = { ··· 1971 1971 .setauthsize = rfc4543_setauthsize, 1972 1972 .encrypt = ipsec_gcm_encrypt, 1973 1973 .decrypt = ipsec_gcm_decrypt, 1974 - .ivsize = 8, 1974 + .ivsize = GCM_RFC4543_IV_SIZE, 1975 1975 .maxauthsize = AES_BLOCK_SIZE, 1976 1976 }, 1977 1977 .caam = { ··· 1990 1990 .setauthsize = gcm_setauthsize, 1991 1991 .encrypt = gcm_encrypt, 1992 1992 .decrypt = gcm_decrypt, 1993 - .ivsize = 12, 1993 + .ivsize = GCM_AES_IV_SIZE, 1994 1994 .maxauthsize = AES_BLOCK_SIZE, 1995 1995 }, 1996 1996 .caam = {
+6 -1
drivers/crypto/caam/caamalg_qi.c
··· 7 7 */ 8 8 9 9 #include "compat.h" 10 - 10 + #include "ctrl.h" 11 11 #include "regs.h" 12 12 #include "intern.h" 13 13 #include "desc_constr.h" ··· 2311 2311 */ 2312 2312 if (!priv || !priv->qi_present) 2313 2313 return -ENODEV; 2314 + 2315 + if (caam_dpaa2) { 2316 + dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); 2317 + return -ENODEV; 2318 + } 2314 2319 2315 2320 INIT_LIST_HEAD(&alg_list); 2316 2321
+4 -8
drivers/crypto/caam/caamhash.c
··· 218 218 } 219 219 220 220 /* Map state->caam_ctx, and add it to link table */ 221 - static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, 221 + static inline int ctx_map_to_sec4_sg(struct device *jrdev, 222 222 struct caam_hash_state *state, int ctx_len, 223 223 struct sec4_sg_entry *sec4_sg, u32 flag) 224 224 { ··· 773 773 edesc->src_nents = src_nents; 774 774 edesc->sec4_sg_bytes = sec4_sg_bytes; 775 775 776 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, 776 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 777 777 edesc->sec4_sg, DMA_BIDIRECTIONAL); 778 778 if (ret) 779 779 goto unmap_ctx; ··· 871 871 desc = edesc->hw_desc; 872 872 873 873 edesc->sec4_sg_bytes = sec4_sg_bytes; 874 - edesc->src_nents = 0; 875 874 876 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, 875 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 877 876 edesc->sec4_sg, DMA_TO_DEVICE); 878 877 if (ret) 879 878 goto unmap_ctx; ··· 966 967 967 968 edesc->src_nents = src_nents; 968 969 969 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, 970 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, 970 971 edesc->sec4_sg, DMA_TO_DEVICE); 971 972 if (ret) 972 973 goto unmap_ctx; ··· 1122 1123 dev_err(jrdev, "unable to map dst\n"); 1123 1124 goto unmap; 1124 1125 } 1125 - edesc->src_nents = 0; 1126 1126 1127 1127 #ifdef DEBUG 1128 1128 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", ··· 1203 1205 1204 1206 edesc->src_nents = src_nents; 1205 1207 edesc->sec4_sg_bytes = sec4_sg_bytes; 1206 - edesc->dst_dma = 0; 1207 1208 1208 1209 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); 1209 1210 if (ret) ··· 1414 1417 } 1415 1418 1416 1419 edesc->src_nents = src_nents; 1417 - edesc->dst_dma = 0; 1418 1420 1419 1421 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, 1420 1422 to_hash);
+1
drivers/crypto/caam/compat.h
··· 32 32 #include <crypto/aes.h> 33 33 #include <crypto/ctr.h> 34 34 #include <crypto/des.h> 35 + #include <crypto/gcm.h> 35 36 #include <crypto/sha.h> 36 37 #include <crypto/md5.h> 37 38 #include <crypto/internal/aead.h>
+1 -1
drivers/crypto/caam/desc.h
··· 1440 1440 #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT) 1441 1441 #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT) 1442 1442 #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT) 1443 - #define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT) 1443 + #define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT) 1444 1444 #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT) 1445 1445 #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT) 1446 1446 #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
+1 -1
drivers/crypto/cavium/nitrox/nitrox_hal.c
··· 127 127 * size and interrupt threshold. 128 128 */ 129 129 offset = NPS_PKT_IN_INSTR_BADDRX(i); 130 - nitrox_write_csr(ndev, NPS_PKT_IN_INSTR_BADDRX(i), cmdq->dma); 130 + nitrox_write_csr(ndev, offset, cmdq->dma); 131 131 132 132 /* configure ring size */ 133 133 offset = NPS_PKT_IN_INSTR_RSIZEX(i);
+4 -5
drivers/crypto/ccp/ccp-crypto-aes-galois.c
··· 19 19 #include <crypto/algapi.h> 20 20 #include <crypto/aes.h> 21 21 #include <crypto/ctr.h> 22 + #include <crypto/gcm.h> 22 23 #include <crypto/scatterwalk.h> 23 24 #include <linux/delay.h> 24 25 25 26 #include "ccp-crypto.h" 26 - 27 - #define AES_GCM_IVSIZE 12 28 27 29 28 static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret) 30 29 { ··· 94 95 */ 95 96 96 97 /* Prepare the IV: 12 bytes + an integer (counter) */ 97 - memcpy(rctx->iv, req->iv, AES_GCM_IVSIZE); 98 + memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE); 98 99 for (i = 0; i < 3; i++) 99 - rctx->iv[i + AES_GCM_IVSIZE] = 0; 100 + rctx->iv[i + GCM_AES_IV_SIZE] = 0; 100 101 rctx->iv[AES_BLOCK_SIZE - 1] = 1; 101 102 102 103 /* Set up a scatterlist for the IV */ ··· 159 160 .encrypt = ccp_aes_gcm_encrypt, 160 161 .decrypt = ccp_aes_gcm_decrypt, 161 162 .init = ccp_aes_gcm_cra_init, 162 - .ivsize = AES_GCM_IVSIZE, 163 + .ivsize = GCM_AES_IV_SIZE, 163 164 .maxauthsize = AES_BLOCK_SIZE, 164 165 .base = { 165 166 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+3 -5
drivers/crypto/ccp/ccp-crypto-main.c
··· 222 222 223 223 /* Check if the cmd can/should be queued */ 224 224 if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { 225 - ret = -EBUSY; 226 - if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) 225 + if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) { 226 + ret = -ENOSPC; 227 227 goto e_lock; 228 + } 228 229 } 229 230 230 231 /* Look for an entry with the same tfm. If there is a cmd ··· 244 243 ret = ccp_enqueue_cmd(crypto_cmd->cmd); 245 244 if (!ccp_crypto_success(ret)) 246 245 goto e_lock; /* Error, don't queue it */ 247 - if ((ret == -EBUSY) && 248 - !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) 249 - goto e_lock; /* Not backlogging, don't queue it */ 250 246 } 251 247 252 248 if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
+1 -2
drivers/crypto/ccp/ccp-dev-v5.c
··· 788 788 struct ccp_cmd_queue *cmd_q; 789 789 struct dma_pool *dma_pool; 790 790 char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; 791 - unsigned int qmr, qim, i; 791 + unsigned int qmr, i; 792 792 u64 status; 793 793 u32 status_lo, status_hi; 794 794 int ret; 795 795 796 796 /* Find available queues */ 797 - qim = 0; 798 797 qmr = ioread32(ccp->io_regs + Q_MASK_REG); 799 798 for (i = 0; i < MAX_HW_QUEUES; i++) { 800 799
+5 -2
drivers/crypto/ccp/ccp-dev.c
··· 292 292 i = ccp->cmd_q_count; 293 293 294 294 if (ccp->cmd_count >= MAX_CMD_QLEN) { 295 - ret = -EBUSY; 296 - if (cmd->flags & CCP_CMD_MAY_BACKLOG) 295 + if (cmd->flags & CCP_CMD_MAY_BACKLOG) { 296 + ret = -EBUSY; 297 297 list_add_tail(&cmd->entry, &ccp->backlog); 298 + } else { 299 + ret = -ENOSPC; 300 + } 298 301 } else { 299 302 ret = -EINPROGRESS; 300 303 ccp->cmd_count++;
+2 -3
drivers/crypto/ccp/ccp-dmaengine.c
··· 223 223 desc->tx_desc.cookie, desc->status); 224 224 225 225 dma_cookie_complete(tx_desc); 226 + dma_descriptor_unmap(tx_desc); 226 227 } 227 228 228 229 desc = __ccp_next_dma_desc(chan, desc); ··· 231 230 spin_unlock_irqrestore(&chan->lock, flags); 232 231 233 232 if (tx_desc) { 234 - if (tx_desc->callback && 235 - (tx_desc->flags & DMA_PREP_INTERRUPT)) 236 - tx_desc->callback(tx_desc->callback_param); 233 + dmaengine_desc_get_callback_invoke(tx_desc, NULL); 237 234 238 235 dma_run_dependencies(tx_desc); 239 236 }
+1050 -758
drivers/crypto/chelsio/chcr_algo.c
··· 53 53 #include <crypto/aes.h> 54 54 #include <crypto/algapi.h> 55 55 #include <crypto/hash.h> 56 + #include <crypto/gcm.h> 56 57 #include <crypto/sha.h> 57 58 #include <crypto/authenc.h> 58 59 #include <crypto/ctr.h> ··· 70 69 #include "chcr_core.h" 71 70 #include "chcr_algo.h" 72 71 #include "chcr_crypto.h" 72 + 73 + #define IV AES_BLOCK_SIZE 73 74 74 75 static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx) 75 76 { ··· 105 102 106 103 static inline int is_ofld_imm(const struct sk_buff *skb) 107 104 { 108 - return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN); 105 + return (skb->len <= SGE_MAX_WR_LEN); 109 106 } 110 107 111 108 /* ··· 120 117 return (3 * n) / 2 + (n & 1) + 2; 121 118 } 122 119 120 + static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen, 121 + unsigned int entlen, 122 + unsigned int skip) 123 + { 124 + int nents = 0; 125 + unsigned int less; 126 + unsigned int skip_len = 0; 127 + 128 + while (sg && skip) { 129 + if (sg_dma_len(sg) <= skip) { 130 + skip -= sg_dma_len(sg); 131 + skip_len = 0; 132 + sg = sg_next(sg); 133 + } else { 134 + skip_len = skip; 135 + skip = 0; 136 + } 137 + } 138 + 139 + while (sg && reqlen) { 140 + less = min(reqlen, sg_dma_len(sg) - skip_len); 141 + nents += DIV_ROUND_UP(less, entlen); 142 + reqlen -= less; 143 + skip_len = 0; 144 + sg = sg_next(sg); 145 + } 146 + return nents; 147 + } 148 + 149 + static inline void chcr_handle_ahash_resp(struct ahash_request *req, 150 + unsigned char *input, 151 + int err) 152 + { 153 + struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); 154 + int digestsize, updated_digestsize; 155 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 156 + struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm)); 157 + 158 + if (input == NULL) 159 + goto out; 160 + reqctx = ahash_request_ctx(req); 161 + digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); 162 + if (reqctx->is_sg_map) 163 + chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); 164 + if (reqctx->dma_addr) 
165 + dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr, 166 + reqctx->dma_len, DMA_TO_DEVICE); 167 + reqctx->dma_addr = 0; 168 + updated_digestsize = digestsize; 169 + if (digestsize == SHA224_DIGEST_SIZE) 170 + updated_digestsize = SHA256_DIGEST_SIZE; 171 + else if (digestsize == SHA384_DIGEST_SIZE) 172 + updated_digestsize = SHA512_DIGEST_SIZE; 173 + if (reqctx->result == 1) { 174 + reqctx->result = 0; 175 + memcpy(req->result, input + sizeof(struct cpl_fw6_pld), 176 + digestsize); 177 + } else { 178 + memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld), 179 + updated_digestsize); 180 + } 181 + out: 182 + req->base.complete(&req->base, err); 183 + 184 + } 185 + 186 + static inline void chcr_handle_aead_resp(struct aead_request *req, 187 + unsigned char *input, 188 + int err) 189 + { 190 + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 191 + struct crypto_aead *tfm = crypto_aead_reqtfm(req); 192 + struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm)); 193 + 194 + 195 + chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op); 196 + if (reqctx->b0_dma) 197 + dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma, 198 + reqctx->b0_len, DMA_BIDIRECTIONAL); 199 + if (reqctx->verify == VERIFY_SW) { 200 + chcr_verify_tag(req, input, &err); 201 + reqctx->verify = VERIFY_HW; 202 + } 203 + req->base.complete(&req->base, err); 204 + 205 + } 123 206 static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) 124 207 { 125 208 u8 temp[SHA512_DIGEST_SIZE]; ··· 240 151 { 241 152 struct crypto_tfm *tfm = req->tfm; 242 153 struct chcr_context *ctx = crypto_tfm_ctx(tfm); 243 - struct uld_ctx *u_ctx = ULD_CTX(ctx); 244 - struct chcr_req_ctx ctx_req; 245 - unsigned int digestsize, updated_digestsize; 246 154 struct adapter *adap = padap(ctx->dev); 247 155 248 156 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { 249 157 case CRYPTO_ALG_TYPE_AEAD: 250 - ctx_req.req.aead_req = aead_request_cast(req); 251 - ctx_req.ctx.reqctx = 
aead_request_ctx(ctx_req.req.aead_req); 252 - dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst, 253 - ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE); 254 - if (ctx_req.ctx.reqctx->skb) { 255 - kfree_skb(ctx_req.ctx.reqctx->skb); 256 - ctx_req.ctx.reqctx->skb = NULL; 257 - } 258 - free_new_sg(ctx_req.ctx.reqctx->newdstsg); 259 - ctx_req.ctx.reqctx->newdstsg = NULL; 260 - if (ctx_req.ctx.reqctx->verify == VERIFY_SW) { 261 - chcr_verify_tag(ctx_req.req.aead_req, input, 262 - &err); 263 - ctx_req.ctx.reqctx->verify = VERIFY_HW; 264 - } 265 - ctx_req.req.aead_req->base.complete(req, err); 158 + chcr_handle_aead_resp(aead_request_cast(req), input, err); 266 159 break; 267 160 268 161 case CRYPTO_ALG_TYPE_ABLKCIPHER: ··· 253 182 break; 254 183 255 184 case CRYPTO_ALG_TYPE_AHASH: 256 - ctx_req.req.ahash_req = ahash_request_cast(req); 257 - ctx_req.ctx.ahash_ctx = 258 - ahash_request_ctx(ctx_req.req.ahash_req); 259 - digestsize = 260 - crypto_ahash_digestsize(crypto_ahash_reqtfm( 261 - ctx_req.req.ahash_req)); 262 - updated_digestsize = digestsize; 263 - if (digestsize == SHA224_DIGEST_SIZE) 264 - updated_digestsize = SHA256_DIGEST_SIZE; 265 - else if (digestsize == SHA384_DIGEST_SIZE) 266 - updated_digestsize = SHA512_DIGEST_SIZE; 267 - if (ctx_req.ctx.ahash_ctx->skb) { 268 - kfree_skb(ctx_req.ctx.ahash_ctx->skb); 269 - ctx_req.ctx.ahash_ctx->skb = NULL; 185 + chcr_handle_ahash_resp(ahash_request_cast(req), input, err); 270 186 } 271 - if (ctx_req.ctx.ahash_ctx->result == 1) { 272 - ctx_req.ctx.ahash_ctx->result = 0; 273 - memcpy(ctx_req.req.ahash_req->result, input + 274 - sizeof(struct cpl_fw6_pld), 275 - digestsize); 276 - } else { 277 - memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input + 278 - sizeof(struct cpl_fw6_pld), 279 - updated_digestsize); 280 - } 281 - ctx_req.req.ahash_req->base.complete(req, err); 282 - break; 283 - } 284 187 atomic_inc(&adap->chcr_stats.complete); 285 188 return err; 286 189 } 287 190 288 - /* 289 - * calc_tx_flits_ofld - 
calculate # of flits for an offload packet 290 - * @skb: the packet 291 - * Returns the number of flits needed for the given offload packet. 292 - * These packets are already fully constructed and no additional headers 293 - * will be added. 294 - */ 295 - static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) 296 - { 297 - unsigned int flits, cnt; 298 - 299 - if (is_ofld_imm(skb)) 300 - return DIV_ROUND_UP(skb->len, 8); 301 - 302 - flits = skb_transport_offset(skb) / 8; /* headers */ 303 - cnt = skb_shinfo(skb)->nr_frags; 304 - if (skb_tail_pointer(skb) != skb_transport_header(skb)) 305 - cnt++; 306 - return flits + sgl_len(cnt); 307 - } 308 - 309 - static inline void get_aes_decrypt_key(unsigned char *dec_key, 191 + static void get_aes_decrypt_key(unsigned char *dec_key, 310 192 const unsigned char *key, 311 193 unsigned int keylength) 312 194 { ··· 406 382 return 0; 407 383 } 408 384 409 - static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl, 410 - struct scatterlist *sg, 411 - struct phys_sge_parm *sg_param) 385 + static inline void dsgl_walk_init(struct dsgl_walk *walk, 386 + struct cpl_rx_phys_dsgl *dsgl) 412 387 { 413 - struct phys_sge_pairs *to; 414 - unsigned int len = 0, left_size = sg_param->obsize; 415 - unsigned int nents = sg_param->nents, i, j = 0; 388 + walk->dsgl = dsgl; 389 + walk->nents = 0; 390 + walk->to = (struct phys_sge_pairs *)(dsgl + 1); 391 + } 392 + 393 + static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid) 394 + { 395 + struct cpl_rx_phys_dsgl *phys_cpl; 396 + 397 + phys_cpl = walk->dsgl; 416 398 417 399 phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) 418 400 | CPL_RX_PHYS_DSGL_ISRDMA_V(0)); ··· 428 398 CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) | 429 399 CPL_RX_PHYS_DSGL_PCITPHNT_V(0) | 430 400 CPL_RX_PHYS_DSGL_DCAID_V(0) | 431 - CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents)); 401 + CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents)); 432 402 phys_cpl->rss_hdr_int.opcode = 
CPL_RX_PHYS_ADDR; 433 - phys_cpl->rss_hdr_int.qid = htons(sg_param->qid); 403 + phys_cpl->rss_hdr_int.qid = htons(qid); 434 404 phys_cpl->rss_hdr_int.hash_val = 0; 435 - to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl + 436 - sizeof(struct cpl_rx_phys_dsgl)); 437 - for (i = 0; nents && left_size; to++) { 438 - for (j = 0; j < 8 && nents && left_size; j++, nents--) { 439 - len = min(left_size, sg_dma_len(sg)); 440 - to->len[j] = htons(len); 441 - to->addr[j] = cpu_to_be64(sg_dma_address(sg)); 442 - left_size -= len; 443 - sg = sg_next(sg); 444 - } 445 - } 446 405 } 447 406 448 - static inline int map_writesg_phys_cpl(struct device *dev, 449 - struct cpl_rx_phys_dsgl *phys_cpl, 450 - struct scatterlist *sg, 451 - struct phys_sge_parm *sg_param) 407 + static inline void dsgl_walk_add_page(struct dsgl_walk *walk, 408 + size_t size, 409 + dma_addr_t *addr) 452 410 { 453 - if (!sg || !sg_param->nents) 454 - return -EINVAL; 411 + int j; 455 412 456 - sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE); 457 - if (sg_param->nents == 0) { 458 - pr_err("CHCR : DMA mapping failed\n"); 459 - return -EINVAL; 413 + if (!size) 414 + return; 415 + j = walk->nents; 416 + walk->to->len[j % 8] = htons(size); 417 + walk->to->addr[j % 8] = cpu_to_be64(*addr); 418 + j++; 419 + if ((j % 8) == 0) 420 + walk->to++; 421 + walk->nents = j; 422 + } 423 + 424 + static void dsgl_walk_add_sg(struct dsgl_walk *walk, 425 + struct scatterlist *sg, 426 + unsigned int slen, 427 + unsigned int skip) 428 + { 429 + int skip_len = 0; 430 + unsigned int left_size = slen, len = 0; 431 + unsigned int j = walk->nents; 432 + int offset, ent_len; 433 + 434 + if (!slen) 435 + return; 436 + while (sg && skip) { 437 + if (sg_dma_len(sg) <= skip) { 438 + skip -= sg_dma_len(sg); 439 + skip_len = 0; 440 + sg = sg_next(sg); 441 + } else { 442 + skip_len = skip; 443 + skip = 0; 444 + } 460 445 } 461 - write_phys_cpl(phys_cpl, sg, sg_param); 462 - return 0; 446 + 447 + while (left_size && 
sg) { 448 + len = min_t(u32, left_size, sg_dma_len(sg) - skip_len); 449 + offset = 0; 450 + while (len) { 451 + ent_len = min_t(u32, len, CHCR_DST_SG_SIZE); 452 + walk->to->len[j % 8] = htons(ent_len); 453 + walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) + 454 + offset + skip_len); 455 + offset += ent_len; 456 + len -= ent_len; 457 + j++; 458 + if ((j % 8) == 0) 459 + walk->to++; 460 + } 461 + walk->last_sg = sg; 462 + walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) - 463 + skip_len) + skip_len; 464 + left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len); 465 + skip_len = 0; 466 + sg = sg_next(sg); 467 + } 468 + walk->nents = j; 469 + } 470 + 471 + static inline void ulptx_walk_init(struct ulptx_walk *walk, 472 + struct ulptx_sgl *ulp) 473 + { 474 + walk->sgl = ulp; 475 + walk->nents = 0; 476 + walk->pair_idx = 0; 477 + walk->pair = ulp->sge; 478 + walk->last_sg = NULL; 479 + walk->last_sg_len = 0; 480 + } 481 + 482 + static inline void ulptx_walk_end(struct ulptx_walk *walk) 483 + { 484 + walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | 485 + ULPTX_NSGE_V(walk->nents)); 486 + } 487 + 488 + 489 + static inline void ulptx_walk_add_page(struct ulptx_walk *walk, 490 + size_t size, 491 + dma_addr_t *addr) 492 + { 493 + if (!size) 494 + return; 495 + 496 + if (walk->nents == 0) { 497 + walk->sgl->len0 = cpu_to_be32(size); 498 + walk->sgl->addr0 = cpu_to_be64(*addr); 499 + } else { 500 + walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr); 501 + walk->pair->len[walk->pair_idx] = cpu_to_be32(size); 502 + walk->pair_idx = !walk->pair_idx; 503 + if (!walk->pair_idx) 504 + walk->pair++; 505 + } 506 + walk->nents++; 507 + } 508 + 509 + static void ulptx_walk_add_sg(struct ulptx_walk *walk, 510 + struct scatterlist *sg, 511 + unsigned int len, 512 + unsigned int skip) 513 + { 514 + int small; 515 + int skip_len = 0; 516 + unsigned int sgmin; 517 + 518 + if (!len) 519 + return; 520 + 521 + while (sg && skip) { 522 + if (sg_dma_len(sg) <= 
skip) { 523 + skip -= sg_dma_len(sg); 524 + skip_len = 0; 525 + sg = sg_next(sg); 526 + } else { 527 + skip_len = skip; 528 + skip = 0; 529 + } 530 + } 531 + if (walk->nents == 0) { 532 + small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len); 533 + sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); 534 + walk->sgl->len0 = cpu_to_be32(sgmin); 535 + walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len); 536 + walk->nents++; 537 + len -= sgmin; 538 + walk->last_sg = sg; 539 + walk->last_sg_len = sgmin + skip_len; 540 + skip_len += sgmin; 541 + if (sg_dma_len(sg) == skip_len) { 542 + sg = sg_next(sg); 543 + skip_len = 0; 544 + } 545 + } 546 + 547 + while (sg && len) { 548 + small = min(sg_dma_len(sg) - skip_len, len); 549 + sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); 550 + walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin); 551 + walk->pair->addr[walk->pair_idx] = 552 + cpu_to_be64(sg_dma_address(sg) + skip_len); 553 + walk->pair_idx = !walk->pair_idx; 554 + walk->nents++; 555 + if (!walk->pair_idx) 556 + walk->pair++; 557 + len -= sgmin; 558 + skip_len += sgmin; 559 + walk->last_sg = sg; 560 + walk->last_sg_len = skip_len; 561 + if (sg_dma_len(sg) == skip_len) { 562 + sg = sg_next(sg); 563 + skip_len = 0; 564 + } 565 + } 463 566 } 464 567 465 568 static inline int get_aead_subtype(struct crypto_aead *aead) ··· 610 447 container_of(alg, struct chcr_alg_template, alg.crypto); 611 448 612 449 return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; 613 - } 614 - 615 - static inline void write_buffer_to_skb(struct sk_buff *skb, 616 - unsigned int *frags, 617 - char *bfr, 618 - u8 bfr_len) 619 - { 620 - skb->len += bfr_len; 621 - skb->data_len += bfr_len; 622 - skb->truesize += bfr_len; 623 - get_page(virt_to_page(bfr)); 624 - skb_fill_page_desc(skb, *frags, virt_to_page(bfr), 625 - offset_in_page(bfr), bfr_len); 626 - (*frags)++; 627 - } 628 - 629 - 630 - static inline void 631 - write_sg_to_skb(struct sk_buff *skb, unsigned int *frags, 632 - 
struct scatterlist *sg, unsigned int count) 633 - { 634 - struct page *spage; 635 - unsigned int page_len; 636 - 637 - skb->len += count; 638 - skb->data_len += count; 639 - skb->truesize += count; 640 - 641 - while (count > 0) { 642 - if (!sg || (!(sg->length))) 643 - break; 644 - spage = sg_page(sg); 645 - get_page(spage); 646 - page_len = min(sg->length, count); 647 - skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len); 648 - (*frags)++; 649 - count -= page_len; 650 - sg = sg_next(sg); 651 - } 652 450 } 653 451 654 452 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx) ··· 648 524 struct scatterlist *dst, 649 525 unsigned int minsg, 650 526 unsigned int space, 651 - short int *sent, 652 - short int *dent) 527 + unsigned int srcskip, 528 + unsigned int dstskip) 653 529 { 654 530 int srclen = 0, dstlen = 0; 655 - int srcsg = minsg, dstsg = 0; 531 + int srcsg = minsg, dstsg = minsg; 532 + int offset = 0, less; 656 533 657 - *sent = 0; 658 - *dent = 0; 659 - while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) && 534 + if (sg_dma_len(src) == srcskip) { 535 + src = sg_next(src); 536 + srcskip = 0; 537 + } 538 + 539 + if (sg_dma_len(dst) == dstskip) { 540 + dst = sg_next(dst); 541 + dstskip = 0; 542 + } 543 + 544 + while (src && dst && 660 545 space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) { 661 - srclen += src->length; 546 + srclen += (sg_dma_len(src) - srcskip); 662 547 srcsg++; 548 + offset = 0; 663 549 while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) && 664 550 space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) { 665 551 if (srclen <= dstlen) 666 552 break; 667 - dstlen += dst->length; 668 - dst = sg_next(dst); 553 + less = min_t(unsigned int, sg_dma_len(dst) - offset - 554 + dstskip, CHCR_DST_SG_SIZE); 555 + dstlen += less; 556 + offset += less; 557 + if (offset == sg_dma_len(dst)) { 558 + dst = sg_next(dst); 559 + offset = 0; 560 + } 669 561 dstsg++; 562 + dstskip = 0; 670 563 } 671 564 src = sg_next(src); 565 + srcskip 
= 0; 672 566 } 673 - *sent = srcsg - minsg; 674 - *dent = dstsg; 675 567 return min(srclen, dstlen); 676 568 } 677 569 ··· 716 576 } 717 577 static inline void create_wreq(struct chcr_context *ctx, 718 578 struct chcr_wr *chcr_req, 719 - void *req, struct sk_buff *skb, 720 - int kctx_len, int hash_sz, 721 - int is_iv, 579 + struct crypto_async_request *req, 580 + unsigned int imm, 581 + int hash_sz, 582 + unsigned int len16, 722 583 unsigned int sc_len, 723 584 unsigned int lcb) 724 585 { 725 586 struct uld_ctx *u_ctx = ULD_CTX(ctx); 726 - int iv_loc = IV_DSGL; 727 587 int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx]; 728 - unsigned int immdatalen = 0, nr_frags = 0; 729 588 730 - if (is_ofld_imm(skb)) { 731 - immdatalen = skb->data_len; 732 - iv_loc = IV_IMMEDIATE; 733 - } else { 734 - nr_frags = skb_shinfo(skb)->nr_frags; 735 - } 736 589 737 - chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen, 738 - ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4)); 590 + chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE; 739 591 chcr_req->wreq.pld_size_hash_size = 740 - htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) | 741 - FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz)); 592 + htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz)); 742 593 chcr_req->wreq.len16_pkd = 743 - htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP( 744 - (calc_tx_flits_ofld(skb) * 8), 16))); 594 + htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16))); 745 595 chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req); 746 596 chcr_req->wreq.rx_chid_to_rx_q_id = 747 597 FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid, 748 - is_iv ? 
iv_loc : IV_NOP, !!lcb, 749 - ctx->tx_qidx); 598 + !!lcb, ctx->tx_qidx); 750 599 751 600 chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id, 752 601 qid); 753 - chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8), 754 - 16) - ((sizeof(chcr_req->wreq)) >> 4))); 602 + chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) - 603 + ((sizeof(chcr_req->wreq)) >> 4))); 755 604 756 - chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen); 605 + chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm); 757 606 chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + 758 - sizeof(chcr_req->key_ctx) + 759 - kctx_len + sc_len + immdatalen); 607 + sizeof(chcr_req->key_ctx) + sc_len); 760 608 } 761 609 762 610 /** ··· 757 629 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) 758 630 { 759 631 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req); 760 - struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); 761 - struct uld_ctx *u_ctx = ULD_CTX(ctx); 762 - struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 632 + struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); 763 633 struct sk_buff *skb = NULL; 764 634 struct chcr_wr *chcr_req; 765 635 struct cpl_rx_phys_dsgl *phys_cpl; 636 + struct ulptx_sgl *ulptx; 766 637 struct chcr_blkcipher_req_ctx *reqctx = 767 638 ablkcipher_request_ctx(wrparam->req); 768 - struct phys_sge_parm sg_param; 769 - unsigned int frags = 0, transhdr_len, phys_dsgl; 639 + unsigned int temp = 0, transhdr_len, dst_size; 770 640 int error; 771 - unsigned int ivsize = AES_BLOCK_SIZE, kctx_len; 641 + int nents; 642 + unsigned int kctx_len; 772 643 gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
773 644 GFP_KERNEL : GFP_ATOMIC; 774 - struct adapter *adap = padap(ctx->dev); 645 + struct adapter *adap = padap(c_ctx(tfm)->dev); 775 646 776 - phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents); 777 - 647 + nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE, 648 + reqctx->dst_ofst); 649 + dst_size = get_space_for_phys_dsgl(nents + 1); 778 650 kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); 779 - transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); 780 - skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); 651 + transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); 652 + nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes, 653 + CHCR_SRC_SG_SIZE, reqctx->src_ofst); 654 + temp = reqctx->imm ? (DIV_ROUND_UP((IV + wrparam->req->nbytes), 16) 655 + * 16) : (sgl_len(nents + MIN_CIPHER_SG) * 8); 656 + transhdr_len += temp; 657 + transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; 658 + skb = alloc_skb(SGE_MAX_WR_LEN, flags); 781 659 if (!skb) { 782 660 error = -ENOMEM; 783 661 goto err; 784 662 } 785 - skb_reserve(skb, sizeof(struct sge_opaque_hdr)); 786 663 chcr_req = __skb_put_zero(skb, transhdr_len); 787 664 chcr_req->sec_cpl.op_ivinsrtofst = 788 - FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1); 665 + FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1); 789 666 790 - chcr_req->sec_cpl.pldlen = htonl(ivsize + wrparam->bytes); 667 + chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes); 791 668 chcr_req->sec_cpl.aadstart_cipherstop_hi = 792 - FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0); 669 + FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0); 793 670 794 671 chcr_req->sec_cpl.cipherstop_lo_authinsert = 795 672 FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0); 796 673 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0, 797 674 ablkctx->ciph_mode, 798 - 0, 0, ivsize >> 1); 675 + 0, 0, IV >> 1); 799 676 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, 800 - 0, 1, 
phys_dsgl); 677 + 0, 0, dst_size); 801 678 802 679 chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr; 803 680 if ((reqctx->op == CHCR_DECRYPT_OP) && ··· 827 694 } 828 695 } 829 696 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); 830 - sg_param.nents = reqctx->dst_nents; 831 - sg_param.obsize = wrparam->bytes; 832 - sg_param.qid = wrparam->qid; 833 - error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, 834 - reqctx->dst, &sg_param); 835 - if (error) 836 - goto map_fail1; 697 + ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); 698 + chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam); 699 + chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid); 837 700 838 - skb_set_transport_header(skb, transhdr_len); 839 - write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); 840 - write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes); 841 701 atomic_inc(&adap->chcr_stats.cipher_rqst); 842 - create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1, 843 - sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl, 702 + temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len 703 + +(reqctx->imm ? 
(IV + wrparam->bytes) : 0); 704 + create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0, 705 + transhdr_len, temp, 844 706 ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC); 845 707 reqctx->skb = skb; 846 - skb_get(skb); 847 708 return skb; 848 - map_fail1: 849 - kfree_skb(skb); 850 709 err: 851 710 return ERR_PTR(error); 852 711 } ··· 863 738 unsigned int keylen) 864 739 { 865 740 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 866 - struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); 867 - struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 741 + struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); 868 742 int err = 0; 869 743 870 744 crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK); ··· 881 757 const u8 *key, 882 758 unsigned int keylen) 883 759 { 884 - struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); 885 - struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 760 + struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); 886 761 unsigned int ck_size, context_size; 887 762 u16 alignment = 0; 888 763 int err; ··· 913 790 const u8 *key, 914 791 unsigned int keylen) 915 792 { 916 - struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); 917 - struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 793 + struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); 918 794 unsigned int ck_size, context_size; 919 795 u16 alignment = 0; 920 796 int err; ··· 944 822 const u8 *key, 945 823 unsigned int keylen) 946 824 { 947 - struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); 948 - struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 825 + struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); 949 826 unsigned int ck_size, context_size; 950 827 u16 alignment = 0; 951 828 int err; ··· 1011 890 static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv) 1012 891 { 1013 892 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 1014 - struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); 1015 - struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 893 + struct 
ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); 1016 894 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); 1017 895 struct crypto_cipher *cipher; 1018 896 int ret, i; 1019 897 u8 *key; 1020 898 unsigned int keylen; 899 + int round = reqctx->last_req_len / AES_BLOCK_SIZE; 900 + int round8 = round / 8; 1021 901 1022 902 cipher = ablkctx->aes_generic; 1023 - memcpy(iv, req->info, AES_BLOCK_SIZE); 903 + memcpy(iv, reqctx->iv, AES_BLOCK_SIZE); 1024 904 1025 905 keylen = ablkctx->enckey_len / 2; 1026 906 key = ablkctx->key + keylen; 1027 907 ret = crypto_cipher_setkey(cipher, key, keylen); 1028 908 if (ret) 1029 909 goto out; 910 + /*H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0*/ 911 + for (i = 0; i < round8; i++) 912 + gf128mul_x8_ble((le128 *)iv, (le128 *)iv); 1030 913 1031 - crypto_cipher_encrypt_one(cipher, iv, iv); 1032 - for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++) 914 + for (i = 0; i < (round % 8); i++) 1033 915 gf128mul_x_ble((le128 *)iv, (le128 *)iv); 1034 916 1035 917 crypto_cipher_decrypt_one(cipher, iv, iv); ··· 1106 982 unsigned char *input, int err) 1107 983 { 1108 984 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 1109 - struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); 1110 - struct uld_ctx *u_ctx = ULD_CTX(ctx); 1111 - struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 985 + struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); 986 + struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); 1112 987 struct sk_buff *skb; 1113 988 struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input; 1114 989 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); 1115 990 struct cipher_wr_param wrparam; 1116 991 int bytes; 1117 992 1118 - dma_unmap_sg(&u_ctx->lldi.pdev->dev, reqctx->dst, reqctx->dst_nents, 1119 - DMA_FROM_DEVICE); 1120 - 1121 - if (reqctx->skb) { 1122 - kfree_skb(reqctx->skb); 1123 - reqctx->skb = NULL; 1124 - } 1125 993 if (err) 1126 - goto complete; 1127 - 994 + goto unmap; 1128 995 if (req->nbytes == 
reqctx->processed) { 996 + chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, 997 + req); 1129 998 err = chcr_final_cipher_iv(req, fw6_pld, req->info); 1130 999 goto complete; 1131 1000 } 1132 1001 1133 1002 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 1134 - ctx->tx_qidx))) { 1003 + c_ctx(tfm)->tx_qidx))) { 1135 1004 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { 1136 1005 err = -EBUSY; 1137 - goto complete; 1006 + goto unmap; 1138 1007 } 1139 1008 1140 1009 } 1141 - wrparam.srcsg = scatterwalk_ffwd(reqctx->srcffwd, req->src, 1142 - reqctx->processed); 1143 - reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, reqctx->dstsg, 1144 - reqctx->processed); 1145 - if (!wrparam.srcsg || !reqctx->dst) { 1146 - pr_err("Input sg list length less that nbytes\n"); 1147 - err = -EINVAL; 1148 - goto complete; 1149 - } 1150 - bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1, 1151 - SPACE_LEFT(ablkctx->enckey_len), 1152 - &wrparam.snent, &reqctx->dst_nents); 1010 + if (!reqctx->imm) { 1011 + bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1, 1012 + SPACE_LEFT(ablkctx->enckey_len), 1013 + reqctx->src_ofst, reqctx->dst_ofst); 1153 1014 if ((bytes + reqctx->processed) >= req->nbytes) 1154 1015 bytes = req->nbytes - reqctx->processed; 1155 1016 else 1156 1017 bytes = ROUND_16(bytes); 1018 + } else { 1019 + /*CTR mode counter overfloa*/ 1020 + bytes = req->nbytes - reqctx->processed; 1021 + } 1022 + dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, 1023 + reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); 1157 1024 err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv); 1025 + dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, 1026 + reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); 1158 1027 if (err) 1159 - goto complete; 1028 + goto unmap; 1160 1029 1161 1030 if (unlikely(bytes == 0)) { 1031 + chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, 1032 + req); 1162 1033 err = chcr_cipher_fallback(ablkctx->sw_cipher, 1163 1034 
req->base.flags, 1164 - wrparam.srcsg, 1165 - reqctx->dst, 1166 - req->nbytes - reqctx->processed, 1167 - reqctx->iv, 1035 + req->src, 1036 + req->dst, 1037 + req->nbytes, 1038 + req->info, 1168 1039 reqctx->op); 1169 1040 goto complete; 1170 1041 } ··· 1167 1048 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == 1168 1049 CRYPTO_ALG_SUB_TYPE_CTR) 1169 1050 bytes = adjust_ctr_overflow(reqctx->iv, bytes); 1170 - reqctx->processed += bytes; 1171 - wrparam.qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx]; 1051 + wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx]; 1172 1052 wrparam.req = req; 1173 1053 wrparam.bytes = bytes; 1174 1054 skb = create_cipher_wr(&wrparam); 1175 1055 if (IS_ERR(skb)) { 1176 1056 pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); 1177 1057 err = PTR_ERR(skb); 1178 - goto complete; 1058 + goto unmap; 1179 1059 } 1180 1060 skb->dev = u_ctx->lldi.ports[0]; 1181 - set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); 1061 + set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); 1182 1062 chcr_send_wr(skb); 1063 + reqctx->last_req_len = bytes; 1064 + reqctx->processed += bytes; 1183 1065 return 0; 1066 + unmap: 1067 + chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); 1184 1068 complete: 1185 - free_new_sg(reqctx->newdstsg); 1186 - reqctx->newdstsg = NULL; 1187 1069 req->base.complete(&req->base, err); 1188 1070 return err; 1189 1071 } ··· 1197 1077 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 1198 1078 unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); 1199 1079 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); 1200 - struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); 1201 - struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 1080 + struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); 1202 1081 struct cipher_wr_param wrparam; 1203 - int bytes, nents, err = -EINVAL; 1082 + int bytes, err = -EINVAL; 1204 1083 1205 - reqctx->newdstsg = NULL; 1206 1084 reqctx->processed = 0; 1207 1085 if 
(!req->info) 1208 1086 goto error; ··· 1211 1093 ablkctx->enckey_len, req->nbytes, ivsize); 1212 1094 goto error; 1213 1095 } 1214 - wrparam.srcsg = req->src; 1215 - if (is_newsg(req->dst, &nents)) { 1216 - reqctx->newdstsg = alloc_new_sg(req->dst, nents); 1217 - if (IS_ERR(reqctx->newdstsg)) 1218 - return PTR_ERR(reqctx->newdstsg); 1219 - reqctx->dstsg = reqctx->newdstsg; 1096 + chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); 1097 + if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) + 1098 + AES_MIN_KEY_SIZE + 1099 + sizeof(struct cpl_rx_phys_dsgl) + 1100 + /*Min dsgl size*/ 1101 + 32))) { 1102 + /* Can be sent as Imm*/ 1103 + unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len; 1104 + 1105 + dnents = sg_nents_xlen(req->dst, req->nbytes, 1106 + CHCR_DST_SG_SIZE, 0); 1107 + dnents += 1; // IV 1108 + phys_dsgl = get_space_for_phys_dsgl(dnents); 1109 + kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); 1110 + transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); 1111 + reqctx->imm = (transhdr_len + IV + req->nbytes) <= 1112 + SGE_MAX_WR_LEN; 1113 + bytes = IV + req->nbytes; 1114 + 1220 1115 } else { 1221 - reqctx->dstsg = req->dst; 1116 + reqctx->imm = 0; 1222 1117 } 1223 - bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG, 1224 - SPACE_LEFT(ablkctx->enckey_len), 1225 - &wrparam.snent, 1226 - &reqctx->dst_nents); 1118 + 1119 + if (!reqctx->imm) { 1120 + bytes = chcr_sg_ent_in_wr(req->src, req->dst, 1121 + MIN_CIPHER_SG, 1122 + SPACE_LEFT(ablkctx->enckey_len), 1123 + 0, 0); 1227 1124 if ((bytes + reqctx->processed) >= req->nbytes) 1228 1125 bytes = req->nbytes - reqctx->processed; 1229 1126 else 1230 1127 bytes = ROUND_16(bytes); 1231 - if (unlikely(bytes > req->nbytes)) 1128 + } else { 1232 1129 bytes = req->nbytes; 1130 + } 1233 1131 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == 1234 1132 CRYPTO_ALG_SUB_TYPE_CTR) { 1235 1133 bytes = adjust_ctr_overflow(req->info, bytes); ··· 1262 1128 1263 1129 
} else { 1264 1130 1265 - memcpy(reqctx->iv, req->info, ivsize); 1131 + memcpy(reqctx->iv, req->info, IV); 1266 1132 } 1267 1133 if (unlikely(bytes == 0)) { 1134 + chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, 1135 + req); 1268 1136 err = chcr_cipher_fallback(ablkctx->sw_cipher, 1269 1137 req->base.flags, 1270 1138 req->src, ··· 1276 1140 op_type); 1277 1141 goto error; 1278 1142 } 1279 - reqctx->processed = bytes; 1280 - reqctx->dst = reqctx->dstsg; 1281 1143 reqctx->op = op_type; 1144 + reqctx->srcsg = req->src; 1145 + reqctx->dstsg = req->dst; 1146 + reqctx->src_ofst = 0; 1147 + reqctx->dst_ofst = 0; 1282 1148 wrparam.qid = qid; 1283 1149 wrparam.req = req; 1284 1150 wrparam.bytes = bytes; 1285 1151 *skb = create_cipher_wr(&wrparam); 1286 1152 if (IS_ERR(*skb)) { 1287 1153 err = PTR_ERR(*skb); 1288 - goto error; 1154 + goto unmap; 1289 1155 } 1156 + reqctx->processed = bytes; 1157 + reqctx->last_req_len = bytes; 1290 1158 1291 1159 return 0; 1160 + unmap: 1161 + chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); 1292 1162 error: 1293 - free_new_sg(reqctx->newdstsg); 1294 - reqctx->newdstsg = NULL; 1295 1163 return err; 1296 1164 } 1297 1165 1298 1166 static int chcr_aes_encrypt(struct ablkcipher_request *req) 1299 1167 { 1300 1168 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 1301 - struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); 1302 1169 struct sk_buff *skb = NULL; 1303 1170 int err; 1304 - struct uld_ctx *u_ctx = ULD_CTX(ctx); 1171 + struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); 1305 1172 1306 1173 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 1307 - ctx->tx_qidx))) { 1174 + c_ctx(tfm)->tx_qidx))) { 1308 1175 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 1309 1176 return -EBUSY; 1310 1177 } 1311 1178 1312 - err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb, 1313 - CHCR_ENCRYPT_OP); 1179 + err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], 1180 + &skb, 
CHCR_ENCRYPT_OP); 1314 1181 if (err || !skb) 1315 1182 return err; 1316 1183 skb->dev = u_ctx->lldi.ports[0]; 1317 - set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); 1184 + set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); 1318 1185 chcr_send_wr(skb); 1319 1186 return -EINPROGRESS; 1320 1187 } ··· 1325 1186 static int chcr_aes_decrypt(struct ablkcipher_request *req) 1326 1187 { 1327 1188 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 1328 - struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); 1329 - struct uld_ctx *u_ctx = ULD_CTX(ctx); 1189 + struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); 1330 1190 struct sk_buff *skb = NULL; 1331 1191 int err; 1332 1192 1333 1193 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 1334 - ctx->tx_qidx))) { 1194 + c_ctx(tfm)->tx_qidx))) { 1335 1195 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 1336 1196 return -EBUSY; 1337 1197 } 1338 1198 1339 - err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb, 1340 - CHCR_DECRYPT_OP); 1199 + err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], 1200 + &skb, CHCR_DECRYPT_OP); 1341 1201 if (err || !skb) 1342 1202 return err; 1343 1203 skb->dev = u_ctx->lldi.ports[0]; 1344 - set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); 1204 + set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); 1345 1205 chcr_send_wr(skb); 1346 1206 return -EINPROGRESS; 1347 1207 } ··· 1488 1350 { 1489 1351 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); 1490 1352 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1491 - struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 1492 - struct hmac_ctx *hmacctx = HMAC_CTX(ctx); 1353 + struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm)); 1493 1354 struct sk_buff *skb = NULL; 1355 + struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm)); 1494 1356 struct chcr_wr *chcr_req; 1495 - unsigned int frags = 0, transhdr_len, iopad_alignment = 0; 1357 + struct ulptx_sgl *ulptx; 1358 + unsigned int nents = 0, 
transhdr_len, iopad_alignment = 0; 1496 1359 unsigned int digestsize = crypto_ahash_digestsize(tfm); 1497 - unsigned int kctx_len = 0; 1360 + unsigned int kctx_len = 0, temp = 0; 1498 1361 u8 hash_size_in_response = 0; 1499 1362 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 1500 1363 GFP_ATOMIC; 1501 - struct adapter *adap = padap(ctx->dev); 1364 + struct adapter *adap = padap(h_ctx(tfm)->dev); 1365 + int error = 0; 1502 1366 1503 1367 iopad_alignment = KEYCTX_ALIGN_PAD(digestsize); 1504 1368 kctx_len = param->alg_prm.result_size + iopad_alignment; ··· 1512 1372 else 1513 1373 hash_size_in_response = param->alg_prm.result_size; 1514 1374 transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); 1515 - skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); 1516 - if (!skb) 1517 - return skb; 1375 + req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <= 1376 + SGE_MAX_WR_LEN; 1377 + nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0); 1378 + nents += param->bfr_len ? 1 : 0; 1379 + transhdr_len += req_ctx->imm ? 
(DIV_ROUND_UP((param->bfr_len + 1380 + param->sg_len), 16) * 16) : 1381 + (sgl_len(nents) * 8); 1382 + transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; 1518 1383 1519 - skb_reserve(skb, sizeof(struct sge_opaque_hdr)); 1384 + skb = alloc_skb(SGE_MAX_WR_LEN, flags); 1385 + if (!skb) 1386 + return ERR_PTR(-ENOMEM); 1520 1387 chcr_req = __skb_put_zero(skb, transhdr_len); 1521 1388 1522 1389 chcr_req->sec_cpl.op_ivinsrtofst = 1523 - FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0); 1390 + FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0); 1524 1391 chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len); 1525 1392 1526 1393 chcr_req->sec_cpl.aadstart_cipherstop_hi = ··· 1556 1409 ((kctx_len + 1557 1410 sizeof(chcr_req->key_ctx)) >> 4)); 1558 1411 chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1); 1559 - 1560 - skb_set_transport_header(skb, transhdr_len); 1561 - if (param->bfr_len != 0) 1562 - write_buffer_to_skb(skb, &frags, req_ctx->reqbfr, 1563 - param->bfr_len); 1564 - if (param->sg_len != 0) 1565 - write_sg_to_skb(skb, &frags, req->src, param->sg_len); 1412 + ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len + 1413 + DUMMY_BYTES); 1414 + if (param->bfr_len != 0) { 1415 + req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev, 1416 + req_ctx->reqbfr, param->bfr_len, 1417 + DMA_TO_DEVICE); 1418 + if (dma_mapping_error(&u_ctx->lldi.pdev->dev, 1419 + req_ctx->dma_addr)) { 1420 + error = -ENOMEM; 1421 + goto err; 1422 + } 1423 + req_ctx->dma_len = param->bfr_len; 1424 + } else { 1425 + req_ctx->dma_addr = 0; 1426 + } 1427 + chcr_add_hash_src_ent(req, ulptx, param); 1428 + /* Request upto max wr size */ 1429 + temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? 
(param->sg_len 1430 + + param->bfr_len) : 0); 1566 1431 atomic_inc(&adap->chcr_stats.digest_rqst); 1567 - create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 1568 - hash_size_in_response, 0, DUMMY_BYTES, 0); 1432 + create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm, 1433 + hash_size_in_response, transhdr_len, 1434 + temp, 0); 1569 1435 req_ctx->skb = skb; 1570 - skb_get(skb); 1571 1436 return skb; 1437 + err: 1438 + kfree_skb(skb); 1439 + return ERR_PTR(error); 1572 1440 } 1573 1441 1574 1442 static int chcr_ahash_update(struct ahash_request *req) 1575 1443 { 1576 1444 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); 1577 1445 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); 1578 - struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); 1579 1446 struct uld_ctx *u_ctx = NULL; 1580 1447 struct sk_buff *skb; 1581 1448 u8 remainder = 0, bs; 1582 1449 unsigned int nbytes = req->nbytes; 1583 1450 struct hash_wr_param params; 1451 + int error; 1584 1452 1585 1453 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); 1586 1454 1587 - u_ctx = ULD_CTX(ctx); 1455 + u_ctx = ULD_CTX(h_ctx(rtfm)); 1588 1456 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 1589 - ctx->tx_qidx))) { 1457 + h_ctx(rtfm)->tx_qidx))) { 1590 1458 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 1591 1459 return -EBUSY; 1592 1460 } ··· 1615 1453 req_ctx->reqlen += nbytes; 1616 1454 return 0; 1617 1455 } 1618 - 1456 + error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); 1457 + if (error) 1458 + return -ENOMEM; 1619 1459 params.opad_needed = 0; 1620 1460 params.more = 1; 1621 1461 params.last = 0; ··· 1628 1464 req_ctx->result = 0; 1629 1465 req_ctx->data_len += params.sg_len + params.bfr_len; 1630 1466 skb = create_hash_wr(req, &params); 1631 - if (!skb) 1632 - return -ENOMEM; 1467 + if (IS_ERR(skb)) { 1468 + error = PTR_ERR(skb); 1469 + goto unmap; 1470 + } 1633 1471 1634 1472 if (remainder) { 1635 - u8 *temp; 1636 1473 /* Swap buffers */ 1637 - 
temp = req_ctx->reqbfr; 1638 - req_ctx->reqbfr = req_ctx->skbfr; 1639 - req_ctx->skbfr = temp; 1474 + swap(req_ctx->reqbfr, req_ctx->skbfr); 1640 1475 sg_pcopy_to_buffer(req->src, sg_nents(req->src), 1641 1476 req_ctx->reqbfr, remainder, req->nbytes - 1642 1477 remainder); 1643 1478 } 1644 1479 req_ctx->reqlen = remainder; 1645 1480 skb->dev = u_ctx->lldi.ports[0]; 1646 - set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); 1481 + set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); 1647 1482 chcr_send_wr(skb); 1648 1483 1649 1484 return -EINPROGRESS; 1485 + unmap: 1486 + chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); 1487 + return error; 1650 1488 } 1651 1489 1652 1490 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1) ··· 1665 1499 { 1666 1500 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); 1667 1501 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); 1668 - struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); 1669 1502 struct hash_wr_param params; 1670 1503 struct sk_buff *skb; 1671 1504 struct uld_ctx *u_ctx = NULL; 1672 1505 u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); 1673 1506 1674 - u_ctx = ULD_CTX(ctx); 1507 + u_ctx = ULD_CTX(h_ctx(rtfm)); 1675 1508 if (is_hmac(crypto_ahash_tfm(rtfm))) 1676 1509 params.opad_needed = 1; 1677 1510 else ··· 1693 1528 params.more = 0; 1694 1529 } 1695 1530 skb = create_hash_wr(req, &params); 1696 - if (!skb) 1697 - return -ENOMEM; 1531 + if (IS_ERR(skb)) 1532 + return PTR_ERR(skb); 1698 1533 1699 1534 skb->dev = u_ctx->lldi.ports[0]; 1700 - set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); 1535 + set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); 1701 1536 chcr_send_wr(skb); 1702 1537 return -EINPROGRESS; 1703 1538 } ··· 1706 1541 { 1707 1542 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); 1708 1543 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); 1709 - struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); 1710 1544 
struct uld_ctx *u_ctx = NULL; 1711 1545 struct sk_buff *skb; 1712 1546 struct hash_wr_param params; 1713 1547 u8 bs; 1548 + int error; 1714 1549 1715 1550 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); 1716 - u_ctx = ULD_CTX(ctx); 1551 + u_ctx = ULD_CTX(h_ctx(rtfm)); 1717 1552 1718 1553 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 1719 - ctx->tx_qidx))) { 1554 + h_ctx(rtfm)->tx_qidx))) { 1720 1555 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 1721 1556 return -EBUSY; 1722 1557 } ··· 1742 1577 params.last = 1; 1743 1578 params.more = 0; 1744 1579 } 1745 - 1746 - skb = create_hash_wr(req, &params); 1747 - if (!skb) 1580 + error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); 1581 + if (error) 1748 1582 return -ENOMEM; 1749 1583 1584 + skb = create_hash_wr(req, &params); 1585 + if (IS_ERR(skb)) { 1586 + error = PTR_ERR(skb); 1587 + goto unmap; 1588 + } 1750 1589 skb->dev = u_ctx->lldi.ports[0]; 1751 - set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); 1590 + set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); 1752 1591 chcr_send_wr(skb); 1753 1592 1754 1593 return -EINPROGRESS; 1594 + unmap: 1595 + chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); 1596 + return error; 1755 1597 } 1756 1598 1757 1599 static int chcr_ahash_digest(struct ahash_request *req) 1758 1600 { 1759 1601 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); 1760 1602 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); 1761 - struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); 1762 1603 struct uld_ctx *u_ctx = NULL; 1763 1604 struct sk_buff *skb; 1764 1605 struct hash_wr_param params; 1765 1606 u8 bs; 1607 + int error; 1766 1608 1767 1609 rtfm->init(req); 1768 1610 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); 1769 1611 1770 - u_ctx = ULD_CTX(ctx); 1612 + u_ctx = ULD_CTX(h_ctx(rtfm)); 1771 1613 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 1772 - ctx->tx_qidx))) { 1614 + h_ctx(rtfm)->tx_qidx))) { 1773 1615 if 
(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 1774 1616 return -EBUSY; 1775 1617 } ··· 1785 1613 params.opad_needed = 1; 1786 1614 else 1787 1615 params.opad_needed = 0; 1616 + error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); 1617 + if (error) 1618 + return -ENOMEM; 1788 1619 1789 1620 params.last = 0; 1790 1621 params.more = 0; ··· 1805 1630 } 1806 1631 1807 1632 skb = create_hash_wr(req, &params); 1808 - if (!skb) 1809 - return -ENOMEM; 1810 - 1633 + if (IS_ERR(skb)) { 1634 + error = PTR_ERR(skb); 1635 + goto unmap; 1636 + } 1811 1637 skb->dev = u_ctx->lldi.ports[0]; 1812 - set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); 1638 + set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); 1813 1639 chcr_send_wr(skb); 1814 1640 return -EINPROGRESS; 1641 + unmap: 1642 + chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); 1643 + return error; 1815 1644 } 1816 1645 1817 1646 static int chcr_ahash_export(struct ahash_request *areq, void *out) ··· 1825 1646 1826 1647 state->reqlen = req_ctx->reqlen; 1827 1648 state->data_len = req_ctx->data_len; 1649 + state->is_sg_map = 0; 1650 + state->result = 0; 1828 1651 memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen); 1829 1652 memcpy(state->partial_hash, req_ctx->partial_hash, 1830 1653 CHCR_HASH_MAX_DIGEST_SIZE); ··· 1842 1661 req_ctx->data_len = state->data_len; 1843 1662 req_ctx->reqbfr = req_ctx->bfr1; 1844 1663 req_ctx->skbfr = req_ctx->bfr2; 1664 + req_ctx->is_sg_map = 0; 1665 + req_ctx->result = 0; 1845 1666 memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128); 1846 1667 memcpy(req_ctx->partial_hash, state->partial_hash, 1847 1668 CHCR_HASH_MAX_DIGEST_SIZE); ··· 1853 1670 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, 1854 1671 unsigned int keylen) 1855 1672 { 1856 - struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 1857 - struct hmac_ctx *hmacctx = HMAC_CTX(ctx); 1673 + struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm)); 1858 1674 unsigned int digestsize = 
crypto_ahash_digestsize(tfm); 1859 1675 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); 1860 1676 unsigned int i, err = 0, updated_digestsize; ··· 1906 1724 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 1907 1725 unsigned int key_len) 1908 1726 { 1909 - struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); 1910 - struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 1727 + struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); 1911 1728 unsigned short context_size = 0; 1912 1729 int err; 1913 1730 ··· 1945 1764 req_ctx->skbfr = req_ctx->bfr2; 1946 1765 req_ctx->skb = NULL; 1947 1766 req_ctx->result = 0; 1767 + req_ctx->is_sg_map = 0; 1948 1768 copy_hash_init_values(req_ctx->partial_hash, digestsize); 1949 1769 return 0; 1950 1770 } ··· 1961 1779 { 1962 1780 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1963 1781 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq); 1964 - struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); 1965 - struct hmac_ctx *hmacctx = HMAC_CTX(ctx); 1782 + struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm)); 1966 1783 unsigned int digestsize = crypto_ahash_digestsize(rtfm); 1967 1784 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); 1968 1785 ··· 2007 1826 } 2008 1827 } 2009 1828 2010 - static int is_newsg(struct scatterlist *sgl, unsigned int *newents) 1829 + static int chcr_aead_common_init(struct aead_request *req, 1830 + unsigned short op_type) 2011 1831 { 2012 - int nents = 0; 2013 - int ret = 0; 1832 + struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1833 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 1834 + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 1835 + int error = -EINVAL; 1836 + unsigned int dst_size; 1837 + unsigned int authsize = crypto_aead_authsize(tfm); 2014 1838 2015 - while (sgl) { 2016 - if (sgl->length > CHCR_SG_SIZE) 2017 - ret = 1; 2018 - nents += DIV_ROUND_UP(sgl->length, CHCR_SG_SIZE); 2019 - sgl = 
sg_next(sgl); 1839 + dst_size = req->assoclen + req->cryptlen + (op_type ? 1840 + -authsize : authsize); 1841 + /* validate key size */ 1842 + if (aeadctx->enckey_len == 0) 1843 + goto err; 1844 + if (op_type && req->cryptlen < authsize) 1845 + goto err; 1846 + error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, 1847 + op_type); 1848 + if (error) { 1849 + error = -ENOMEM; 1850 + goto err; 2020 1851 } 2021 - *newents = nents; 2022 - return ret; 1852 + reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen, 1853 + CHCR_SRC_SG_SIZE, 0); 1854 + reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen, 1855 + CHCR_SRC_SG_SIZE, req->assoclen); 1856 + return 0; 1857 + err: 1858 + return error; 2023 1859 } 2024 1860 2025 - static inline void free_new_sg(struct scatterlist *sgl) 2026 - { 2027 - kfree(sgl); 2028 - } 2029 - 2030 - static struct scatterlist *alloc_new_sg(struct scatterlist *sgl, 2031 - unsigned int nents) 2032 - { 2033 - struct scatterlist *newsg, *sg; 2034 - int i, len, processed = 0; 2035 - struct page *spage; 2036 - int offset; 2037 - 2038 - newsg = kmalloc_array(nents, sizeof(struct scatterlist), GFP_KERNEL); 2039 - if (!newsg) 2040 - return ERR_PTR(-ENOMEM); 2041 - sg = newsg; 2042 - sg_init_table(sg, nents); 2043 - offset = sgl->offset; 2044 - spage = sg_page(sgl); 2045 - for (i = 0; i < nents; i++) { 2046 - len = min_t(u32, sgl->length - processed, CHCR_SG_SIZE); 2047 - sg_set_page(sg, spage, len, offset); 2048 - processed += len; 2049 - offset += len; 2050 - if (offset >= PAGE_SIZE) { 2051 - offset = offset % PAGE_SIZE; 2052 - spage++; 2053 - } 2054 - if (processed == sgl->length) { 2055 - processed = 0; 2056 - sgl = sg_next(sgl); 2057 - if (!sgl) 2058 - break; 2059 - spage = sg_page(sgl); 2060 - offset = sgl->offset; 2061 - } 2062 - sg = sg_next(sg); 2063 - } 2064 - return newsg; 2065 - } 2066 - 2067 - static int chcr_copy_assoc(struct aead_request *req, 2068 - struct chcr_aead_ctx *ctx) 2069 - { 2070 - 
SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null); 2071 - 2072 - skcipher_request_set_tfm(skreq, ctx->null); 2073 - skcipher_request_set_callback(skreq, aead_request_flags(req), 2074 - NULL, NULL); 2075 - skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen, 2076 - NULL); 2077 - 2078 - return crypto_skcipher_encrypt(skreq); 2079 - } 2080 - static int chcr_aead_need_fallback(struct aead_request *req, int src_nent, 1861 + static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents, 2081 1862 int aadmax, int wrlen, 2082 1863 unsigned short op_type) 2083 1864 { 2084 1865 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); 2085 1866 2086 1867 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) || 1868 + dst_nents > MAX_DSGL_ENT || 2087 1869 (req->assoclen > aadmax) || 2088 - (src_nent > MAX_SKB_FRAGS) || 2089 - (wrlen > MAX_WR_SIZE)) 1870 + (wrlen > SGE_MAX_WR_LEN)) 2090 1871 return 1; 2091 1872 return 0; 2092 1873 } ··· 2056 1913 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) 2057 1914 { 2058 1915 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2059 - struct chcr_context *ctx = crypto_aead_ctx(tfm); 2060 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 1916 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 2061 1917 struct aead_request *subreq = aead_request_ctx(req); 2062 1918 2063 1919 aead_request_set_tfm(subreq, aeadctx->sw_cipher); ··· 2075 1933 unsigned short op_type) 2076 1934 { 2077 1935 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2078 - struct chcr_context *ctx = crypto_aead_ctx(tfm); 2079 - struct uld_ctx *u_ctx = ULD_CTX(ctx); 2080 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 1936 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 2081 1937 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); 2082 1938 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2083 1939 struct sk_buff *skb = NULL; 2084 1940 struct chcr_wr *chcr_req; 2085 1941 struct 
cpl_rx_phys_dsgl *phys_cpl; 2086 - struct phys_sge_parm sg_param; 2087 - struct scatterlist *src; 2088 - unsigned int frags = 0, transhdr_len; 2089 - unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0; 2090 - unsigned int kctx_len = 0, nents; 2091 - unsigned short stop_offset = 0; 1942 + struct ulptx_sgl *ulptx; 1943 + unsigned int transhdr_len; 1944 + unsigned int dst_size = 0, temp; 1945 + unsigned int kctx_len = 0, dnents; 2092 1946 unsigned int assoclen = req->assoclen; 2093 1947 unsigned int authsize = crypto_aead_authsize(tfm); 2094 - int error = -EINVAL, src_nent; 1948 + int error = -EINVAL; 2095 1949 int null = 0; 2096 1950 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 2097 1951 GFP_ATOMIC; 2098 - struct adapter *adap = padap(ctx->dev); 1952 + struct adapter *adap = padap(a_ctx(tfm)->dev); 2099 1953 2100 - reqctx->newdstsg = NULL; 2101 - dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize : 2102 - authsize); 2103 - if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0)) 2104 - goto err; 1954 + if (req->cryptlen == 0) 1955 + return NULL; 2105 1956 2106 - if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) 2107 - goto err; 2108 - src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen); 2109 - if (src_nent < 0) 2110 - goto err; 2111 - src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); 2112 - 2113 - if (req->src != req->dst) { 2114 - error = chcr_copy_assoc(req, aeadctx); 2115 - if (error) 2116 - return ERR_PTR(error); 2117 - } 2118 - if (dst_size && is_newsg(req->dst, &nents)) { 2119 - reqctx->newdstsg = alloc_new_sg(req->dst, nents); 2120 - if (IS_ERR(reqctx->newdstsg)) 2121 - return ERR_CAST(reqctx->newdstsg); 2122 - reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, 2123 - reqctx->newdstsg, req->assoclen); 2124 - } else { 2125 - if (req->src == req->dst) 2126 - reqctx->dst = src; 2127 - else 2128 - reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, 2129 - req->dst, req->assoclen); 2130 
- } 1957 + reqctx->b0_dma = 0; 2131 1958 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) { 2132 1959 null = 1; 2133 1960 assoclen = 0; 2134 1961 } 2135 - reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + 2136 - (op_type ? -authsize : authsize)); 2137 - if (reqctx->dst_nents < 0) { 2138 - pr_err("AUTHENC:Invalid Destination sg entries\n"); 2139 - error = -EINVAL; 2140 - goto err; 1962 + dst_size = assoclen + req->cryptlen + (op_type ? -authsize : 1963 + authsize); 1964 + error = chcr_aead_common_init(req, op_type); 1965 + if (error) 1966 + return ERR_PTR(error); 1967 + if (dst_size) { 1968 + dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); 1969 + dnents += sg_nents_xlen(req->dst, req->cryptlen + 1970 + (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE, 1971 + req->assoclen); 1972 + dnents += MIN_AUTH_SG; // For IV 1973 + } else { 1974 + dnents = 0; 2141 1975 } 2142 - dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); 1976 + 1977 + dst_size = get_space_for_phys_dsgl(dnents); 2143 1978 kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4) 2144 1979 - sizeof(chcr_req->key_ctx); 2145 1980 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); 2146 - if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG, 2147 - T6_MAX_AAD_SIZE, 2148 - transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8), 2149 - op_type)) { 1981 + reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) < 1982 + SGE_MAX_WR_LEN; 1983 + temp = reqctx->imm ? 
(DIV_ROUND_UP((assoclen + IV + req->cryptlen), 16) 1984 + * 16) : (sgl_len(reqctx->src_nents + reqctx->aad_nents 1985 + + MIN_GCM_SG) * 8); 1986 + transhdr_len += temp; 1987 + transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; 1988 + 1989 + if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, 1990 + transhdr_len, op_type)) { 2150 1991 atomic_inc(&adap->chcr_stats.fallback); 2151 - free_new_sg(reqctx->newdstsg); 2152 - reqctx->newdstsg = NULL; 1992 + chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, 1993 + op_type); 2153 1994 return ERR_PTR(chcr_aead_fallback(req, op_type)); 2154 1995 } 2155 - skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); 1996 + skb = alloc_skb(SGE_MAX_WR_LEN, flags); 2156 1997 if (!skb) { 2157 1998 error = -ENOMEM; 2158 1999 goto err; 2159 2000 } 2160 2001 2161 - /* LLD is going to write the sge hdr. */ 2162 - skb_reserve(skb, sizeof(struct sge_opaque_hdr)); 2163 - 2164 - /* Write WR */ 2165 2002 chcr_req = __skb_put_zero(skb, transhdr_len); 2166 2003 2167 - stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; 2004 + temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; 2168 2005 2169 2006 /* 2170 2007 * Input order is AAD,IV and Payload. where IV should be included as ··· 2151 2030 * to the hardware spec 2152 2031 */ 2153 2032 chcr_req->sec_cpl.op_ivinsrtofst = 2154 - FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 2155 - (ivsize ? (assoclen + 1) : 0)); 2156 - chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen); 2033 + FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2, 2034 + assoclen + 1); 2035 + chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen); 2157 2036 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( 2158 2037 assoclen ? 
1 : 0, assoclen, 2159 - assoclen + ivsize + 1, 2160 - (stop_offset & 0x1F0) >> 4); 2038 + assoclen + IV + 1, 2039 + (temp & 0x1F0) >> 4); 2161 2040 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT( 2162 - stop_offset & 0xF, 2163 - null ? 0 : assoclen + ivsize + 1, 2164 - stop_offset, stop_offset); 2041 + temp & 0xF, 2042 + null ? 0 : assoclen + IV + 1, 2043 + temp, temp); 2165 2044 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 2166 2045 (op_type == CHCR_ENCRYPT_OP) ? 1 : 0, 2167 2046 CHCR_SCMD_CIPHER_MODE_AES_CBC, 2168 2047 actx->auth_mode, aeadctx->hmac_ctrl, 2169 - ivsize >> 1); 2048 + IV >> 1); 2170 2049 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 2171 - 0, 1, dst_size); 2050 + 0, 0, dst_size); 2172 2051 2173 2052 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; 2174 2053 if (op_type == CHCR_ENCRYPT_OP) ··· 2181 2060 memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 2182 2061 4), actx->h_iopad, kctx_len - 2183 2062 (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4)); 2184 - 2063 + memcpy(reqctx->iv, req->iv, IV); 2185 2064 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); 2186 - sg_param.nents = reqctx->dst_nents; 2187 - sg_param.obsize = req->cryptlen + (op_type ? 
-authsize : authsize); 2188 - sg_param.qid = qid; 2189 - error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, 2190 - reqctx->dst, &sg_param); 2191 - if (error) 2192 - goto dstmap_fail; 2193 - 2194 - skb_set_transport_header(skb, transhdr_len); 2195 - 2196 - if (assoclen) { 2197 - /* AAD buffer in */ 2198 - write_sg_to_skb(skb, &frags, req->src, assoclen); 2199 - 2200 - } 2201 - write_buffer_to_skb(skb, &frags, req->iv, ivsize); 2202 - write_sg_to_skb(skb, &frags, src, req->cryptlen); 2065 + ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); 2066 + chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); 2067 + chcr_add_aead_src_ent(req, ulptx, assoclen, op_type); 2203 2068 atomic_inc(&adap->chcr_stats.cipher_rqst); 2204 - create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1, 2205 - sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0); 2069 + temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + 2070 + kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0); 2071 + create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, 2072 + transhdr_len, temp, 0); 2206 2073 reqctx->skb = skb; 2207 - skb_get(skb); 2074 + reqctx->op = op_type; 2208 2075 2209 2076 return skb; 2210 - dstmap_fail: 2211 - /* ivmap_fail: */ 2212 - kfree_skb(skb); 2213 2077 err: 2214 - free_new_sg(reqctx->newdstsg); 2215 - reqctx->newdstsg = NULL; 2078 + chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, 2079 + op_type); 2080 + 2216 2081 return ERR_PTR(error); 2082 + } 2083 + 2084 + static int chcr_aead_dma_map(struct device *dev, 2085 + struct aead_request *req, 2086 + unsigned short op_type) 2087 + { 2088 + int error; 2089 + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2090 + struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2091 + unsigned int authsize = crypto_aead_authsize(tfm); 2092 + int dst_size; 2093 + 2094 + dst_size = req->assoclen + req->cryptlen + (op_type ? 
2095 + -authsize : authsize); 2096 + if (!req->cryptlen || !dst_size) 2097 + return 0; 2098 + reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV, 2099 + DMA_BIDIRECTIONAL); 2100 + if (dma_mapping_error(dev, reqctx->iv_dma)) 2101 + return -ENOMEM; 2102 + 2103 + if (req->src == req->dst) { 2104 + error = dma_map_sg(dev, req->src, sg_nents(req->src), 2105 + DMA_BIDIRECTIONAL); 2106 + if (!error) 2107 + goto err; 2108 + } else { 2109 + error = dma_map_sg(dev, req->src, sg_nents(req->src), 2110 + DMA_TO_DEVICE); 2111 + if (!error) 2112 + goto err; 2113 + error = dma_map_sg(dev, req->dst, sg_nents(req->dst), 2114 + DMA_FROM_DEVICE); 2115 + if (!error) { 2116 + dma_unmap_sg(dev, req->src, sg_nents(req->src), 2117 + DMA_TO_DEVICE); 2118 + goto err; 2119 + } 2120 + } 2121 + 2122 + return 0; 2123 + err: 2124 + dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); 2125 + return -ENOMEM; 2126 + } 2127 + 2128 + static void chcr_aead_dma_unmap(struct device *dev, 2129 + struct aead_request *req, 2130 + unsigned short op_type) 2131 + { 2132 + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2133 + struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2134 + unsigned int authsize = crypto_aead_authsize(tfm); 2135 + int dst_size; 2136 + 2137 + dst_size = req->assoclen + req->cryptlen + (op_type ? 
2138 + -authsize : authsize); 2139 + if (!req->cryptlen || !dst_size) 2140 + return; 2141 + 2142 + dma_unmap_single(dev, reqctx->iv_dma, IV, 2143 + DMA_BIDIRECTIONAL); 2144 + if (req->src == req->dst) { 2145 + dma_unmap_sg(dev, req->src, sg_nents(req->src), 2146 + DMA_BIDIRECTIONAL); 2147 + } else { 2148 + dma_unmap_sg(dev, req->src, sg_nents(req->src), 2149 + DMA_TO_DEVICE); 2150 + dma_unmap_sg(dev, req->dst, sg_nents(req->dst), 2151 + DMA_FROM_DEVICE); 2152 + } 2153 + } 2154 + 2155 + static inline void chcr_add_aead_src_ent(struct aead_request *req, 2156 + struct ulptx_sgl *ulptx, 2157 + unsigned int assoclen, 2158 + unsigned short op_type) 2159 + { 2160 + struct ulptx_walk ulp_walk; 2161 + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2162 + 2163 + if (reqctx->imm) { 2164 + u8 *buf = (u8 *)ulptx; 2165 + 2166 + if (reqctx->b0_dma) { 2167 + memcpy(buf, reqctx->scratch_pad, reqctx->b0_len); 2168 + buf += reqctx->b0_len; 2169 + } 2170 + sg_pcopy_to_buffer(req->src, sg_nents(req->src), 2171 + buf, assoclen, 0); 2172 + buf += assoclen; 2173 + memcpy(buf, reqctx->iv, IV); 2174 + buf += IV; 2175 + sg_pcopy_to_buffer(req->src, sg_nents(req->src), 2176 + buf, req->cryptlen, req->assoclen); 2177 + } else { 2178 + ulptx_walk_init(&ulp_walk, ulptx); 2179 + if (reqctx->b0_dma) 2180 + ulptx_walk_add_page(&ulp_walk, reqctx->b0_len, 2181 + &reqctx->b0_dma); 2182 + ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0); 2183 + ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma); 2184 + ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen, 2185 + req->assoclen); 2186 + ulptx_walk_end(&ulp_walk); 2187 + } 2188 + } 2189 + 2190 + static inline void chcr_add_aead_dst_ent(struct aead_request *req, 2191 + struct cpl_rx_phys_dsgl *phys_cpl, 2192 + unsigned int assoclen, 2193 + unsigned short op_type, 2194 + unsigned short qid) 2195 + { 2196 + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2197 + struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2198 + struct dsgl_walk 
dsgl_walk; 2199 + unsigned int authsize = crypto_aead_authsize(tfm); 2200 + u32 temp; 2201 + 2202 + dsgl_walk_init(&dsgl_walk, phys_cpl); 2203 + if (reqctx->b0_dma) 2204 + dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma); 2205 + dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0); 2206 + dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma); 2207 + temp = req->cryptlen + (op_type ? -authsize : authsize); 2208 + dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen); 2209 + dsgl_walk_end(&dsgl_walk, qid); 2210 + } 2211 + 2212 + static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req, 2213 + struct ulptx_sgl *ulptx, 2214 + struct cipher_wr_param *wrparam) 2215 + { 2216 + struct ulptx_walk ulp_walk; 2217 + struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); 2218 + 2219 + if (reqctx->imm) { 2220 + u8 *buf = (u8 *)ulptx; 2221 + 2222 + memcpy(buf, reqctx->iv, IV); 2223 + buf += IV; 2224 + sg_pcopy_to_buffer(req->src, sg_nents(req->src), 2225 + buf, wrparam->bytes, reqctx->processed); 2226 + } else { 2227 + ulptx_walk_init(&ulp_walk, ulptx); 2228 + ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma); 2229 + ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes, 2230 + reqctx->src_ofst); 2231 + reqctx->srcsg = ulp_walk.last_sg; 2232 + reqctx->src_ofst = ulp_walk.last_sg_len; 2233 + ulptx_walk_end(&ulp_walk); 2234 + } 2235 + } 2236 + 2237 + static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, 2238 + struct cpl_rx_phys_dsgl *phys_cpl, 2239 + struct cipher_wr_param *wrparam, 2240 + unsigned short qid) 2241 + { 2242 + struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); 2243 + struct dsgl_walk dsgl_walk; 2244 + 2245 + dsgl_walk_init(&dsgl_walk, phys_cpl); 2246 + dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma); 2247 + dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes, 2248 + reqctx->dst_ofst); 2249 + reqctx->dstsg = dsgl_walk.last_sg; 2250 + reqctx->dst_ofst = 
dsgl_walk.last_sg_len; 2251 + 2252 + dsgl_walk_end(&dsgl_walk, qid); 2253 + } 2254 + 2255 + static inline void chcr_add_hash_src_ent(struct ahash_request *req, 2256 + struct ulptx_sgl *ulptx, 2257 + struct hash_wr_param *param) 2258 + { 2259 + struct ulptx_walk ulp_walk; 2260 + struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); 2261 + 2262 + if (reqctx->imm) { 2263 + u8 *buf = (u8 *)ulptx; 2264 + 2265 + if (param->bfr_len) { 2266 + memcpy(buf, reqctx->reqbfr, param->bfr_len); 2267 + buf += param->bfr_len; 2268 + } 2269 + sg_pcopy_to_buffer(req->src, sg_nents(req->src), 2270 + buf, param->sg_len, 0); 2271 + } else { 2272 + ulptx_walk_init(&ulp_walk, ulptx); 2273 + if (param->bfr_len) 2274 + ulptx_walk_add_page(&ulp_walk, param->bfr_len, 2275 + &reqctx->dma_addr); 2276 + ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len, 2277 + 0); 2278 + // reqctx->srcsg = ulp_walk.last_sg; 2279 + // reqctx->src_ofst = ulp_walk.last_sg_len; 2280 + ulptx_walk_end(&ulp_walk); 2281 + } 2282 + } 2283 + 2284 + 2285 + static inline int chcr_hash_dma_map(struct device *dev, 2286 + struct ahash_request *req) 2287 + { 2288 + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); 2289 + int error = 0; 2290 + 2291 + if (!req->nbytes) 2292 + return 0; 2293 + error = dma_map_sg(dev, req->src, sg_nents(req->src), 2294 + DMA_TO_DEVICE); 2295 + if (!error) 2296 + return error; 2297 + req_ctx->is_sg_map = 1; 2298 + return 0; 2299 + } 2300 + 2301 + static inline void chcr_hash_dma_unmap(struct device *dev, 2302 + struct ahash_request *req) 2303 + { 2304 + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); 2305 + 2306 + if (!req->nbytes) 2307 + return; 2308 + 2309 + dma_unmap_sg(dev, req->src, sg_nents(req->src), 2310 + DMA_TO_DEVICE); 2311 + req_ctx->is_sg_map = 0; 2312 + 2313 + } 2314 + 2315 + 2316 + static int chcr_cipher_dma_map(struct device *dev, 2317 + struct ablkcipher_request *req) 2318 + { 2319 + int error; 2320 + struct chcr_blkcipher_req_ctx *reqctx = 
ablkcipher_request_ctx(req); 2321 + 2322 + reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV, 2323 + DMA_BIDIRECTIONAL); 2324 + if (dma_mapping_error(dev, reqctx->iv_dma)) 2325 + return -ENOMEM; 2326 + 2327 + if (req->src == req->dst) { 2328 + error = dma_map_sg(dev, req->src, sg_nents(req->src), 2329 + DMA_BIDIRECTIONAL); 2330 + if (!error) 2331 + goto err; 2332 + } else { 2333 + error = dma_map_sg(dev, req->src, sg_nents(req->src), 2334 + DMA_TO_DEVICE); 2335 + if (!error) 2336 + goto err; 2337 + error = dma_map_sg(dev, req->dst, sg_nents(req->dst), 2338 + DMA_FROM_DEVICE); 2339 + if (!error) { 2340 + dma_unmap_sg(dev, req->src, sg_nents(req->src), 2341 + DMA_TO_DEVICE); 2342 + goto err; 2343 + } 2344 + } 2345 + 2346 + return 0; 2347 + err: 2348 + dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); 2349 + return -ENOMEM; 2350 + } 2351 + static void chcr_cipher_dma_unmap(struct device *dev, 2352 + struct ablkcipher_request *req) 2353 + { 2354 + struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); 2355 + 2356 + dma_unmap_single(dev, reqctx->iv_dma, IV, 2357 + DMA_BIDIRECTIONAL); 2358 + if (req->src == req->dst) { 2359 + dma_unmap_sg(dev, req->src, sg_nents(req->src), 2360 + DMA_BIDIRECTIONAL); 2361 + } else { 2362 + dma_unmap_sg(dev, req->src, sg_nents(req->src), 2363 + DMA_TO_DEVICE); 2364 + dma_unmap_sg(dev, req->dst, sg_nents(req->dst), 2365 + DMA_FROM_DEVICE); 2366 + } 2217 2367 } 2218 2368 2219 2369 static int set_msg_len(u8 *block, unsigned int msglen, int csize) ··· 2571 2179 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, 2572 2180 unsigned int dst_size, 2573 2181 struct aead_request *req, 2574 - unsigned short op_type, 2575 - struct chcr_context *chcrctx) 2182 + unsigned short op_type) 2576 2183 { 2577 2184 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2578 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); 2579 - unsigned int ivsize = AES_BLOCK_SIZE; 2185 + struct chcr_aead_ctx *aeadctx = 
AEAD_CTX(a_ctx(tfm)); 2580 2186 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; 2581 2187 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; 2582 - unsigned int c_id = chcrctx->dev->rx_channel_id; 2188 + unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id; 2583 2189 unsigned int ccm_xtra; 2584 2190 unsigned char tag_offset = 0, auth_offset = 0; 2585 2191 unsigned int assoclen; ··· 2590 2200 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0); 2591 2201 2592 2202 auth_offset = req->cryptlen ? 2593 - (assoclen + ivsize + 1 + ccm_xtra) : 0; 2203 + (assoclen + IV + 1 + ccm_xtra) : 0; 2594 2204 if (op_type == CHCR_DECRYPT_OP) { 2595 2205 if (crypto_aead_authsize(tfm) != req->cryptlen) 2596 2206 tag_offset = crypto_aead_authsize(tfm); ··· 2600 2210 2601 2211 2602 2212 sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id, 2603 - 2, (ivsize ? (assoclen + 1) : 0) + 2604 - ccm_xtra); 2213 + 2, assoclen + 1 + ccm_xtra); 2605 2214 sec_cpl->pldlen = 2606 - htonl(assoclen + ivsize + req->cryptlen + ccm_xtra); 2215 + htonl(assoclen + IV + req->cryptlen + ccm_xtra); 2607 2216 /* For CCM there wil be b0 always. So AAD start will be 1 always */ 2608 2217 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( 2609 2218 1, assoclen + ccm_xtra, assoclen 2610 - + ivsize + 1 + ccm_xtra, 0); 2219 + + IV + 1 + ccm_xtra, 0); 2611 2220 2612 2221 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 2613 2222 auth_offset, tag_offset, ··· 2615 2226 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 2616 2227 (op_type == CHCR_ENCRYPT_OP) ? 
0 : 1, 2617 2228 cipher_mode, mac_mode, 2618 - aeadctx->hmac_ctrl, ivsize >> 1); 2229 + aeadctx->hmac_ctrl, IV >> 1); 2619 2230 2620 2231 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0, 2621 - 1, dst_size); 2232 + 0, dst_size); 2622 2233 } 2623 2234 2624 2235 int aead_ccm_validate_input(unsigned short op_type, ··· 2638 2249 return -EINVAL; 2639 2250 } 2640 2251 } 2641 - if (aeadctx->enckey_len == 0) { 2642 - pr_err("CCM: Encryption key not set\n"); 2643 - return -EINVAL; 2644 - } 2645 2252 return 0; 2646 - } 2647 - 2648 - unsigned int fill_aead_req_fields(struct sk_buff *skb, 2649 - struct aead_request *req, 2650 - struct scatterlist *src, 2651 - unsigned int ivsize, 2652 - struct chcr_aead_ctx *aeadctx) 2653 - { 2654 - unsigned int frags = 0; 2655 - struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2656 - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2657 - /* b0 and aad length(if available) */ 2658 - 2659 - write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE + 2660 - (req->assoclen ? 
CCM_AAD_FIELD_SIZE : 0)); 2661 - if (req->assoclen) { 2662 - if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) 2663 - write_sg_to_skb(skb, &frags, req->src, 2664 - req->assoclen - 8); 2665 - else 2666 - write_sg_to_skb(skb, &frags, req->src, req->assoclen); 2667 - } 2668 - write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); 2669 - if (req->cryptlen) 2670 - write_sg_to_skb(skb, &frags, src, req->cryptlen); 2671 - 2672 - return frags; 2673 2253 } 2674 2254 2675 2255 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, ··· 2647 2289 unsigned short op_type) 2648 2290 { 2649 2291 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2650 - struct chcr_context *ctx = crypto_aead_ctx(tfm); 2651 - struct uld_ctx *u_ctx = ULD_CTX(ctx); 2652 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 2292 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 2653 2293 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2654 2294 struct sk_buff *skb = NULL; 2655 2295 struct chcr_wr *chcr_req; 2656 2296 struct cpl_rx_phys_dsgl *phys_cpl; 2657 - struct phys_sge_parm sg_param; 2658 - struct scatterlist *src; 2659 - unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE; 2660 - unsigned int dst_size = 0, kctx_len, nents; 2661 - unsigned int sub_type; 2297 + struct ulptx_sgl *ulptx; 2298 + unsigned int transhdr_len; 2299 + unsigned int dst_size = 0, kctx_len, dnents, temp; 2300 + unsigned int sub_type, assoclen = req->assoclen; 2662 2301 unsigned int authsize = crypto_aead_authsize(tfm); 2663 - int error = -EINVAL, src_nent; 2302 + int error = -EINVAL; 2664 2303 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 2665 2304 GFP_ATOMIC; 2666 - struct adapter *adap = padap(ctx->dev); 2305 + struct adapter *adap = padap(a_ctx(tfm)->dev); 2667 2306 2668 - dst_size = req->assoclen + req->cryptlen + (op_type ? 
-authsize : 2669 - authsize); 2670 - reqctx->newdstsg = NULL; 2671 - if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) 2672 - goto err; 2673 - src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen); 2674 - if (src_nent < 0) 2675 - goto err; 2676 - 2307 + reqctx->b0_dma = 0; 2677 2308 sub_type = get_aead_subtype(tfm); 2678 - src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen); 2679 - if (req->src != req->dst) { 2680 - error = chcr_copy_assoc(req, aeadctx); 2681 - if (error) { 2682 - pr_err("AAD copy to destination buffer fails\n"); 2683 - return ERR_PTR(error); 2684 - } 2685 - } 2686 - if (dst_size && is_newsg(req->dst, &nents)) { 2687 - reqctx->newdstsg = alloc_new_sg(req->dst, nents); 2688 - if (IS_ERR(reqctx->newdstsg)) 2689 - return ERR_CAST(reqctx->newdstsg); 2690 - reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, 2691 - reqctx->newdstsg, req->assoclen); 2692 - } else { 2693 - if (req->src == req->dst) 2694 - reqctx->dst = src; 2695 - else 2696 - reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, 2697 - req->dst, req->assoclen); 2698 - } 2699 - reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + 2700 - (op_type ? -authsize : authsize)); 2701 - if (reqctx->dst_nents < 0) { 2702 - pr_err("CCM:Invalid Destination sg entries\n"); 2703 - error = -EINVAL; 2704 - goto err; 2705 - } 2309 + if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) 2310 + assoclen -= 8; 2311 + dst_size = assoclen + req->cryptlen + (op_type ? -authsize : 2312 + authsize); 2313 + error = chcr_aead_common_init(req, op_type); 2314 + if (error) 2315 + return ERR_PTR(error); 2316 + 2317 + 2318 + reqctx->b0_len = CCM_B0_SIZE + (assoclen ? 
CCM_AAD_FIELD_SIZE : 0); 2706 2319 error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type); 2707 2320 if (error) 2708 2321 goto err; 2709 - 2710 - dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); 2322 + if (dst_size) { 2323 + dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); 2324 + dnents += sg_nents_xlen(req->dst, req->cryptlen 2325 + + (op_type ? -authsize : authsize), 2326 + CHCR_DST_SG_SIZE, req->assoclen); 2327 + dnents += MIN_CCM_SG; // For IV and B0 2328 + } else { 2329 + dnents = 0; 2330 + } 2331 + dst_size = get_space_for_phys_dsgl(dnents); 2711 2332 kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2; 2712 2333 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); 2713 - if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG, 2714 - T6_MAX_AAD_SIZE - 18, 2715 - transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8), 2716 - op_type)) { 2334 + reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen + 2335 + reqctx->b0_len) <= SGE_MAX_WR_LEN; 2336 + temp = reqctx->imm ? 
(DIV_ROUND_UP((assoclen + IV + req->cryptlen + 2337 + reqctx->b0_len), 16) * 16) : 2338 + (sgl_len(reqctx->src_nents + reqctx->aad_nents + 2339 + MIN_CCM_SG) * 8); 2340 + transhdr_len += temp; 2341 + transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; 2342 + 2343 + if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE - 2344 + reqctx->b0_len, transhdr_len, op_type)) { 2717 2345 atomic_inc(&adap->chcr_stats.fallback); 2718 - free_new_sg(reqctx->newdstsg); 2719 - reqctx->newdstsg = NULL; 2346 + chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, 2347 + op_type); 2720 2348 return ERR_PTR(chcr_aead_fallback(req, op_type)); 2721 2349 } 2722 - 2723 - skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); 2350 + skb = alloc_skb(SGE_MAX_WR_LEN, flags); 2724 2351 2725 2352 if (!skb) { 2726 2353 error = -ENOMEM; 2727 2354 goto err; 2728 2355 } 2729 2356 2730 - skb_reserve(skb, sizeof(struct sge_opaque_hdr)); 2357 + chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len); 2731 2358 2732 - chcr_req = __skb_put_zero(skb, transhdr_len); 2733 - 2734 - fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx); 2359 + fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type); 2735 2360 2736 2361 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; 2737 2362 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); ··· 2722 2381 16), aeadctx->key, aeadctx->enckey_len); 2723 2382 2724 2383 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); 2384 + ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); 2725 2385 error = ccm_format_packet(req, aeadctx, sub_type, op_type); 2726 2386 if (error) 2727 2387 goto dstmap_fail; 2728 2388 2729 - sg_param.nents = reqctx->dst_nents; 2730 - sg_param.obsize = req->cryptlen + (op_type ? 
-authsize : authsize); 2731 - sg_param.qid = qid; 2732 - error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, 2733 - reqctx->dst, &sg_param); 2734 - if (error) 2389 + reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, 2390 + &reqctx->scratch_pad, reqctx->b0_len, 2391 + DMA_BIDIRECTIONAL); 2392 + if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, 2393 + reqctx->b0_dma)) { 2394 + error = -ENOMEM; 2735 2395 goto dstmap_fail; 2396 + } 2736 2397 2737 - skb_set_transport_header(skb, transhdr_len); 2738 - frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx); 2398 + chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); 2399 + chcr_add_aead_src_ent(req, ulptx, assoclen, op_type); 2400 + 2739 2401 atomic_inc(&adap->chcr_stats.aead_rqst); 2740 - create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1, 2741 - sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0); 2402 + temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + 2403 + kctx_len + (reqctx->imm ? 
(assoclen + IV + req->cryptlen + 2404 + reqctx->b0_len) : 0); 2405 + create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0, 2406 + transhdr_len, temp, 0); 2742 2407 reqctx->skb = skb; 2743 - skb_get(skb); 2408 + reqctx->op = op_type; 2409 + 2744 2410 return skb; 2745 2411 dstmap_fail: 2746 2412 kfree_skb(skb); 2747 2413 err: 2748 - free_new_sg(reqctx->newdstsg); 2749 - reqctx->newdstsg = NULL; 2414 + chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type); 2750 2415 return ERR_PTR(error); 2751 2416 } 2752 2417 ··· 2762 2415 unsigned short op_type) 2763 2416 { 2764 2417 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2765 - struct chcr_context *ctx = crypto_aead_ctx(tfm); 2766 - struct uld_ctx *u_ctx = ULD_CTX(ctx); 2767 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 2418 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 2768 2419 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2769 2420 struct sk_buff *skb = NULL; 2770 2421 struct chcr_wr *chcr_req; 2771 2422 struct cpl_rx_phys_dsgl *phys_cpl; 2772 - struct phys_sge_parm sg_param; 2773 - struct scatterlist *src; 2774 - unsigned int frags = 0, transhdr_len; 2775 - unsigned int ivsize = AES_BLOCK_SIZE; 2776 - unsigned int dst_size = 0, kctx_len, nents, assoclen = req->assoclen; 2777 - unsigned char tag_offset = 0; 2423 + struct ulptx_sgl *ulptx; 2424 + unsigned int transhdr_len, dnents = 0; 2425 + unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen; 2778 2426 unsigned int authsize = crypto_aead_authsize(tfm); 2779 - int error = -EINVAL, src_nent; 2427 + int error = -EINVAL; 2780 2428 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 2781 2429 GFP_ATOMIC; 2782 - struct adapter *adap = padap(ctx->dev); 2430 + struct adapter *adap = padap(a_ctx(tfm)->dev); 2783 2431 2784 - reqctx->newdstsg = NULL; 2785 - dst_size = assoclen + req->cryptlen + (op_type ? 
-authsize : 2786 - authsize); 2787 - /* validate key size */ 2788 - if (aeadctx->enckey_len == 0) 2789 - goto err; 2432 + if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) 2433 + assoclen = req->assoclen - 8; 2790 2434 2791 - if (op_type && req->cryptlen < crypto_aead_authsize(tfm)) 2792 - goto err; 2793 - src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen); 2794 - if (src_nent < 0) 2795 - goto err; 2796 - 2797 - src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen); 2798 - if (req->src != req->dst) { 2799 - error = chcr_copy_assoc(req, aeadctx); 2435 + reqctx->b0_dma = 0; 2436 + dst_size = assoclen + req->cryptlen + (op_type ? -authsize : authsize); 2437 + error = chcr_aead_common_init(req, op_type); 2800 2438 if (error) 2801 2439 return ERR_PTR(error); 2802 - } 2803 - 2804 - if (dst_size && is_newsg(req->dst, &nents)) { 2805 - reqctx->newdstsg = alloc_new_sg(req->dst, nents); 2806 - if (IS_ERR(reqctx->newdstsg)) 2807 - return ERR_CAST(reqctx->newdstsg); 2808 - reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, 2809 - reqctx->newdstsg, assoclen); 2440 + if (dst_size) { 2441 + dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); 2442 + dnents += sg_nents_xlen(req->dst, 2443 + req->cryptlen + (op_type ? -authsize : authsize), 2444 + CHCR_DST_SG_SIZE, req->assoclen); 2445 + dnents += MIN_GCM_SG; // For IV 2810 2446 } else { 2811 - if (req->src == req->dst) 2812 - reqctx->dst = src; 2813 - else 2814 - reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, 2815 - req->dst, assoclen); 2447 + dnents = 0; 2816 2448 } 2817 - 2818 - reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen + 2819 - (op_type ? 
-authsize : authsize)); 2820 - if (reqctx->dst_nents < 0) { 2821 - pr_err("GCM:Invalid Destination sg entries\n"); 2822 - error = -EINVAL; 2823 - goto err; 2824 - } 2825 - 2826 - 2827 - dst_size = get_space_for_phys_dsgl(reqctx->dst_nents); 2449 + dst_size = get_space_for_phys_dsgl(dnents); 2828 2450 kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) + 2829 2451 AEAD_H_SIZE; 2830 2452 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); 2831 - if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG, 2832 - T6_MAX_AAD_SIZE, 2833 - transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8), 2834 - op_type)) { 2453 + reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <= 2454 + SGE_MAX_WR_LEN; 2455 + temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + 2456 + req->cryptlen), 16) * 16) : (sgl_len(reqctx->src_nents + 2457 + reqctx->aad_nents + MIN_GCM_SG) * 8); 2458 + transhdr_len += temp; 2459 + transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16; 2460 + if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, 2461 + transhdr_len, op_type)) { 2835 2462 atomic_inc(&adap->chcr_stats.fallback); 2836 - free_new_sg(reqctx->newdstsg); 2837 - reqctx->newdstsg = NULL; 2463 + chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, 2464 + op_type); 2838 2465 return ERR_PTR(chcr_aead_fallback(req, op_type)); 2839 2466 } 2840 - skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); 2467 + skb = alloc_skb(SGE_MAX_WR_LEN, flags); 2841 2468 if (!skb) { 2842 2469 error = -ENOMEM; 2843 2470 goto err; 2844 2471 } 2845 2472 2846 - /* NIC driver is going to write the sge hdr. */ 2847 - skb_reserve(skb, sizeof(struct sge_opaque_hdr)); 2848 - 2849 2473 chcr_req = __skb_put_zero(skb, transhdr_len); 2850 2474 2851 - if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) 2852 - assoclen = req->assoclen - 8; 2853 - 2854 - tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 
0 : authsize; 2475 + //Offset of tag from end 2476 + temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; 2855 2477 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( 2856 - ctx->dev->rx_channel_id, 2, (ivsize ? 2857 - (assoclen + 1) : 0)); 2478 + a_ctx(tfm)->dev->rx_channel_id, 2, 2479 + (assoclen + 1)); 2858 2480 chcr_req->sec_cpl.pldlen = 2859 - htonl(assoclen + ivsize + req->cryptlen); 2481 + htonl(assoclen + IV + req->cryptlen); 2860 2482 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( 2861 2483 assoclen ? 1 : 0, assoclen, 2862 - assoclen + ivsize + 1, 0); 2484 + assoclen + IV + 1, 0); 2863 2485 chcr_req->sec_cpl.cipherstop_lo_authinsert = 2864 - FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1, 2865 - tag_offset, tag_offset); 2486 + FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1, 2487 + temp, temp); 2866 2488 chcr_req->sec_cpl.seqno_numivs = 2867 2489 FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type == 2868 2490 CHCR_ENCRYPT_OP) ? 1 : 0, 2869 2491 CHCR_SCMD_CIPHER_MODE_AES_GCM, 2870 2492 CHCR_SCMD_AUTH_MODE_GHASH, 2871 - aeadctx->hmac_ctrl, ivsize >> 1); 2493 + aeadctx->hmac_ctrl, IV >> 1); 2872 2494 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 2873 - 0, 1, dst_size); 2495 + 0, 0, dst_size); 2874 2496 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; 2875 2497 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); 2876 2498 memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) * ··· 2850 2534 if (get_aead_subtype(tfm) == 2851 2535 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) { 2852 2536 memcpy(reqctx->iv, aeadctx->salt, 4); 2853 - memcpy(reqctx->iv + 4, req->iv, 8); 2537 + memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE); 2854 2538 } else { 2855 - memcpy(reqctx->iv, req->iv, 12); 2539 + memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE); 2856 2540 } 2857 2541 *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01); 2858 2542 2859 2543 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); 
2860 - sg_param.nents = reqctx->dst_nents; 2861 - sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); 2862 - sg_param.qid = qid; 2863 - error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, 2864 - reqctx->dst, &sg_param); 2865 - if (error) 2866 - goto dstmap_fail; 2544 + ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); 2867 2545 2868 - skb_set_transport_header(skb, transhdr_len); 2869 - write_sg_to_skb(skb, &frags, req->src, assoclen); 2870 - write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); 2871 - write_sg_to_skb(skb, &frags, src, req->cryptlen); 2546 + chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); 2547 + chcr_add_aead_src_ent(req, ulptx, assoclen, op_type); 2872 2548 atomic_inc(&adap->chcr_stats.aead_rqst); 2873 - create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1, 2874 - sizeof(struct cpl_rx_phys_dsgl) + dst_size, 2875 - reqctx->verify); 2549 + temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + 2550 + kctx_len + (reqctx->imm ? 
(assoclen + IV + req->cryptlen) : 0); 2551 + create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, 2552 + transhdr_len, temp, reqctx->verify); 2876 2553 reqctx->skb = skb; 2877 - skb_get(skb); 2554 + reqctx->op = op_type; 2878 2555 return skb; 2879 2556 2880 - dstmap_fail: 2881 - /* ivmap_fail: */ 2882 - kfree_skb(skb); 2883 2557 err: 2884 - free_new_sg(reqctx->newdstsg); 2885 - reqctx->newdstsg = NULL; 2558 + chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type); 2886 2559 return ERR_PTR(error); 2887 2560 } 2888 2561 ··· 2879 2574 2880 2575 static int chcr_aead_cra_init(struct crypto_aead *tfm) 2881 2576 { 2882 - struct chcr_context *ctx = crypto_aead_ctx(tfm); 2883 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 2577 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 2884 2578 struct aead_alg *alg = crypto_aead_alg(tfm); 2885 2579 2886 2580 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0, ··· 2890 2586 crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx), 2891 2587 sizeof(struct aead_request) + 2892 2588 crypto_aead_reqsize(aeadctx->sw_cipher))); 2893 - aeadctx->null = crypto_get_default_null_skcipher(); 2894 - if (IS_ERR(aeadctx->null)) 2895 - return PTR_ERR(aeadctx->null); 2896 - return chcr_device_init(ctx); 2589 + return chcr_device_init(a_ctx(tfm)); 2897 2590 } 2898 2591 2899 2592 static void chcr_aead_cra_exit(struct crypto_aead *tfm) 2900 2593 { 2901 - struct chcr_context *ctx = crypto_aead_ctx(tfm); 2902 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 2594 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 2903 2595 2904 - crypto_put_default_null_skcipher(); 2905 2596 crypto_free_aead(aeadctx->sw_cipher); 2906 2597 } 2907 2598 2908 2599 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm, 2909 2600 unsigned int authsize) 2910 2601 { 2911 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); 2602 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 2912 2603 2913 
2604 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP; 2914 2605 aeadctx->mayverify = VERIFY_HW; ··· 2912 2613 static int chcr_authenc_setauthsize(struct crypto_aead *tfm, 2913 2614 unsigned int authsize) 2914 2615 { 2915 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); 2616 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 2916 2617 u32 maxauth = crypto_aead_maxauthsize(tfm); 2917 2618 2918 2619 /*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not ··· 2950 2651 2951 2652 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) 2952 2653 { 2953 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); 2654 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 2954 2655 2955 2656 switch (authsize) { 2956 2657 case ICV_4: ··· 2990 2691 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm, 2991 2692 unsigned int authsize) 2992 2693 { 2993 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); 2694 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 2994 2695 2995 2696 switch (authsize) { 2996 2697 case ICV_8: ··· 3016 2717 static int chcr_ccm_setauthsize(struct crypto_aead *tfm, 3017 2718 unsigned int authsize) 3018 2719 { 3019 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); 2720 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 3020 2721 3021 2722 switch (authsize) { 3022 2723 case ICV_4: ··· 3059 2760 const u8 *key, 3060 2761 unsigned int keylen) 3061 2762 { 3062 - struct chcr_context *ctx = crypto_aead_ctx(aead); 3063 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 2763 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); 3064 2764 unsigned char ck_size, mk_size; 3065 2765 int key_ctx_size = 0; 3066 2766 ··· 3092 2794 const u8 *key, 3093 2795 unsigned int keylen) 3094 2796 { 3095 - struct chcr_context *ctx = crypto_aead_ctx(aead); 3096 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 2797 + struct chcr_aead_ctx *aeadctx = 
AEAD_CTX(a_ctx(aead)); 3097 2798 int error; 3098 2799 3099 2800 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); ··· 3110 2813 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, 3111 2814 unsigned int keylen) 3112 2815 { 3113 - struct chcr_context *ctx = crypto_aead_ctx(aead); 3114 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 2816 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); 3115 2817 int error; 3116 2818 3117 2819 if (keylen < 3) { ··· 3136 2840 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, 3137 2841 unsigned int keylen) 3138 2842 { 3139 - struct chcr_context *ctx = crypto_aead_ctx(aead); 3140 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 2843 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); 3141 2844 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx); 3142 2845 struct crypto_cipher *cipher; 3143 2846 unsigned int ck_size; ··· 3208 2913 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, 3209 2914 unsigned int keylen) 3210 2915 { 3211 - struct chcr_context *ctx = crypto_aead_ctx(authenc); 3212 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 2916 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc)); 3213 2917 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); 3214 2918 /* it contains auth and cipher key both*/ 3215 2919 struct crypto_authenc_keys keys; ··· 3328 3034 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, 3329 3035 const u8 *key, unsigned int keylen) 3330 3036 { 3331 - struct chcr_context *ctx = crypto_aead_ctx(authenc); 3332 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); 3037 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc)); 3333 3038 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); 3334 3039 struct crypto_authenc_keys keys; 3335 3040 int err; ··· 3400 3107 static int chcr_aead_decrypt(struct aead_request *req) 3401 3108 { 3402 3109 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 3403 - struct 
chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm)); 3110 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 3404 3111 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 3405 3112 int size; 3406 3113 ··· 3433 3140 create_wr_t create_wr_fn) 3434 3141 { 3435 3142 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 3436 - struct chcr_context *ctx = crypto_aead_ctx(tfm); 3437 3143 struct uld_ctx *u_ctx; 3438 3144 struct sk_buff *skb; 3439 3145 3440 - if (!ctx->dev) { 3146 + if (!a_ctx(tfm)->dev) { 3441 3147 pr_err("chcr : %s : No crypto device.\n", __func__); 3442 3148 return -ENXIO; 3443 3149 } 3444 - u_ctx = ULD_CTX(ctx); 3150 + u_ctx = ULD_CTX(a_ctx(tfm)); 3445 3151 if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 3446 - ctx->tx_qidx)) { 3152 + a_ctx(tfm)->tx_qidx)) { 3447 3153 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 3448 3154 return -EBUSY; 3449 3155 } 3450 3156 3451 3157 /* Form a WR from req */ 3452 - skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size, 3158 + skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size, 3453 3159 op_type); 3454 3160 3455 3161 if (IS_ERR(skb) || !skb) 3456 3162 return PTR_ERR(skb); 3457 3163 3458 3164 skb->dev = u_ctx->lldi.ports[0]; 3459 - set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); 3165 + set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx); 3460 3166 chcr_send_wr(skb); 3461 3167 return -EINPROGRESS; 3462 3168 } ··· 3677 3385 sizeof(struct chcr_aead_ctx) + 3678 3386 sizeof(struct chcr_gcm_ctx), 3679 3387 }, 3680 - .ivsize = 12, 3388 + .ivsize = GCM_AES_IV_SIZE, 3681 3389 .maxauthsize = GHASH_DIGEST_SIZE, 3682 3390 .setkey = chcr_gcm_setkey, 3683 3391 .setauthsize = chcr_gcm_setauthsize, ··· 3697 3405 sizeof(struct chcr_gcm_ctx), 3698 3406 3699 3407 }, 3700 - .ivsize = 8, 3408 + .ivsize = GCM_RFC4106_IV_SIZE, 3701 3409 .maxauthsize = GHASH_DIGEST_SIZE, 3702 3410 .setkey = chcr_gcm_setkey, 3703 3411 .setauthsize = chcr_4106_4309_setauthsize,
+15 -42
drivers/crypto/chelsio/chcr_algo.h
··· 176 176 KEY_CONTEXT_SALT_PRESENT_V(1) | \ 177 177 KEY_CONTEXT_CTX_LEN_V((ctx_len))) 178 178 179 - #define FILL_WR_OP_CCTX_SIZE(len, ctx_len) \ 179 + #define FILL_WR_OP_CCTX_SIZE \ 180 180 htonl( \ 181 181 FW_CRYPTO_LOOKASIDE_WR_OPCODE_V( \ 182 182 FW_CRYPTO_LOOKASIDE_WR) | \ 183 183 FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) | \ 184 - FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((len)) | \ 185 - FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) | \ 186 - FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V((ctx_len))) 184 + FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((0)) | \ 185 + FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(0) | \ 186 + FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(0)) 187 187 188 - #define FILL_WR_RX_Q_ID(cid, qid, wr_iv, lcb, fid) \ 188 + #define FILL_WR_RX_Q_ID(cid, qid, lcb, fid) \ 189 189 htonl( \ 190 190 FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \ 191 191 FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \ 192 192 FW_CRYPTO_LOOKASIDE_WR_LCB_V((lcb)) | \ 193 - FW_CRYPTO_LOOKASIDE_WR_IV_V((wr_iv)) | \ 193 + FW_CRYPTO_LOOKASIDE_WR_IV_V((IV_NOP)) | \ 194 194 FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(fid)) 195 195 196 196 #define FILL_ULPTX_CMD_DEST(cid, qid) \ ··· 214 214 calc_tx_flits_ofld(skb) * 8), 16))) 215 215 216 216 #define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\ 217 - ULP_TX_SC_MORE_V((immdatalen) ? 
0 : 1)) 218 - 217 + ULP_TX_SC_MORE_V((immdatalen))) 219 218 #define MAX_NK 8 220 - #define CRYPTO_MAX_IMM_TX_PKT_LEN 256 221 - #define MAX_WR_SIZE 512 222 219 #define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0) 223 220 #define MAX_DSGL_ENT 32 224 - #define MAX_DIGEST_SKB_SGE (MAX_SKB_FRAGS - 2) 225 221 #define MIN_CIPHER_SG 1 /* IV */ 226 - #define MIN_AUTH_SG 2 /*IV + AAD*/ 227 - #define MIN_GCM_SG 2 /* IV + AAD*/ 222 + #define MIN_AUTH_SG 1 /* IV */ 223 + #define MIN_GCM_SG 1 /* IV */ 228 224 #define MIN_DIGEST_SG 1 /*Partial Buffer*/ 229 - #define MIN_CCM_SG 3 /*IV+AAD+B0*/ 225 + #define MIN_CCM_SG 2 /*IV+B0*/ 230 226 #define SPACE_LEFT(len) \ 231 - ((MAX_WR_SIZE - WR_MIN_LEN - (len))) 227 + ((SGE_MAX_WR_LEN - WR_MIN_LEN - (len))) 232 228 233 - unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 234 - 48, 64, 72, 88, 235 - 96, 112, 120, 136, 236 - 144, 160, 168, 184, 237 - 192}; 229 + unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 48, 64, 72, 88, 230 + 96, 112, 120, 136, 144, 160, 168, 184, 231 + 192, 208, 216, 232, 240, 256, 264, 280, 232 + 288, 304, 312, 328, 336, 352, 360, 376}; 238 233 unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80, 239 234 112, 112, 128, 128, 144, 144, 160, 160, 240 235 192, 192, 208, 208, 224, 224, 240, 240, ··· 253 258 254 259 struct cipher_wr_param { 255 260 struct ablkcipher_request *req; 256 - struct scatterlist *srcsg; 257 261 char *iv; 258 262 int bytes; 259 - short int snent; 260 263 unsigned short qid; 261 264 }; 262 265 enum { ··· 292 299 ICV_16 = 16 293 300 }; 294 301 295 - struct hash_op_params { 296 - unsigned char mk_size; 297 - unsigned char pad_align; 298 - unsigned char auth_mode; 299 - char hash_name[MAX_HASH_NAME]; 300 - unsigned short block_size; 301 - unsigned short word_size; 302 - unsigned short ipad_size; 303 - }; 304 - 305 302 struct phys_sge_pairs { 306 303 __be16 len[8]; 307 304 __be64 addr[8]; 308 305 }; 309 306 310 - struct phys_sge_parm { 311 - unsigned int nents; 312 - unsigned int obsize; 313 - 
unsigned short qid; 314 - }; 315 - 316 - struct crypto_result { 317 - struct completion completion; 318 - int err; 319 - }; 320 307 321 308 static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { 322 309 SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
+5 -5
drivers/crypto/chelsio/chcr_core.c
··· 154 154 struct uld_ctx *u_ctx; 155 155 156 156 /* Create the device and add it in the device list */ 157 + if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) 158 + return ERR_PTR(-EOPNOTSUPP); 159 + 160 + /* Create the device and add it in the device list */ 157 161 u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL); 158 162 if (!u_ctx) { 159 - u_ctx = ERR_PTR(-ENOMEM); 160 - goto out; 161 - } 162 - if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) { 163 163 u_ctx = ERR_PTR(-ENOMEM); 164 164 goto out; 165 165 } ··· 224 224 static int __init chcr_crypto_init(void) 225 225 { 226 226 if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) 227 - pr_err("ULD register fail: No chcr crypto support in cxgb4"); 227 + pr_err("ULD register fail: No chcr crypto support in cxgb4\n"); 228 228 229 229 return 0; 230 230 }
+1 -1
drivers/crypto/chelsio/chcr_core.h
··· 89 89 struct chcr_dev *dev; 90 90 }; 91 91 92 - struct uld_ctx * assign_chcr_device(void); 92 + struct uld_ctx *assign_chcr_device(void); 93 93 int chcr_send_wr(struct sk_buff *skb); 94 94 int start_crypto(void); 95 95 int stop_crypto(void);
+86 -35
drivers/crypto/chelsio/chcr_crypto.h
··· 149 149 150 150 #define CHCR_HASH_MAX_BLOCK_SIZE_64 64 151 151 #define CHCR_HASH_MAX_BLOCK_SIZE_128 128 152 - #define CHCR_SG_SIZE 2048 152 + #define CHCR_SRC_SG_SIZE (0x10000 - sizeof(int)) 153 + #define CHCR_DST_SG_SIZE 2048 153 154 154 - /* Aligned to 128 bit boundary */ 155 + static inline struct chcr_context *a_ctx(struct crypto_aead *tfm) 156 + { 157 + return crypto_aead_ctx(tfm); 158 + } 159 + 160 + static inline struct chcr_context *c_ctx(struct crypto_ablkcipher *tfm) 161 + { 162 + return crypto_ablkcipher_ctx(tfm); 163 + } 164 + 165 + static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm) 166 + { 167 + return crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 168 + } 155 169 156 170 struct ablk_ctx { 157 171 struct crypto_skcipher *sw_cipher; ··· 179 165 }; 180 166 struct chcr_aead_reqctx { 181 167 struct sk_buff *skb; 182 - struct scatterlist *dst; 183 - struct scatterlist *newdstsg; 184 - struct scatterlist srcffwd[2]; 185 - struct scatterlist dstffwd[2]; 168 + dma_addr_t iv_dma; 169 + dma_addr_t b0_dma; 170 + unsigned int b0_len; 171 + unsigned int op; 172 + short int aad_nents; 173 + short int src_nents; 186 174 short int dst_nents; 175 + u16 imm; 187 176 u16 verify; 188 177 u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; 189 178 unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE]; 190 179 }; 180 + 181 + struct ulptx_walk { 182 + struct ulptx_sgl *sgl; 183 + unsigned int nents; 184 + unsigned int pair_idx; 185 + unsigned int last_sg_len; 186 + struct scatterlist *last_sg; 187 + struct ulptx_sge_pair *pair; 188 + 189 + }; 190 + 191 + struct dsgl_walk { 192 + unsigned int nents; 193 + unsigned int last_sg_len; 194 + struct scatterlist *last_sg; 195 + struct cpl_rx_phys_dsgl *dsgl; 196 + struct phys_sge_pairs *to; 197 + }; 198 + 199 + 191 200 192 201 struct chcr_gcm_ctx { 193 202 u8 ghash_h[AEAD_H_SIZE]; ··· 232 195 struct chcr_aead_ctx { 233 196 __be32 key_ctx_hdr; 234 197 unsigned int enckey_len; 235 - struct crypto_skcipher *null; 236 198 struct crypto_aead 
*sw_cipher; 237 199 u8 salt[MAX_SALT]; 238 200 u8 key[CHCR_AES_MAX_KEY_LEN]; ··· 267 231 u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128]; 268 232 u8 *reqbfr; 269 233 u8 *skbfr; 234 + dma_addr_t dma_addr; 235 + u32 dma_len; 270 236 u8 reqlen; 271 - /* DMA the partial hash in it */ 237 + u8 imm; 238 + u8 is_sg_map; 272 239 u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE]; 273 240 u64 data_len; /* Data len till time */ 274 241 /* SKB which is being sent to the hardware for processing */ ··· 280 241 281 242 struct chcr_blkcipher_req_ctx { 282 243 struct sk_buff *skb; 283 - struct scatterlist srcffwd[2]; 284 - struct scatterlist dstffwd[2]; 285 244 struct scatterlist *dstsg; 286 - struct scatterlist *dst; 287 - struct scatterlist *newdstsg; 288 245 unsigned int processed; 246 + unsigned int last_req_len; 247 + struct scatterlist *srcsg; 248 + unsigned int src_ofst; 249 + unsigned int dst_ofst; 289 250 unsigned int op; 290 - short int dst_nents; 251 + dma_addr_t iv_dma; 252 + u16 imm; 291 253 u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; 292 254 }; 293 255 ··· 302 262 } alg; 303 263 }; 304 264 305 - struct chcr_req_ctx { 306 - union { 307 - struct ahash_request *ahash_req; 308 - struct aead_request *aead_req; 309 - struct ablkcipher_request *ablk_req; 310 - } req; 311 - union { 312 - struct chcr_ahash_req_ctx *ahash_ctx; 313 - struct chcr_aead_reqctx *reqctx; 314 - struct chcr_blkcipher_req_ctx *ablk_ctx; 315 - } ctx; 316 - }; 317 - 318 - struct sge_opaque_hdr { 319 - void *dev; 320 - dma_addr_t addr[MAX_SKB_FRAGS + 1]; 321 - }; 322 - 323 265 typedef struct sk_buff *(*create_wr_t)(struct aead_request *req, 324 266 unsigned short qid, 325 267 int size, ··· 312 290 int size, 313 291 create_wr_t create_wr_fn); 314 292 static inline int get_aead_subtype(struct crypto_aead *aead); 315 - static int is_newsg(struct scatterlist *sgl, unsigned int *newents); 316 - static struct scatterlist *alloc_new_sg(struct scatterlist *sgl, 317 - unsigned int nents); 318 - static inline void free_new_sg(struct scatterlist 
*sgl); 319 293 static int chcr_handle_cipher_resp(struct ablkcipher_request *req, 320 294 unsigned char *input, int err); 295 + static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err); 296 + static int chcr_aead_dma_map(struct device *dev, struct aead_request *req, 297 + unsigned short op_type); 298 + static void chcr_aead_dma_unmap(struct device *dev, struct aead_request 299 + *req, unsigned short op_type); 300 + static inline void chcr_add_aead_dst_ent(struct aead_request *req, 301 + struct cpl_rx_phys_dsgl *phys_cpl, 302 + unsigned int assoclen, 303 + unsigned short op_type, 304 + unsigned short qid); 305 + static inline void chcr_add_aead_src_ent(struct aead_request *req, 306 + struct ulptx_sgl *ulptx, 307 + unsigned int assoclen, 308 + unsigned short op_type); 309 + static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req, 310 + struct ulptx_sgl *ulptx, 311 + struct cipher_wr_param *wrparam); 312 + static int chcr_cipher_dma_map(struct device *dev, 313 + struct ablkcipher_request *req); 314 + static void chcr_cipher_dma_unmap(struct device *dev, 315 + struct ablkcipher_request *req); 316 + static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, 317 + struct cpl_rx_phys_dsgl *phys_cpl, 318 + struct cipher_wr_param *wrparam, 319 + unsigned short qid); 320 + int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip); 321 + static inline void chcr_add_hash_src_ent(struct ahash_request *req, 322 + struct ulptx_sgl *ulptx, 323 + struct hash_wr_param *param); 324 + static inline int chcr_hash_dma_map(struct device *dev, 325 + struct ahash_request *req); 326 + static inline void chcr_hash_dma_unmap(struct device *dev, 327 + struct ahash_request *req); 321 328 #endif /* __CHCR_CRYPTO_H__ */
+2 -4
drivers/crypto/inside-secure/safexcel_hash.c
··· 308 308 ctx->base.cache_sz = 0; 309 309 } 310 310 free_cache: 311 - if (ctx->base.cache) { 312 - kfree(ctx->base.cache); 313 - ctx->base.cache = NULL; 314 - } 311 + kfree(ctx->base.cache); 312 + ctx->base.cache = NULL; 315 313 316 314 unlock: 317 315 spin_unlock_bh(&priv->ring[ring].egress_lock);
-1
drivers/crypto/ixp4xx_crypto.c
··· 534 534 NPE_QLEN_TOTAL * sizeof( struct crypt_ctl), 535 535 crypt_virt, crypt_phys); 536 536 } 537 - return; 538 537 } 539 538 540 539 static void reset_sa_dir(struct ix_sa_dir *dir)
+13 -16
drivers/crypto/marvell/cesa.c
··· 34 34 /* Limit of the crypto queue before reaching the backlog */ 35 35 #define CESA_CRYPTO_DEFAULT_MAX_QLEN 128 36 36 37 - static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA); 38 - module_param_named(allhwsupport, allhwsupport, int, 0444); 39 - MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if overlaps with the mv_cesa driver)"); 40 - 41 37 struct mv_cesa_dev *cesa_dev; 42 38 43 39 struct crypto_async_request * ··· 72 76 73 77 ctx = crypto_tfm_ctx(req->tfm); 74 78 ctx->ops->step(req); 75 - 76 - return; 77 79 } 78 80 79 81 static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status) ··· 177 183 spin_lock_bh(&engine->lock); 178 184 ret = crypto_enqueue_request(&engine->queue, req); 179 185 if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) && 180 - (ret == -EINPROGRESS || 181 - (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) 186 + (ret == -EINPROGRESS || ret == -EBUSY)) 182 187 mv_cesa_tdma_chain(engine, creq); 183 188 spin_unlock_bh(&engine->lock); 184 189 ··· 195 202 int i, j; 196 203 197 204 for (i = 0; i < cesa->caps->ncipher_algs; i++) { 198 - ret = crypto_register_alg(cesa->caps->cipher_algs[i]); 205 + ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]); 199 206 if (ret) 200 207 goto err_unregister_crypto; 201 208 } ··· 215 222 216 223 err_unregister_crypto: 217 224 for (j = 0; j < i; j++) 218 - crypto_unregister_alg(cesa->caps->cipher_algs[j]); 225 + crypto_unregister_skcipher(cesa->caps->cipher_algs[j]); 219 226 220 227 return ret; 221 228 } ··· 228 235 crypto_unregister_ahash(cesa->caps->ahash_algs[i]); 229 236 230 237 for (i = 0; i < cesa->caps->ncipher_algs; i++) 231 - crypto_unregister_alg(cesa->caps->cipher_algs[i]); 238 + crypto_unregister_skcipher(cesa->caps->cipher_algs[i]); 232 239 } 233 240 234 - static struct crypto_alg *orion_cipher_algs[] = { 241 + static struct skcipher_alg *orion_cipher_algs[] = { 235 242 &mv_cesa_ecb_des_alg, 236 243 &mv_cesa_cbc_des_alg, 237 244 
&mv_cesa_ecb_des3_ede_alg, ··· 247 254 &mv_ahmac_sha1_alg, 248 255 }; 249 256 250 - static struct crypto_alg *armada_370_cipher_algs[] = { 257 + static struct skcipher_alg *armada_370_cipher_algs[] = { 251 258 &mv_cesa_ecb_des_alg, 252 259 &mv_cesa_cbc_des_alg, 253 260 &mv_cesa_ecb_des3_ede_alg, ··· 452 459 caps = match->data; 453 460 } 454 461 455 - if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport) 456 - return -ENOTSUPP; 457 - 458 462 cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL); 459 463 if (!cesa) 460 464 return -ENOMEM; ··· 589 599 return 0; 590 600 } 591 601 602 + static const struct platform_device_id mv_cesa_plat_id_table[] = { 603 + { .name = "mv_crypto" }, 604 + { /* sentinel */ }, 605 + }; 606 + MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table); 607 + 592 608 static struct platform_driver marvell_cesa = { 593 609 .probe = mv_cesa_probe, 594 610 .remove = mv_cesa_remove, 611 + .id_table = mv_cesa_plat_id_table, 595 612 .driver = { 596 613 .name = "marvell-cesa", 597 614 .of_match_table = mv_cesa_of_match_table,
+14 -13
drivers/crypto/marvell/cesa.h
··· 5 5 #include <crypto/algapi.h> 6 6 #include <crypto/hash.h> 7 7 #include <crypto/internal/hash.h> 8 + #include <crypto/internal/skcipher.h> 8 9 9 10 #include <linux/crypto.h> 10 11 #include <linux/dmapool.h> ··· 374 373 struct mv_cesa_caps { 375 374 int nengines; 376 375 bool has_tdma; 377 - struct crypto_alg **cipher_algs; 376 + struct skcipher_alg **cipher_algs; 378 377 int ncipher_algs; 379 378 struct ahash_alg **ahash_algs; 380 379 int nahash_algs; ··· 540 539 }; 541 540 542 541 /** 543 - * struct mv_cesa_ablkcipher_std_req - cipher standard request 542 + * struct mv_cesa_skcipher_std_req - cipher standard request 544 543 * @op: operation context 545 544 * @offset: current operation offset 546 545 * @size: size of the crypto operation 547 546 */ 548 - struct mv_cesa_ablkcipher_std_req { 547 + struct mv_cesa_skcipher_std_req { 549 548 struct mv_cesa_op_ctx op; 550 549 unsigned int offset; 551 550 unsigned int size; ··· 553 552 }; 554 553 555 554 /** 556 - * struct mv_cesa_ablkcipher_req - cipher request 555 + * struct mv_cesa_skcipher_req - cipher request 557 556 * @req: type specific request information 558 557 * @src_nents: number of entries in the src sg list 559 558 * @dst_nents: number of entries in the dest sg list 560 559 */ 561 - struct mv_cesa_ablkcipher_req { 560 + struct mv_cesa_skcipher_req { 562 561 struct mv_cesa_req base; 563 - struct mv_cesa_ablkcipher_std_req std; 562 + struct mv_cesa_skcipher_std_req std; 564 563 int src_nents; 565 564 int dst_nents; 566 565 }; ··· 765 764 * the backlog and will be processed later. There's no need to 766 765 * clean it up. 
767 766 */ 768 - if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) 767 + if (ret == -EBUSY) 769 768 return false; 770 769 771 770 /* Request wasn't queued, we need to clean it up */ ··· 870 869 extern struct ahash_alg mv_ahmac_sha1_alg; 871 870 extern struct ahash_alg mv_ahmac_sha256_alg; 872 871 873 - extern struct crypto_alg mv_cesa_ecb_des_alg; 874 - extern struct crypto_alg mv_cesa_cbc_des_alg; 875 - extern struct crypto_alg mv_cesa_ecb_des3_ede_alg; 876 - extern struct crypto_alg mv_cesa_cbc_des3_ede_alg; 877 - extern struct crypto_alg mv_cesa_ecb_aes_alg; 878 - extern struct crypto_alg mv_cesa_cbc_aes_alg; 872 + extern struct skcipher_alg mv_cesa_ecb_des_alg; 873 + extern struct skcipher_alg mv_cesa_cbc_des_alg; 874 + extern struct skcipher_alg mv_cesa_ecb_des3_ede_alg; 875 + extern struct skcipher_alg mv_cesa_cbc_des3_ede_alg; 876 + extern struct skcipher_alg mv_cesa_ecb_aes_alg; 877 + extern struct skcipher_alg mv_cesa_cbc_aes_alg; 879 878 880 879 #endif /* __MARVELL_CESA_H__ */
+233 -243
drivers/crypto/marvell/cipher.c
··· 32 32 struct crypto_aes_ctx aes; 33 33 }; 34 34 35 - struct mv_cesa_ablkcipher_dma_iter { 35 + struct mv_cesa_skcipher_dma_iter { 36 36 struct mv_cesa_dma_iter base; 37 37 struct mv_cesa_sg_dma_iter src; 38 38 struct mv_cesa_sg_dma_iter dst; 39 39 }; 40 40 41 41 static inline void 42 - mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter, 43 - struct ablkcipher_request *req) 42 + mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter, 43 + struct skcipher_request *req) 44 44 { 45 - mv_cesa_req_dma_iter_init(&iter->base, req->nbytes); 45 + mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen); 46 46 mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); 47 47 mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE); 48 48 } 49 49 50 50 static inline bool 51 - mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter) 51 + mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter) 52 52 { 53 53 iter->src.op_offset = 0; 54 54 iter->dst.op_offset = 0; ··· 57 57 } 58 58 59 59 static inline void 60 - mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req) 60 + mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req) 61 61 { 62 - struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 62 + struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); 63 63 64 64 if (req->dst != req->src) { 65 65 dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents, ··· 73 73 mv_cesa_dma_cleanup(&creq->base); 74 74 } 75 75 76 - static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req) 76 + static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req) 77 77 { 78 - struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 78 + struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); 79 79 80 80 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) 81 - mv_cesa_ablkcipher_dma_cleanup(req); 81 + mv_cesa_skcipher_dma_cleanup(req); 82 82 } 
83 83 84 - static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req) 84 + static void mv_cesa_skcipher_std_step(struct skcipher_request *req) 85 85 { 86 - struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 87 - struct mv_cesa_ablkcipher_std_req *sreq = &creq->std; 86 + struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); 87 + struct mv_cesa_skcipher_std_req *sreq = &creq->std; 88 88 struct mv_cesa_engine *engine = creq->base.engine; 89 - size_t len = min_t(size_t, req->nbytes - sreq->offset, 89 + size_t len = min_t(size_t, req->cryptlen - sreq->offset, 90 90 CESA_SA_SRAM_PAYLOAD_SIZE); 91 91 92 92 mv_cesa_adjust_op(engine, &sreq->op); ··· 114 114 writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD); 115 115 } 116 116 117 - static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req, 118 - u32 status) 117 + static int mv_cesa_skcipher_std_process(struct skcipher_request *req, 118 + u32 status) 119 119 { 120 - struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 121 - struct mv_cesa_ablkcipher_std_req *sreq = &creq->std; 120 + struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); 121 + struct mv_cesa_skcipher_std_req *sreq = &creq->std; 122 122 struct mv_cesa_engine *engine = creq->base.engine; 123 123 size_t len; 124 124 ··· 127 127 sreq->size, sreq->offset); 128 128 129 129 sreq->offset += len; 130 - if (sreq->offset < req->nbytes) 130 + if (sreq->offset < req->cryptlen) 131 131 return -EINPROGRESS; 132 132 133 133 return 0; 134 134 } 135 135 136 - static int mv_cesa_ablkcipher_process(struct crypto_async_request *req, 137 - u32 status) 136 + static int mv_cesa_skcipher_process(struct crypto_async_request *req, 137 + u32 status) 138 138 { 139 - struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); 140 - struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); 139 + struct skcipher_request *skreq = skcipher_request_cast(req); 140 + struct 
mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq); 141 141 struct mv_cesa_req *basereq = &creq->base; 142 142 143 143 if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ) 144 - return mv_cesa_ablkcipher_std_process(ablkreq, status); 144 + return mv_cesa_skcipher_std_process(skreq, status); 145 145 146 146 return mv_cesa_dma_process(basereq, status); 147 147 } 148 148 149 - static void mv_cesa_ablkcipher_step(struct crypto_async_request *req) 149 + static void mv_cesa_skcipher_step(struct crypto_async_request *req) 150 150 { 151 - struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); 152 - struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); 151 + struct skcipher_request *skreq = skcipher_request_cast(req); 152 + struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq); 153 153 154 154 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) 155 155 mv_cesa_dma_step(&creq->base); 156 156 else 157 - mv_cesa_ablkcipher_std_step(ablkreq); 157 + mv_cesa_skcipher_std_step(skreq); 158 158 } 159 159 160 160 static inline void 161 - mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req) 161 + mv_cesa_skcipher_dma_prepare(struct skcipher_request *req) 162 162 { 163 - struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 163 + struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); 164 164 struct mv_cesa_req *basereq = &creq->base; 165 165 166 166 mv_cesa_dma_prepare(basereq, basereq->engine); 167 167 } 168 168 169 169 static inline void 170 - mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req) 170 + mv_cesa_skcipher_std_prepare(struct skcipher_request *req) 171 171 { 172 - struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 173 - struct mv_cesa_ablkcipher_std_req *sreq = &creq->std; 172 + struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); 173 + struct mv_cesa_skcipher_std_req *sreq = &creq->std; 174 174 175 175 sreq->size = 0; 176 176 sreq->offset = 0; 177 177 } 178 178 
179 - static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req, 180 - struct mv_cesa_engine *engine) 179 + static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req, 180 + struct mv_cesa_engine *engine) 181 181 { 182 - struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); 183 - struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); 182 + struct skcipher_request *skreq = skcipher_request_cast(req); 183 + struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq); 184 184 creq->base.engine = engine; 185 185 186 186 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) 187 - mv_cesa_ablkcipher_dma_prepare(ablkreq); 187 + mv_cesa_skcipher_dma_prepare(skreq); 188 188 else 189 - mv_cesa_ablkcipher_std_prepare(ablkreq); 189 + mv_cesa_skcipher_std_prepare(skreq); 190 190 } 191 191 192 192 static inline void 193 - mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req) 193 + mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req) 194 194 { 195 - struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); 195 + struct skcipher_request *skreq = skcipher_request_cast(req); 196 196 197 - mv_cesa_ablkcipher_cleanup(ablkreq); 197 + mv_cesa_skcipher_cleanup(skreq); 198 198 } 199 199 200 200 static void 201 - mv_cesa_ablkcipher_complete(struct crypto_async_request *req) 201 + mv_cesa_skcipher_complete(struct crypto_async_request *req) 202 202 { 203 - struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); 204 - struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); 203 + struct skcipher_request *skreq = skcipher_request_cast(req); 204 + struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq); 205 205 struct mv_cesa_engine *engine = creq->base.engine; 206 206 unsigned int ivsize; 207 207 208 - atomic_sub(ablkreq->nbytes, &engine->load); 209 - ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq)); 208 + atomic_sub(skreq->cryptlen, 
&engine->load); 209 + ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq)); 210 210 211 211 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) { 212 212 struct mv_cesa_req *basereq; 213 213 214 214 basereq = &creq->base; 215 - memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv, 215 + memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv, 216 216 ivsize); 217 217 } else { 218 - memcpy_fromio(ablkreq->info, 218 + memcpy_fromio(skreq->iv, 219 219 engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, 220 220 ivsize); 221 221 } 222 222 } 223 223 224 - static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = { 225 - .step = mv_cesa_ablkcipher_step, 226 - .process = mv_cesa_ablkcipher_process, 227 - .cleanup = mv_cesa_ablkcipher_req_cleanup, 228 - .complete = mv_cesa_ablkcipher_complete, 224 + static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = { 225 + .step = mv_cesa_skcipher_step, 226 + .process = mv_cesa_skcipher_process, 227 + .cleanup = mv_cesa_skcipher_req_cleanup, 228 + .complete = mv_cesa_skcipher_complete, 229 229 }; 230 230 231 - static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm) 231 + static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm) 232 232 { 233 - struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm); 233 + void *ctx = crypto_tfm_ctx(tfm); 234 234 235 - ctx->base.ops = &mv_cesa_ablkcipher_req_ops; 235 + memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize); 236 + } 236 237 237 - tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req); 238 + static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm) 239 + { 240 + struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm); 241 + 242 + ctx->ops = &mv_cesa_skcipher_req_ops; 243 + 244 + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), 245 + sizeof(struct mv_cesa_skcipher_req)); 238 246 239 247 return 0; 240 248 } 241 249 242 - static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 250 + static int mv_cesa_aes_setkey(struct 
crypto_skcipher *cipher, const u8 *key, 243 251 unsigned int len) 244 252 { 245 - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 253 + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); 246 254 struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm); 247 255 int remaining; 248 256 int offset; ··· 259 251 260 252 ret = crypto_aes_expand_key(&ctx->aes, key, len); 261 253 if (ret) { 262 - crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 254 + crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 263 255 return ret; 264 256 } 265 257 ··· 272 264 return 0; 273 265 } 274 266 275 - static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 267 + static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key, 276 268 unsigned int len) 277 269 { 278 - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 270 + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); 279 271 struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); 280 272 u32 tmp[DES_EXPKEY_WORDS]; 281 273 int ret; 282 274 283 275 if (len != DES_KEY_SIZE) { 284 - crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 276 + crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 285 277 return -EINVAL; 286 278 } 287 279 ··· 296 288 return 0; 297 289 } 298 290 299 - static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher, 291 + static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher, 300 292 const u8 *key, unsigned int len) 301 293 { 302 - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 294 + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); 303 295 struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm); 304 296 305 297 if (len != DES3_EDE_KEY_SIZE) { 306 - crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 298 + crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 307 299 return -EINVAL; 308 300 } 309 301 ··· 312 304 return 0; 313 305 } 314 306 315 - static int 
mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req, 316 - const struct mv_cesa_op_ctx *op_templ) 307 + static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req, 308 + const struct mv_cesa_op_ctx *op_templ) 317 309 { 318 - struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 310 + struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); 319 311 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 320 312 GFP_KERNEL : GFP_ATOMIC; 321 313 struct mv_cesa_req *basereq = &creq->base; 322 - struct mv_cesa_ablkcipher_dma_iter iter; 314 + struct mv_cesa_skcipher_dma_iter iter; 323 315 bool skip_ctx = false; 324 316 int ret; 325 317 unsigned int ivsize; ··· 347 339 } 348 340 349 341 mv_cesa_tdma_desc_iter_init(&basereq->chain); 350 - mv_cesa_ablkcipher_req_iter_init(&iter, req); 342 + mv_cesa_skcipher_req_iter_init(&iter, req); 351 343 352 344 do { 353 345 struct mv_cesa_op_ctx *op; ··· 378 370 if (ret) 379 371 goto err_free_tdma; 380 372 381 - } while (mv_cesa_ablkcipher_req_iter_next_op(&iter)); 373 + } while (mv_cesa_skcipher_req_iter_next_op(&iter)); 382 374 383 375 /* Add output data for IV */ 384 - ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)); 376 + ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req)); 385 377 ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET, 386 378 CESA_SA_DATA_SRAM_OFFSET, 387 379 CESA_TDMA_SRC_IN_SRAM, flags); ··· 407 399 } 408 400 409 401 static inline int 410 - mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req, 411 - const struct mv_cesa_op_ctx *op_templ) 402 + mv_cesa_skcipher_std_req_init(struct skcipher_request *req, 403 + const struct mv_cesa_op_ctx *op_templ) 412 404 { 413 - struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 414 - struct mv_cesa_ablkcipher_std_req *sreq = &creq->std; 405 + struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); 406 + struct mv_cesa_skcipher_std_req *sreq = &creq->std; 
415 407 struct mv_cesa_req *basereq = &creq->base; 416 408 417 409 sreq->op = *op_templ; ··· 422 414 return 0; 423 415 } 424 416 425 - static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req, 426 - struct mv_cesa_op_ctx *tmpl) 417 + static int mv_cesa_skcipher_req_init(struct skcipher_request *req, 418 + struct mv_cesa_op_ctx *tmpl) 427 419 { 428 - struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 429 - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 430 - unsigned int blksize = crypto_ablkcipher_blocksize(tfm); 420 + struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); 421 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 422 + unsigned int blksize = crypto_skcipher_blocksize(tfm); 431 423 int ret; 432 424 433 - if (!IS_ALIGNED(req->nbytes, blksize)) 425 + if (!IS_ALIGNED(req->cryptlen, blksize)) 434 426 return -EINVAL; 435 427 436 - creq->src_nents = sg_nents_for_len(req->src, req->nbytes); 428 + creq->src_nents = sg_nents_for_len(req->src, req->cryptlen); 437 429 if (creq->src_nents < 0) { 438 430 dev_err(cesa_dev->dev, "Invalid number of src SG"); 439 431 return creq->src_nents; 440 432 } 441 - creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes); 433 + creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen); 442 434 if (creq->dst_nents < 0) { 443 435 dev_err(cesa_dev->dev, "Invalid number of dst SG"); 444 436 return creq->dst_nents; ··· 448 440 CESA_SA_DESC_CFG_OP_MSK); 449 441 450 442 if (cesa_dev->caps->has_tdma) 451 - ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl); 443 + ret = mv_cesa_skcipher_dma_req_init(req, tmpl); 452 444 else 453 - ret = mv_cesa_ablkcipher_std_req_init(req, tmpl); 445 + ret = mv_cesa_skcipher_std_req_init(req, tmpl); 454 446 455 447 return ret; 456 448 } 457 449 458 - static int mv_cesa_ablkcipher_queue_req(struct ablkcipher_request *req, 459 - struct mv_cesa_op_ctx *tmpl) 450 + static int mv_cesa_skcipher_queue_req(struct skcipher_request *req, 451 + struct 
mv_cesa_op_ctx *tmpl) 460 452 { 461 453 int ret; 462 - struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); 454 + struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req); 463 455 struct mv_cesa_engine *engine; 464 456 465 - ret = mv_cesa_ablkcipher_req_init(req, tmpl); 457 + ret = mv_cesa_skcipher_req_init(req, tmpl); 466 458 if (ret) 467 459 return ret; 468 460 469 - engine = mv_cesa_select_engine(req->nbytes); 470 - mv_cesa_ablkcipher_prepare(&req->base, engine); 461 + engine = mv_cesa_select_engine(req->cryptlen); 462 + mv_cesa_skcipher_prepare(&req->base, engine); 471 463 472 464 ret = mv_cesa_queue_req(&req->base, &creq->base); 473 465 474 466 if (mv_cesa_req_needs_cleanup(&req->base, ret)) 475 - mv_cesa_ablkcipher_cleanup(req); 467 + mv_cesa_skcipher_cleanup(req); 476 468 477 469 return ret; 478 470 } 479 471 480 - static int mv_cesa_des_op(struct ablkcipher_request *req, 472 + static int mv_cesa_des_op(struct skcipher_request *req, 481 473 struct mv_cesa_op_ctx *tmpl) 482 474 { 483 475 struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm); ··· 487 479 488 480 memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE); 489 481 490 - return mv_cesa_ablkcipher_queue_req(req, tmpl); 482 + return mv_cesa_skcipher_queue_req(req, tmpl); 491 483 } 492 484 493 - static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req) 485 + static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req) 494 486 { 495 487 struct mv_cesa_op_ctx tmpl; 496 488 ··· 501 493 return mv_cesa_des_op(req, &tmpl); 502 494 } 503 495 504 - static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req) 496 + static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req) 505 497 { 506 498 struct mv_cesa_op_ctx tmpl; 507 499 ··· 512 504 return mv_cesa_des_op(req, &tmpl); 513 505 } 514 506 515 - struct crypto_alg mv_cesa_ecb_des_alg = { 516 - .cra_name = "ecb(des)", 517 - .cra_driver_name = "mv-ecb-des", 518 - .cra_priority = 300, 519 - .cra_flags = 
CRYPTO_ALG_TYPE_ABLKCIPHER | 520 - CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, 521 - .cra_blocksize = DES_BLOCK_SIZE, 522 - .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), 523 - .cra_alignmask = 0, 524 - .cra_type = &crypto_ablkcipher_type, 525 - .cra_module = THIS_MODULE, 526 - .cra_init = mv_cesa_ablkcipher_cra_init, 527 - .cra_u = { 528 - .ablkcipher = { 529 - .min_keysize = DES_KEY_SIZE, 530 - .max_keysize = DES_KEY_SIZE, 531 - .setkey = mv_cesa_des_setkey, 532 - .encrypt = mv_cesa_ecb_des_encrypt, 533 - .decrypt = mv_cesa_ecb_des_decrypt, 534 - }, 507 + struct skcipher_alg mv_cesa_ecb_des_alg = { 508 + .setkey = mv_cesa_des_setkey, 509 + .encrypt = mv_cesa_ecb_des_encrypt, 510 + .decrypt = mv_cesa_ecb_des_decrypt, 511 + .min_keysize = DES_KEY_SIZE, 512 + .max_keysize = DES_KEY_SIZE, 513 + .base = { 514 + .cra_name = "ecb(des)", 515 + .cra_driver_name = "mv-ecb-des", 516 + .cra_priority = 300, 517 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, 518 + .cra_blocksize = DES_BLOCK_SIZE, 519 + .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), 520 + .cra_alignmask = 0, 521 + .cra_module = THIS_MODULE, 522 + .cra_init = mv_cesa_skcipher_cra_init, 523 + .cra_exit = mv_cesa_skcipher_cra_exit, 535 524 }, 536 525 }; 537 526 538 - static int mv_cesa_cbc_des_op(struct ablkcipher_request *req, 527 + static int mv_cesa_cbc_des_op(struct skcipher_request *req, 539 528 struct mv_cesa_op_ctx *tmpl) 540 529 { 541 530 mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC, 542 531 CESA_SA_DESC_CFG_CRYPTCM_MSK); 543 532 544 - memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE); 533 + memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE); 545 534 546 535 return mv_cesa_des_op(req, tmpl); 547 536 } 548 537 549 - static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req) 538 + static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req) 550 539 { 551 540 struct mv_cesa_op_ctx tmpl; 552 541 ··· 552 547 return mv_cesa_cbc_des_op(req, &tmpl); 553 548 
} 554 549 555 - static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req) 550 + static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req) 556 551 { 557 552 struct mv_cesa_op_ctx tmpl; 558 553 ··· 561 556 return mv_cesa_cbc_des_op(req, &tmpl); 562 557 } 563 558 564 - struct crypto_alg mv_cesa_cbc_des_alg = { 565 - .cra_name = "cbc(des)", 566 - .cra_driver_name = "mv-cbc-des", 567 - .cra_priority = 300, 568 - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 569 - CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, 570 - .cra_blocksize = DES_BLOCK_SIZE, 571 - .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), 572 - .cra_alignmask = 0, 573 - .cra_type = &crypto_ablkcipher_type, 574 - .cra_module = THIS_MODULE, 575 - .cra_init = mv_cesa_ablkcipher_cra_init, 576 - .cra_u = { 577 - .ablkcipher = { 578 - .min_keysize = DES_KEY_SIZE, 579 - .max_keysize = DES_KEY_SIZE, 580 - .ivsize = DES_BLOCK_SIZE, 581 - .setkey = mv_cesa_des_setkey, 582 - .encrypt = mv_cesa_cbc_des_encrypt, 583 - .decrypt = mv_cesa_cbc_des_decrypt, 584 - }, 559 + struct skcipher_alg mv_cesa_cbc_des_alg = { 560 + .setkey = mv_cesa_des_setkey, 561 + .encrypt = mv_cesa_cbc_des_encrypt, 562 + .decrypt = mv_cesa_cbc_des_decrypt, 563 + .min_keysize = DES_KEY_SIZE, 564 + .max_keysize = DES_KEY_SIZE, 565 + .ivsize = DES_BLOCK_SIZE, 566 + .base = { 567 + .cra_name = "cbc(des)", 568 + .cra_driver_name = "mv-cbc-des", 569 + .cra_priority = 300, 570 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, 571 + .cra_blocksize = DES_BLOCK_SIZE, 572 + .cra_ctxsize = sizeof(struct mv_cesa_des_ctx), 573 + .cra_alignmask = 0, 574 + .cra_module = THIS_MODULE, 575 + .cra_init = mv_cesa_skcipher_cra_init, 576 + .cra_exit = mv_cesa_skcipher_cra_exit, 585 577 }, 586 578 }; 587 579 588 - static int mv_cesa_des3_op(struct ablkcipher_request *req, 580 + static int mv_cesa_des3_op(struct skcipher_request *req, 589 581 struct mv_cesa_op_ctx *tmpl) 590 582 { 591 583 struct mv_cesa_des3_ctx *ctx = 
crypto_tfm_ctx(req->base.tfm); ··· 592 590 593 591 memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE); 594 592 595 - return mv_cesa_ablkcipher_queue_req(req, tmpl); 593 + return mv_cesa_skcipher_queue_req(req, tmpl); 596 594 } 597 595 598 - static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req) 596 + static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req) 599 597 { 600 598 struct mv_cesa_op_ctx tmpl; 601 599 ··· 607 605 return mv_cesa_des3_op(req, &tmpl); 608 606 } 609 607 610 - static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req) 608 + static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req) 611 609 { 612 610 struct mv_cesa_op_ctx tmpl; 613 611 ··· 619 617 return mv_cesa_des3_op(req, &tmpl); 620 618 } 621 619 622 - struct crypto_alg mv_cesa_ecb_des3_ede_alg = { 623 - .cra_name = "ecb(des3_ede)", 624 - .cra_driver_name = "mv-ecb-des3-ede", 625 - .cra_priority = 300, 626 - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 627 - CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, 628 - .cra_blocksize = DES3_EDE_BLOCK_SIZE, 629 - .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), 630 - .cra_alignmask = 0, 631 - .cra_type = &crypto_ablkcipher_type, 632 - .cra_module = THIS_MODULE, 633 - .cra_init = mv_cesa_ablkcipher_cra_init, 634 - .cra_u = { 635 - .ablkcipher = { 636 - .min_keysize = DES3_EDE_KEY_SIZE, 637 - .max_keysize = DES3_EDE_KEY_SIZE, 638 - .ivsize = DES3_EDE_BLOCK_SIZE, 639 - .setkey = mv_cesa_des3_ede_setkey, 640 - .encrypt = mv_cesa_ecb_des3_ede_encrypt, 641 - .decrypt = mv_cesa_ecb_des3_ede_decrypt, 642 - }, 620 + struct skcipher_alg mv_cesa_ecb_des3_ede_alg = { 621 + .setkey = mv_cesa_des3_ede_setkey, 622 + .encrypt = mv_cesa_ecb_des3_ede_encrypt, 623 + .decrypt = mv_cesa_ecb_des3_ede_decrypt, 624 + .min_keysize = DES3_EDE_KEY_SIZE, 625 + .max_keysize = DES3_EDE_KEY_SIZE, 626 + .ivsize = DES3_EDE_BLOCK_SIZE, 627 + .base = { 628 + .cra_name = "ecb(des3_ede)", 629 + .cra_driver_name = 
"mv-ecb-des3-ede", 630 + .cra_priority = 300, 631 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, 632 + .cra_blocksize = DES3_EDE_BLOCK_SIZE, 633 + .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), 634 + .cra_alignmask = 0, 635 + .cra_module = THIS_MODULE, 636 + .cra_init = mv_cesa_skcipher_cra_init, 637 + .cra_exit = mv_cesa_skcipher_cra_exit, 643 638 }, 644 639 }; 645 640 646 - static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req, 641 + static int mv_cesa_cbc_des3_op(struct skcipher_request *req, 647 642 struct mv_cesa_op_ctx *tmpl) 648 643 { 649 - memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE); 644 + memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE); 650 645 651 646 return mv_cesa_des3_op(req, tmpl); 652 647 } 653 648 654 - static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req) 649 + static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req) 655 650 { 656 651 struct mv_cesa_op_ctx tmpl; 657 652 ··· 660 661 return mv_cesa_cbc_des3_op(req, &tmpl); 661 662 } 662 663 663 - static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req) 664 + static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req) 664 665 { 665 666 struct mv_cesa_op_ctx tmpl; 666 667 ··· 672 673 return mv_cesa_cbc_des3_op(req, &tmpl); 673 674 } 674 675 675 - struct crypto_alg mv_cesa_cbc_des3_ede_alg = { 676 - .cra_name = "cbc(des3_ede)", 677 - .cra_driver_name = "mv-cbc-des3-ede", 678 - .cra_priority = 300, 679 - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 680 - CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, 681 - .cra_blocksize = DES3_EDE_BLOCK_SIZE, 682 - .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), 683 - .cra_alignmask = 0, 684 - .cra_type = &crypto_ablkcipher_type, 685 - .cra_module = THIS_MODULE, 686 - .cra_init = mv_cesa_ablkcipher_cra_init, 687 - .cra_u = { 688 - .ablkcipher = { 689 - .min_keysize = DES3_EDE_KEY_SIZE, 690 - .max_keysize = DES3_EDE_KEY_SIZE, 691 - .ivsize = DES3_EDE_BLOCK_SIZE, 
692 - .setkey = mv_cesa_des3_ede_setkey, 693 - .encrypt = mv_cesa_cbc_des3_ede_encrypt, 694 - .decrypt = mv_cesa_cbc_des3_ede_decrypt, 695 - }, 676 + struct skcipher_alg mv_cesa_cbc_des3_ede_alg = { 677 + .setkey = mv_cesa_des3_ede_setkey, 678 + .encrypt = mv_cesa_cbc_des3_ede_encrypt, 679 + .decrypt = mv_cesa_cbc_des3_ede_decrypt, 680 + .min_keysize = DES3_EDE_KEY_SIZE, 681 + .max_keysize = DES3_EDE_KEY_SIZE, 682 + .ivsize = DES3_EDE_BLOCK_SIZE, 683 + .base = { 684 + .cra_name = "cbc(des3_ede)", 685 + .cra_driver_name = "mv-cbc-des3-ede", 686 + .cra_priority = 300, 687 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, 688 + .cra_blocksize = DES3_EDE_BLOCK_SIZE, 689 + .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx), 690 + .cra_alignmask = 0, 691 + .cra_module = THIS_MODULE, 692 + .cra_init = mv_cesa_skcipher_cra_init, 693 + .cra_exit = mv_cesa_skcipher_cra_exit, 696 694 }, 697 695 }; 698 696 699 - static int mv_cesa_aes_op(struct ablkcipher_request *req, 697 + static int mv_cesa_aes_op(struct skcipher_request *req, 700 698 struct mv_cesa_op_ctx *tmpl) 701 699 { 702 700 struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm); ··· 720 724 CESA_SA_DESC_CFG_CRYPTM_MSK | 721 725 CESA_SA_DESC_CFG_AES_LEN_MSK); 722 726 723 - return mv_cesa_ablkcipher_queue_req(req, tmpl); 727 + return mv_cesa_skcipher_queue_req(req, tmpl); 724 728 } 725 729 726 - static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req) 730 + static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req) 727 731 { 728 732 struct mv_cesa_op_ctx tmpl; 729 733 ··· 734 738 return mv_cesa_aes_op(req, &tmpl); 735 739 } 736 740 737 - static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req) 741 + static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req) 738 742 { 739 743 struct mv_cesa_op_ctx tmpl; 740 744 ··· 745 749 return mv_cesa_aes_op(req, &tmpl); 746 750 } 747 751 748 - struct crypto_alg mv_cesa_ecb_aes_alg = { 749 - .cra_name = "ecb(aes)", 750 - .cra_driver_name 
= "mv-ecb-aes", 751 - .cra_priority = 300, 752 - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 753 - CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, 754 - .cra_blocksize = AES_BLOCK_SIZE, 755 - .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), 756 - .cra_alignmask = 0, 757 - .cra_type = &crypto_ablkcipher_type, 758 - .cra_module = THIS_MODULE, 759 - .cra_init = mv_cesa_ablkcipher_cra_init, 760 - .cra_u = { 761 - .ablkcipher = { 762 - .min_keysize = AES_MIN_KEY_SIZE, 763 - .max_keysize = AES_MAX_KEY_SIZE, 764 - .setkey = mv_cesa_aes_setkey, 765 - .encrypt = mv_cesa_ecb_aes_encrypt, 766 - .decrypt = mv_cesa_ecb_aes_decrypt, 767 - }, 752 + struct skcipher_alg mv_cesa_ecb_aes_alg = { 753 + .setkey = mv_cesa_aes_setkey, 754 + .encrypt = mv_cesa_ecb_aes_encrypt, 755 + .decrypt = mv_cesa_ecb_aes_decrypt, 756 + .min_keysize = AES_MIN_KEY_SIZE, 757 + .max_keysize = AES_MAX_KEY_SIZE, 758 + .base = { 759 + .cra_name = "ecb(aes)", 760 + .cra_driver_name = "mv-ecb-aes", 761 + .cra_priority = 300, 762 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, 763 + .cra_blocksize = AES_BLOCK_SIZE, 764 + .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), 765 + .cra_alignmask = 0, 766 + .cra_module = THIS_MODULE, 767 + .cra_init = mv_cesa_skcipher_cra_init, 768 + .cra_exit = mv_cesa_skcipher_cra_exit, 768 769 }, 769 770 }; 770 771 771 - static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req, 772 + static int mv_cesa_cbc_aes_op(struct skcipher_request *req, 772 773 struct mv_cesa_op_ctx *tmpl) 773 774 { 774 775 mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC, 775 776 CESA_SA_DESC_CFG_CRYPTCM_MSK); 776 - memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE); 777 + memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE); 777 778 778 779 return mv_cesa_aes_op(req, tmpl); 779 780 } 780 781 781 - static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req) 782 + static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req) 782 783 { 783 784 struct mv_cesa_op_ctx 
tmpl; 784 785 ··· 784 791 return mv_cesa_cbc_aes_op(req, &tmpl); 785 792 } 786 793 787 - static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req) 794 + static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req) 788 795 { 789 796 struct mv_cesa_op_ctx tmpl; 790 797 ··· 793 800 return mv_cesa_cbc_aes_op(req, &tmpl); 794 801 } 795 802 796 - struct crypto_alg mv_cesa_cbc_aes_alg = { 797 - .cra_name = "cbc(aes)", 798 - .cra_driver_name = "mv-cbc-aes", 799 - .cra_priority = 300, 800 - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 801 - CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, 802 - .cra_blocksize = AES_BLOCK_SIZE, 803 - .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), 804 - .cra_alignmask = 0, 805 - .cra_type = &crypto_ablkcipher_type, 806 - .cra_module = THIS_MODULE, 807 - .cra_init = mv_cesa_ablkcipher_cra_init, 808 - .cra_u = { 809 - .ablkcipher = { 810 - .min_keysize = AES_MIN_KEY_SIZE, 811 - .max_keysize = AES_MAX_KEY_SIZE, 812 - .ivsize = AES_BLOCK_SIZE, 813 - .setkey = mv_cesa_aes_setkey, 814 - .encrypt = mv_cesa_cbc_aes_encrypt, 815 - .decrypt = mv_cesa_cbc_aes_decrypt, 816 - }, 803 + struct skcipher_alg mv_cesa_cbc_aes_alg = { 804 + .setkey = mv_cesa_aes_setkey, 805 + .encrypt = mv_cesa_cbc_aes_encrypt, 806 + .decrypt = mv_cesa_cbc_aes_decrypt, 807 + .min_keysize = AES_MIN_KEY_SIZE, 808 + .max_keysize = AES_MAX_KEY_SIZE, 809 + .ivsize = AES_BLOCK_SIZE, 810 + .base = { 811 + .cra_name = "cbc(aes)", 812 + .cra_driver_name = "mv-cbc-aes", 813 + .cra_priority = 300, 814 + .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, 815 + .cra_blocksize = AES_BLOCK_SIZE, 816 + .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx), 817 + .cra_alignmask = 0, 818 + .cra_module = THIS_MODULE, 819 + .cra_init = mv_cesa_skcipher_cra_init, 820 + .cra_exit = mv_cesa_skcipher_cra_exit, 817 821 }, 818 822 };
+1 -4
drivers/crypto/marvell/tdma.c
··· 304 304 struct mv_cesa_tdma_desc *tdma; 305 305 306 306 tdma = mv_cesa_dma_add_desc(chain, flags); 307 - if (IS_ERR(tdma)) 308 - return PTR_ERR(tdma); 309 - 310 - return 0; 307 + return PTR_ERR_OR_ZERO(tdma); 311 308 } 312 309 313 310 int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags)
+12 -27
drivers/crypto/mediatek/mtk-aes.c
··· 13 13 */ 14 14 15 15 #include <crypto/aes.h> 16 + #include <crypto/gcm.h> 16 17 #include "mtk-platform.h" 17 18 18 19 #define AES_QUEUE_SIZE 512 ··· 136 135 size_t textlen; 137 136 138 137 struct crypto_skcipher *ctr; 139 - }; 140 - 141 - struct mtk_aes_gcm_setkey_result { 142 - int err; 143 - struct completion completion; 144 138 }; 145 139 146 140 struct mtk_aes_drv { ··· 924 928 static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode) 925 929 { 926 930 struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); 931 + struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); 927 932 struct mtk_aes_reqctx *rctx = aead_request_ctx(req); 933 + 934 + /* Empty messages are not supported yet */ 935 + if (!gctx->textlen && !req->assoclen) 936 + return -EINVAL; 928 937 929 938 rctx->mode = AES_FLAGS_GCM | mode; 930 939 931 940 return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT), 932 941 &req->base); 933 - } 934 - 935 - static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err) 936 - { 937 - struct mtk_aes_gcm_setkey_result *result = req->data; 938 - 939 - if (err == -EINPROGRESS) 940 - return; 941 - 942 - result->err = err; 943 - complete(&result->completion); 944 942 } 945 943 946 944 /* ··· 952 962 u32 hash[4]; 953 963 u8 iv[8]; 954 964 955 - struct mtk_aes_gcm_setkey_result result; 965 + struct crypto_wait wait; 956 966 957 967 struct scatterlist sg[1]; 958 968 struct skcipher_request req; ··· 992 1002 if (!data) 993 1003 return -ENOMEM; 994 1004 995 - init_completion(&data->result.completion); 1005 + crypto_init_wait(&data->wait); 996 1006 sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE); 997 1007 skcipher_request_set_tfm(&data->req, ctr); 998 1008 skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | 999 1009 CRYPTO_TFM_REQ_MAY_BACKLOG, 1000 - mtk_gcm_setkey_done, &data->result); 1010 + crypto_req_done, &data->wait); 1001 1011 skcipher_request_set_crypt(&data->req, data->sg, data->sg, 1002 
1012 AES_BLOCK_SIZE, data->iv); 1003 1013 1004 - err = crypto_skcipher_encrypt(&data->req); 1005 - if (err == -EINPROGRESS || err == -EBUSY) { 1006 - err = wait_for_completion_interruptible( 1007 - &data->result.completion); 1008 - if (!err) 1009 - err = data->result.err; 1010 - } 1014 + err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), 1015 + &data->wait); 1011 1016 if (err) 1012 1017 goto out; 1013 1018 ··· 1083 1098 .decrypt = mtk_aes_gcm_decrypt, 1084 1099 .init = mtk_aes_gcm_init, 1085 1100 .exit = mtk_aes_gcm_exit, 1086 - .ivsize = 12, 1101 + .ivsize = GCM_AES_IV_SIZE, 1087 1102 .maxauthsize = AES_BLOCK_SIZE, 1088 1103 1089 1104 .base = {
-1216
drivers/crypto/mv_cesa.c
··· 1 - /* 2 - * Support for Marvell's crypto engine which can be found on some Orion5X 3 - * boards. 4 - * 5 - * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > 6 - * License: GPLv2 7 - * 8 - */ 9 - #include <crypto/aes.h> 10 - #include <crypto/algapi.h> 11 - #include <linux/crypto.h> 12 - #include <linux/genalloc.h> 13 - #include <linux/interrupt.h> 14 - #include <linux/io.h> 15 - #include <linux/kthread.h> 16 - #include <linux/platform_device.h> 17 - #include <linux/scatterlist.h> 18 - #include <linux/slab.h> 19 - #include <linux/module.h> 20 - #include <linux/clk.h> 21 - #include <crypto/hmac.h> 22 - #include <crypto/internal/hash.h> 23 - #include <crypto/sha.h> 24 - #include <linux/of.h> 25 - #include <linux/of_platform.h> 26 - #include <linux/of_irq.h> 27 - 28 - #include "mv_cesa.h" 29 - 30 - #define MV_CESA "MV-CESA:" 31 - #define MAX_HW_HASH_SIZE 0xFFFF 32 - #define MV_CESA_EXPIRE 500 /* msec */ 33 - 34 - #define MV_CESA_DEFAULT_SRAM_SIZE 2048 35 - 36 - /* 37 - * STM: 38 - * /---------------------------------------\ 39 - * | | request complete 40 - * \./ | 41 - * IDLE -> new request -> BUSY -> done -> DEQUEUE 42 - * /°\ | 43 - * | | more scatter entries 44 - * \________________/ 45 - */ 46 - enum engine_status { 47 - ENGINE_IDLE, 48 - ENGINE_BUSY, 49 - ENGINE_W_DEQUEUE, 50 - }; 51 - 52 - /** 53 - * struct req_progress - used for every crypt request 54 - * @src_sg_it: sg iterator for src 55 - * @dst_sg_it: sg iterator for dst 56 - * @sg_src_left: bytes left in src to process (scatter list) 57 - * @src_start: offset to add to src start position (scatter list) 58 - * @crypt_len: length of current hw crypt/hash process 59 - * @hw_nbytes: total bytes to process in hw for this request 60 - * @copy_back: whether to copy data back (crypt) or not (hash) 61 - * @sg_dst_left: bytes left dst to process in this scatter list 62 - * @dst_start: offset to add to dst start position (scatter list) 63 - * @hw_processed_bytes: number of bytes processed by 
hw (request). 64 - * 65 - * sg helper are used to iterate over the scatterlist. Since the size of the 66 - * SRAM may be less than the scatter size, this struct struct is used to keep 67 - * track of progress within current scatterlist. 68 - */ 69 - struct req_progress { 70 - struct sg_mapping_iter src_sg_it; 71 - struct sg_mapping_iter dst_sg_it; 72 - void (*complete) (void); 73 - void (*process) (int is_first); 74 - 75 - /* src mostly */ 76 - int sg_src_left; 77 - int src_start; 78 - int crypt_len; 79 - int hw_nbytes; 80 - /* dst mostly */ 81 - int copy_back; 82 - int sg_dst_left; 83 - int dst_start; 84 - int hw_processed_bytes; 85 - }; 86 - 87 - struct crypto_priv { 88 - void __iomem *reg; 89 - void __iomem *sram; 90 - struct gen_pool *sram_pool; 91 - dma_addr_t sram_dma; 92 - int irq; 93 - struct clk *clk; 94 - struct task_struct *queue_th; 95 - 96 - /* the lock protects queue and eng_st */ 97 - spinlock_t lock; 98 - struct crypto_queue queue; 99 - enum engine_status eng_st; 100 - struct timer_list completion_timer; 101 - struct crypto_async_request *cur_req; 102 - struct req_progress p; 103 - int max_req_size; 104 - int sram_size; 105 - int has_sha1; 106 - int has_hmac_sha1; 107 - }; 108 - 109 - static struct crypto_priv *cpg; 110 - 111 - struct mv_ctx { 112 - u8 aes_enc_key[AES_KEY_LEN]; 113 - u32 aes_dec_key[8]; 114 - int key_len; 115 - u32 need_calc_aes_dkey; 116 - }; 117 - 118 - enum crypto_op { 119 - COP_AES_ECB, 120 - COP_AES_CBC, 121 - }; 122 - 123 - struct mv_req_ctx { 124 - enum crypto_op op; 125 - int decrypt; 126 - }; 127 - 128 - enum hash_op { 129 - COP_SHA1, 130 - COP_HMAC_SHA1 131 - }; 132 - 133 - struct mv_tfm_hash_ctx { 134 - struct crypto_shash *fallback; 135 - struct crypto_shash *base_hash; 136 - u32 ivs[2 * SHA1_DIGEST_SIZE / 4]; 137 - int count_add; 138 - enum hash_op op; 139 - }; 140 - 141 - struct mv_req_hash_ctx { 142 - u64 count; 143 - u32 state[SHA1_DIGEST_SIZE / 4]; 144 - u8 buffer[SHA1_BLOCK_SIZE]; 145 - int first_hash; /* marks 
that we don't have previous state */ 146 - int last_chunk; /* marks that this is the 'final' request */ 147 - int extra_bytes; /* unprocessed bytes in buffer */ 148 - enum hash_op op; 149 - int count_add; 150 - }; 151 - 152 - static void mv_completion_timer_callback(struct timer_list *unused) 153 - { 154 - int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0; 155 - 156 - printk(KERN_ERR MV_CESA 157 - "completion timer expired (CESA %sactive), cleaning up.\n", 158 - active ? "" : "in"); 159 - 160 - del_timer(&cpg->completion_timer); 161 - writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD); 162 - while(readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC) 163 - printk(KERN_INFO MV_CESA "%s: waiting for engine finishing\n", __func__); 164 - cpg->eng_st = ENGINE_W_DEQUEUE; 165 - wake_up_process(cpg->queue_th); 166 - } 167 - 168 - static void mv_setup_timer(void) 169 - { 170 - timer_setup(&cpg->completion_timer, mv_completion_timer_callback, 0); 171 - mod_timer(&cpg->completion_timer, 172 - jiffies + msecs_to_jiffies(MV_CESA_EXPIRE)); 173 - } 174 - 175 - static void compute_aes_dec_key(struct mv_ctx *ctx) 176 - { 177 - struct crypto_aes_ctx gen_aes_key; 178 - int key_pos; 179 - 180 - if (!ctx->need_calc_aes_dkey) 181 - return; 182 - 183 - crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len); 184 - 185 - key_pos = ctx->key_len + 24; 186 - memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4); 187 - switch (ctx->key_len) { 188 - case AES_KEYSIZE_256: 189 - key_pos -= 2; 190 - /* fall */ 191 - case AES_KEYSIZE_192: 192 - key_pos -= 2; 193 - memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos], 194 - 4 * 4); 195 - break; 196 - } 197 - ctx->need_calc_aes_dkey = 0; 198 - } 199 - 200 - static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key, 201 - unsigned int len) 202 - { 203 - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 204 - struct mv_ctx *ctx = crypto_tfm_ctx(tfm); 205 - 206 - switch (len) { 207 
- case AES_KEYSIZE_128: 208 - case AES_KEYSIZE_192: 209 - case AES_KEYSIZE_256: 210 - break; 211 - default: 212 - crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 213 - return -EINVAL; 214 - } 215 - ctx->key_len = len; 216 - ctx->need_calc_aes_dkey = 1; 217 - 218 - memcpy(ctx->aes_enc_key, key, AES_KEY_LEN); 219 - return 0; 220 - } 221 - 222 - static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len) 223 - { 224 - int ret; 225 - void *sbuf; 226 - int copy_len; 227 - 228 - while (len) { 229 - if (!p->sg_src_left) { 230 - ret = sg_miter_next(&p->src_sg_it); 231 - BUG_ON(!ret); 232 - p->sg_src_left = p->src_sg_it.length; 233 - p->src_start = 0; 234 - } 235 - 236 - sbuf = p->src_sg_it.addr + p->src_start; 237 - 238 - copy_len = min(p->sg_src_left, len); 239 - memcpy(dbuf, sbuf, copy_len); 240 - 241 - p->src_start += copy_len; 242 - p->sg_src_left -= copy_len; 243 - 244 - len -= copy_len; 245 - dbuf += copy_len; 246 - } 247 - } 248 - 249 - static void setup_data_in(void) 250 - { 251 - struct req_progress *p = &cpg->p; 252 - int data_in_sram = 253 - min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size); 254 - copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len, 255 - data_in_sram - p->crypt_len); 256 - p->crypt_len = data_in_sram; 257 - } 258 - 259 - static void mv_process_current_q(int first_block) 260 - { 261 - struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); 262 - struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 263 - struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); 264 - struct sec_accel_config op; 265 - 266 - switch (req_ctx->op) { 267 - case COP_AES_ECB: 268 - op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB; 269 - break; 270 - case COP_AES_CBC: 271 - default: 272 - op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC; 273 - op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) | 274 - ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF); 275 - if (first_block) 276 - memcpy(cpg->sram + 
SRAM_DATA_IV, req->info, 16); 277 - break; 278 - } 279 - if (req_ctx->decrypt) { 280 - op.config |= CFG_DIR_DEC; 281 - memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key, 282 - AES_KEY_LEN); 283 - } else { 284 - op.config |= CFG_DIR_ENC; 285 - memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key, 286 - AES_KEY_LEN); 287 - } 288 - 289 - switch (ctx->key_len) { 290 - case AES_KEYSIZE_128: 291 - op.config |= CFG_AES_LEN_128; 292 - break; 293 - case AES_KEYSIZE_192: 294 - op.config |= CFG_AES_LEN_192; 295 - break; 296 - case AES_KEYSIZE_256: 297 - op.config |= CFG_AES_LEN_256; 298 - break; 299 - } 300 - op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) | 301 - ENC_P_DST(SRAM_DATA_OUT_START); 302 - op.enc_key_p = SRAM_DATA_KEY_P; 303 - 304 - setup_data_in(); 305 - op.enc_len = cpg->p.crypt_len; 306 - memcpy(cpg->sram + SRAM_CONFIG, &op, 307 - sizeof(struct sec_accel_config)); 308 - 309 - /* GO */ 310 - mv_setup_timer(); 311 - writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); 312 - } 313 - 314 - static void mv_crypto_algo_completion(void) 315 - { 316 - struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); 317 - struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); 318 - 319 - sg_miter_stop(&cpg->p.src_sg_it); 320 - sg_miter_stop(&cpg->p.dst_sg_it); 321 - 322 - if (req_ctx->op != COP_AES_CBC) 323 - return ; 324 - 325 - memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16); 326 - } 327 - 328 - static void mv_process_hash_current(int first_block) 329 - { 330 - struct ahash_request *req = ahash_request_cast(cpg->cur_req); 331 - const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); 332 - struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); 333 - struct req_progress *p = &cpg->p; 334 - struct sec_accel_config op = { 0 }; 335 - int is_last; 336 - 337 - switch (req_ctx->op) { 338 - case COP_SHA1: 339 - default: 340 - op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1; 341 - break; 342 - case COP_HMAC_SHA1: 343 - op.config = CFG_OP_MAC_ONLY | 
CFG_MACM_HMAC_SHA1; 344 - memcpy(cpg->sram + SRAM_HMAC_IV_IN, 345 - tfm_ctx->ivs, sizeof(tfm_ctx->ivs)); 346 - break; 347 - } 348 - 349 - op.mac_src_p = 350 - MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32) 351 - req_ctx-> 352 - count); 353 - 354 - setup_data_in(); 355 - 356 - op.mac_digest = 357 - MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len); 358 - op.mac_iv = 359 - MAC_INNER_IV_P(SRAM_HMAC_IV_IN) | 360 - MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT); 361 - 362 - is_last = req_ctx->last_chunk 363 - && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes) 364 - && (req_ctx->count <= MAX_HW_HASH_SIZE); 365 - if (req_ctx->first_hash) { 366 - if (is_last) 367 - op.config |= CFG_NOT_FRAG; 368 - else 369 - op.config |= CFG_FIRST_FRAG; 370 - 371 - req_ctx->first_hash = 0; 372 - } else { 373 - if (is_last) 374 - op.config |= CFG_LAST_FRAG; 375 - else 376 - op.config |= CFG_MID_FRAG; 377 - 378 - if (first_block) { 379 - writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); 380 - writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); 381 - writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); 382 - writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); 383 - writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); 384 - } 385 - } 386 - 387 - memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); 388 - 389 - /* GO */ 390 - mv_setup_timer(); 391 - writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); 392 - } 393 - 394 - static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx, 395 - struct shash_desc *desc) 396 - { 397 - int i; 398 - struct sha1_state shash_state; 399 - 400 - shash_state.count = ctx->count + ctx->count_add; 401 - for (i = 0; i < 5; i++) 402 - shash_state.state[i] = ctx->state[i]; 403 - memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer)); 404 - return crypto_shash_import(desc, &shash_state); 405 - } 406 - 407 - static int mv_hash_final_fallback(struct ahash_request 
*req) 408 - { 409 - const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); 410 - struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); 411 - SHASH_DESC_ON_STACK(shash, tfm_ctx->fallback); 412 - int rc; 413 - 414 - shash->tfm = tfm_ctx->fallback; 415 - shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 416 - if (unlikely(req_ctx->first_hash)) { 417 - crypto_shash_init(shash); 418 - crypto_shash_update(shash, req_ctx->buffer, 419 - req_ctx->extra_bytes); 420 - } else { 421 - /* only SHA1 for now.... 422 - */ 423 - rc = mv_hash_import_sha1_ctx(req_ctx, shash); 424 - if (rc) 425 - goto out; 426 - } 427 - rc = crypto_shash_final(shash, req->result); 428 - out: 429 - return rc; 430 - } 431 - 432 - static void mv_save_digest_state(struct mv_req_hash_ctx *ctx) 433 - { 434 - ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A); 435 - ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B); 436 - ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C); 437 - ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D); 438 - ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E); 439 - } 440 - 441 - static void mv_hash_algo_completion(void) 442 - { 443 - struct ahash_request *req = ahash_request_cast(cpg->cur_req); 444 - struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); 445 - 446 - if (ctx->extra_bytes) 447 - copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes); 448 - sg_miter_stop(&cpg->p.src_sg_it); 449 - 450 - if (likely(ctx->last_chunk)) { 451 - if (likely(ctx->count <= MAX_HW_HASH_SIZE)) { 452 - memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, 453 - crypto_ahash_digestsize(crypto_ahash_reqtfm 454 - (req))); 455 - } else { 456 - mv_save_digest_state(ctx); 457 - mv_hash_final_fallback(req); 458 - } 459 - } else { 460 - mv_save_digest_state(ctx); 461 - } 462 - } 463 - 464 - static void dequeue_complete_req(void) 465 - { 466 - struct crypto_async_request *req = cpg->cur_req; 467 - void *buf; 468 - int ret; 469 - cpg->p.hw_processed_bytes += cpg->p.crypt_len; 470 
- if (cpg->p.copy_back) { 471 - int need_copy_len = cpg->p.crypt_len; 472 - int sram_offset = 0; 473 - do { 474 - int dst_copy; 475 - 476 - if (!cpg->p.sg_dst_left) { 477 - ret = sg_miter_next(&cpg->p.dst_sg_it); 478 - BUG_ON(!ret); 479 - cpg->p.sg_dst_left = cpg->p.dst_sg_it.length; 480 - cpg->p.dst_start = 0; 481 - } 482 - 483 - buf = cpg->p.dst_sg_it.addr; 484 - buf += cpg->p.dst_start; 485 - 486 - dst_copy = min(need_copy_len, cpg->p.sg_dst_left); 487 - 488 - memcpy(buf, 489 - cpg->sram + SRAM_DATA_OUT_START + sram_offset, 490 - dst_copy); 491 - sram_offset += dst_copy; 492 - cpg->p.sg_dst_left -= dst_copy; 493 - need_copy_len -= dst_copy; 494 - cpg->p.dst_start += dst_copy; 495 - } while (need_copy_len > 0); 496 - } 497 - 498 - cpg->p.crypt_len = 0; 499 - 500 - BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE); 501 - if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) { 502 - /* process next scatter list entry */ 503 - cpg->eng_st = ENGINE_BUSY; 504 - cpg->p.process(0); 505 - } else { 506 - cpg->p.complete(); 507 - cpg->eng_st = ENGINE_IDLE; 508 - local_bh_disable(); 509 - req->complete(req, 0); 510 - local_bh_enable(); 511 - } 512 - } 513 - 514 - static int count_sgs(struct scatterlist *sl, unsigned int total_bytes) 515 - { 516 - int i = 0; 517 - size_t cur_len; 518 - 519 - while (sl) { 520 - cur_len = sl[i].length; 521 - ++i; 522 - if (total_bytes > cur_len) 523 - total_bytes -= cur_len; 524 - else 525 - break; 526 - } 527 - 528 - return i; 529 - } 530 - 531 - static void mv_start_new_crypt_req(struct ablkcipher_request *req) 532 - { 533 - struct req_progress *p = &cpg->p; 534 - int num_sgs; 535 - 536 - cpg->cur_req = &req->base; 537 - memset(p, 0, sizeof(struct req_progress)); 538 - p->hw_nbytes = req->nbytes; 539 - p->complete = mv_crypto_algo_completion; 540 - p->process = mv_process_current_q; 541 - p->copy_back = 1; 542 - 543 - num_sgs = count_sgs(req->src, req->nbytes); 544 - sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); 545 - 546 - num_sgs = 
count_sgs(req->dst, req->nbytes); 547 - sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG); 548 - 549 - mv_process_current_q(1); 550 - } 551 - 552 - static void mv_start_new_hash_req(struct ahash_request *req) 553 - { 554 - struct req_progress *p = &cpg->p; 555 - struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); 556 - int num_sgs, hw_bytes, old_extra_bytes, rc; 557 - cpg->cur_req = &req->base; 558 - memset(p, 0, sizeof(struct req_progress)); 559 - hw_bytes = req->nbytes + ctx->extra_bytes; 560 - old_extra_bytes = ctx->extra_bytes; 561 - 562 - ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE; 563 - if (ctx->extra_bytes != 0 564 - && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE)) 565 - hw_bytes -= ctx->extra_bytes; 566 - else 567 - ctx->extra_bytes = 0; 568 - 569 - num_sgs = count_sgs(req->src, req->nbytes); 570 - sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); 571 - 572 - if (hw_bytes) { 573 - p->hw_nbytes = hw_bytes; 574 - p->complete = mv_hash_algo_completion; 575 - p->process = mv_process_hash_current; 576 - 577 - if (unlikely(old_extra_bytes)) { 578 - memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer, 579 - old_extra_bytes); 580 - p->crypt_len = old_extra_bytes; 581 - } 582 - 583 - mv_process_hash_current(1); 584 - } else { 585 - copy_src_to_buf(p, ctx->buffer + old_extra_bytes, 586 - ctx->extra_bytes - old_extra_bytes); 587 - sg_miter_stop(&p->src_sg_it); 588 - if (ctx->last_chunk) 589 - rc = mv_hash_final_fallback(req); 590 - else 591 - rc = 0; 592 - cpg->eng_st = ENGINE_IDLE; 593 - local_bh_disable(); 594 - req->base.complete(&req->base, rc); 595 - local_bh_enable(); 596 - } 597 - } 598 - 599 - static int queue_manag(void *data) 600 - { 601 - cpg->eng_st = ENGINE_IDLE; 602 - do { 603 - struct crypto_async_request *async_req = NULL; 604 - struct crypto_async_request *backlog = NULL; 605 - 606 - __set_current_state(TASK_INTERRUPTIBLE); 607 - 608 - if (cpg->eng_st == ENGINE_W_DEQUEUE) 609 - dequeue_complete_req(); 610 - 
611 - spin_lock_irq(&cpg->lock); 612 - if (cpg->eng_st == ENGINE_IDLE) { 613 - backlog = crypto_get_backlog(&cpg->queue); 614 - async_req = crypto_dequeue_request(&cpg->queue); 615 - if (async_req) { 616 - BUG_ON(cpg->eng_st != ENGINE_IDLE); 617 - cpg->eng_st = ENGINE_BUSY; 618 - } 619 - } 620 - spin_unlock_irq(&cpg->lock); 621 - 622 - if (backlog) { 623 - backlog->complete(backlog, -EINPROGRESS); 624 - backlog = NULL; 625 - } 626 - 627 - if (async_req) { 628 - if (crypto_tfm_alg_type(async_req->tfm) != 629 - CRYPTO_ALG_TYPE_AHASH) { 630 - struct ablkcipher_request *req = 631 - ablkcipher_request_cast(async_req); 632 - mv_start_new_crypt_req(req); 633 - } else { 634 - struct ahash_request *req = 635 - ahash_request_cast(async_req); 636 - mv_start_new_hash_req(req); 637 - } 638 - async_req = NULL; 639 - } 640 - 641 - schedule(); 642 - 643 - } while (!kthread_should_stop()); 644 - return 0; 645 - } 646 - 647 - static int mv_handle_req(struct crypto_async_request *req) 648 - { 649 - unsigned long flags; 650 - int ret; 651 - 652 - spin_lock_irqsave(&cpg->lock, flags); 653 - ret = crypto_enqueue_request(&cpg->queue, req); 654 - spin_unlock_irqrestore(&cpg->lock, flags); 655 - wake_up_process(cpg->queue_th); 656 - return ret; 657 - } 658 - 659 - static int mv_enc_aes_ecb(struct ablkcipher_request *req) 660 - { 661 - struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); 662 - 663 - req_ctx->op = COP_AES_ECB; 664 - req_ctx->decrypt = 0; 665 - 666 - return mv_handle_req(&req->base); 667 - } 668 - 669 - static int mv_dec_aes_ecb(struct ablkcipher_request *req) 670 - { 671 - struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 672 - struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); 673 - 674 - req_ctx->op = COP_AES_ECB; 675 - req_ctx->decrypt = 1; 676 - 677 - compute_aes_dec_key(ctx); 678 - return mv_handle_req(&req->base); 679 - } 680 - 681 - static int mv_enc_aes_cbc(struct ablkcipher_request *req) 682 - { 683 - struct mv_req_ctx *req_ctx = 
ablkcipher_request_ctx(req); 684 - 685 - req_ctx->op = COP_AES_CBC; 686 - req_ctx->decrypt = 0; 687 - 688 - return mv_handle_req(&req->base); 689 - } 690 - 691 - static int mv_dec_aes_cbc(struct ablkcipher_request *req) 692 - { 693 - struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 694 - struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); 695 - 696 - req_ctx->op = COP_AES_CBC; 697 - req_ctx->decrypt = 1; 698 - 699 - compute_aes_dec_key(ctx); 700 - return mv_handle_req(&req->base); 701 - } 702 - 703 - static int mv_cra_init(struct crypto_tfm *tfm) 704 - { 705 - tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx); 706 - return 0; 707 - } 708 - 709 - static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op, 710 - int is_last, unsigned int req_len, 711 - int count_add) 712 - { 713 - memset(ctx, 0, sizeof(*ctx)); 714 - ctx->op = op; 715 - ctx->count = req_len; 716 - ctx->first_hash = 1; 717 - ctx->last_chunk = is_last; 718 - ctx->count_add = count_add; 719 - } 720 - 721 - static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last, 722 - unsigned req_len) 723 - { 724 - ctx->last_chunk = is_last; 725 - ctx->count += req_len; 726 - } 727 - 728 - static int mv_hash_init(struct ahash_request *req) 729 - { 730 - const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); 731 - mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0, 732 - tfm_ctx->count_add); 733 - return 0; 734 - } 735 - 736 - static int mv_hash_update(struct ahash_request *req) 737 - { 738 - if (!req->nbytes) 739 - return 0; 740 - 741 - mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes); 742 - return mv_handle_req(&req->base); 743 - } 744 - 745 - static int mv_hash_final(struct ahash_request *req) 746 - { 747 - struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); 748 - 749 - ahash_request_set_crypt(req, NULL, req->result, 0); 750 - mv_update_hash_req_ctx(ctx, 1, 0); 751 - return mv_handle_req(&req->base); 752 - } 753 - 754 - static int 
mv_hash_finup(struct ahash_request *req) 755 - { 756 - mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes); 757 - return mv_handle_req(&req->base); 758 - } 759 - 760 - static int mv_hash_digest(struct ahash_request *req) 761 - { 762 - const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); 763 - mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1, 764 - req->nbytes, tfm_ctx->count_add); 765 - return mv_handle_req(&req->base); 766 - } 767 - 768 - static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate, 769 - const void *ostate) 770 - { 771 - const struct sha1_state *isha1_state = istate, *osha1_state = ostate; 772 - int i; 773 - for (i = 0; i < 5; i++) { 774 - ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]); 775 - ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]); 776 - } 777 - } 778 - 779 - static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key, 780 - unsigned int keylen) 781 - { 782 - int rc; 783 - struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base); 784 - int bs, ds, ss; 785 - 786 - if (!ctx->base_hash) 787 - return 0; 788 - 789 - rc = crypto_shash_setkey(ctx->fallback, key, keylen); 790 - if (rc) 791 - return rc; 792 - 793 - /* Can't see a way to extract the ipad/opad from the fallback tfm 794 - so I'm basically copying code from the hmac module */ 795 - bs = crypto_shash_blocksize(ctx->base_hash); 796 - ds = crypto_shash_digestsize(ctx->base_hash); 797 - ss = crypto_shash_statesize(ctx->base_hash); 798 - 799 - { 800 - SHASH_DESC_ON_STACK(shash, ctx->base_hash); 801 - 802 - unsigned int i; 803 - char ipad[ss]; 804 - char opad[ss]; 805 - 806 - shash->tfm = ctx->base_hash; 807 - shash->flags = crypto_shash_get_flags(ctx->base_hash) & 808 - CRYPTO_TFM_REQ_MAY_SLEEP; 809 - 810 - if (keylen > bs) { 811 - int err; 812 - 813 - err = 814 - crypto_shash_digest(shash, key, keylen, ipad); 815 - if (err) 816 - return err; 817 - 818 - keylen = ds; 819 - } else 820 - memcpy(ipad, key, keylen); 821 - 
822 - memset(ipad + keylen, 0, bs - keylen); 823 - memcpy(opad, ipad, bs); 824 - 825 - for (i = 0; i < bs; i++) { 826 - ipad[i] ^= HMAC_IPAD_VALUE; 827 - opad[i] ^= HMAC_OPAD_VALUE; 828 - } 829 - 830 - rc = crypto_shash_init(shash) ? : 831 - crypto_shash_update(shash, ipad, bs) ? : 832 - crypto_shash_export(shash, ipad) ? : 833 - crypto_shash_init(shash) ? : 834 - crypto_shash_update(shash, opad, bs) ? : 835 - crypto_shash_export(shash, opad); 836 - 837 - if (rc == 0) 838 - mv_hash_init_ivs(ctx, ipad, opad); 839 - 840 - return rc; 841 - } 842 - } 843 - 844 - static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name, 845 - enum hash_op op, int count_add) 846 - { 847 - const char *fallback_driver_name = crypto_tfm_alg_name(tfm); 848 - struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm); 849 - struct crypto_shash *fallback_tfm = NULL; 850 - struct crypto_shash *base_hash = NULL; 851 - int err = -ENOMEM; 852 - 853 - ctx->op = op; 854 - ctx->count_add = count_add; 855 - 856 - /* Allocate a fallback and abort if it failed. */ 857 - fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0, 858 - CRYPTO_ALG_NEED_FALLBACK); 859 - if (IS_ERR(fallback_tfm)) { 860 - printk(KERN_WARNING MV_CESA 861 - "Fallback driver '%s' could not be loaded!\n", 862 - fallback_driver_name); 863 - err = PTR_ERR(fallback_tfm); 864 - goto out; 865 - } 866 - ctx->fallback = fallback_tfm; 867 - 868 - if (base_hash_name) { 869 - /* Allocate a hash to compute the ipad/opad of hmac. 
*/ 870 - base_hash = crypto_alloc_shash(base_hash_name, 0, 871 - CRYPTO_ALG_NEED_FALLBACK); 872 - if (IS_ERR(base_hash)) { 873 - printk(KERN_WARNING MV_CESA 874 - "Base driver '%s' could not be loaded!\n", 875 - base_hash_name); 876 - err = PTR_ERR(base_hash); 877 - goto err_bad_base; 878 - } 879 - } 880 - ctx->base_hash = base_hash; 881 - 882 - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 883 - sizeof(struct mv_req_hash_ctx) + 884 - crypto_shash_descsize(ctx->fallback)); 885 - return 0; 886 - err_bad_base: 887 - crypto_free_shash(fallback_tfm); 888 - out: 889 - return err; 890 - } 891 - 892 - static void mv_cra_hash_exit(struct crypto_tfm *tfm) 893 - { 894 - struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm); 895 - 896 - crypto_free_shash(ctx->fallback); 897 - if (ctx->base_hash) 898 - crypto_free_shash(ctx->base_hash); 899 - } 900 - 901 - static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm) 902 - { 903 - return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0); 904 - } 905 - 906 - static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm) 907 - { 908 - return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE); 909 - } 910 - 911 - static irqreturn_t crypto_int(int irq, void *priv) 912 - { 913 - u32 val; 914 - 915 - val = readl(cpg->reg + SEC_ACCEL_INT_STATUS); 916 - if (!(val & SEC_INT_ACCEL0_DONE)) 917 - return IRQ_NONE; 918 - 919 - if (!del_timer(&cpg->completion_timer)) { 920 - printk(KERN_WARNING MV_CESA 921 - "got an interrupt but no pending timer?\n"); 922 - } 923 - val &= ~SEC_INT_ACCEL0_DONE; 924 - writel(val, cpg->reg + FPGA_INT_STATUS); 925 - writel(val, cpg->reg + SEC_ACCEL_INT_STATUS); 926 - BUG_ON(cpg->eng_st != ENGINE_BUSY); 927 - cpg->eng_st = ENGINE_W_DEQUEUE; 928 - wake_up_process(cpg->queue_th); 929 - return IRQ_HANDLED; 930 - } 931 - 932 - static struct crypto_alg mv_aes_alg_ecb = { 933 - .cra_name = "ecb(aes)", 934 - .cra_driver_name = "mv-ecb-aes", 935 - .cra_priority = 300, 936 - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 937 - 
CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, 938 - .cra_blocksize = 16, 939 - .cra_ctxsize = sizeof(struct mv_ctx), 940 - .cra_alignmask = 0, 941 - .cra_type = &crypto_ablkcipher_type, 942 - .cra_module = THIS_MODULE, 943 - .cra_init = mv_cra_init, 944 - .cra_u = { 945 - .ablkcipher = { 946 - .min_keysize = AES_MIN_KEY_SIZE, 947 - .max_keysize = AES_MAX_KEY_SIZE, 948 - .setkey = mv_setkey_aes, 949 - .encrypt = mv_enc_aes_ecb, 950 - .decrypt = mv_dec_aes_ecb, 951 - }, 952 - }, 953 - }; 954 - 955 - static struct crypto_alg mv_aes_alg_cbc = { 956 - .cra_name = "cbc(aes)", 957 - .cra_driver_name = "mv-cbc-aes", 958 - .cra_priority = 300, 959 - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 960 - CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, 961 - .cra_blocksize = AES_BLOCK_SIZE, 962 - .cra_ctxsize = sizeof(struct mv_ctx), 963 - .cra_alignmask = 0, 964 - .cra_type = &crypto_ablkcipher_type, 965 - .cra_module = THIS_MODULE, 966 - .cra_init = mv_cra_init, 967 - .cra_u = { 968 - .ablkcipher = { 969 - .ivsize = AES_BLOCK_SIZE, 970 - .min_keysize = AES_MIN_KEY_SIZE, 971 - .max_keysize = AES_MAX_KEY_SIZE, 972 - .setkey = mv_setkey_aes, 973 - .encrypt = mv_enc_aes_cbc, 974 - .decrypt = mv_dec_aes_cbc, 975 - }, 976 - }, 977 - }; 978 - 979 - static struct ahash_alg mv_sha1_alg = { 980 - .init = mv_hash_init, 981 - .update = mv_hash_update, 982 - .final = mv_hash_final, 983 - .finup = mv_hash_finup, 984 - .digest = mv_hash_digest, 985 - .halg = { 986 - .digestsize = SHA1_DIGEST_SIZE, 987 - .base = { 988 - .cra_name = "sha1", 989 - .cra_driver_name = "mv-sha1", 990 - .cra_priority = 300, 991 - .cra_flags = 992 - CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | 993 - CRYPTO_ALG_NEED_FALLBACK, 994 - .cra_blocksize = SHA1_BLOCK_SIZE, 995 - .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx), 996 - .cra_init = mv_cra_hash_sha1_init, 997 - .cra_exit = mv_cra_hash_exit, 998 - .cra_module = THIS_MODULE, 999 - } 1000 - } 1001 - }; 1002 - 1003 - static struct ahash_alg mv_hmac_sha1_alg = { 1004 
- .init = mv_hash_init, 1005 - .update = mv_hash_update, 1006 - .final = mv_hash_final, 1007 - .finup = mv_hash_finup, 1008 - .digest = mv_hash_digest, 1009 - .setkey = mv_hash_setkey, 1010 - .halg = { 1011 - .digestsize = SHA1_DIGEST_SIZE, 1012 - .base = { 1013 - .cra_name = "hmac(sha1)", 1014 - .cra_driver_name = "mv-hmac-sha1", 1015 - .cra_priority = 300, 1016 - .cra_flags = 1017 - CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | 1018 - CRYPTO_ALG_NEED_FALLBACK, 1019 - .cra_blocksize = SHA1_BLOCK_SIZE, 1020 - .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx), 1021 - .cra_init = mv_cra_hash_hmac_sha1_init, 1022 - .cra_exit = mv_cra_hash_exit, 1023 - .cra_module = THIS_MODULE, 1024 - } 1025 - } 1026 - }; 1027 - 1028 - static int mv_cesa_get_sram(struct platform_device *pdev, 1029 - struct crypto_priv *cp) 1030 - { 1031 - struct resource *res; 1032 - u32 sram_size = MV_CESA_DEFAULT_SRAM_SIZE; 1033 - 1034 - of_property_read_u32(pdev->dev.of_node, "marvell,crypto-sram-size", 1035 - &sram_size); 1036 - 1037 - cp->sram_size = sram_size; 1038 - cp->sram_pool = of_gen_pool_get(pdev->dev.of_node, 1039 - "marvell,crypto-srams", 0); 1040 - if (cp->sram_pool) { 1041 - cp->sram = gen_pool_dma_alloc(cp->sram_pool, sram_size, 1042 - &cp->sram_dma); 1043 - if (cp->sram) 1044 - return 0; 1045 - 1046 - return -ENOMEM; 1047 - } 1048 - 1049 - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 1050 - "sram"); 1051 - if (!res || resource_size(res) < cp->sram_size) 1052 - return -EINVAL; 1053 - 1054 - cp->sram = devm_ioremap_resource(&pdev->dev, res); 1055 - if (IS_ERR(cp->sram)) 1056 - return PTR_ERR(cp->sram); 1057 - 1058 - return 0; 1059 - } 1060 - 1061 - static int mv_probe(struct platform_device *pdev) 1062 - { 1063 - struct crypto_priv *cp; 1064 - struct resource *res; 1065 - int irq; 1066 - int ret; 1067 - 1068 - if (cpg) { 1069 - printk(KERN_ERR MV_CESA "Second crypto dev?\n"); 1070 - return -EEXIST; 1071 - } 1072 - 1073 - res = platform_get_resource_byname(pdev, 
IORESOURCE_MEM, "regs"); 1074 - if (!res) 1075 - return -ENXIO; 1076 - 1077 - cp = devm_kzalloc(&pdev->dev, sizeof(*cp), GFP_KERNEL); 1078 - if (!cp) 1079 - return -ENOMEM; 1080 - 1081 - spin_lock_init(&cp->lock); 1082 - crypto_init_queue(&cp->queue, 50); 1083 - cp->reg = devm_ioremap_resource(&pdev->dev, res); 1084 - if (IS_ERR(cp->reg)) { 1085 - ret = PTR_ERR(cp->reg); 1086 - goto err; 1087 - } 1088 - 1089 - ret = mv_cesa_get_sram(pdev, cp); 1090 - if (ret) 1091 - goto err; 1092 - 1093 - cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; 1094 - 1095 - irq = platform_get_irq(pdev, 0); 1096 - if (irq < 0) { 1097 - ret = irq; 1098 - goto err; 1099 - } 1100 - cp->irq = irq; 1101 - 1102 - platform_set_drvdata(pdev, cp); 1103 - cpg = cp; 1104 - 1105 - cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto"); 1106 - if (IS_ERR(cp->queue_th)) { 1107 - ret = PTR_ERR(cp->queue_th); 1108 - goto err; 1109 - } 1110 - 1111 - ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev), 1112 - cp); 1113 - if (ret) 1114 - goto err_thread; 1115 - 1116 - /* Not all platforms can gate the clock, so it is not 1117 - an error if the clock does not exists. 
*/ 1118 - cp->clk = clk_get(&pdev->dev, NULL); 1119 - if (!IS_ERR(cp->clk)) 1120 - clk_prepare_enable(cp->clk); 1121 - 1122 - writel(0, cpg->reg + SEC_ACCEL_INT_STATUS); 1123 - writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); 1124 - writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); 1125 - writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); 1126 - 1127 - ret = crypto_register_alg(&mv_aes_alg_ecb); 1128 - if (ret) { 1129 - printk(KERN_WARNING MV_CESA 1130 - "Could not register aes-ecb driver\n"); 1131 - goto err_irq; 1132 - } 1133 - 1134 - ret = crypto_register_alg(&mv_aes_alg_cbc); 1135 - if (ret) { 1136 - printk(KERN_WARNING MV_CESA 1137 - "Could not register aes-cbc driver\n"); 1138 - goto err_unreg_ecb; 1139 - } 1140 - 1141 - ret = crypto_register_ahash(&mv_sha1_alg); 1142 - if (ret == 0) 1143 - cpg->has_sha1 = 1; 1144 - else 1145 - printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n"); 1146 - 1147 - ret = crypto_register_ahash(&mv_hmac_sha1_alg); 1148 - if (ret == 0) { 1149 - cpg->has_hmac_sha1 = 1; 1150 - } else { 1151 - printk(KERN_WARNING MV_CESA 1152 - "Could not register hmac-sha1 driver\n"); 1153 - } 1154 - 1155 - return 0; 1156 - err_unreg_ecb: 1157 - crypto_unregister_alg(&mv_aes_alg_ecb); 1158 - err_irq: 1159 - free_irq(irq, cp); 1160 - if (!IS_ERR(cp->clk)) { 1161 - clk_disable_unprepare(cp->clk); 1162 - clk_put(cp->clk); 1163 - } 1164 - err_thread: 1165 - kthread_stop(cp->queue_th); 1166 - err: 1167 - cpg = NULL; 1168 - return ret; 1169 - } 1170 - 1171 - static int mv_remove(struct platform_device *pdev) 1172 - { 1173 - struct crypto_priv *cp = platform_get_drvdata(pdev); 1174 - 1175 - crypto_unregister_alg(&mv_aes_alg_ecb); 1176 - crypto_unregister_alg(&mv_aes_alg_cbc); 1177 - if (cp->has_sha1) 1178 - crypto_unregister_ahash(&mv_sha1_alg); 1179 - if (cp->has_hmac_sha1) 1180 - crypto_unregister_ahash(&mv_hmac_sha1_alg); 1181 - kthread_stop(cp->queue_th); 1182 - free_irq(cp->irq, cp); 1183 - memset(cp->sram, 0, cp->sram_size); 
1184 - 1185 - if (!IS_ERR(cp->clk)) { 1186 - clk_disable_unprepare(cp->clk); 1187 - clk_put(cp->clk); 1188 - } 1189 - 1190 - cpg = NULL; 1191 - return 0; 1192 - } 1193 - 1194 - static const struct of_device_id mv_cesa_of_match_table[] = { 1195 - { .compatible = "marvell,orion-crypto", }, 1196 - { .compatible = "marvell,kirkwood-crypto", }, 1197 - { .compatible = "marvell,dove-crypto", }, 1198 - {} 1199 - }; 1200 - MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table); 1201 - 1202 - static struct platform_driver marvell_crypto = { 1203 - .probe = mv_probe, 1204 - .remove = mv_remove, 1205 - .driver = { 1206 - .name = "mv_crypto", 1207 - .of_match_table = mv_cesa_of_match_table, 1208 - }, 1209 - }; 1210 - MODULE_ALIAS("platform:mv_crypto"); 1211 - 1212 - module_platform_driver(marvell_crypto); 1213 - 1214 - MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>"); 1215 - MODULE_DESCRIPTION("Support for Marvell's cryptographic engine"); 1216 - MODULE_LICENSE("GPL");
-151
drivers/crypto/mv_cesa.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef __MV_CRYPTO_H__ 3 - #define __MV_CRYPTO_H__ 4 - 5 - #define DIGEST_INITIAL_VAL_A 0xdd00 6 - #define DIGEST_INITIAL_VAL_B 0xdd04 7 - #define DIGEST_INITIAL_VAL_C 0xdd08 8 - #define DIGEST_INITIAL_VAL_D 0xdd0c 9 - #define DIGEST_INITIAL_VAL_E 0xdd10 10 - #define DES_CMD_REG 0xdd58 11 - 12 - #define SEC_ACCEL_CMD 0xde00 13 - #define SEC_CMD_EN_SEC_ACCL0 (1 << 0) 14 - #define SEC_CMD_EN_SEC_ACCL1 (1 << 1) 15 - #define SEC_CMD_DISABLE_SEC (1 << 2) 16 - 17 - #define SEC_ACCEL_DESC_P0 0xde04 18 - #define SEC_DESC_P0_PTR(x) (x) 19 - 20 - #define SEC_ACCEL_DESC_P1 0xde14 21 - #define SEC_DESC_P1_PTR(x) (x) 22 - 23 - #define SEC_ACCEL_CFG 0xde08 24 - #define SEC_CFG_STOP_DIG_ERR (1 << 0) 25 - #define SEC_CFG_CH0_W_IDMA (1 << 7) 26 - #define SEC_CFG_CH1_W_IDMA (1 << 8) 27 - #define SEC_CFG_ACT_CH0_IDMA (1 << 9) 28 - #define SEC_CFG_ACT_CH1_IDMA (1 << 10) 29 - 30 - #define SEC_ACCEL_STATUS 0xde0c 31 - #define SEC_ST_ACT_0 (1 << 0) 32 - #define SEC_ST_ACT_1 (1 << 1) 33 - 34 - /* 35 - * FPGA_INT_STATUS looks like a FPGA leftover and is documented only in Errata 36 - * 4.12. It looks like that it was part of an IRQ-controller in FPGA and 37 - * someone forgot to remove it while switching to the core and moving to 38 - * SEC_ACCEL_INT_STATUS. 
39 - */ 40 - #define FPGA_INT_STATUS 0xdd68 41 - #define SEC_ACCEL_INT_STATUS 0xde20 42 - #define SEC_INT_AUTH_DONE (1 << 0) 43 - #define SEC_INT_DES_E_DONE (1 << 1) 44 - #define SEC_INT_AES_E_DONE (1 << 2) 45 - #define SEC_INT_AES_D_DONE (1 << 3) 46 - #define SEC_INT_ENC_DONE (1 << 4) 47 - #define SEC_INT_ACCEL0_DONE (1 << 5) 48 - #define SEC_INT_ACCEL1_DONE (1 << 6) 49 - #define SEC_INT_ACC0_IDMA_DONE (1 << 7) 50 - #define SEC_INT_ACC1_IDMA_DONE (1 << 8) 51 - 52 - #define SEC_ACCEL_INT_MASK 0xde24 53 - 54 - #define AES_KEY_LEN (8 * 4) 55 - 56 - struct sec_accel_config { 57 - 58 - u32 config; 59 - #define CFG_OP_MAC_ONLY 0 60 - #define CFG_OP_CRYPT_ONLY 1 61 - #define CFG_OP_MAC_CRYPT 2 62 - #define CFG_OP_CRYPT_MAC 3 63 - #define CFG_MACM_MD5 (4 << 4) 64 - #define CFG_MACM_SHA1 (5 << 4) 65 - #define CFG_MACM_HMAC_MD5 (6 << 4) 66 - #define CFG_MACM_HMAC_SHA1 (7 << 4) 67 - #define CFG_ENCM_DES (1 << 8) 68 - #define CFG_ENCM_3DES (2 << 8) 69 - #define CFG_ENCM_AES (3 << 8) 70 - #define CFG_DIR_ENC (0 << 12) 71 - #define CFG_DIR_DEC (1 << 12) 72 - #define CFG_ENC_MODE_ECB (0 << 16) 73 - #define CFG_ENC_MODE_CBC (1 << 16) 74 - #define CFG_3DES_EEE (0 << 20) 75 - #define CFG_3DES_EDE (1 << 20) 76 - #define CFG_AES_LEN_128 (0 << 24) 77 - #define CFG_AES_LEN_192 (1 << 24) 78 - #define CFG_AES_LEN_256 (2 << 24) 79 - #define CFG_NOT_FRAG (0 << 30) 80 - #define CFG_FIRST_FRAG (1 << 30) 81 - #define CFG_LAST_FRAG (2 << 30) 82 - #define CFG_MID_FRAG (3 << 30) 83 - 84 - u32 enc_p; 85 - #define ENC_P_SRC(x) (x) 86 - #define ENC_P_DST(x) ((x) << 16) 87 - 88 - u32 enc_len; 89 - #define ENC_LEN(x) (x) 90 - 91 - u32 enc_key_p; 92 - #define ENC_KEY_P(x) (x) 93 - 94 - u32 enc_iv; 95 - #define ENC_IV_POINT(x) ((x) << 0) 96 - #define ENC_IV_BUF_POINT(x) ((x) << 16) 97 - 98 - u32 mac_src_p; 99 - #define MAC_SRC_DATA_P(x) (x) 100 - #define MAC_SRC_TOTAL_LEN(x) ((x) << 16) 101 - 102 - u32 mac_digest; 103 - #define MAC_DIGEST_P(x) (x) 104 - #define MAC_FRAG_LEN(x) ((x) << 16) 105 - u32 
mac_iv; 106 - #define MAC_INNER_IV_P(x) (x) 107 - #define MAC_OUTER_IV_P(x) ((x) << 16) 108 - }__attribute__ ((packed)); 109 - /* 110 - * /-----------\ 0 111 - * | ACCEL CFG | 4 * 8 112 - * |-----------| 0x20 113 - * | CRYPT KEY | 8 * 4 114 - * |-----------| 0x40 115 - * | IV IN | 4 * 4 116 - * |-----------| 0x40 (inplace) 117 - * | IV BUF | 4 * 4 118 - * |-----------| 0x80 119 - * | DATA IN | 16 * x (max ->max_req_size) 120 - * |-----------| 0x80 (inplace operation) 121 - * | DATA OUT | 16 * x (max ->max_req_size) 122 - * \-----------/ SRAM size 123 - */ 124 - 125 - /* Hashing memory map: 126 - * /-----------\ 0 127 - * | ACCEL CFG | 4 * 8 128 - * |-----------| 0x20 129 - * | Inner IV | 5 * 4 130 - * |-----------| 0x34 131 - * | Outer IV | 5 * 4 132 - * |-----------| 0x48 133 - * | Output BUF| 5 * 4 134 - * |-----------| 0x80 135 - * | DATA IN | 64 * x (max ->max_req_size) 136 - * \-----------/ SRAM size 137 - */ 138 - #define SRAM_CONFIG 0x00 139 - #define SRAM_DATA_KEY_P 0x20 140 - #define SRAM_DATA_IV 0x40 141 - #define SRAM_DATA_IV_BUF 0x40 142 - #define SRAM_DATA_IN_START 0x80 143 - #define SRAM_DATA_OUT_START 0x80 144 - 145 - #define SRAM_HMAC_IV_IN 0x20 146 - #define SRAM_HMAC_IV_OUT 0x34 147 - #define SRAM_DIGEST_BUF 0x48 148 - 149 - #define SRAM_CFG_SPACE 0x80 150 - 151 - #endif
+4 -8
drivers/crypto/n2_core.c
··· 1962 1962 1963 1963 static void free_n2cp(struct n2_crypto *np) 1964 1964 { 1965 - if (np->cwq_info.ino_table) { 1966 - kfree(np->cwq_info.ino_table); 1967 - np->cwq_info.ino_table = NULL; 1968 - } 1965 + kfree(np->cwq_info.ino_table); 1966 + np->cwq_info.ino_table = NULL; 1969 1967 1970 1968 kfree(np); 1971 1969 } ··· 2077 2079 2078 2080 static void free_ncp(struct n2_mau *mp) 2079 2081 { 2080 - if (mp->mau_info.ino_table) { 2081 - kfree(mp->mau_info.ino_table); 2082 - mp->mau_info.ino_table = NULL; 2083 - } 2082 + kfree(mp->mau_info.ino_table); 2083 + mp->mau_info.ino_table = NULL; 2084 2084 2085 2085 kfree(mp); 2086 2086 }
+1 -1
drivers/crypto/nx/nx-842-pseries.c
··· 1082 1082 return 0; 1083 1083 } 1084 1084 1085 - static struct vio_device_id nx842_vio_driver_ids[] = { 1085 + static const struct vio_device_id nx842_vio_driver_ids[] = { 1086 1086 {"ibm,compression-v1", "ibm,compression"}, 1087 1087 {"", ""}, 1088 1088 };
+5 -4
drivers/crypto/nx/nx-aes-gcm.c
··· 22 22 #include <crypto/internal/aead.h> 23 23 #include <crypto/aes.h> 24 24 #include <crypto/algapi.h> 25 + #include <crypto/gcm.h> 25 26 #include <crypto/scatterwalk.h> 26 27 #include <linux/module.h> 27 28 #include <linux/types.h> ··· 434 433 struct nx_gcm_rctx *rctx = aead_request_ctx(req); 435 434 char *iv = rctx->iv; 436 435 437 - memcpy(iv, req->iv, 12); 436 + memcpy(iv, req->iv, GCM_AES_IV_SIZE); 438 437 439 438 return gcm_aes_nx_crypt(req, 1, req->assoclen); 440 439 } ··· 444 443 struct nx_gcm_rctx *rctx = aead_request_ctx(req); 445 444 char *iv = rctx->iv; 446 445 447 - memcpy(iv, req->iv, 12); 446 + memcpy(iv, req->iv, GCM_AES_IV_SIZE); 448 447 449 448 return gcm_aes_nx_crypt(req, 0, req->assoclen); 450 449 } ··· 499 498 }, 500 499 .init = nx_crypto_ctx_aes_gcm_init, 501 500 .exit = nx_crypto_ctx_aead_exit, 502 - .ivsize = 12, 501 + .ivsize = GCM_AES_IV_SIZE, 503 502 .maxauthsize = AES_BLOCK_SIZE, 504 503 .setkey = gcm_aes_nx_set_key, 505 504 .encrypt = gcm_aes_nx_encrypt, ··· 517 516 }, 518 517 .init = nx_crypto_ctx_aes_gcm_init, 519 518 .exit = nx_crypto_ctx_aead_exit, 520 - .ivsize = 8, 519 + .ivsize = GCM_RFC4106_IV_SIZE, 521 520 .maxauthsize = AES_BLOCK_SIZE, 522 521 .setkey = gcm4106_aes_nx_set_key, 523 522 .setauthsize = gcm4106_aes_nx_setauthsize,
+1 -1
drivers/crypto/nx/nx.c
··· 833 833 vio_unregister_driver(&nx_driver.viodriver); 834 834 } 835 835 836 - static struct vio_device_id nx_crypto_driver_ids[] = { 836 + static const struct vio_device_id nx_crypto_driver_ids[] = { 837 837 { "ibm,sym-encryption-v1", "ibm,sym-encryption" }, 838 838 { "", "" } 839 839 };
+6 -5
drivers/crypto/omap-aes-gcm.c
··· 18 18 #include <linux/omap-dma.h> 19 19 #include <linux/interrupt.h> 20 20 #include <crypto/aes.h> 21 + #include <crypto/gcm.h> 21 22 #include <crypto/scatterwalk.h> 22 23 #include <crypto/skcipher.h> 23 24 #include <crypto/internal/aead.h> ··· 187 186 sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL); 188 187 if (!sk_req) { 189 188 pr_err("skcipher: Failed to allocate request\n"); 190 - return -1; 189 + return -ENOMEM; 191 190 } 192 191 193 192 init_completion(&result.completion); ··· 215 214 } 216 215 /* fall through */ 217 216 default: 218 - pr_err("Encryption of IV failed for GCM mode"); 217 + pr_err("Encryption of IV failed for GCM mode\n"); 219 218 break; 220 219 } 221 220 ··· 312 311 int err, assoclen; 313 312 314 313 memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag)); 315 - memcpy(rctx->iv + 12, &counter, 4); 314 + memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4); 316 315 317 316 err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv); 318 317 if (err) ··· 340 339 { 341 340 struct omap_aes_reqctx *rctx = aead_request_ctx(req); 342 341 343 - memcpy(rctx->iv, req->iv, 12); 342 + memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE); 344 343 return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM); 345 344 } 346 345 ··· 348 347 { 349 348 struct omap_aes_reqctx *rctx = aead_request_ctx(req); 350 349 351 - memcpy(rctx->iv, req->iv, 12); 350 + memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE); 352 351 return omap_aes_gcm_crypt(req, FLAGS_GCM); 353 352 } 354 353
+5 -7
drivers/crypto/omap-aes.c
··· 35 35 #include <linux/interrupt.h> 36 36 #include <crypto/scatterwalk.h> 37 37 #include <crypto/aes.h> 38 + #include <crypto/gcm.h> 38 39 #include <crypto/engine.h> 39 40 #include <crypto/internal/skcipher.h> 40 41 #include <crypto/internal/aead.h> ··· 768 767 }, 769 768 .init = omap_aes_gcm_cra_init, 770 769 .exit = omap_aes_gcm_cra_exit, 771 - .ivsize = 12, 770 + .ivsize = GCM_AES_IV_SIZE, 772 771 .maxauthsize = AES_BLOCK_SIZE, 773 772 .setkey = omap_aes_gcm_setkey, 774 773 .encrypt = omap_aes_gcm_encrypt, ··· 789 788 .init = omap_aes_gcm_cra_init, 790 789 .exit = omap_aes_gcm_cra_exit, 791 790 .maxauthsize = AES_BLOCK_SIZE, 792 - .ivsize = 8, 791 + .ivsize = GCM_RFC4106_IV_SIZE, 793 792 .setkey = omap_aes_4106gcm_setkey, 794 793 .encrypt = omap_aes_4106gcm_encrypt, 795 794 .decrypt = omap_aes_4106gcm_decrypt, ··· 975 974 struct device *dev, struct resource *res) 976 975 { 977 976 struct device_node *node = dev->of_node; 978 - const struct of_device_id *match; 979 977 int err = 0; 980 978 981 - match = of_match_device(of_match_ptr(omap_aes_of_match), dev); 982 - if (!match) { 979 + dd->pdata = of_device_get_match_data(dev); 980 + if (!dd->pdata) { 983 981 dev_err(dev, "no compatible OF match\n"); 984 982 err = -EINVAL; 985 983 goto err; ··· 990 990 err = -EINVAL; 991 991 goto err; 992 992 } 993 - 994 - dd->pdata = match->data; 995 993 996 994 err: 997 995 return err;
+2 -5
drivers/crypto/omap-des.c
··· 928 928 static int omap_des_get_of(struct omap_des_dev *dd, 929 929 struct platform_device *pdev) 930 930 { 931 - const struct of_device_id *match; 932 931 933 - match = of_match_device(of_match_ptr(omap_des_of_match), &pdev->dev); 934 - if (!match) { 932 + dd->pdata = of_device_get_match_data(&pdev->dev); 933 + if (!dd->pdata) { 935 934 dev_err(&pdev->dev, "no compatible OF match\n"); 936 935 return -EINVAL; 937 936 } 938 - 939 - dd->pdata = match->data; 940 937 941 938 return 0; 942 939 }
+2 -5
drivers/crypto/omap-sham.c
··· 1944 1944 struct device *dev, struct resource *res) 1945 1945 { 1946 1946 struct device_node *node = dev->of_node; 1947 - const struct of_device_id *match; 1948 1947 int err = 0; 1949 1948 1950 - match = of_match_device(of_match_ptr(omap_sham_of_match), dev); 1951 - if (!match) { 1949 + dd->pdata = of_device_get_match_data(dev); 1950 + if (!dd->pdata) { 1952 1951 dev_err(dev, "no compatible OF match\n"); 1953 1952 err = -EINVAL; 1954 1953 goto err; ··· 1966 1967 err = -EINVAL; 1967 1968 goto err; 1968 1969 } 1969 - 1970 - dd->pdata = match->data; 1971 1970 1972 1971 err: 1973 1972 return err;
+1 -1
drivers/crypto/padlock-aes.c
··· 482 482 } 483 483 }; 484 484 485 - static struct x86_cpu_id padlock_cpu_id[] = { 485 + static const struct x86_cpu_id padlock_cpu_id[] = { 486 486 X86_FEATURE_MATCH(X86_FEATURE_XCRYPT), 487 487 {} 488 488 };
+1 -1
drivers/crypto/padlock-sha.c
··· 509 509 } 510 510 }; 511 511 512 - static struct x86_cpu_id padlock_sha_ids[] = { 512 + static const struct x86_cpu_id padlock_sha_ids[] = { 513 513 X86_FEATURE_MATCH(X86_FEATURE_PHE), 514 514 {} 515 515 };
-3
drivers/crypto/qat/qat_common/adf_dev_mgr.c
··· 228 228 list_add_tail(&map->list, &vfs_table); 229 229 } else if (accel_dev->is_vf && pf) { 230 230 /* VF on host */ 231 - struct adf_accel_vf_info *vf_info; 232 231 struct vf_id_map *map; 233 - 234 - vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev); 235 232 236 233 map = adf_find_vf(adf_get_vf_num(accel_dev)); 237 234 if (map) {
+8 -10
drivers/crypto/qat/qat_common/qat_asym_algs.c
··· 443 443 struct qat_crypto_instance *inst = ctx->inst; 444 444 struct device *dev = &GET_DEV(inst->accel_dev); 445 445 446 - if (unlikely(!params->p || !params->g)) 447 - return -EINVAL; 448 - 449 446 if (qat_dh_check_params_length(params->p_size << 3)) 450 447 return -EINVAL; 451 448 ··· 459 462 } 460 463 461 464 ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); 462 - if (!ctx->g) { 463 - dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p); 464 - ctx->p = NULL; 465 + if (!ctx->g) 465 466 return -ENOMEM; 466 - } 467 467 memcpy(ctx->g + (ctx->p_size - params->g_size), params->g, 468 468 params->g_size); 469 469 ··· 501 507 502 508 ret = qat_dh_set_params(ctx, &params); 503 509 if (ret < 0) 504 - return ret; 510 + goto err_clear_ctx; 505 511 506 512 ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa, 507 513 GFP_KERNEL); 508 514 if (!ctx->xa) { 509 - qat_dh_clear_ctx(dev, ctx); 510 - return -ENOMEM; 515 + ret = -ENOMEM; 516 + goto err_clear_ctx; 511 517 } 512 518 memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key, 513 519 params.key_size); 514 520 515 521 return 0; 522 + 523 + err_clear_ctx: 524 + qat_dh_clear_ctx(dev, ctx); 525 + return ret; 516 526 } 517 527 518 528 static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
+9 -6
drivers/crypto/qat/qat_common/qat_uclo.c
··· 567 567 code_page->imp_expr_tab_offset); 568 568 if (uc_var_tab->entry_num || imp_var_tab->entry_num || 569 569 imp_expr_tab->entry_num) { 570 - pr_err("QAT: UOF can't contain imported variable to be parsed"); 570 + pr_err("QAT: UOF can't contain imported variable to be parsed\n"); 571 571 return -EINVAL; 572 572 } 573 573 neigh_reg_tab = (struct icp_qat_uof_objtable *) 574 574 (encap_uof_obj->beg_uof + 575 575 code_page->neigh_reg_tab_offset); 576 576 if (neigh_reg_tab->entry_num) { 577 - pr_err("QAT: UOF can't contain shared control store feature"); 577 + pr_err("QAT: UOF can't contain shared control store feature\n"); 578 578 return -EINVAL; 579 579 } 580 580 if (image->numpages > 1) { 581 - pr_err("QAT: UOF can't contain multiple pages"); 581 + pr_err("QAT: UOF can't contain multiple pages\n"); 582 582 return -EINVAL; 583 583 } 584 584 if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) { 585 - pr_err("QAT: UOF can't use shared control store feature"); 585 + pr_err("QAT: UOF can't use shared control store feature\n"); 586 586 return -EFAULT; 587 587 } 588 588 if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) { 589 - pr_err("QAT: UOF can't use reloadable feature"); 589 + pr_err("QAT: UOF can't use reloadable feature\n"); 590 590 return -EFAULT; 591 591 } 592 592 return 0; ··· 702 702 } 703 703 } 704 704 if (!mflag) { 705 - pr_err("QAT: uimage uses AE not set"); 705 + pr_err("QAT: uimage uses AE not set\n"); 706 706 return -EINVAL; 707 707 } 708 708 return 0; ··· 791 791 case ICP_GPA_ABS: 792 792 case ICP_GPB_ABS: 793 793 ctx_mask = 0; 794 + /* fall through */ 794 795 case ICP_GPA_REL: 795 796 case ICP_GPB_REL: 796 797 return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type, ··· 801 800 case ICP_SR_RD_ABS: 802 801 case ICP_DR_RD_ABS: 803 802 ctx_mask = 0; 803 + /* fall through */ 804 804 case ICP_SR_REL: 805 805 case ICP_DR_REL: 806 806 case ICP_SR_RD_REL: ··· 811 809 case ICP_SR_WR_ABS: 812 810 case ICP_DR_WR_ABS: 813 811 ctx_mask = 0; 812 + /* fall through */ 814 
813 case ICP_SR_WR_REL: 815 814 case ICP_DR_WR_REL: 816 815 return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
+1 -4
drivers/crypto/qce/ablkcipher.c
··· 248 248 ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0, 249 249 CRYPTO_ALG_ASYNC | 250 250 CRYPTO_ALG_NEED_FALLBACK); 251 - if (IS_ERR(ctx->fallback)) 252 - return PTR_ERR(ctx->fallback); 253 - 254 - return 0; 251 + return PTR_ERR_OR_ZERO(ctx->fallback); 255 252 } 256 253 257 254 static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
+4 -26
drivers/crypto/qce/sha.c
··· 349 349 return qce->async_req_enqueue(tmpl->qce, &req->base); 350 350 } 351 351 352 - struct qce_ahash_result { 353 - struct completion completion; 354 - int error; 355 - }; 356 - 357 - static void qce_digest_complete(struct crypto_async_request *req, int error) 358 - { 359 - struct qce_ahash_result *result = req->data; 360 - 361 - if (error == -EINPROGRESS) 362 - return; 363 - 364 - result->error = error; 365 - complete(&result->completion); 366 - } 367 - 368 352 static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, 369 353 unsigned int keylen) 370 354 { 371 355 unsigned int digestsize = crypto_ahash_digestsize(tfm); 372 356 struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base); 373 - struct qce_ahash_result result; 357 + struct crypto_wait wait; 374 358 struct ahash_request *req; 375 359 struct scatterlist sg; 376 360 unsigned int blocksize; ··· 389 405 goto err_free_ahash; 390 406 } 391 407 392 - init_completion(&result.completion); 408 + crypto_init_wait(&wait); 393 409 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 394 - qce_digest_complete, &result); 410 + crypto_req_done, &wait); 395 411 crypto_ahash_clear_flags(ahash_tfm, ~0); 396 412 397 413 buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL); ··· 404 420 sg_init_one(&sg, buf, keylen); 405 421 ahash_request_set_crypt(req, &sg, ctx->authkey, keylen); 406 422 407 - ret = crypto_ahash_digest(req); 408 - if (ret == -EINPROGRESS || ret == -EBUSY) { 409 - ret = wait_for_completion_interruptible(&result.completion); 410 - if (!ret) 411 - ret = result.error; 412 - } 413 - 423 + ret = crypto_wait_req(crypto_ahash_digest(req), &wait); 414 424 if (ret) 415 425 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 416 426
+1482 -96
drivers/crypto/s5p-sss.c
··· 1 1 /* 2 2 * Cryptographic API. 3 3 * 4 - * Support for Samsung S5PV210 HW acceleration. 4 + * Support for Samsung S5PV210 and Exynos HW acceleration. 5 5 * 6 6 * Copyright (C) 2011 NetUP Inc. All rights reserved. 7 + * Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved. 7 8 * 8 9 * This program is free software; you can redistribute it and/or modify 9 10 * it under the terms of the GNU General Public License version 2 as published 10 11 * by the Free Software Foundation. 11 12 * 13 + * Hash part based on omap-sham.c driver. 12 14 */ 13 15 14 16 #include <linux/clk.h> ··· 32 30 #include <crypto/algapi.h> 33 31 #include <crypto/scatterwalk.h> 34 32 35 - #define _SBF(s, v) ((v) << (s)) 33 + #include <crypto/hash.h> 34 + #include <crypto/md5.h> 35 + #include <crypto/sha.h> 36 + #include <crypto/internal/hash.h> 37 + 38 + #define _SBF(s, v) ((v) << (s)) 36 39 37 40 /* Feed control registers */ 38 - #define SSS_REG_FCINTSTAT 0x0000 39 - #define SSS_FCINTSTAT_BRDMAINT BIT(3) 40 - #define SSS_FCINTSTAT_BTDMAINT BIT(2) 41 - #define SSS_FCINTSTAT_HRDMAINT BIT(1) 42 - #define SSS_FCINTSTAT_PKDMAINT BIT(0) 41 + #define SSS_REG_FCINTSTAT 0x0000 42 + #define SSS_FCINTSTAT_HPARTINT BIT(7) 43 + #define SSS_FCINTSTAT_HDONEINT BIT(5) 44 + #define SSS_FCINTSTAT_BRDMAINT BIT(3) 45 + #define SSS_FCINTSTAT_BTDMAINT BIT(2) 46 + #define SSS_FCINTSTAT_HRDMAINT BIT(1) 47 + #define SSS_FCINTSTAT_PKDMAINT BIT(0) 43 48 44 - #define SSS_REG_FCINTENSET 0x0004 45 - #define SSS_FCINTENSET_BRDMAINTENSET BIT(3) 46 - #define SSS_FCINTENSET_BTDMAINTENSET BIT(2) 47 - #define SSS_FCINTENSET_HRDMAINTENSET BIT(1) 48 - #define SSS_FCINTENSET_PKDMAINTENSET BIT(0) 49 + #define SSS_REG_FCINTENSET 0x0004 50 + #define SSS_FCINTENSET_HPARTINTENSET BIT(7) 51 + #define SSS_FCINTENSET_HDONEINTENSET BIT(5) 52 + #define SSS_FCINTENSET_BRDMAINTENSET BIT(3) 53 + #define SSS_FCINTENSET_BTDMAINTENSET BIT(2) 54 + #define SSS_FCINTENSET_HRDMAINTENSET BIT(1) 55 + #define SSS_FCINTENSET_PKDMAINTENSET 
BIT(0) 49 56 50 - #define SSS_REG_FCINTENCLR 0x0008 51 - #define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3) 52 - #define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2) 53 - #define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1) 54 - #define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0) 57 + #define SSS_REG_FCINTENCLR 0x0008 58 + #define SSS_FCINTENCLR_HPARTINTENCLR BIT(7) 59 + #define SSS_FCINTENCLR_HDONEINTENCLR BIT(5) 60 + #define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3) 61 + #define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2) 62 + #define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1) 63 + #define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0) 55 64 56 - #define SSS_REG_FCINTPEND 0x000C 57 - #define SSS_FCINTPEND_BRDMAINTP BIT(3) 58 - #define SSS_FCINTPEND_BTDMAINTP BIT(2) 59 - #define SSS_FCINTPEND_HRDMAINTP BIT(1) 60 - #define SSS_FCINTPEND_PKDMAINTP BIT(0) 65 + #define SSS_REG_FCINTPEND 0x000C 66 + #define SSS_FCINTPEND_HPARTINTP BIT(7) 67 + #define SSS_FCINTPEND_HDONEINTP BIT(5) 68 + #define SSS_FCINTPEND_BRDMAINTP BIT(3) 69 + #define SSS_FCINTPEND_BTDMAINTP BIT(2) 70 + #define SSS_FCINTPEND_HRDMAINTP BIT(1) 71 + #define SSS_FCINTPEND_PKDMAINTP BIT(0) 61 72 62 - #define SSS_REG_FCFIFOSTAT 0x0010 63 - #define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7) 64 - #define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6) 65 - #define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5) 66 - #define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4) 67 - #define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3) 68 - #define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2) 69 - #define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1) 70 - #define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0) 73 + #define SSS_REG_FCFIFOSTAT 0x0010 74 + #define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7) 75 + #define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6) 76 + #define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5) 77 + #define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4) 78 + #define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3) 79 + #define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2) 80 + #define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1) 81 + #define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0) 71 82 72 - #define SSS_REG_FCFIFOCTRL 0x0014 73 - #define SSS_FCFIFOCTRL_DESSEL BIT(2) 74 - 
#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00) 75 - #define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01) 76 - #define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02) 83 + #define SSS_REG_FCFIFOCTRL 0x0014 84 + #define SSS_FCFIFOCTRL_DESSEL BIT(2) 85 + #define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00) 86 + #define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01) 87 + #define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02) 88 + #define SSS_HASHIN_MASK _SBF(0, 0x03) 77 89 78 - #define SSS_REG_FCBRDMAS 0x0020 79 - #define SSS_REG_FCBRDMAL 0x0024 80 - #define SSS_REG_FCBRDMAC 0x0028 81 - #define SSS_FCBRDMAC_BYTESWAP BIT(1) 82 - #define SSS_FCBRDMAC_FLUSH BIT(0) 90 + #define SSS_REG_FCBRDMAS 0x0020 91 + #define SSS_REG_FCBRDMAL 0x0024 92 + #define SSS_REG_FCBRDMAC 0x0028 93 + #define SSS_FCBRDMAC_BYTESWAP BIT(1) 94 + #define SSS_FCBRDMAC_FLUSH BIT(0) 83 95 84 - #define SSS_REG_FCBTDMAS 0x0030 85 - #define SSS_REG_FCBTDMAL 0x0034 86 - #define SSS_REG_FCBTDMAC 0x0038 87 - #define SSS_FCBTDMAC_BYTESWAP BIT(1) 88 - #define SSS_FCBTDMAC_FLUSH BIT(0) 96 + #define SSS_REG_FCBTDMAS 0x0030 97 + #define SSS_REG_FCBTDMAL 0x0034 98 + #define SSS_REG_FCBTDMAC 0x0038 99 + #define SSS_FCBTDMAC_BYTESWAP BIT(1) 100 + #define SSS_FCBTDMAC_FLUSH BIT(0) 89 101 90 - #define SSS_REG_FCHRDMAS 0x0040 91 - #define SSS_REG_FCHRDMAL 0x0044 92 - #define SSS_REG_FCHRDMAC 0x0048 93 - #define SSS_FCHRDMAC_BYTESWAP BIT(1) 94 - #define SSS_FCHRDMAC_FLUSH BIT(0) 102 + #define SSS_REG_FCHRDMAS 0x0040 103 + #define SSS_REG_FCHRDMAL 0x0044 104 + #define SSS_REG_FCHRDMAC 0x0048 105 + #define SSS_FCHRDMAC_BYTESWAP BIT(1) 106 + #define SSS_FCHRDMAC_FLUSH BIT(0) 95 107 96 - #define SSS_REG_FCPKDMAS 0x0050 97 - #define SSS_REG_FCPKDMAL 0x0054 98 - #define SSS_REG_FCPKDMAC 0x0058 99 - #define SSS_FCPKDMAC_BYTESWAP BIT(3) 100 - #define SSS_FCPKDMAC_DESCEND BIT(2) 101 - #define SSS_FCPKDMAC_TRANSMIT BIT(1) 102 - #define SSS_FCPKDMAC_FLUSH BIT(0) 108 + #define SSS_REG_FCPKDMAS 0x0050 109 + #define SSS_REG_FCPKDMAL 0x0054 110 + #define SSS_REG_FCPKDMAC 
0x0058 111 + #define SSS_FCPKDMAC_BYTESWAP BIT(3) 112 + #define SSS_FCPKDMAC_DESCEND BIT(2) 113 + #define SSS_FCPKDMAC_TRANSMIT BIT(1) 114 + #define SSS_FCPKDMAC_FLUSH BIT(0) 103 115 104 - #define SSS_REG_FCPKDMAO 0x005C 116 + #define SSS_REG_FCPKDMAO 0x005C 105 117 106 118 /* AES registers */ 107 119 #define SSS_REG_AES_CONTROL 0x00 108 - #define SSS_AES_BYTESWAP_DI BIT(11) 109 - #define SSS_AES_BYTESWAP_DO BIT(10) 110 - #define SSS_AES_BYTESWAP_IV BIT(9) 111 - #define SSS_AES_BYTESWAP_CNT BIT(8) 112 - #define SSS_AES_BYTESWAP_KEY BIT(7) 113 - #define SSS_AES_KEY_CHANGE_MODE BIT(6) 114 - #define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00) 115 - #define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01) 116 - #define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02) 117 - #define SSS_AES_FIFO_MODE BIT(3) 118 - #define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00) 119 - #define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01) 120 - #define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02) 121 - #define SSS_AES_MODE_DECRYPT BIT(0) 120 + #define SSS_AES_BYTESWAP_DI BIT(11) 121 + #define SSS_AES_BYTESWAP_DO BIT(10) 122 + #define SSS_AES_BYTESWAP_IV BIT(9) 123 + #define SSS_AES_BYTESWAP_CNT BIT(8) 124 + #define SSS_AES_BYTESWAP_KEY BIT(7) 125 + #define SSS_AES_KEY_CHANGE_MODE BIT(6) 126 + #define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00) 127 + #define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01) 128 + #define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02) 129 + #define SSS_AES_FIFO_MODE BIT(3) 130 + #define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00) 131 + #define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01) 132 + #define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02) 133 + #define SSS_AES_MODE_DECRYPT BIT(0) 122 134 123 135 #define SSS_REG_AES_STATUS 0x04 124 - #define SSS_AES_BUSY BIT(2) 125 - #define SSS_AES_INPUT_READY BIT(1) 126 - #define SSS_AES_OUTPUT_READY BIT(0) 136 + #define SSS_AES_BUSY BIT(2) 137 + #define SSS_AES_INPUT_READY BIT(1) 138 + #define SSS_AES_OUTPUT_READY BIT(0) 127 139 128 140 #define SSS_REG_AES_IN_DATA(s) (0x10 + (s << 2)) 129 141 #define SSS_REG_AES_OUT_DATA(s) (0x20 
+ (s << 2)) ··· 145 129 #define SSS_REG_AES_CNT_DATA(s) (0x40 + (s << 2)) 146 130 #define SSS_REG_AES_KEY_DATA(s) (0x80 + (s << 2)) 147 131 148 - #define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg)) 149 - #define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg)) 150 - #define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg)) 132 + #define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg)) 133 + #define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg)) 134 + #define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg)) 151 135 152 - #define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg) 136 + #define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg) 153 137 #define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \ 154 138 SSS_AES_REG(dev, reg)) 155 139 156 140 /* HW engine modes */ 157 - #define FLAGS_AES_DECRYPT BIT(0) 158 - #define FLAGS_AES_MODE_MASK _SBF(1, 0x03) 159 - #define FLAGS_AES_CBC _SBF(1, 0x01) 160 - #define FLAGS_AES_CTR _SBF(1, 0x02) 141 + #define FLAGS_AES_DECRYPT BIT(0) 142 + #define FLAGS_AES_MODE_MASK _SBF(1, 0x03) 143 + #define FLAGS_AES_CBC _SBF(1, 0x01) 144 + #define FLAGS_AES_CTR _SBF(1, 0x02) 161 145 162 - #define AES_KEY_LEN 16 163 - #define CRYPTO_QUEUE_LEN 1 146 + #define AES_KEY_LEN 16 147 + #define CRYPTO_QUEUE_LEN 1 148 + 149 + /* HASH registers */ 150 + #define SSS_REG_HASH_CTRL 0x00 151 + 152 + #define SSS_HASH_USER_IV_EN BIT(5) 153 + #define SSS_HASH_INIT_BIT BIT(4) 154 + #define SSS_HASH_ENGINE_SHA1 _SBF(1, 0x00) 155 + #define SSS_HASH_ENGINE_MD5 _SBF(1, 0x01) 156 + #define SSS_HASH_ENGINE_SHA256 _SBF(1, 0x02) 157 + 158 + #define SSS_HASH_ENGINE_MASK _SBF(1, 0x03) 159 + 160 + #define SSS_REG_HASH_CTRL_PAUSE 0x04 161 + 162 + #define SSS_HASH_PAUSE BIT(0) 163 + 164 + #define SSS_REG_HASH_CTRL_FIFO 0x08 165 + 166 + #define SSS_HASH_FIFO_MODE_DMA BIT(0) 167 + #define SSS_HASH_FIFO_MODE_CPU 0 168 + 169 + #define SSS_REG_HASH_CTRL_SWAP 0x0C 170 + 171 + #define SSS_HASH_BYTESWAP_DI 
BIT(3) 172 + #define SSS_HASH_BYTESWAP_DO BIT(2) 173 + #define SSS_HASH_BYTESWAP_IV BIT(1) 174 + #define SSS_HASH_BYTESWAP_KEY BIT(0) 175 + 176 + #define SSS_REG_HASH_STATUS 0x10 177 + 178 + #define SSS_HASH_STATUS_MSG_DONE BIT(6) 179 + #define SSS_HASH_STATUS_PARTIAL_DONE BIT(4) 180 + #define SSS_HASH_STATUS_BUFFER_READY BIT(0) 181 + 182 + #define SSS_REG_HASH_MSG_SIZE_LOW 0x20 183 + #define SSS_REG_HASH_MSG_SIZE_HIGH 0x24 184 + 185 + #define SSS_REG_HASH_PRE_MSG_SIZE_LOW 0x28 186 + #define SSS_REG_HASH_PRE_MSG_SIZE_HIGH 0x2C 187 + 188 + #define SSS_REG_HASH_IV(s) (0xB0 + ((s) << 2)) 189 + #define SSS_REG_HASH_OUT(s) (0x100 + ((s) << 2)) 190 + 191 + #define HASH_BLOCK_SIZE 64 192 + #define HASH_REG_SIZEOF 4 193 + #define HASH_MD5_MAX_REG (MD5_DIGEST_SIZE / HASH_REG_SIZEOF) 194 + #define HASH_SHA1_MAX_REG (SHA1_DIGEST_SIZE / HASH_REG_SIZEOF) 195 + #define HASH_SHA256_MAX_REG (SHA256_DIGEST_SIZE / HASH_REG_SIZEOF) 196 + 197 + /* 198 + * HASH bit numbers, used by device, setting in dev->hash_flags with 199 + * functions set_bit(), clear_bit() or tested with test_bit() or BIT(), 200 + * to keep HASH state BUSY or FREE, or to signal state from irq_handler 201 + * to hash_tasklet. SGS keep track of allocated memory for scatterlist 202 + */ 203 + #define HASH_FLAGS_BUSY 0 204 + #define HASH_FLAGS_FINAL 1 205 + #define HASH_FLAGS_DMA_ACTIVE 2 206 + #define HASH_FLAGS_OUTPUT_READY 3 207 + #define HASH_FLAGS_DMA_READY 4 208 + #define HASH_FLAGS_SGS_COPIED 5 209 + #define HASH_FLAGS_SGS_ALLOCED 6 210 + 211 + /* HASH HW constants */ 212 + #define BUFLEN HASH_BLOCK_SIZE 213 + 214 + #define SSS_HASH_DMA_LEN_ALIGN 8 215 + #define SSS_HASH_DMA_ALIGN_MASK (SSS_HASH_DMA_LEN_ALIGN - 1) 216 + 217 + #define SSS_HASH_QUEUE_LENGTH 10 164 218 165 219 /** 166 220 * struct samsung_aes_variant - platform specific SSS driver data 167 221 * @aes_offset: AES register offset from SSS module's base. 222 + * @hash_offset: HASH register offset from SSS module's base. 
168 223 * 169 224 * Specifies platform specific configuration of SSS module. 170 225 * Note: A structure for driver specific platform data is used for future ··· 243 156 */ 244 157 struct samsung_aes_variant { 245 158 unsigned int aes_offset; 159 + unsigned int hash_offset; 246 160 }; 247 161 248 162 struct s5p_aes_reqctx { ··· 283 195 * protects against concurrent access to these fields. 284 196 * @lock: Lock for protecting both access to device hardware registers 285 197 * and fields related to current request (including the busy field). 198 + * @res: Resources for hash. 199 + * @io_hash_base: Per-variant offset for HASH block IO memory. 200 + * @hash_lock: Lock for protecting hash_req, hash_queue and hash_flags 201 + * variable. 202 + * @hash_flags: Flags for current HASH op. 203 + * @hash_queue: Async hash queue. 204 + * @hash_tasklet: New HASH request scheduling job. 205 + * @xmit_buf: Buffer for current HASH request transfer into SSS block. 206 + * @hash_req: Current request sending to SSS HASH block. 207 + * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block. 208 + * @hash_sg_cnt: Counter for hash_sg_iter. 
209 + * 210 + * @use_hash: true if HASH algs enabled 286 211 */ 287 212 struct s5p_aes_dev { 288 213 struct device *dev; ··· 316 215 struct crypto_queue queue; 317 216 bool busy; 318 217 spinlock_t lock; 218 + 219 + struct resource *res; 220 + void __iomem *io_hash_base; 221 + 222 + spinlock_t hash_lock; /* protect hash_ vars */ 223 + unsigned long hash_flags; 224 + struct crypto_queue hash_queue; 225 + struct tasklet_struct hash_tasklet; 226 + 227 + u8 xmit_buf[BUFLEN]; 228 + struct ahash_request *hash_req; 229 + struct scatterlist *hash_sg_iter; 230 + unsigned int hash_sg_cnt; 231 + 232 + bool use_hash; 319 233 }; 320 234 321 - static struct s5p_aes_dev *s5p_dev; 235 + /** 236 + * struct s5p_hash_reqctx - HASH request context 237 + * @dd: Associated device 238 + * @op_update: Current request operation (OP_UPDATE or OP_FINAL) 239 + * @digcnt: Number of bytes processed by HW (without buffer[] ones) 240 + * @digest: Digest message or IV for partial result 241 + * @nregs: Number of HW registers for digest or IV read/write 242 + * @engine: Bits for selecting type of HASH in SSS block 243 + * @sg: sg for DMA transfer 244 + * @sg_len: Length of sg for DMA transfer 245 + * @sgl[]: sg for joining buffer and req->src scatterlist 246 + * @skip: Skip offset in req->src for current op 247 + * @total: Total number of bytes for current request 248 + * @finup: Keep state for finup or final. 249 + * @error: Keep track of error. 
250 + * @bufcnt: Number of bytes holded in buffer[] 251 + * @buffer[]: For byte(s) from end of req->src in UPDATE op 252 + */ 253 + struct s5p_hash_reqctx { 254 + struct s5p_aes_dev *dd; 255 + bool op_update; 256 + 257 + u64 digcnt; 258 + u8 digest[SHA256_DIGEST_SIZE]; 259 + 260 + unsigned int nregs; /* digest_size / sizeof(reg) */ 261 + u32 engine; 262 + 263 + struct scatterlist *sg; 264 + unsigned int sg_len; 265 + struct scatterlist sgl[2]; 266 + unsigned int skip; 267 + unsigned int total; 268 + bool finup; 269 + bool error; 270 + 271 + u32 bufcnt; 272 + u8 buffer[0]; 273 + }; 274 + 275 + /** 276 + * struct s5p_hash_ctx - HASH transformation context 277 + * @dd: Associated device 278 + * @flags: Bits for algorithm HASH. 279 + * @fallback: Software transformation for zero message or size < BUFLEN. 280 + */ 281 + struct s5p_hash_ctx { 282 + struct s5p_aes_dev *dd; 283 + unsigned long flags; 284 + struct crypto_shash *fallback; 285 + }; 322 286 323 287 static const struct samsung_aes_variant s5p_aes_data = { 324 288 .aes_offset = 0x4000, 289 + .hash_offset = 0x6000, 325 290 }; 326 291 327 292 static const struct samsung_aes_variant exynos_aes_data = { 328 293 .aes_offset = 0x200, 294 + .hash_offset = 0x400, 329 295 }; 330 296 331 297 static const struct of_device_id s5p_sss_dt_match[] = { ··· 421 253 return (struct samsung_aes_variant *) 422 254 platform_get_device_id(pdev)->driver_data; 423 255 } 256 + 257 + static struct s5p_aes_dev *s5p_dev; 424 258 425 259 static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg) 426 260 { ··· 606 436 return ret; 607 437 } 608 438 439 + static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset) 440 + { 441 + return __raw_readl(dd->io_hash_base + offset); 442 + } 443 + 444 + static inline void s5p_hash_write(struct s5p_aes_dev *dd, 445 + u32 offset, u32 value) 446 + { 447 + __raw_writel(value, dd->io_hash_base + offset); 448 + } 449 + 450 + /** 451 + * s5p_set_dma_hashdata() - start DMA with sg 
452 + * @dev: device 453 + * @sg: scatterlist ready to DMA transmit 454 + */ 455 + static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev, 456 + struct scatterlist *sg) 457 + { 458 + dev->hash_sg_cnt--; 459 + SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg)); 460 + SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */ 461 + } 462 + 463 + /** 464 + * s5p_hash_rx() - get next hash_sg_iter 465 + * @dev: device 466 + * 467 + * Return: 468 + * 2 if there is no more data and it is UPDATE op 469 + * 1 if new receiving (input) data is ready and can be written to device 470 + * 0 if there is no more data and it is FINAL op 471 + */ 472 + static int s5p_hash_rx(struct s5p_aes_dev *dev) 473 + { 474 + if (dev->hash_sg_cnt > 0) { 475 + dev->hash_sg_iter = sg_next(dev->hash_sg_iter); 476 + return 1; 477 + } 478 + 479 + set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags); 480 + if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags)) 481 + return 0; 482 + 483 + return 2; 484 + } 485 + 609 486 static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id) 610 487 { 611 488 struct platform_device *pdev = dev_id; 612 489 struct s5p_aes_dev *dev = platform_get_drvdata(pdev); 613 490 int err_dma_tx = 0; 614 491 int err_dma_rx = 0; 492 + int err_dma_hx = 0; 615 493 bool tx_end = false; 494 + bool hx_end = false; 616 495 unsigned long flags; 617 496 uint32_t status; 497 + u32 st_bits; 618 498 int err; 619 499 620 500 spin_lock_irqsave(&dev->lock, flags); ··· 676 456 * 677 457 * If there is no more data in tx scatter list, call s5p_aes_complete() 678 458 * and schedule new tasklet. 459 + * 460 + * Handle hx interrupt. If there is still data map next entry. 
679 461 */ 680 462 status = SSS_READ(dev, FCINTSTAT); 681 463 if (status & SSS_FCINTSTAT_BRDMAINT) ··· 689 467 err_dma_tx = s5p_aes_tx(dev); 690 468 } 691 469 692 - SSS_WRITE(dev, FCINTPEND, status); 470 + if (status & SSS_FCINTSTAT_HRDMAINT) 471 + err_dma_hx = s5p_hash_rx(dev); 472 + 473 + st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT | 474 + SSS_FCINTSTAT_HRDMAINT); 475 + /* clear DMA bits */ 476 + SSS_WRITE(dev, FCINTPEND, st_bits); 477 + 478 + /* clear HASH irq bits */ 479 + if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) { 480 + /* cannot have both HPART and HDONE */ 481 + if (status & SSS_FCINTSTAT_HPARTINT) 482 + st_bits = SSS_HASH_STATUS_PARTIAL_DONE; 483 + 484 + if (status & SSS_FCINTSTAT_HDONEINT) 485 + st_bits = SSS_HASH_STATUS_MSG_DONE; 486 + 487 + set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags); 488 + s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits); 489 + hx_end = true; 490 + /* when DONE or PART, do not handle HASH DMA */ 491 + err_dma_hx = 0; 492 + } 693 493 694 494 if (err_dma_rx < 0) { 695 495 err = err_dma_rx; ··· 724 480 725 481 if (tx_end) { 726 482 s5p_sg_done(dev); 483 + if (err_dma_hx == 1) 484 + s5p_set_dma_hashdata(dev, dev->hash_sg_iter); 727 485 728 486 spin_unlock_irqrestore(&dev->lock, flags); 729 487 ··· 743 497 s5p_set_dma_outdata(dev, dev->sg_dst); 744 498 if (err_dma_rx == 1) 745 499 s5p_set_dma_indata(dev, dev->sg_src); 500 + if (err_dma_hx == 1) 501 + s5p_set_dma_hashdata(dev, dev->hash_sg_iter); 746 502 747 503 spin_unlock_irqrestore(&dev->lock, flags); 748 504 } 749 505 750 - return IRQ_HANDLED; 506 + goto hash_irq_end; 751 507 752 508 error: 753 509 s5p_sg_done(dev); 754 510 dev->busy = false; 511 + if (err_dma_hx == 1) 512 + s5p_set_dma_hashdata(dev, dev->hash_sg_iter); 513 + 755 514 spin_unlock_irqrestore(&dev->lock, flags); 756 515 s5p_aes_complete(dev, err); 757 516 517 + hash_irq_end: 518 + /* 519 + * Note about else if: 520 + * when hash_sg_iter reaches end and its UPDATE op, 521 
+ * issue SSS_HASH_PAUSE and wait for HPART irq 522 + */ 523 + if (hx_end) 524 + tasklet_schedule(&dev->hash_tasklet); 525 + else if (err_dma_hx == 2) 526 + s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE, 527 + SSS_HASH_PAUSE); 528 + 758 529 return IRQ_HANDLED; 759 530 } 531 + 532 + /** 533 + * s5p_hash_read_msg() - read message or IV from HW 534 + * @req: AHASH request 535 + */ 536 + static void s5p_hash_read_msg(struct ahash_request *req) 537 + { 538 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); 539 + struct s5p_aes_dev *dd = ctx->dd; 540 + u32 *hash = (u32 *)ctx->digest; 541 + unsigned int i; 542 + 543 + for (i = 0; i < ctx->nregs; i++) 544 + hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i)); 545 + } 546 + 547 + /** 548 + * s5p_hash_write_ctx_iv() - write IV for next partial/finup op. 549 + * @dd: device 550 + * @ctx: request context 551 + */ 552 + static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd, 553 + struct s5p_hash_reqctx *ctx) 554 + { 555 + u32 *hash = (u32 *)ctx->digest; 556 + unsigned int i; 557 + 558 + for (i = 0; i < ctx->nregs; i++) 559 + s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]); 560 + } 561 + 562 + /** 563 + * s5p_hash_write_iv() - write IV for next partial/finup op. 
564 + * @req: AHASH request 565 + */ 566 + static void s5p_hash_write_iv(struct ahash_request *req) 567 + { 568 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); 569 + 570 + s5p_hash_write_ctx_iv(ctx->dd, ctx); 571 + } 572 + 573 + /** 574 + * s5p_hash_copy_result() - copy digest into req->result 575 + * @req: AHASH request 576 + */ 577 + static void s5p_hash_copy_result(struct ahash_request *req) 578 + { 579 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); 580 + 581 + if (!req->result) 582 + return; 583 + 584 + memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF); 585 + } 586 + 587 + /** 588 + * s5p_hash_dma_flush() - flush HASH DMA 589 + * @dev: secss device 590 + */ 591 + static void s5p_hash_dma_flush(struct s5p_aes_dev *dev) 592 + { 593 + SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH); 594 + } 595 + 596 + /** 597 + * s5p_hash_dma_enable() - enable DMA mode for HASH 598 + * @dev: secss device 599 + * 600 + * enable DMA mode for HASH 601 + */ 602 + static void s5p_hash_dma_enable(struct s5p_aes_dev *dev) 603 + { 604 + s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA); 605 + } 606 + 607 + /** 608 + * s5p_hash_irq_disable() - disable irq HASH signals 609 + * @dev: secss device 610 + * @flags: bitfield with irq's to be disabled 611 + */ 612 + static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags) 613 + { 614 + SSS_WRITE(dev, FCINTENCLR, flags); 615 + } 616 + 617 + /** 618 + * s5p_hash_irq_enable() - enable irq signals 619 + * @dev: secss device 620 + * @flags: bitfield with irq's to be enabled 621 + */ 622 + static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags) 623 + { 624 + SSS_WRITE(dev, FCINTENSET, flags); 625 + } 626 + 627 + /** 628 + * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH 629 + * @dev: secss device 630 + * @hashflow: HASH stream flow with/without crypto AES/DES 631 + */ 632 + static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow) 633 + { 634 + 
unsigned long flags; 635 + u32 flow; 636 + 637 + spin_lock_irqsave(&dev->lock, flags); 638 + 639 + flow = SSS_READ(dev, FCFIFOCTRL); 640 + flow &= ~SSS_HASHIN_MASK; 641 + flow |= hashflow; 642 + SSS_WRITE(dev, FCFIFOCTRL, flow); 643 + 644 + spin_unlock_irqrestore(&dev->lock, flags); 645 + } 646 + 647 + /** 648 + * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS 649 + * @dev: secss device 650 + * @hashflow: HASH stream flow with/without AES/DES 651 + * 652 + * flush HASH DMA and enable DMA, set HASH stream flow inside SecSS HW, 653 + * enable HASH irq's HRDMA, HDONE, HPART 654 + */ 655 + static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow) 656 + { 657 + s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR | 658 + SSS_FCINTENCLR_HDONEINTENCLR | 659 + SSS_FCINTENCLR_HPARTINTENCLR); 660 + s5p_hash_dma_flush(dev); 661 + 662 + s5p_hash_dma_enable(dev); 663 + s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK); 664 + s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET | 665 + SSS_FCINTENSET_HDONEINTENSET | 666 + SSS_FCINTENSET_HPARTINTENSET); 667 + } 668 + 669 + /** 670 + * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing 671 + * @dd: secss device 672 + * @length: length for request 673 + * @final: true if final op 674 + * 675 + * Prepare SSS HASH block for processing bytes in DMA mode. If it is called 676 + * after previous updates, fill up IV words. For final, calculate and set 677 + * lengths for HASH so SecSS can finalize hash. For partial, set SSS HASH 678 + * length as 2^63 so it will be never reached and set to zero prelow and 679 + * prehigh. 680 + * 681 + * This function does not start DMA transfer. 
682 + */ 683 + static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length, 684 + bool final) 685 + { 686 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req); 687 + u32 prelow, prehigh, low, high; 688 + u32 configflags, swapflags; 689 + u64 tmplen; 690 + 691 + configflags = ctx->engine | SSS_HASH_INIT_BIT; 692 + 693 + if (likely(ctx->digcnt)) { 694 + s5p_hash_write_ctx_iv(dd, ctx); 695 + configflags |= SSS_HASH_USER_IV_EN; 696 + } 697 + 698 + if (final) { 699 + /* number of bytes for last part */ 700 + low = length; 701 + high = 0; 702 + /* total number of bits prev hashed */ 703 + tmplen = ctx->digcnt * 8; 704 + prelow = (u32)tmplen; 705 + prehigh = (u32)(tmplen >> 32); 706 + } else { 707 + prelow = 0; 708 + prehigh = 0; 709 + low = 0; 710 + high = BIT(31); 711 + } 712 + 713 + swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO | 714 + SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY; 715 + 716 + s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low); 717 + s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high); 718 + s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow); 719 + s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh); 720 + 721 + s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags); 722 + s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags); 723 + } 724 + 725 + /** 726 + * s5p_hash_xmit_dma() - start DMA hash processing 727 + * @dd: secss device 728 + * @length: length for request 729 + * @final: true if final op 730 + * 731 + * Update digcnt here, as it is needed for finup/final op. 
732 + */ 733 + static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length, 734 + bool final) 735 + { 736 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req); 737 + unsigned int cnt; 738 + 739 + cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE); 740 + if (!cnt) { 741 + dev_err(dd->dev, "dma_map_sg error\n"); 742 + ctx->error = true; 743 + return -EINVAL; 744 + } 745 + 746 + set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags); 747 + dd->hash_sg_iter = ctx->sg; 748 + dd->hash_sg_cnt = cnt; 749 + s5p_hash_write_ctrl(dd, length, final); 750 + ctx->digcnt += length; 751 + ctx->total -= length; 752 + 753 + /* catch last interrupt */ 754 + if (final) 755 + set_bit(HASH_FLAGS_FINAL, &dd->hash_flags); 756 + 757 + s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */ 758 + 759 + return -EINPROGRESS; 760 + } 761 + 762 + /** 763 + * s5p_hash_copy_sgs() - copy request's bytes into new buffer 764 + * @ctx: request context 765 + * @sg: source scatterlist request 766 + * @new_len: number of bytes to process from sg 767 + * 768 + * Allocate new buffer, copy data for HASH into it. If there was xmit_buf 769 + * filled, copy it first, then copy data from sg into it. Prepare one sgl[0] 770 + * with allocated buffer. 771 + * 772 + * Set bit in dd->hash_flag so we can free it after irq ends processing. 
773 + */ 774 + static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx, 775 + struct scatterlist *sg, unsigned int new_len) 776 + { 777 + unsigned int pages, len; 778 + void *buf; 779 + 780 + len = new_len + ctx->bufcnt; 781 + pages = get_order(len); 782 + 783 + buf = (void *)__get_free_pages(GFP_ATOMIC, pages); 784 + if (!buf) { 785 + dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n"); 786 + ctx->error = true; 787 + return -ENOMEM; 788 + } 789 + 790 + if (ctx->bufcnt) 791 + memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt); 792 + 793 + scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip, 794 + new_len, 0); 795 + sg_init_table(ctx->sgl, 1); 796 + sg_set_buf(ctx->sgl, buf, len); 797 + ctx->sg = ctx->sgl; 798 + ctx->sg_len = 1; 799 + ctx->bufcnt = 0; 800 + ctx->skip = 0; 801 + set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags); 802 + 803 + return 0; 804 + } 805 + 806 + /** 807 + * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy 808 + * @ctx: request context 809 + * @sg: source scatterlist request 810 + * @new_len: number of bytes to process from sg 811 + * 812 + * Allocate new scatterlist table, copy data for HASH into it. If there was 813 + * xmit_buf filled, prepare it first, then copy page, length and offset from 814 + * source sg into it, adjusting begin and/or end for skip offset and 815 + * hash_later value. 816 + * 817 + * Resulting sg table will be assigned to ctx->sg. Set flag so we can free 818 + * it after irq ends processing. 
819 + */ 820 + static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx, 821 + struct scatterlist *sg, unsigned int new_len) 822 + { 823 + unsigned int skip = ctx->skip, n = sg_nents(sg); 824 + struct scatterlist *tmp; 825 + unsigned int len; 826 + 827 + if (ctx->bufcnt) 828 + n++; 829 + 830 + ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL); 831 + if (!ctx->sg) { 832 + ctx->error = true; 833 + return -ENOMEM; 834 + } 835 + 836 + sg_init_table(ctx->sg, n); 837 + 838 + tmp = ctx->sg; 839 + 840 + ctx->sg_len = 0; 841 + 842 + if (ctx->bufcnt) { 843 + sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt); 844 + tmp = sg_next(tmp); 845 + ctx->sg_len++; 846 + } 847 + 848 + while (sg && skip >= sg->length) { 849 + skip -= sg->length; 850 + sg = sg_next(sg); 851 + } 852 + 853 + while (sg && new_len) { 854 + len = sg->length - skip; 855 + if (new_len < len) 856 + len = new_len; 857 + 858 + new_len -= len; 859 + sg_set_page(tmp, sg_page(sg), len, sg->offset + skip); 860 + skip = 0; 861 + if (new_len <= 0) 862 + sg_mark_end(tmp); 863 + 864 + tmp = sg_next(tmp); 865 + ctx->sg_len++; 866 + sg = sg_next(sg); 867 + } 868 + 869 + set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags); 870 + 871 + return 0; 872 + } 873 + 874 + /** 875 + * s5p_hash_prepare_sgs() - prepare sg for processing 876 + * @ctx: request context 877 + * @sg: source scatterlist request 878 + * @nbytes: number of bytes to process from sg 879 + * @final: final flag 880 + * 881 + * Check two conditions: (1) if buffers in sg have len aligned data, and (2) 882 + * sg table have good aligned elements (list_ok). If one of this checks fails, 883 + * then either (1) allocates new buffer for data with s5p_hash_copy_sgs, copy 884 + * data into this buffer and prepare request in sgl, or (2) allocates new sg 885 + * table and prepare sg elements. 886 + * 887 + * For digest or finup all conditions can be good, and we may not need any 888 + * fixes. 
889 + */ 890 + static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx, 891 + struct scatterlist *sg, 892 + unsigned int new_len, bool final) 893 + { 894 + unsigned int skip = ctx->skip, nbytes = new_len, n = 0; 895 + bool aligned = true, list_ok = true; 896 + struct scatterlist *sg_tmp = sg; 897 + 898 + if (!sg || !sg->length || !new_len) 899 + return 0; 900 + 901 + if (skip || !final) 902 + list_ok = false; 903 + 904 + while (nbytes > 0 && sg_tmp) { 905 + n++; 906 + if (skip >= sg_tmp->length) { 907 + skip -= sg_tmp->length; 908 + if (!sg_tmp->length) { 909 + aligned = false; 910 + break; 911 + } 912 + } else { 913 + if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) { 914 + aligned = false; 915 + break; 916 + } 917 + 918 + if (nbytes < sg_tmp->length - skip) { 919 + list_ok = false; 920 + break; 921 + } 922 + 923 + nbytes -= sg_tmp->length - skip; 924 + skip = 0; 925 + } 926 + 927 + sg_tmp = sg_next(sg_tmp); 928 + } 929 + 930 + if (!aligned) 931 + return s5p_hash_copy_sgs(ctx, sg, new_len); 932 + else if (!list_ok) 933 + return s5p_hash_copy_sg_lists(ctx, sg, new_len); 934 + 935 + /* 936 + * Have aligned data from previous operation and/or current 937 + * Note: will enter here only if (digest or finup) and aligned 938 + */ 939 + if (ctx->bufcnt) { 940 + ctx->sg_len = n; 941 + sg_init_table(ctx->sgl, 2); 942 + sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt); 943 + sg_chain(ctx->sgl, 2, sg); 944 + ctx->sg = ctx->sgl; 945 + ctx->sg_len++; 946 + } else { 947 + ctx->sg = sg; 948 + ctx->sg_len = n; 949 + } 950 + 951 + return 0; 952 + } 953 + 954 + /** 955 + * s5p_hash_prepare_request() - prepare request for processing 956 + * @req: AHASH request 957 + * @update: true if UPDATE op 958 + * 959 + * Note 1: we can have update flag _and_ final flag at the same time. 
960 + * Note 2: we enter here when digcnt > BUFLEN (=HASH_BLOCK_SIZE) or 961 + * either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN or 962 + * we have final op 963 + */ 964 + static int s5p_hash_prepare_request(struct ahash_request *req, bool update) 965 + { 966 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); 967 + bool final = ctx->finup; 968 + int xmit_len, hash_later, nbytes; 969 + int ret; 970 + 971 + if (!req) 972 + return 0; 973 + 974 + if (update) 975 + nbytes = req->nbytes; 976 + else 977 + nbytes = 0; 978 + 979 + ctx->total = nbytes + ctx->bufcnt; 980 + if (!ctx->total) 981 + return 0; 982 + 983 + if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) { 984 + /* bytes left from previous request, so fill up to BUFLEN */ 985 + int len = BUFLEN - ctx->bufcnt % BUFLEN; 986 + 987 + if (len > nbytes) 988 + len = nbytes; 989 + 990 + scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, 991 + 0, len, 0); 992 + ctx->bufcnt += len; 993 + nbytes -= len; 994 + ctx->skip = len; 995 + } else { 996 + ctx->skip = 0; 997 + } 998 + 999 + if (ctx->bufcnt) 1000 + memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt); 1001 + 1002 + xmit_len = ctx->total; 1003 + if (final) { 1004 + hash_later = 0; 1005 + } else { 1006 + if (IS_ALIGNED(xmit_len, BUFLEN)) 1007 + xmit_len -= BUFLEN; 1008 + else 1009 + xmit_len -= xmit_len & (BUFLEN - 1); 1010 + 1011 + hash_later = ctx->total - xmit_len; 1012 + /* copy hash_later bytes from end of req->src */ 1013 + /* previous bytes are in xmit_buf, so no overwrite */ 1014 + scatterwalk_map_and_copy(ctx->buffer, req->src, 1015 + req->nbytes - hash_later, 1016 + hash_later, 0); 1017 + } 1018 + 1019 + if (xmit_len > BUFLEN) { 1020 + ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later, 1021 + final); 1022 + if (ret) 1023 + return ret; 1024 + } else { 1025 + /* have buffered data only */ 1026 + if (unlikely(!ctx->bufcnt)) { 1027 + /* first update didn't fill up buffer */ 1028 + scatterwalk_map_and_copy(ctx->dd->xmit_buf, 
req->src, 1029 + 0, xmit_len, 0); 1030 + } 1031 + 1032 + sg_init_table(ctx->sgl, 1); 1033 + sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len); 1034 + 1035 + ctx->sg = ctx->sgl; 1036 + ctx->sg_len = 1; 1037 + } 1038 + 1039 + ctx->bufcnt = hash_later; 1040 + if (!final) 1041 + ctx->total = xmit_len; 1042 + 1043 + return 0; 1044 + } 1045 + 1046 + /** 1047 + * s5p_hash_update_dma_stop() - unmap DMA 1048 + * @dd: secss device 1049 + * 1050 + * Unmap scatterlist ctx->sg. 1051 + */ 1052 + static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd) 1053 + { 1054 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req); 1055 + 1056 + dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE); 1057 + clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags); 1058 + } 1059 + 1060 + /** 1061 + * s5p_hash_finish() - copy calculated digest to crypto layer 1062 + * @req: AHASH request 1063 + */ 1064 + static void s5p_hash_finish(struct ahash_request *req) 1065 + { 1066 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); 1067 + struct s5p_aes_dev *dd = ctx->dd; 1068 + 1069 + if (ctx->digcnt) 1070 + s5p_hash_copy_result(req); 1071 + 1072 + dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt); 1073 + } 1074 + 1075 + /** 1076 + * s5p_hash_finish_req() - finish request 1077 + * @req: AHASH request 1078 + * @err: error 1079 + */ 1080 + static void s5p_hash_finish_req(struct ahash_request *req, int err) 1081 + { 1082 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); 1083 + struct s5p_aes_dev *dd = ctx->dd; 1084 + unsigned long flags; 1085 + 1086 + if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags)) 1087 + free_pages((unsigned long)sg_virt(ctx->sg), 1088 + get_order(ctx->sg->length)); 1089 + 1090 + if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags)) 1091 + kfree(ctx->sg); 1092 + 1093 + ctx->sg = NULL; 1094 + dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) | 1095 + BIT(HASH_FLAGS_SGS_COPIED)); 1096 + 1097 + if (!err && !ctx->error) { 1098 + s5p_hash_read_msg(req); 
1099 + if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags)) 1100 + s5p_hash_finish(req); 1101 + } else { 1102 + ctx->error = true; 1103 + } 1104 + 1105 + spin_lock_irqsave(&dd->hash_lock, flags); 1106 + dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) | 1107 + BIT(HASH_FLAGS_DMA_READY) | 1108 + BIT(HASH_FLAGS_OUTPUT_READY)); 1109 + spin_unlock_irqrestore(&dd->hash_lock, flags); 1110 + 1111 + if (req->base.complete) 1112 + req->base.complete(&req->base, err); 1113 + } 1114 + 1115 + /** 1116 + * s5p_hash_handle_queue() - handle hash queue 1117 + * @dd: device s5p_aes_dev 1118 + * @req: AHASH request 1119 + * 1120 + * If req!=NULL enqueue it on dd->queue, if FLAGS_BUSY is not set on the 1121 + * device then processes the first request from the dd->queue 1122 + * 1123 + * Returns: see s5p_hash_final below. 1124 + */ 1125 + static int s5p_hash_handle_queue(struct s5p_aes_dev *dd, 1126 + struct ahash_request *req) 1127 + { 1128 + struct crypto_async_request *async_req, *backlog; 1129 + struct s5p_hash_reqctx *ctx; 1130 + unsigned long flags; 1131 + int err = 0, ret = 0; 1132 + 1133 + retry: 1134 + spin_lock_irqsave(&dd->hash_lock, flags); 1135 + if (req) 1136 + ret = ahash_enqueue_request(&dd->hash_queue, req); 1137 + 1138 + if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) { 1139 + spin_unlock_irqrestore(&dd->hash_lock, flags); 1140 + return ret; 1141 + } 1142 + 1143 + backlog = crypto_get_backlog(&dd->hash_queue); 1144 + async_req = crypto_dequeue_request(&dd->hash_queue); 1145 + if (async_req) 1146 + set_bit(HASH_FLAGS_BUSY, &dd->hash_flags); 1147 + 1148 + spin_unlock_irqrestore(&dd->hash_lock, flags); 1149 + 1150 + if (!async_req) 1151 + return ret; 1152 + 1153 + if (backlog) 1154 + backlog->complete(backlog, -EINPROGRESS); 1155 + 1156 + req = ahash_request_cast(async_req); 1157 + dd->hash_req = req; 1158 + ctx = ahash_request_ctx(req); 1159 + 1160 + err = s5p_hash_prepare_request(req, ctx->op_update); 1161 + if (err || !ctx->total) 1162 + goto out; 1163 + 
1164 + dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n", 1165 + ctx->op_update, req->nbytes); 1166 + 1167 + s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT); 1168 + if (ctx->digcnt) 1169 + s5p_hash_write_iv(req); /* restore hash IV */ 1170 + 1171 + if (ctx->op_update) { /* HASH_OP_UPDATE */ 1172 + err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup); 1173 + if (err != -EINPROGRESS && ctx->finup && !ctx->error) 1174 + /* no final() after finup() */ 1175 + err = s5p_hash_xmit_dma(dd, ctx->total, true); 1176 + } else { /* HASH_OP_FINAL */ 1177 + err = s5p_hash_xmit_dma(dd, ctx->total, true); 1178 + } 1179 + out: 1180 + if (err != -EINPROGRESS) { 1181 + /* hash_tasklet_cb will not finish it, so do it here */ 1182 + s5p_hash_finish_req(req, err); 1183 + req = NULL; 1184 + 1185 + /* 1186 + * Execute next request immediately if there is anything 1187 + * in queue. 1188 + */ 1189 + goto retry; 1190 + } 1191 + 1192 + return ret; 1193 + } 1194 + 1195 + /** 1196 + * s5p_hash_tasklet_cb() - hash tasklet 1197 + * @data: ptr to s5p_aes_dev 1198 + */ 1199 + static void s5p_hash_tasklet_cb(unsigned long data) 1200 + { 1201 + struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data; 1202 + 1203 + if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) { 1204 + s5p_hash_handle_queue(dd, NULL); 1205 + return; 1206 + } 1207 + 1208 + if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) { 1209 + if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE, 1210 + &dd->hash_flags)) { 1211 + s5p_hash_update_dma_stop(dd); 1212 + } 1213 + 1214 + if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY, 1215 + &dd->hash_flags)) { 1216 + /* hash or semi-hash ready */ 1217 + clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags); 1218 + goto finish; 1219 + } 1220 + } 1221 + 1222 + return; 1223 + 1224 + finish: 1225 + /* finish curent request */ 1226 + s5p_hash_finish_req(dd->hash_req, 0); 1227 + 1228 + /* If we are not busy, process next req */ 1229 + if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) 1230 + 
s5p_hash_handle_queue(dd, NULL); 1231 + } 1232 + 1233 + /** 1234 + * s5p_hash_enqueue() - enqueue request 1235 + * @req: AHASH request 1236 + * @op: operation UPDATE (true) or FINAL (false) 1237 + * 1238 + * Returns: see s5p_hash_final below. 1239 + */ 1240 + static int s5p_hash_enqueue(struct ahash_request *req, bool op) 1241 + { 1242 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); 1243 + struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 1244 + 1245 + ctx->op_update = op; 1246 + 1247 + return s5p_hash_handle_queue(tctx->dd, req); 1248 + } 1249 + 1250 + /** 1251 + * s5p_hash_update() - process the hash input data 1252 + * @req: AHASH request 1253 + * 1254 + * If request will fit in buffer, copy it and return immediately 1255 + * else enqueue it with OP_UPDATE. 1256 + * 1257 + * Returns: see s5p_hash_final below. 1258 + */ 1259 + static int s5p_hash_update(struct ahash_request *req) 1260 + { 1261 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); 1262 + 1263 + if (!req->nbytes) 1264 + return 0; 1265 + 1266 + if (ctx->bufcnt + req->nbytes <= BUFLEN) { 1267 + scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, 1268 + 0, req->nbytes, 0); 1269 + ctx->bufcnt += req->nbytes; 1270 + return 0; 1271 + } 1272 + 1273 + return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */ 1274 + } 1275 + 1276 + /** 1277 + * s5p_hash_shash_digest() - calculate shash digest 1278 + * @tfm: crypto transformation 1279 + * @flags: tfm flags 1280 + * @data: input data 1281 + * @len: length of data 1282 + * @out: output buffer 1283 + */ 1284 + static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags, 1285 + const u8 *data, unsigned int len, u8 *out) 1286 + { 1287 + SHASH_DESC_ON_STACK(shash, tfm); 1288 + 1289 + shash->tfm = tfm; 1290 + shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP; 1291 + 1292 + return crypto_shash_digest(shash, data, len, out); 1293 + } 1294 + 1295 + /** 1296 + * s5p_hash_final_shash() - calculate shash digest 1297 + * @req: AHASH 
request 1298 + */ 1299 + static int s5p_hash_final_shash(struct ahash_request *req) 1300 + { 1301 + struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 1302 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); 1303 + 1304 + return s5p_hash_shash_digest(tctx->fallback, req->base.flags, 1305 + ctx->buffer, ctx->bufcnt, req->result); 1306 + } 1307 + 1308 + /** 1309 + * s5p_hash_final() - close up hash and calculate digest 1310 + * @req: AHASH request 1311 + * 1312 + * Note: in final req->src do not have any data, and req->nbytes can be 1313 + * non-zero. 1314 + * 1315 + * If there were no input data processed yet and the buffered hash data is 1316 + * less than BUFLEN (64) then calculate the final hash immediately by using 1317 + * SW algorithm fallback. 1318 + * 1319 + * Otherwise enqueues the current AHASH request with OP_FINAL operation op 1320 + * and finalize hash message in HW. Note that if digcnt!=0 then there were 1321 + * previous update op, so there are always some buffered bytes in ctx->buffer, 1322 + * which means that ctx->bufcnt!=0 1323 + * 1324 + * Returns: 1325 + * 0 if the request has been processed immediately, 1326 + * -EINPROGRESS if the operation has been queued for later execution or is set 1327 + * to processing by HW, 1328 + * -EBUSY if queue is full and request should be resubmitted later, 1329 + * other negative values denotes an error. 
1330 + */ 1331 + static int s5p_hash_final(struct ahash_request *req) 1332 + { 1333 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); 1334 + 1335 + ctx->finup = true; 1336 + if (ctx->error) 1337 + return -EINVAL; /* uncompleted hash is not needed */ 1338 + 1339 + if (!ctx->digcnt && ctx->bufcnt < BUFLEN) 1340 + return s5p_hash_final_shash(req); 1341 + 1342 + return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */ 1343 + } 1344 + 1345 + /** 1346 + * s5p_hash_finup() - process last req->src and calculate digest 1347 + * @req: AHASH request containing the last update data 1348 + * 1349 + * Return values: see s5p_hash_final above. 1350 + */ 1351 + static int s5p_hash_finup(struct ahash_request *req) 1352 + { 1353 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); 1354 + int err1, err2; 1355 + 1356 + ctx->finup = true; 1357 + 1358 + err1 = s5p_hash_update(req); 1359 + if (err1 == -EINPROGRESS || err1 == -EBUSY) 1360 + return err1; 1361 + 1362 + /* 1363 + * final() has to be always called to cleanup resources even if 1364 + * update() failed, except EINPROGRESS or calculate digest for small 1365 + * size 1366 + */ 1367 + err2 = s5p_hash_final(req); 1368 + 1369 + return err1 ?: err2; 1370 + } 1371 + 1372 + /** 1373 + * s5p_hash_init() - initialize AHASH request contex 1374 + * @req: AHASH request 1375 + * 1376 + * Init async hash request context. 
1377 + */ 1378 + static int s5p_hash_init(struct ahash_request *req) 1379 + { 1380 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); 1381 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1382 + struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm); 1383 + 1384 + ctx->dd = tctx->dd; 1385 + ctx->error = false; 1386 + ctx->finup = false; 1387 + ctx->bufcnt = 0; 1388 + ctx->digcnt = 0; 1389 + ctx->total = 0; 1390 + ctx->skip = 0; 1391 + 1392 + dev_dbg(tctx->dd->dev, "init: digest size: %d\n", 1393 + crypto_ahash_digestsize(tfm)); 1394 + 1395 + switch (crypto_ahash_digestsize(tfm)) { 1396 + case MD5_DIGEST_SIZE: 1397 + ctx->engine = SSS_HASH_ENGINE_MD5; 1398 + ctx->nregs = HASH_MD5_MAX_REG; 1399 + break; 1400 + case SHA1_DIGEST_SIZE: 1401 + ctx->engine = SSS_HASH_ENGINE_SHA1; 1402 + ctx->nregs = HASH_SHA1_MAX_REG; 1403 + break; 1404 + case SHA256_DIGEST_SIZE: 1405 + ctx->engine = SSS_HASH_ENGINE_SHA256; 1406 + ctx->nregs = HASH_SHA256_MAX_REG; 1407 + break; 1408 + default: 1409 + ctx->error = true; 1410 + return -EINVAL; 1411 + } 1412 + 1413 + return 0; 1414 + } 1415 + 1416 + /** 1417 + * s5p_hash_digest - calculate digest from req->src 1418 + * @req: AHASH request 1419 + * 1420 + * Return values: see s5p_hash_final above. 1421 + */ 1422 + static int s5p_hash_digest(struct ahash_request *req) 1423 + { 1424 + return s5p_hash_init(req) ?: s5p_hash_finup(req); 1425 + } 1426 + 1427 + /** 1428 + * s5p_hash_cra_init_alg - init crypto alg transformation 1429 + * @tfm: crypto transformation 1430 + */ 1431 + static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm) 1432 + { 1433 + struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm); 1434 + const char *alg_name = crypto_tfm_alg_name(tfm); 1435 + 1436 + tctx->dd = s5p_dev; 1437 + /* Allocate a fallback and abort if it failed. 
*/ 1438 + tctx->fallback = crypto_alloc_shash(alg_name, 0, 1439 + CRYPTO_ALG_NEED_FALLBACK); 1440 + if (IS_ERR(tctx->fallback)) { 1441 + pr_err("fallback alloc fails for '%s'\n", alg_name); 1442 + return PTR_ERR(tctx->fallback); 1443 + } 1444 + 1445 + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1446 + sizeof(struct s5p_hash_reqctx) + BUFLEN); 1447 + 1448 + return 0; 1449 + } 1450 + 1451 + /** 1452 + * s5p_hash_cra_init - init crypto tfm 1453 + * @tfm: crypto transformation 1454 + */ 1455 + static int s5p_hash_cra_init(struct crypto_tfm *tfm) 1456 + { 1457 + return s5p_hash_cra_init_alg(tfm); 1458 + } 1459 + 1460 + /** 1461 + * s5p_hash_cra_exit - exit crypto tfm 1462 + * @tfm: crypto transformation 1463 + * 1464 + * free allocated fallback 1465 + */ 1466 + static void s5p_hash_cra_exit(struct crypto_tfm *tfm) 1467 + { 1468 + struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm); 1469 + 1470 + crypto_free_shash(tctx->fallback); 1471 + tctx->fallback = NULL; 1472 + } 1473 + 1474 + /** 1475 + * s5p_hash_export - export hash state 1476 + * @req: AHASH request 1477 + * @out: buffer for exported state 1478 + */ 1479 + static int s5p_hash_export(struct ahash_request *req, void *out) 1480 + { 1481 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); 1482 + 1483 + memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt); 1484 + 1485 + return 0; 1486 + } 1487 + 1488 + /** 1489 + * s5p_hash_import - import hash state 1490 + * @req: AHASH request 1491 + * @in: buffer with state to be imported from 1492 + */ 1493 + static int s5p_hash_import(struct ahash_request *req, const void *in) 1494 + { 1495 + struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); 1496 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1497 + struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm); 1498 + const struct s5p_hash_reqctx *ctx_in = in; 1499 + 1500 + memcpy(ctx, in, sizeof(*ctx) + BUFLEN); 1501 + if (ctx_in->bufcnt > BUFLEN) { 1502 + ctx->error = true; 1503 + return -EINVAL; 1504 + } 1505 + 1506 + ctx->dd 
= tctx->dd; 1507 + ctx->error = false; 1508 + 1509 + return 0; 1510 + } 1511 + 1512 + static struct ahash_alg algs_sha1_md5_sha256[] = { 1513 + { 1514 + .init = s5p_hash_init, 1515 + .update = s5p_hash_update, 1516 + .final = s5p_hash_final, 1517 + .finup = s5p_hash_finup, 1518 + .digest = s5p_hash_digest, 1519 + .export = s5p_hash_export, 1520 + .import = s5p_hash_import, 1521 + .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN, 1522 + .halg.digestsize = SHA1_DIGEST_SIZE, 1523 + .halg.base = { 1524 + .cra_name = "sha1", 1525 + .cra_driver_name = "exynos-sha1", 1526 + .cra_priority = 100, 1527 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1528 + CRYPTO_ALG_KERN_DRIVER_ONLY | 1529 + CRYPTO_ALG_ASYNC | 1530 + CRYPTO_ALG_NEED_FALLBACK, 1531 + .cra_blocksize = HASH_BLOCK_SIZE, 1532 + .cra_ctxsize = sizeof(struct s5p_hash_ctx), 1533 + .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK, 1534 + .cra_module = THIS_MODULE, 1535 + .cra_init = s5p_hash_cra_init, 1536 + .cra_exit = s5p_hash_cra_exit, 1537 + } 1538 + }, 1539 + { 1540 + .init = s5p_hash_init, 1541 + .update = s5p_hash_update, 1542 + .final = s5p_hash_final, 1543 + .finup = s5p_hash_finup, 1544 + .digest = s5p_hash_digest, 1545 + .export = s5p_hash_export, 1546 + .import = s5p_hash_import, 1547 + .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN, 1548 + .halg.digestsize = MD5_DIGEST_SIZE, 1549 + .halg.base = { 1550 + .cra_name = "md5", 1551 + .cra_driver_name = "exynos-md5", 1552 + .cra_priority = 100, 1553 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1554 + CRYPTO_ALG_KERN_DRIVER_ONLY | 1555 + CRYPTO_ALG_ASYNC | 1556 + CRYPTO_ALG_NEED_FALLBACK, 1557 + .cra_blocksize = HASH_BLOCK_SIZE, 1558 + .cra_ctxsize = sizeof(struct s5p_hash_ctx), 1559 + .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK, 1560 + .cra_module = THIS_MODULE, 1561 + .cra_init = s5p_hash_cra_init, 1562 + .cra_exit = s5p_hash_cra_exit, 1563 + } 1564 + }, 1565 + { 1566 + .init = s5p_hash_init, 1567 + .update = s5p_hash_update, 1568 + .final = s5p_hash_final, 1569 + 
.finup = s5p_hash_finup, 1570 + .digest = s5p_hash_digest, 1571 + .export = s5p_hash_export, 1572 + .import = s5p_hash_import, 1573 + .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN, 1574 + .halg.digestsize = SHA256_DIGEST_SIZE, 1575 + .halg.base = { 1576 + .cra_name = "sha256", 1577 + .cra_driver_name = "exynos-sha256", 1578 + .cra_priority = 100, 1579 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 1580 + CRYPTO_ALG_KERN_DRIVER_ONLY | 1581 + CRYPTO_ALG_ASYNC | 1582 + CRYPTO_ALG_NEED_FALLBACK, 1583 + .cra_blocksize = HASH_BLOCK_SIZE, 1584 + .cra_ctxsize = sizeof(struct s5p_hash_ctx), 1585 + .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK, 1586 + .cra_module = THIS_MODULE, 1587 + .cra_init = s5p_hash_cra_init, 1588 + .cra_exit = s5p_hash_cra_exit, 1589 + } 1590 + } 1591 + 1592 + }; 760 1593 761 1594 static void s5p_set_aes(struct s5p_aes_dev *dev, 762 1595 uint8_t *key, uint8_t *iv, unsigned int keylen) ··· 2154 829 struct samsung_aes_variant *variant; 2155 830 struct s5p_aes_dev *pdata; 2156 831 struct resource *res; 832 + unsigned int hash_i; 2157 833 2158 834 if (s5p_dev) 2159 835 return -EEXIST; ··· 2163 837 if (!pdata) 2164 838 return -ENOMEM; 2165 839 2166 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2167 - pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res); 2168 - if (IS_ERR(pdata->ioaddr)) 2169 - return PTR_ERR(pdata->ioaddr); 2170 - 2171 840 variant = find_s5p_sss_version(pdev); 841 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 842 + 843 + /* 844 + * Note: HASH and PRNG uses the same registers in secss, avoid 845 + * overwrite each other. This will drop HASH when CONFIG_EXYNOS_RNG 846 + * is enabled in config. 
We need larger size for HASH registers in 847 + * secss, current describe only AES/DES 848 + */ 849 + if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) { 850 + if (variant == &exynos_aes_data) { 851 + res->end += 0x300; 852 + pdata->use_hash = true; 853 + } 854 + } 855 + 856 + pdata->res = res; 857 + pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res); 858 + if (IS_ERR(pdata->ioaddr)) { 859 + if (!pdata->use_hash) 860 + return PTR_ERR(pdata->ioaddr); 861 + /* try AES without HASH */ 862 + res->end -= 0x300; 863 + pdata->use_hash = false; 864 + pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res); 865 + if (IS_ERR(pdata->ioaddr)) 866 + return PTR_ERR(pdata->ioaddr); 867 + } 2172 868 2173 869 pdata->clk = devm_clk_get(dev, "secss"); 2174 870 if (IS_ERR(pdata->clk)) { ··· 2205 857 } 2206 858 2207 859 spin_lock_init(&pdata->lock); 860 + spin_lock_init(&pdata->hash_lock); 2208 861 2209 862 pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset; 863 + pdata->io_hash_base = pdata->ioaddr + variant->hash_offset; 2210 864 2211 865 pdata->irq_fc = platform_get_irq(pdev, 0); 2212 866 if (pdata->irq_fc < 0) { ··· 2238 888 goto err_algs; 2239 889 } 2240 890 891 + if (pdata->use_hash) { 892 + tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb, 893 + (unsigned long)pdata); 894 + crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH); 895 + 896 + for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256); 897 + hash_i++) { 898 + struct ahash_alg *alg; 899 + 900 + alg = &algs_sha1_md5_sha256[hash_i]; 901 + err = crypto_register_ahash(alg); 902 + if (err) { 903 + dev_err(dev, "can't register '%s': %d\n", 904 + alg->halg.base.cra_driver_name, err); 905 + goto err_hash; 906 + } 907 + } 908 + } 909 + 2241 910 dev_info(dev, "s5p-sss driver registered\n"); 2242 911 2243 912 return 0; 2244 913 914 + err_hash: 915 + for (j = hash_i - 1; j >= 0; j--) 916 + crypto_unregister_ahash(&algs_sha1_md5_sha256[j]); 917 + 918 + tasklet_kill(&pdata->hash_tasklet); 919 + res->end -= 0x300; 
920 + 2245 921 err_algs: 2246 - dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err); 922 + if (i < ARRAY_SIZE(algs)) 923 + dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, 924 + err); 2247 925 2248 926 for (j = 0; j < i; j++) 2249 927 crypto_unregister_alg(&algs[j]); ··· 2298 920 crypto_unregister_alg(&algs[i]); 2299 921 2300 922 tasklet_kill(&pdata->tasklet); 923 + if (pdata->use_hash) { 924 + for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--) 925 + crypto_unregister_ahash(&algs_sha1_md5_sha256[i]); 926 + 927 + pdata->res->end -= 0x300; 928 + tasklet_kill(&pdata->hash_tasklet); 929 + pdata->use_hash = false; 930 + } 2301 931 2302 932 clk_disable_unprepare(pdata->clk); 2303 - 2304 933 s5p_dev = NULL; 2305 934 2306 935 return 0; ··· 2327 942 MODULE_DESCRIPTION("S5PV210 AES hw acceleration support."); 2328 943 MODULE_LICENSE("GPL v2"); 2329 944 MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>"); 945 + MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");
+5 -15
drivers/crypto/stm32/stm32-hash.c
··· 895 895 static int stm32_hash_update(struct ahash_request *req) 896 896 { 897 897 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); 898 - int ret; 899 898 900 899 if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU)) 901 900 return 0; ··· 908 909 return 0; 909 910 } 910 911 911 - ret = stm32_hash_enqueue(req, HASH_OP_UPDATE); 912 - 913 - if (rctx->flags & HASH_FLAGS_FINUP) 914 - return ret; 915 - 916 - return 0; 912 + return stm32_hash_enqueue(req, HASH_OP_UPDATE); 917 913 } 918 914 919 915 static int stm32_hash_final(struct ahash_request *req) ··· 1064 1070 static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id) 1065 1071 { 1066 1072 struct stm32_hash_dev *hdev = dev_id; 1067 - int err; 1068 1073 1069 1074 if (HASH_FLAGS_CPU & hdev->flags) { 1070 1075 if (HASH_FLAGS_OUTPUT_READY & hdev->flags) { ··· 1080 1087 return IRQ_HANDLED; 1081 1088 1082 1089 finish: 1083 - /*Finish current request */ 1084 - stm32_hash_finish_req(hdev->req, err); 1090 + /* Finish current request */ 1091 + stm32_hash_finish_req(hdev->req, 0); 1085 1092 1086 1093 return IRQ_HANDLED; 1087 1094 } ··· 1404 1411 static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev, 1405 1412 struct device *dev) 1406 1413 { 1407 - const struct of_device_id *match; 1408 1414 int err; 1409 1415 1410 - match = of_match_device(stm32_hash_of_match, dev); 1411 - if (!match) { 1416 + hdev->pdata = of_device_get_match_data(dev); 1417 + if (!hdev->pdata) { 1412 1418 dev_err(dev, "no compatible OF match\n"); 1413 1419 return -EINVAL; 1414 1420 } 1415 1421 1416 1422 err = of_property_read_u32(dev->of_node, "dma-maxburst", 1417 1423 &hdev->dma_maxburst); 1418 - 1419 - hdev->pdata = match->data; 1420 1424 1421 1425 return err; 1422 1426 }
+356 -226
drivers/crypto/talitos.c
··· 56 56 #include "talitos.h" 57 57 58 58 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr, 59 - bool is_sec1) 59 + unsigned int len, bool is_sec1) 60 60 { 61 61 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); 62 - if (!is_sec1) 62 + if (is_sec1) { 63 + ptr->len1 = cpu_to_be16(len); 64 + } else { 65 + ptr->len = cpu_to_be16(len); 63 66 ptr->eptr = upper_32_bits(dma_addr); 67 + } 64 68 } 65 69 66 70 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr, 67 71 struct talitos_ptr *src_ptr, bool is_sec1) 68 72 { 69 73 dst_ptr->ptr = src_ptr->ptr; 70 - if (!is_sec1) 71 - dst_ptr->eptr = src_ptr->eptr; 72 - } 73 - 74 - static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len, 75 - bool is_sec1) 76 - { 77 74 if (is_sec1) { 78 - ptr->res = 0; 79 - ptr->len1 = cpu_to_be16(len); 75 + dst_ptr->len1 = src_ptr->len1; 80 76 } else { 81 - ptr->len = cpu_to_be16(len); 77 + dst_ptr->len = src_ptr->len; 78 + dst_ptr->eptr = src_ptr->eptr; 82 79 } 83 80 } 84 81 ··· 113 116 struct talitos_private *priv = dev_get_drvdata(dev); 114 117 bool is_sec1 = has_ftr_sec1(priv); 115 118 116 - to_talitos_ptr_len(ptr, len, is_sec1); 117 - to_talitos_ptr(ptr, dma_addr, is_sec1); 118 - to_talitos_ptr_ext_set(ptr, 0, is_sec1); 119 + to_talitos_ptr(ptr, dma_addr, len, is_sec1); 119 120 } 120 121 121 122 /* ··· 160 165 /* set 36-bit addressing, done writeback enable and done IRQ enable */ 161 166 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE | 162 167 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE); 168 + /* enable chaining descriptors */ 169 + if (is_sec1) 170 + setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 171 + TALITOS_CCCR_LO_NE); 163 172 164 173 /* and ICCR writeback, if available */ 165 174 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) ··· 286 287 /* map descriptor and save caller data */ 287 288 if (is_sec1) { 288 289 desc->hdr1 = desc->hdr; 289 - desc->next_desc = 0; 290 290 request->dma_desc = dma_map_single(dev, &desc->hdr1, 
291 291 TALITOS_DESC_SIZE, 292 292 DMA_BIDIRECTIONAL); ··· 337 339 338 340 /* descriptors with their done bits set don't get the error */ 339 341 rmb(); 340 - hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr; 342 + if (!is_sec1) 343 + hdr = request->desc->hdr; 344 + else if (request->desc->next_desc) 345 + hdr = (request->desc + 1)->hdr1; 346 + else 347 + hdr = request->desc->hdr1; 341 348 342 349 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE) 343 350 status = 0; ··· 396 393 \ 397 394 if (ch_done_mask & 0x10000000) \ 398 395 flush_channel(dev, 0, 0, 0); \ 399 - if (priv->num_channels == 1) \ 400 - goto out; \ 401 396 if (ch_done_mask & 0x40000000) \ 402 397 flush_channel(dev, 1, 0, 0); \ 403 398 if (ch_done_mask & 0x00010000) \ ··· 403 402 if (ch_done_mask & 0x00040000) \ 404 403 flush_channel(dev, 3, 0, 0); \ 405 404 \ 406 - out: \ 407 405 /* At this point, all completed channels have been processed */ \ 408 406 /* Unmask done interrupts for channels completed later on. */ \ 409 407 spin_lock_irqsave(&priv->reg_lock, flags); \ ··· 412 412 } 413 413 414 414 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE) 415 + DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE) 415 416 416 417 #define DEF_TALITOS2_DONE(name, ch_done_mask) \ 417 418 static void talitos2_done_##name(unsigned long data) \ ··· 423 422 \ 424 423 if (ch_done_mask & 1) \ 425 424 flush_channel(dev, 0, 0, 0); \ 426 - if (priv->num_channels == 1) \ 427 - goto out; \ 428 425 if (ch_done_mask & (1 << 2)) \ 429 426 flush_channel(dev, 1, 0, 0); \ 430 427 if (ch_done_mask & (1 << 4)) \ ··· 430 431 if (ch_done_mask & (1 << 6)) \ 431 432 flush_channel(dev, 3, 0, 0); \ 432 433 \ 433 - out: \ 434 434 /* At this point, all completed channels have been processed */ \ 435 435 /* Unmask done interrupts for channels completed later on. 
*/ \ 436 436 spin_lock_irqsave(&priv->reg_lock, flags); \ ··· 439 441 } 440 442 441 443 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE) 444 + DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE) 442 445 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE) 443 446 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE) 444 447 ··· 463 464 tail = priv->chan[ch].tail; 464 465 465 466 iter = tail; 466 - while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) { 467 + while (priv->chan[ch].fifo[iter].dma_desc != cur_desc && 468 + priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) { 467 469 iter = (iter + 1) & (priv->fifo_len - 1); 468 470 if (iter == tail) { 469 471 dev_err(dev, "couldn't locate current descriptor\n"); 470 472 return 0; 471 473 } 472 474 } 475 + 476 + if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) 477 + return (priv->chan[ch].fifo[iter].desc + 1)->hdr; 473 478 474 479 return priv->chan[ch].fifo[iter].desc->hdr; 475 480 } ··· 828 825 __be32 desc_hdr_template; 829 826 u8 key[TALITOS_MAX_KEY_SIZE]; 830 827 u8 iv[TALITOS_MAX_IV_LENGTH]; 828 + dma_addr_t dma_key; 831 829 unsigned int keylen; 832 830 unsigned int enckeylen; 833 831 unsigned int authkeylen; 832 + dma_addr_t dma_buf; 833 + dma_addr_t dma_hw_context; 834 834 }; 835 835 836 836 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE ··· 842 836 struct talitos_ahash_req_ctx { 843 837 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)]; 844 838 unsigned int hw_context_size; 845 - u8 buf[HASH_MAX_BLOCK_SIZE]; 846 - u8 bufnext[HASH_MAX_BLOCK_SIZE]; 839 + u8 buf[2][HASH_MAX_BLOCK_SIZE]; 840 + int buf_idx; 847 841 unsigned int swinit; 848 842 unsigned int first; 849 843 unsigned int last; ··· 867 861 const u8 *key, unsigned int keylen) 868 862 { 869 863 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 864 + struct device *dev = ctx->dev; 870 865 struct crypto_authenc_keys keys; 871 866 872 867 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) ··· 876 869 if (keys.authkeylen + keys.enckeylen > 
TALITOS_MAX_KEY_SIZE) 877 870 goto badkey; 878 871 872 + if (ctx->keylen) 873 + dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); 874 + 879 875 memcpy(ctx->key, keys.authkey, keys.authkeylen); 880 876 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen); 881 877 882 878 ctx->keylen = keys.authkeylen + keys.enckeylen; 883 879 ctx->enckeylen = keys.enckeylen; 884 880 ctx->authkeylen = keys.authkeylen; 881 + ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen, 882 + DMA_TO_DEVICE); 885 883 886 884 return 0; 887 885 ··· 960 948 struct crypto_aead *aead = crypto_aead_reqtfm(areq); 961 949 struct talitos_ctx *ctx = crypto_aead_ctx(aead); 962 950 unsigned int ivsize = crypto_aead_ivsize(aead); 951 + bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP; 952 + struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3]; 963 953 964 - if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) 954 + if (is_ipsec_esp) 965 955 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], 966 956 DMA_FROM_DEVICE); 967 - unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE); 968 - unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); 969 - unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE); 957 + unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE); 970 958 971 959 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 972 960 areq->assoclen); ··· 975 963 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, 976 964 DMA_BIDIRECTIONAL); 977 965 978 - if (!(edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)) { 966 + if (!is_ipsec_esp) { 979 967 unsigned int dst_nents = edesc->dst_nents ? 
: 1; 980 968 981 969 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize, ··· 995 983 struct aead_request *areq = context; 996 984 struct crypto_aead *authenc = crypto_aead_reqtfm(areq); 997 985 unsigned int authsize = crypto_aead_authsize(authenc); 986 + unsigned int ivsize = crypto_aead_ivsize(authenc); 998 987 struct talitos_edesc *edesc; 999 988 struct scatterlist *sg; 1000 989 void *icvdata; ··· 1015 1002 memcpy((char *)sg_virt(sg) + sg->length - authsize, 1016 1003 icvdata, authsize); 1017 1004 } 1005 + 1006 + dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE); 1018 1007 1019 1008 kfree(edesc); 1020 1009 ··· 1112 1097 len = cryptlen; 1113 1098 1114 1099 to_talitos_ptr(link_tbl_ptr + count, 1115 - sg_dma_address(sg) + offset, 0); 1116 - to_talitos_ptr_len(link_tbl_ptr + count, len, 0); 1100 + sg_dma_address(sg) + offset, len, 0); 1117 1101 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0); 1118 1102 count++; 1119 1103 cryptlen -= len; ··· 1130 1116 return count; 1131 1117 } 1132 1118 1133 - int talitos_sg_map(struct device *dev, struct scatterlist *src, 1119 + static int talitos_sg_map(struct device *dev, struct scatterlist *src, 1134 1120 unsigned int len, struct talitos_edesc *edesc, 1135 1121 struct talitos_ptr *ptr, 1136 1122 int sg_count, unsigned int offset, int tbl_off) ··· 1138 1124 struct talitos_private *priv = dev_get_drvdata(dev); 1139 1125 bool is_sec1 = has_ftr_sec1(priv); 1140 1126 1141 - to_talitos_ptr_len(ptr, len, is_sec1); 1142 - to_talitos_ptr_ext_set(ptr, 0, is_sec1); 1143 - 1144 1127 if (sg_count == 1) { 1145 - to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1); 1128 + to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1); 1146 1129 return sg_count; 1147 1130 } 1148 1131 if (is_sec1) { 1149 - to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1); 1132 + to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1); 1150 1133 return sg_count; 1151 1134 } 1152 1135 sg_count = 
sg_to_link_tbl_offset(src, sg_count, offset, len, ··· 1154 1143 return sg_count; 1155 1144 } 1156 1145 to_talitos_ptr(ptr, edesc->dma_link_tbl + 1157 - tbl_off * sizeof(struct talitos_ptr), is_sec1); 1146 + tbl_off * sizeof(struct talitos_ptr), len, is_sec1); 1158 1147 to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1); 1159 1148 1160 1149 return sg_count; ··· 1181 1170 bool sync_needed = false; 1182 1171 struct talitos_private *priv = dev_get_drvdata(dev); 1183 1172 bool is_sec1 = has_ftr_sec1(priv); 1173 + bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP; 1174 + struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3]; 1175 + struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2]; 1184 1176 1185 1177 /* hmac key */ 1186 - map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, 1187 - DMA_TO_DEVICE); 1178 + to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1); 1188 1179 1189 1180 sg_count = edesc->src_nents ?: 1; 1190 1181 if (is_sec1 && sg_count > 1) ··· 1207 1194 } 1208 1195 1209 1196 /* cipher iv */ 1210 - if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) { 1211 - to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, is_sec1); 1212 - to_talitos_ptr_len(&desc->ptr[2], ivsize, is_sec1); 1213 - to_talitos_ptr_ext_set(&desc->ptr[2], 0, is_sec1); 1214 - } else { 1215 - to_talitos_ptr(&desc->ptr[3], edesc->iv_dma, is_sec1); 1216 - to_talitos_ptr_len(&desc->ptr[3], ivsize, is_sec1); 1217 - to_talitos_ptr_ext_set(&desc->ptr[3], 0, is_sec1); 1218 - } 1197 + to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1); 1219 1198 1220 1199 /* cipher key */ 1221 - if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) 1222 - map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen, 1223 - (char *)&ctx->key + ctx->authkeylen, 1224 - DMA_TO_DEVICE); 1225 - else 1226 - map_single_talitos_ptr(dev, &desc->ptr[2], ctx->enckeylen, 1227 - (char *)&ctx->key + ctx->authkeylen, 1228 - DMA_TO_DEVICE); 1200 + to_talitos_ptr(ckey_ptr, ctx->dma_key + 
ctx->authkeylen, 1201 + ctx->enckeylen, is_sec1); 1229 1202 1230 1203 /* 1231 1204 * cipher in ··· 1219 1220 * extent is bytes of HMAC postpended to ciphertext, 1220 1221 * typically 12 for ipsec 1221 1222 */ 1222 - to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1); 1223 - to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1); 1224 - 1225 1223 sg_link_tbl_len = cryptlen; 1226 1224 1227 - if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) { 1225 + if (is_ipsec_esp) { 1228 1226 to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1); 1229 1227 1230 - if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) 1228 + if (desc->hdr & DESC_HDR_MODE1_MDEU_CICV) 1231 1229 sg_link_tbl_len += authsize; 1232 1230 } 1233 1231 1234 - sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc, 1235 - &desc->ptr[4], sg_count, areq->assoclen, 1236 - tbl_off); 1232 + ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc, 1233 + &desc->ptr[4], sg_count, areq->assoclen, tbl_off); 1237 1234 1238 - if (sg_count > 1) { 1239 - tbl_off += sg_count; 1235 + if (ret > 1) { 1236 + tbl_off += ret; 1240 1237 sync_needed = true; 1241 1238 } 1242 1239 ··· 1243 1248 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE); 1244 1249 } 1245 1250 1246 - sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc, 1247 - &desc->ptr[5], sg_count, areq->assoclen, 1248 - tbl_off); 1251 + ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5], 1252 + sg_count, areq->assoclen, tbl_off); 1249 1253 1250 - if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) 1254 + if (is_ipsec_esp) 1251 1255 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1); 1252 1256 1253 - if (sg_count > 1) { 1257 + /* ICV data */ 1258 + if (ret > 1) { 1259 + tbl_off += ret; 1254 1260 edesc->icv_ool = true; 1255 1261 sync_needed = true; 1256 1262 1257 - if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) { 1263 + if (is_ipsec_esp) { 1258 1264 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; 1259 1265 int offset = (edesc->src_nents + edesc->dst_nents + 2) 
* 1260 1266 sizeof(struct talitos_ptr) + authsize; 1261 1267 1262 1268 /* Add an entry to the link table for ICV data */ 1263 - tbl_ptr += sg_count - 1; 1264 - to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1); 1265 - tbl_ptr++; 1269 + to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1); 1266 1270 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN, 1267 1271 is_sec1); 1268 - to_talitos_ptr_len(tbl_ptr, authsize, is_sec1); 1269 1272 1270 1273 /* icv data follows link tables */ 1271 1274 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset, 1272 - is_sec1); 1275 + authsize, is_sec1); 1276 + } else { 1277 + dma_addr_t addr = edesc->dma_link_tbl; 1278 + 1279 + if (is_sec1) 1280 + addr += areq->assoclen + cryptlen; 1281 + else 1282 + addr += sizeof(struct talitos_ptr) * tbl_off; 1283 + 1284 + to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1); 1285 + } 1286 + } else if (!is_ipsec_esp) { 1287 + ret = talitos_sg_map(dev, areq->dst, authsize, edesc, 1288 + &desc->ptr[6], sg_count, areq->assoclen + 1289 + cryptlen, 1290 + tbl_off); 1291 + if (ret > 1) { 1292 + tbl_off += ret; 1293 + edesc->icv_ool = true; 1294 + sync_needed = true; 1295 + } else { 1296 + edesc->icv_ool = false; 1273 1297 } 1274 1298 } else { 1275 1299 edesc->icv_ool = false; 1276 1300 } 1277 1301 1278 - /* ICV data */ 1279 - if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) { 1280 - to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1); 1281 - to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl + 1282 - areq->assoclen + cryptlen, is_sec1); 1283 - } 1284 - 1285 1302 /* iv out */ 1286 - if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) 1303 + if (is_ipsec_esp) 1287 1304 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 1288 1305 DMA_FROM_DEVICE); 1289 1306 ··· 1394 1387 alloc_len += icv_stashing ? 
authsize : 0; 1395 1388 } 1396 1389 1390 + /* if its a ahash, add space for a second desc next to the first one */ 1391 + if (is_sec1 && !dst) 1392 + alloc_len += sizeof(struct talitos_desc); 1393 + 1397 1394 edesc = kmalloc(alloc_len, GFP_DMA | flags); 1398 1395 if (!edesc) { 1399 1396 dev_err(dev, "could not allocate edescriptor\n"); 1400 1397 err = ERR_PTR(-ENOMEM); 1401 1398 goto error_sg; 1402 1399 } 1400 + memset(&edesc->desc, 0, sizeof(edesc->desc)); 1403 1401 1404 1402 edesc->src_nents = src_nents; 1405 1403 edesc->dst_nents = dst_nents; 1406 1404 edesc->iv_dma = iv_dma; 1407 1405 edesc->dma_len = dma_len; 1408 - if (dma_len) 1409 - edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], 1406 + if (dma_len) { 1407 + void *addr = &edesc->link_tbl[0]; 1408 + 1409 + if (is_sec1 && !dst) 1410 + addr += sizeof(struct talitos_desc); 1411 + edesc->dma_link_tbl = dma_map_single(dev, addr, 1410 1412 edesc->dma_len, 1411 1413 DMA_BIDIRECTIONAL); 1412 - 1414 + } 1413 1415 return edesc; 1414 1416 error_sg: 1415 1417 if (iv_dma) ··· 1484 1468 DESC_HDR_MODE1_MDEU_CICV; 1485 1469 1486 1470 /* reset integrity check result bits */ 1487 - edesc->desc.hdr_lo = 0; 1488 1471 1489 1472 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done); 1490 1473 } ··· 1509 1494 const u8 *key, unsigned int keylen) 1510 1495 { 1511 1496 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); 1497 + struct device *dev = ctx->dev; 1498 + u32 tmp[DES_EXPKEY_WORDS]; 1512 1499 1513 1500 if (keylen > TALITOS_MAX_KEY_SIZE) { 1514 1501 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 1515 1502 return -EINVAL; 1516 1503 } 1517 1504 1505 + if (unlikely(crypto_ablkcipher_get_flags(cipher) & 1506 + CRYPTO_TFM_REQ_WEAK_KEY) && 1507 + !des_ekey(tmp, key)) { 1508 + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY); 1509 + return -EINVAL; 1510 + } 1511 + 1512 + if (ctx->keylen) 1513 + dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); 1514 + 1518 1515 
memcpy(&ctx->key, key, keylen); 1519 1516 ctx->keylen = keylen; 1517 + 1518 + ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE); 1520 1519 1521 1520 return 0; 1522 1521 } ··· 1542 1513 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); 1543 1514 1544 1515 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0); 1545 - unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); 1546 1516 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); 1547 1517 1548 1518 if (edesc->dma_len) ··· 1583 1555 bool is_sec1 = has_ftr_sec1(priv); 1584 1556 1585 1557 /* first DWORD empty */ 1586 - desc->ptr[0] = zero_entry; 1587 1558 1588 1559 /* cipher iv */ 1589 - to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1); 1590 - to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1); 1591 - to_talitos_ptr_ext_set(&desc->ptr[1], 0, is_sec1); 1560 + to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1); 1592 1561 1593 1562 /* cipher key */ 1594 - map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, 1595 - (char *)&ctx->key, DMA_TO_DEVICE); 1563 + to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1); 1596 1564 1597 1565 sg_count = edesc->src_nents ?: 1; 1598 1566 if (is_sec1 && sg_count > 1) ··· 1623 1599 DMA_FROM_DEVICE); 1624 1600 1625 1601 /* last DWORD empty */ 1626 - desc->ptr[6] = zero_entry; 1627 1602 1628 1603 if (sync_needed) 1629 1604 dma_sync_single_for_device(dev, edesc->dma_link_tbl, ··· 1686 1663 struct ahash_request *areq) 1687 1664 { 1688 1665 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1689 - struct talitos_private *priv = dev_get_drvdata(dev); 1690 - bool is_sec1 = has_ftr_sec1(priv); 1691 - 1692 - unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); 1693 1666 1694 1667 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0); 1695 - 1696 - /* When using hashctx-in, must unmap it. 
*/ 1697 - if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1)) 1698 - unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], 1699 - DMA_TO_DEVICE); 1700 - 1701 - if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1)) 1702 - unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], 1703 - DMA_TO_DEVICE); 1704 1668 1705 1669 if (edesc->dma_len) 1706 1670 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, 1707 1671 DMA_BIDIRECTIONAL); 1708 1672 1673 + if (edesc->desc.next_desc) 1674 + dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc), 1675 + TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL); 1709 1676 } 1710 1677 1711 1678 static void ahash_done(struct device *dev, ··· 1709 1696 1710 1697 if (!req_ctx->last && req_ctx->to_hash_later) { 1711 1698 /* Position any partial block for next update/final/finup */ 1712 - memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later); 1699 + req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1; 1713 1700 req_ctx->nbuf = req_ctx->to_hash_later; 1714 1701 } 1715 1702 common_nonsnoop_hash_unmap(dev, edesc, areq); ··· 1723 1710 * SEC1 doesn't like hashing of 0 sized message, so we do the padding 1724 1711 * ourself and submit a padded block 1725 1712 */ 1726 - void talitos_handle_buggy_hash(struct talitos_ctx *ctx, 1713 + static void talitos_handle_buggy_hash(struct talitos_ctx *ctx, 1727 1714 struct talitos_edesc *edesc, 1728 1715 struct talitos_ptr *ptr) 1729 1716 { ··· 1742 1729 1743 1730 static int common_nonsnoop_hash(struct talitos_edesc *edesc, 1744 1731 struct ahash_request *areq, unsigned int length, 1732 + unsigned int offset, 1745 1733 void (*callback) (struct device *dev, 1746 1734 struct talitos_desc *desc, 1747 1735 void *context, int error)) ··· 1759 1745 int sg_count; 1760 1746 1761 1747 /* first DWORD empty */ 1762 - desc->ptr[0] = zero_entry; 1763 1748 1764 1749 /* hash context in */ 1765 1750 if (!req_ctx->first || req_ctx->swinit) { 1766 - map_single_talitos_ptr(dev, &desc->ptr[1], 1767 - req_ctx->hw_context_size, 1768 - 
(char *)req_ctx->hw_context, 1769 - DMA_TO_DEVICE); 1751 + to_talitos_ptr(&desc->ptr[1], ctx->dma_hw_context, 1752 + req_ctx->hw_context_size, is_sec1); 1770 1753 req_ctx->swinit = 0; 1771 - } else { 1772 - desc->ptr[1] = zero_entry; 1773 1754 } 1774 1755 /* Indicate next op is not the first. */ 1775 1756 req_ctx->first = 0; 1776 1757 1777 1758 /* HMAC key */ 1778 1759 if (ctx->keylen) 1779 - map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, 1780 - (char *)&ctx->key, DMA_TO_DEVICE); 1781 - else 1782 - desc->ptr[2] = zero_entry; 1760 + to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, 1761 + is_sec1); 1762 + 1763 + if (is_sec1 && req_ctx->nbuf) 1764 + length -= req_ctx->nbuf; 1783 1765 1784 1766 sg_count = edesc->src_nents ?: 1; 1785 1767 if (is_sec1 && sg_count > 1) 1786 - sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length); 1787 - else 1768 + sg_pcopy_to_buffer(req_ctx->psrc, sg_count, 1769 + edesc->buf + sizeof(struct talitos_desc), 1770 + length, req_ctx->nbuf); 1771 + else if (length) 1788 1772 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, 1789 1773 DMA_TO_DEVICE); 1790 1774 /* 1791 1775 * data in 1792 1776 */ 1793 - sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, 1794 - &desc->ptr[3], sg_count, 0, 0); 1795 - if (sg_count > 1) 1796 - sync_needed = true; 1777 + if (is_sec1 && req_ctx->nbuf) { 1778 + dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx * 1779 + HASH_MAX_BLOCK_SIZE; 1780 + 1781 + to_talitos_ptr(&desc->ptr[3], dma_buf, req_ctx->nbuf, is_sec1); 1782 + } else { 1783 + sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, 1784 + &desc->ptr[3], sg_count, offset, 0); 1785 + if (sg_count > 1) 1786 + sync_needed = true; 1787 + } 1797 1788 1798 1789 /* fifth DWORD empty */ 1799 - desc->ptr[4] = zero_entry; 1800 1790 1801 1791 /* hash/HMAC out -or- hash context out */ 1802 1792 if (req_ctx->last) ··· 1808 1790 crypto_ahash_digestsize(tfm), 1809 1791 areq->result, DMA_FROM_DEVICE); 1810 1792 else 1811 - 
map_single_talitos_ptr(dev, &desc->ptr[5], 1812 - req_ctx->hw_context_size, 1813 - req_ctx->hw_context, DMA_FROM_DEVICE); 1793 + to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context, 1794 + req_ctx->hw_context_size, is_sec1); 1814 1795 1815 1796 /* last DWORD empty */ 1816 - desc->ptr[6] = zero_entry; 1817 1797 1818 1798 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0) 1819 1799 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); 1800 + 1801 + if (is_sec1 && req_ctx->nbuf && length) { 1802 + struct talitos_desc *desc2 = desc + 1; 1803 + dma_addr_t next_desc; 1804 + 1805 + memset(desc2, 0, sizeof(*desc2)); 1806 + desc2->hdr = desc->hdr; 1807 + desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT; 1808 + desc2->hdr1 = desc2->hdr; 1809 + desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD; 1810 + desc->hdr |= DESC_HDR_MODE0_MDEU_CONT; 1811 + desc->hdr &= ~DESC_HDR_DONE_NOTIFY; 1812 + 1813 + to_talitos_ptr(&desc2->ptr[1], ctx->dma_hw_context, 1814 + req_ctx->hw_context_size, is_sec1); 1815 + 1816 + copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1); 1817 + sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, 1818 + &desc2->ptr[3], sg_count, offset, 0); 1819 + if (sg_count > 1) 1820 + sync_needed = true; 1821 + copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1); 1822 + if (req_ctx->last) 1823 + to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context, 1824 + req_ctx->hw_context_size, is_sec1); 1825 + 1826 + next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE, 1827 + DMA_BIDIRECTIONAL); 1828 + desc->next_desc = cpu_to_be32(next_desc); 1829 + } 1820 1830 1821 1831 if (sync_needed) 1822 1832 dma_sync_single_for_device(dev, edesc->dma_link_tbl, ··· 1864 1818 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 1865 1819 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); 1866 1820 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1821 + struct talitos_private *priv = dev_get_drvdata(ctx->dev); 1822 + bool is_sec1 = has_ftr_sec1(priv); 1823 + 1824 + 
if (is_sec1) 1825 + nbytes -= req_ctx->nbuf; 1867 1826 1868 1827 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0, 1869 1828 nbytes, 0, 0, 0, areq->base.flags, false); ··· 1877 1826 static int ahash_init(struct ahash_request *areq) 1878 1827 { 1879 1828 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 1829 + struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); 1830 + struct device *dev = ctx->dev; 1880 1831 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1832 + unsigned int size; 1833 + struct talitos_private *priv = dev_get_drvdata(dev); 1834 + bool is_sec1 = has_ftr_sec1(priv); 1881 1835 1882 1836 /* Initialize the context */ 1837 + req_ctx->buf_idx = 0; 1883 1838 req_ctx->nbuf = 0; 1884 1839 req_ctx->first = 1; /* first indicates h/w must init its context */ 1885 1840 req_ctx->swinit = 0; /* assume h/w init of context */ 1886 - req_ctx->hw_context_size = 1887 - (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) 1841 + size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) 1888 1842 ? 
TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 1889 1843 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; 1844 + req_ctx->hw_context_size = size; 1890 1845 1846 + if (ctx->dma_hw_context) 1847 + dma_unmap_single(dev, ctx->dma_hw_context, size, 1848 + DMA_BIDIRECTIONAL); 1849 + ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size, 1850 + DMA_BIDIRECTIONAL); 1851 + if (ctx->dma_buf) 1852 + dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf), 1853 + DMA_TO_DEVICE); 1854 + if (is_sec1) 1855 + ctx->dma_buf = dma_map_single(dev, req_ctx->buf, 1856 + sizeof(req_ctx->buf), 1857 + DMA_TO_DEVICE); 1891 1858 return 0; 1892 1859 } 1893 1860 ··· 1916 1847 static int ahash_init_sha224_swinit(struct ahash_request *areq) 1917 1848 { 1918 1849 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1850 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 1851 + struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); 1852 + struct device *dev = ctx->dev; 1919 1853 1920 1854 ahash_init(areq); 1921 1855 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/ ··· 1936 1864 req_ctx->hw_context[8] = 0; 1937 1865 req_ctx->hw_context[9] = 0; 1938 1866 1867 + dma_sync_single_for_device(dev, ctx->dma_hw_context, 1868 + req_ctx->hw_context_size, DMA_TO_DEVICE); 1869 + 1939 1870 return 0; 1940 1871 } 1941 1872 ··· 1954 1879 unsigned int to_hash_later; 1955 1880 unsigned int nsg; 1956 1881 int nents; 1882 + struct device *dev = ctx->dev; 1883 + struct talitos_private *priv = dev_get_drvdata(dev); 1884 + bool is_sec1 = has_ftr_sec1(priv); 1885 + int offset = 0; 1886 + u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx]; 1957 1887 1958 1888 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { 1959 1889 /* Buffer up to one whole block */ ··· 1968 1888 return nents; 1969 1889 } 1970 1890 sg_copy_to_buffer(areq->src, nents, 1971 - req_ctx->buf + req_ctx->nbuf, nbytes); 1891 + ctx_buf + req_ctx->nbuf, nbytes); 1972 1892 req_ctx->nbuf += nbytes; 1973 1893 return 0; 
1974 1894 } ··· 1989 1909 } 1990 1910 1991 1911 /* Chain in any previously buffered data */ 1992 - if (req_ctx->nbuf) { 1912 + if (!is_sec1 && req_ctx->nbuf) { 1993 1913 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1; 1994 1914 sg_init_table(req_ctx->bufsl, nsg); 1995 - sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf); 1915 + sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf); 1996 1916 if (nsg > 1) 1997 1917 sg_chain(req_ctx->bufsl, 2, areq->src); 1998 1918 req_ctx->psrc = req_ctx->bufsl; 1919 + } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) { 1920 + if (nbytes_to_hash > blocksize) 1921 + offset = blocksize - req_ctx->nbuf; 1922 + else 1923 + offset = nbytes_to_hash - req_ctx->nbuf; 1924 + nents = sg_nents_for_len(areq->src, offset); 1925 + if (nents < 0) { 1926 + dev_err(ctx->dev, "Invalid number of src SG.\n"); 1927 + return nents; 1928 + } 1929 + sg_copy_to_buffer(areq->src, nents, 1930 + ctx_buf + req_ctx->nbuf, offset); 1931 + req_ctx->nbuf += offset; 1932 + req_ctx->psrc = areq->src; 1999 1933 } else 2000 1934 req_ctx->psrc = areq->src; 2001 1935 ··· 2020 1926 return nents; 2021 1927 } 2022 1928 sg_pcopy_to_buffer(areq->src, nents, 2023 - req_ctx->bufnext, 1929 + req_ctx->buf[(req_ctx->buf_idx + 1) & 1], 2024 1930 to_hash_later, 2025 1931 nbytes - to_hash_later); 2026 1932 } ··· 2042 1948 /* request SEC to INIT hash. */ 2043 1949 if (req_ctx->first && !req_ctx->swinit) 2044 1950 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT; 1951 + if (is_sec1) { 1952 + dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx * 1953 + HASH_MAX_BLOCK_SIZE; 1954 + 1955 + dma_sync_single_for_device(dev, dma_buf, 1956 + req_ctx->nbuf, DMA_TO_DEVICE); 1957 + } 2045 1958 2046 1959 /* When the tfm context has a keylen, it's an HMAC. 2047 1960 * A first or last (ie. not middle) descriptor must request HMAC. 
··· 2056 1955 if (ctx->keylen && (req_ctx->first || req_ctx->last)) 2057 1956 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; 2058 1957 2059 - return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, 1958 + return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset, 2060 1959 ahash_done); 2061 1960 } 2062 1961 ··· 2102 2001 { 2103 2002 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2104 2003 struct talitos_export_state *export = out; 2004 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 2005 + struct talitos_ctx *ctx = crypto_ahash_ctx(ahash); 2006 + struct device *dev = ctx->dev; 2105 2007 2008 + dma_sync_single_for_cpu(dev, ctx->dma_hw_context, 2009 + req_ctx->hw_context_size, DMA_FROM_DEVICE); 2106 2010 memcpy(export->hw_context, req_ctx->hw_context, 2107 2011 req_ctx->hw_context_size); 2108 - memcpy(export->buf, req_ctx->buf, req_ctx->nbuf); 2012 + memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf); 2109 2013 export->swinit = req_ctx->swinit; 2110 2014 export->first = req_ctx->first; 2111 2015 export->last = req_ctx->last; ··· 2125 2019 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2126 2020 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 2127 2021 const struct talitos_export_state *export = in; 2022 + unsigned int size; 2023 + struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); 2024 + struct device *dev = ctx->dev; 2025 + struct talitos_private *priv = dev_get_drvdata(dev); 2026 + bool is_sec1 = has_ftr_sec1(priv); 2128 2027 2129 2028 memset(req_ctx, 0, sizeof(*req_ctx)); 2130 - req_ctx->hw_context_size = 2131 - (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) 2029 + size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) 2132 2030 ? 
TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 2133 2031 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; 2134 - memcpy(req_ctx->hw_context, export->hw_context, 2135 - req_ctx->hw_context_size); 2136 - memcpy(req_ctx->buf, export->buf, export->nbuf); 2032 + req_ctx->hw_context_size = size; 2033 + if (ctx->dma_hw_context) 2034 + dma_unmap_single(dev, ctx->dma_hw_context, size, 2035 + DMA_BIDIRECTIONAL); 2036 + 2037 + memcpy(req_ctx->hw_context, export->hw_context, size); 2038 + ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size, 2039 + DMA_BIDIRECTIONAL); 2040 + if (ctx->dma_buf) 2041 + dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf), 2042 + DMA_TO_DEVICE); 2043 + memcpy(req_ctx->buf[0], export->buf, export->nbuf); 2044 + if (is_sec1) 2045 + ctx->dma_buf = dma_map_single(dev, req_ctx->buf, 2046 + sizeof(req_ctx->buf), 2047 + DMA_TO_DEVICE); 2137 2048 req_ctx->swinit = export->swinit; 2138 2049 req_ctx->first = export->first; 2139 2050 req_ctx->last = export->last; ··· 2160 2037 return 0; 2161 2038 } 2162 2039 2163 - struct keyhash_result { 2164 - struct completion completion; 2165 - int err; 2166 - }; 2167 - 2168 - static void keyhash_complete(struct crypto_async_request *req, int err) 2169 - { 2170 - struct keyhash_result *res = req->data; 2171 - 2172 - if (err == -EINPROGRESS) 2173 - return; 2174 - 2175 - res->err = err; 2176 - complete(&res->completion); 2177 - } 2178 - 2179 2040 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen, 2180 2041 u8 *hash) 2181 2042 { ··· 2167 2060 2168 2061 struct scatterlist sg[1]; 2169 2062 struct ahash_request *req; 2170 - struct keyhash_result hresult; 2063 + struct crypto_wait wait; 2171 2064 int ret; 2172 2065 2173 - init_completion(&hresult.completion); 2066 + crypto_init_wait(&wait); 2174 2067 2175 2068 req = ahash_request_alloc(tfm, GFP_KERNEL); 2176 2069 if (!req) ··· 2179 2072 /* Keep tfm keylen == 0 during hash of the long key */ 2180 2073 ctx->keylen = 0; 2181 2074 
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 2182 - keyhash_complete, &hresult); 2075 + crypto_req_done, &wait); 2183 2076 2184 2077 sg_init_one(&sg[0], key, keylen); 2185 2078 2186 2079 ahash_request_set_crypt(req, sg, hash, keylen); 2187 - ret = crypto_ahash_digest(req); 2188 - switch (ret) { 2189 - case 0: 2190 - break; 2191 - case -EINPROGRESS: 2192 - case -EBUSY: 2193 - ret = wait_for_completion_interruptible( 2194 - &hresult.completion); 2195 - if (!ret) 2196 - ret = hresult.err; 2197 - break; 2198 - default: 2199 - break; 2200 - } 2080 + ret = crypto_wait_req(crypto_ahash_digest(req), &wait); 2081 + 2201 2082 ahash_request_free(req); 2202 2083 2203 2084 return ret; ··· 2195 2100 unsigned int keylen) 2196 2101 { 2197 2102 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 2103 + struct device *dev = ctx->dev; 2198 2104 unsigned int blocksize = 2199 2105 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); 2200 2106 unsigned int digestsize = crypto_ahash_digestsize(tfm); ··· 2218 2122 memcpy(ctx->key, hash, digestsize); 2219 2123 } 2220 2124 2125 + if (ctx->keylen) 2126 + dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); 2127 + 2221 2128 ctx->keylen = keysize; 2129 + ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE); 2222 2130 2223 2131 return 0; 2224 2132 } ··· 2714 2614 .ivsize = AES_BLOCK_SIZE, 2715 2615 } 2716 2616 }, 2717 - .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2617 + .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP | 2718 2618 DESC_HDR_SEL0_AESU | 2719 2619 DESC_HDR_MODE0_AESU_CTR, 2720 2620 }, ··· 3051 2951 return 0; 3052 2952 } 3053 2953 2954 + static void talitos_cra_exit(struct crypto_tfm *tfm) 2955 + { 2956 + struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); 2957 + struct device *dev = ctx->dev; 2958 + 2959 + if (ctx->keylen) 2960 + dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); 2961 + } 2962 + 2963 + static void talitos_cra_exit_ahash(struct 
crypto_tfm *tfm) 2964 + { 2965 + struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); 2966 + struct device *dev = ctx->dev; 2967 + unsigned int size; 2968 + 2969 + talitos_cra_exit(tfm); 2970 + 2971 + size = (crypto_ahash_digestsize(__crypto_ahash_cast(tfm)) <= 2972 + SHA256_DIGEST_SIZE) 2973 + ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 2974 + : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; 2975 + 2976 + if (ctx->dma_hw_context) 2977 + dma_unmap_single(dev, ctx->dma_hw_context, size, 2978 + DMA_BIDIRECTIONAL); 2979 + if (ctx->dma_buf) 2980 + dma_unmap_single(dev, ctx->dma_buf, HASH_MAX_BLOCK_SIZE * 2, 2981 + DMA_TO_DEVICE); 2982 + } 2983 + 3054 2984 /* 3055 2985 * given the alg's descriptor header template, determine whether descriptor 3056 2986 * type and primary/secondary execution units required match the hw ··· 3119 2989 break; 3120 2990 } 3121 2991 list_del(&t_alg->entry); 3122 - kfree(t_alg); 3123 2992 } 3124 2993 3125 2994 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) 3126 2995 talitos_unregister_rng(dev); 3127 - 3128 - for (i = 0; priv->chan && i < priv->num_channels; i++) 3129 - kfree(priv->chan[i].fifo); 3130 - 3131 - kfree(priv->chan); 3132 2996 3133 2997 for (i = 0; i < 2; i++) 3134 2998 if (priv->irq[i]) { ··· 3133 3009 tasklet_kill(&priv->done_task[0]); 3134 3010 if (priv->irq[1]) 3135 3011 tasklet_kill(&priv->done_task[1]); 3136 - 3137 - iounmap(priv->reg); 3138 - 3139 - kfree(priv); 3140 3012 3141 3013 return 0; 3142 3014 } ··· 3145 3025 struct talitos_crypto_alg *t_alg; 3146 3026 struct crypto_alg *alg; 3147 3027 3148 - t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL); 3028 + t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg), 3029 + GFP_KERNEL); 3149 3030 if (!t_alg) 3150 3031 return ERR_PTR(-ENOMEM); 3151 3032 ··· 3156 3035 case CRYPTO_ALG_TYPE_ABLKCIPHER: 3157 3036 alg = &t_alg->algt.alg.crypto; 3158 3037 alg->cra_init = talitos_cra_init; 3038 + alg->cra_exit = talitos_cra_exit; 3159 3039 alg->cra_type = &crypto_ablkcipher_type; 3160 
3040 alg->cra_ablkcipher.setkey = ablkcipher_setkey; 3161 3041 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt; ··· 3165 3043 break; 3166 3044 case CRYPTO_ALG_TYPE_AEAD: 3167 3045 alg = &t_alg->algt.alg.aead.base; 3046 + alg->cra_exit = talitos_cra_exit; 3168 3047 t_alg->algt.alg.aead.init = talitos_cra_init_aead; 3169 3048 t_alg->algt.alg.aead.setkey = aead_setkey; 3170 3049 t_alg->algt.alg.aead.encrypt = aead_encrypt; 3171 3050 t_alg->algt.alg.aead.decrypt = aead_decrypt; 3051 + if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && 3052 + !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) { 3053 + devm_kfree(dev, t_alg); 3054 + return ERR_PTR(-ENOTSUPP); 3055 + } 3172 3056 break; 3173 3057 case CRYPTO_ALG_TYPE_AHASH: 3174 3058 alg = &t_alg->algt.alg.hash.halg.base; 3175 3059 alg->cra_init = talitos_cra_init_ahash; 3060 + alg->cra_exit = talitos_cra_exit_ahash; 3176 3061 alg->cra_type = &crypto_ahash_type; 3177 3062 t_alg->algt.alg.hash.init = ahash_init; 3178 3063 t_alg->algt.alg.hash.update = ahash_update; ··· 3193 3064 3194 3065 if (!(priv->features & TALITOS_FTR_HMAC_OK) && 3195 3066 !strncmp(alg->cra_name, "hmac", 4)) { 3196 - kfree(t_alg); 3067 + devm_kfree(dev, t_alg); 3197 3068 return ERR_PTR(-ENOTSUPP); 3198 3069 } 3199 3070 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && ··· 3208 3079 break; 3209 3080 default: 3210 3081 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type); 3211 - kfree(t_alg); 3082 + devm_kfree(dev, t_alg); 3212 3083 return ERR_PTR(-EINVAL); 3213 3084 } 3214 3085 ··· 3285 3156 struct device *dev = &ofdev->dev; 3286 3157 struct device_node *np = ofdev->dev.of_node; 3287 3158 struct talitos_private *priv; 3288 - const unsigned int *prop; 3289 3159 int i, err; 3290 3160 int stride; 3161 + struct resource *res; 3291 3162 3292 - priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL); 3163 + priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL); 3293 3164 if (!priv) 3294 3165 return -ENOMEM; 3295 3166 ··· 
3301 3172 3302 3173 spin_lock_init(&priv->reg_lock); 3303 3174 3304 - priv->reg = of_iomap(np, 0); 3175 + res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); 3176 + if (!res) 3177 + return -ENXIO; 3178 + priv->reg = devm_ioremap(dev, res->start, resource_size(res)); 3305 3179 if (!priv->reg) { 3306 3180 dev_err(dev, "failed to of_iomap\n"); 3307 3181 err = -ENOMEM; ··· 3312 3180 } 3313 3181 3314 3182 /* get SEC version capabilities from device tree */ 3315 - prop = of_get_property(np, "fsl,num-channels", NULL); 3316 - if (prop) 3317 - priv->num_channels = *prop; 3318 - 3319 - prop = of_get_property(np, "fsl,channel-fifo-len", NULL); 3320 - if (prop) 3321 - priv->chfifo_len = *prop; 3322 - 3323 - prop = of_get_property(np, "fsl,exec-units-mask", NULL); 3324 - if (prop) 3325 - priv->exec_units = *prop; 3326 - 3327 - prop = of_get_property(np, "fsl,descriptor-types-mask", NULL); 3328 - if (prop) 3329 - priv->desc_types = *prop; 3183 + of_property_read_u32(np, "fsl,num-channels", &priv->num_channels); 3184 + of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len); 3185 + of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units); 3186 + of_property_read_u32(np, "fsl,descriptor-types-mask", 3187 + &priv->desc_types); 3330 3188 3331 3189 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len || 3332 3190 !priv->exec_units || !priv->desc_types) { ··· 3366 3244 goto err_out; 3367 3245 3368 3246 if (of_device_is_compatible(np, "fsl,sec1.0")) { 3369 - tasklet_init(&priv->done_task[0], talitos1_done_4ch, 3370 - (unsigned long)dev); 3371 - } else { 3372 - if (!priv->irq[1]) { 3373 - tasklet_init(&priv->done_task[0], talitos2_done_4ch, 3247 + if (priv->num_channels == 1) 3248 + tasklet_init(&priv->done_task[0], talitos1_done_ch0, 3374 3249 (unsigned long)dev); 3375 - } else { 3250 + else 3251 + tasklet_init(&priv->done_task[0], talitos1_done_4ch, 3252 + (unsigned long)dev); 3253 + } else { 3254 + if (priv->irq[1]) { 3376 3255 
tasklet_init(&priv->done_task[0], talitos2_done_ch0_2, 3377 3256 (unsigned long)dev); 3378 3257 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3, 3379 3258 (unsigned long)dev); 3259 + } else if (priv->num_channels == 1) { 3260 + tasklet_init(&priv->done_task[0], talitos2_done_ch0, 3261 + (unsigned long)dev); 3262 + } else { 3263 + tasklet_init(&priv->done_task[0], talitos2_done_4ch, 3264 + (unsigned long)dev); 3380 3265 } 3381 3266 } 3382 3267 3383 - priv->chan = kzalloc(sizeof(struct talitos_channel) * 3384 - priv->num_channels, GFP_KERNEL); 3268 + priv->chan = devm_kzalloc(dev, sizeof(struct talitos_channel) * 3269 + priv->num_channels, GFP_KERNEL); 3385 3270 if (!priv->chan) { 3386 3271 dev_err(dev, "failed to allocate channel management space\n"); 3387 3272 err = -ENOMEM; ··· 3405 3276 spin_lock_init(&priv->chan[i].head_lock); 3406 3277 spin_lock_init(&priv->chan[i].tail_lock); 3407 3278 3408 - priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) * 3409 - priv->fifo_len, GFP_KERNEL); 3279 + priv->chan[i].fifo = devm_kzalloc(dev, 3280 + sizeof(struct talitos_request) * 3281 + priv->fifo_len, GFP_KERNEL); 3410 3282 if (!priv->chan[i].fifo) { 3411 3283 dev_err(dev, "failed to allocate request fifo %d\n", i); 3412 3284 err = -ENOMEM; ··· 3473 3343 if (err) { 3474 3344 dev_err(dev, "%s alg registration failed\n", 3475 3345 alg->cra_driver_name); 3476 - kfree(t_alg); 3346 + devm_kfree(dev, t_alg); 3477 3347 } else 3478 3348 list_add_tail(&t_alg->entry, &priv->alg_list); 3479 3349 }
+5 -2
drivers/crypto/talitos.h
··· 52 52 __be32 ptr; /* address */ 53 53 }; 54 54 55 - static const struct talitos_ptr zero_entry; 56 - 57 55 /* descriptor */ 58 56 struct talitos_desc { 59 57 __be32 hdr; /* header high bits */ ··· 208 210 #define TALITOS_ISR 0x1010 /* interrupt status register */ 209 211 #define TALITOS1_ISR_4CHERR ISR1_FORMAT(0xa) /* 4 ch errors mask */ 210 212 #define TALITOS1_ISR_4CHDONE ISR1_FORMAT(0x5) /* 4 ch done mask */ 213 + #define TALITOS1_ISR_CH_0_ERR (2 << 28) /* ch 0 errors mask */ 214 + #define TALITOS1_ISR_CH_0_DONE (1 << 28) /* ch 0 done mask */ 211 215 #define TALITOS1_ISR_TEA_ERR 0x00000040 212 216 #define TALITOS2_ISR_4CHERR ISR2_FORMAT(0xa) /* 4 ch errors mask */ 213 217 #define TALITOS2_ISR_4CHDONE ISR2_FORMAT(0x5) /* 4 ch done mask */ 218 + #define TALITOS2_ISR_CH_0_ERR 2 /* ch 0 errors mask */ 219 + #define TALITOS2_ISR_CH_0_DONE 1 /* ch 0 done mask */ 214 220 #define TALITOS2_ISR_CH_0_2_ERR ISR2_FORMAT(0x2) /* ch 0, 2 err mask */ 215 221 #define TALITOS2_ISR_CH_0_2_DONE ISR2_FORMAT(0x1) /* ch 0, 2 done mask */ 216 222 #define TALITOS2_ISR_CH_1_3_ERR ISR2_FORMAT(0x8) /* ch 1, 3 err mask */ ··· 236 234 #define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ 237 235 #define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */ 238 236 #define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */ 237 + #define TALITOS_CCCR_LO_NE 0x8 /* fetch next descriptor enab. */ 239 238 #define TALITOS_CCCR_LO_NT 0x4 /* notification type */ 240 239 #define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ 241 240 #define TALITOS1_CCCR_LO_RESET 0x1 /* channel reset on SEC1 */
-1
drivers/crypto/ux500/cryp/cryp_core.c
··· 1751 1751 { 1752 1752 pr_debug("[%s] is called!", __func__); 1753 1753 platform_driver_unregister(&cryp_driver); 1754 - return; 1755 1754 } 1756 1755 1757 1756 module_init(ux500_cryp_mod_init);
+1 -1
drivers/crypto/virtio/virtio_crypto_algs.c
··· 319 319 struct virtio_crypto *vcrypto = 320 320 virtcrypto_get_dev_node(node); 321 321 if (!vcrypto) { 322 - pr_err("virtio_crypto: Could not find a virtio device in the system"); 322 + pr_err("virtio_crypto: Could not find a virtio device in the system\n"); 323 323 return -ENODEV; 324 324 } 325 325
+17 -16
drivers/crypto/vmx/aes_ctr.c
··· 27 27 #include <asm/switch_to.h> 28 28 #include <crypto/aes.h> 29 29 #include <crypto/scatterwalk.h> 30 + #include <crypto/skcipher.h> 31 + 30 32 #include "aesp8-ppc.h" 31 33 32 34 struct p8_aes_ctr_ctx { 33 - struct crypto_blkcipher *fallback; 35 + struct crypto_skcipher *fallback; 34 36 struct aes_key enc_key; 35 37 }; 36 38 37 39 static int p8_aes_ctr_init(struct crypto_tfm *tfm) 38 40 { 39 41 const char *alg = crypto_tfm_alg_name(tfm); 40 - struct crypto_blkcipher *fallback; 42 + struct crypto_skcipher *fallback; 41 43 struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); 42 44 43 - fallback = 44 - crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); 45 + fallback = crypto_alloc_skcipher(alg, 0, 46 + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); 45 47 if (IS_ERR(fallback)) { 46 48 printk(KERN_ERR 47 49 "Failed to allocate transformation for '%s': %ld\n", ··· 51 49 return PTR_ERR(fallback); 52 50 } 53 51 printk(KERN_INFO "Using '%s' as fallback implementation.\n", 54 - crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback)); 52 + crypto_skcipher_driver_name(fallback)); 55 53 56 - crypto_blkcipher_set_flags( 54 + crypto_skcipher_set_flags( 57 55 fallback, 58 - crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm)); 56 + crypto_skcipher_get_flags((struct crypto_skcipher *)tfm)); 59 57 ctx->fallback = fallback; 60 58 61 59 return 0; ··· 66 64 struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm); 67 65 68 66 if (ctx->fallback) { 69 - crypto_free_blkcipher(ctx->fallback); 67 + crypto_free_skcipher(ctx->fallback); 70 68 ctx->fallback = NULL; 71 69 } 72 70 } ··· 85 83 pagefault_enable(); 86 84 preempt_enable(); 87 85 88 - ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); 86 + ret += crypto_skcipher_setkey(ctx->fallback, key, keylen); 89 87 return ret; 90 88 } 91 89 ··· 119 117 struct blkcipher_walk walk; 120 118 struct p8_aes_ctr_ctx *ctx = 121 119 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); 122 - struct blkcipher_desc fallback_desc = { 123 - 
.tfm = ctx->fallback, 124 - .info = desc->info, 125 - .flags = desc->flags 126 - }; 127 120 128 121 if (in_interrupt()) { 129 - ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, 130 - nbytes); 122 + SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback); 123 + skcipher_request_set_tfm(req, ctx->fallback); 124 + skcipher_request_set_callback(req, desc->flags, NULL, NULL); 125 + skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); 126 + ret = crypto_skcipher_encrypt(req); 127 + skcipher_request_zero(req); 131 128 } else { 132 129 blkcipher_walk_init(&walk, dst, src, nbytes); 133 130 ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+20 -61
drivers/md/dm-verity-target.c
··· 92 92 return block >> (level * v->hash_per_block_bits); 93 93 } 94 94 95 - /* 96 - * Callback function for asynchrnous crypto API completion notification 97 - */ 98 - static void verity_op_done(struct crypto_async_request *base, int err) 99 - { 100 - struct verity_result *res = (struct verity_result *)base->data; 101 - 102 - if (err == -EINPROGRESS) 103 - return; 104 - 105 - res->err = err; 106 - complete(&res->completion); 107 - } 108 - 109 - /* 110 - * Wait for async crypto API callback 111 - */ 112 - static inline int verity_complete_op(struct verity_result *res, int ret) 113 - { 114 - switch (ret) { 115 - case 0: 116 - break; 117 - 118 - case -EINPROGRESS: 119 - case -EBUSY: 120 - ret = wait_for_completion_interruptible(&res->completion); 121 - if (!ret) 122 - ret = res->err; 123 - reinit_completion(&res->completion); 124 - break; 125 - 126 - default: 127 - DMERR("verity_wait_hash: crypto op submission failed: %d", ret); 128 - } 129 - 130 - if (unlikely(ret < 0)) 131 - DMERR("verity_wait_hash: crypto op failed: %d", ret); 132 - 133 - return ret; 134 - } 135 - 136 95 static int verity_hash_update(struct dm_verity *v, struct ahash_request *req, 137 96 const u8 *data, size_t len, 138 - struct verity_result *res) 97 + struct crypto_wait *wait) 139 98 { 140 99 struct scatterlist sg; 141 100 142 101 sg_init_one(&sg, data, len); 143 102 ahash_request_set_crypt(req, &sg, NULL, len); 144 103 145 - return verity_complete_op(res, crypto_ahash_update(req)); 104 + return crypto_wait_req(crypto_ahash_update(req), wait); 146 105 } 147 106 148 107 /* 149 108 * Wrapper for crypto_ahash_init, which handles verity salting. 
150 109 */ 151 110 static int verity_hash_init(struct dm_verity *v, struct ahash_request *req, 152 - struct verity_result *res) 111 + struct crypto_wait *wait) 153 112 { 154 113 int r; 155 114 156 115 ahash_request_set_tfm(req, v->tfm); 157 116 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP | 158 117 CRYPTO_TFM_REQ_MAY_BACKLOG, 159 - verity_op_done, (void *)res); 160 - init_completion(&res->completion); 118 + crypto_req_done, (void *)wait); 119 + crypto_init_wait(wait); 161 120 162 - r = verity_complete_op(res, crypto_ahash_init(req)); 121 + r = crypto_wait_req(crypto_ahash_init(req), wait); 163 122 164 123 if (unlikely(r < 0)) { 165 124 DMERR("crypto_ahash_init failed: %d", r); ··· 126 167 } 127 168 128 169 if (likely(v->salt_size && (v->version >= 1))) 129 - r = verity_hash_update(v, req, v->salt, v->salt_size, res); 170 + r = verity_hash_update(v, req, v->salt, v->salt_size, wait); 130 171 131 172 return r; 132 173 } 133 174 134 175 static int verity_hash_final(struct dm_verity *v, struct ahash_request *req, 135 - u8 *digest, struct verity_result *res) 176 + u8 *digest, struct crypto_wait *wait) 136 177 { 137 178 int r; 138 179 139 180 if (unlikely(v->salt_size && (!v->version))) { 140 - r = verity_hash_update(v, req, v->salt, v->salt_size, res); 181 + r = verity_hash_update(v, req, v->salt, v->salt_size, wait); 141 182 142 183 if (r < 0) { 143 184 DMERR("verity_hash_final failed updating salt: %d", r); ··· 146 187 } 147 188 148 189 ahash_request_set_crypt(req, NULL, digest, 0); 149 - r = verity_complete_op(res, crypto_ahash_final(req)); 190 + r = crypto_wait_req(crypto_ahash_final(req), wait); 150 191 out: 151 192 return r; 152 193 } ··· 155 196 const u8 *data, size_t len, u8 *digest) 156 197 { 157 198 int r; 158 - struct verity_result res; 199 + struct crypto_wait wait; 159 200 160 - r = verity_hash_init(v, req, &res); 201 + r = verity_hash_init(v, req, &wait); 161 202 if (unlikely(r < 0)) 162 203 goto out; 163 204 164 - r = verity_hash_update(v, 
req, data, len, &res); 205 + r = verity_hash_update(v, req, data, len, &wait); 165 206 if (unlikely(r < 0)) 166 207 goto out; 167 208 168 - r = verity_hash_final(v, req, digest, &res); 209 + r = verity_hash_final(v, req, digest, &wait); 169 210 170 211 out: 171 212 return r; ··· 348 389 * Calculates the digest for the given bio 349 390 */ 350 391 int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io, 351 - struct bvec_iter *iter, struct verity_result *res) 392 + struct bvec_iter *iter, struct crypto_wait *wait) 352 393 { 353 394 unsigned int todo = 1 << v->data_dev_block_bits; 354 395 struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); ··· 373 414 */ 374 415 sg_set_page(&sg, bv.bv_page, len, bv.bv_offset); 375 416 ahash_request_set_crypt(req, &sg, NULL, len); 376 - r = verity_complete_op(res, crypto_ahash_update(req)); 417 + r = crypto_wait_req(crypto_ahash_update(req), wait); 377 418 378 419 if (unlikely(r < 0)) { 379 420 DMERR("verity_for_io_block crypto op failed: %d", r); ··· 441 482 struct dm_verity *v = io->v; 442 483 struct bvec_iter start; 443 484 unsigned b; 444 - struct verity_result res; 485 + struct crypto_wait wait; 445 486 446 487 for (b = 0; b < io->n_blocks; b++) { 447 488 int r; ··· 466 507 continue; 467 508 } 468 509 469 - r = verity_hash_init(v, req, &res); 510 + r = verity_hash_init(v, req, &wait); 470 511 if (unlikely(r < 0)) 471 512 return r; 472 513 473 514 start = io->iter; 474 - r = verity_for_io_block(v, io, &io->iter, &res); 515 + r = verity_for_io_block(v, io, &io->iter, &wait); 475 516 if (unlikely(r < 0)) 476 517 return r; 477 518 478 519 r = verity_hash_final(v, req, verity_io_real_digest(v, io), 479 - &res); 520 + &wait); 480 521 if (unlikely(r < 0)) 481 522 return r; 482 523
-5
drivers/md/dm-verity.h
··· 90 90 */ 91 91 }; 92 92 93 - struct verity_result { 94 - struct completion completion; 95 - int err; 96 - }; 97 - 98 93 static inline struct ahash_request *verity_io_hash_req(struct dm_verity *v, 99 94 struct dm_verity_io *io) 100 95 {
+7 -1
drivers/net/ethernet/chelsio/cxgb4/sge.c
··· 1537 1537 */ 1538 1538 static inline int is_ofld_imm(const struct sk_buff *skb) 1539 1539 { 1540 - return skb->len <= MAX_IMM_TX_PKT_LEN; 1540 + struct work_request_hdr *req = (struct work_request_hdr *)skb->data; 1541 + unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi)); 1542 + 1543 + if (opcode == FW_CRYPTO_LOOKASIDE_WR) 1544 + return skb->len <= SGE_MAX_WR_LEN; 1545 + else 1546 + return skb->len <= MAX_IMM_TX_PKT_LEN; 1541 1547 } 1542 1548 1543 1549 /**
+4 -26
fs/cifs/smb2ops.c
··· 2087 2087 return sg; 2088 2088 } 2089 2089 2090 - struct cifs_crypt_result { 2091 - int err; 2092 - struct completion completion; 2093 - }; 2094 - 2095 - static void cifs_crypt_complete(struct crypto_async_request *req, int err) 2096 - { 2097 - struct cifs_crypt_result *res = req->data; 2098 - 2099 - if (err == -EINPROGRESS) 2100 - return; 2101 - 2102 - res->err = err; 2103 - complete(&res->completion); 2104 - } 2105 - 2106 2090 static int 2107 2091 smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key) 2108 2092 { ··· 2127 2143 struct aead_request *req; 2128 2144 char *iv; 2129 2145 unsigned int iv_len; 2130 - struct cifs_crypt_result result = {0, }; 2146 + DECLARE_CRYPTO_WAIT(wait); 2131 2147 struct crypto_aead *tfm; 2132 2148 unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize); 2133 - 2134 - init_completion(&result.completion); 2135 2149 2136 2150 rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key); 2137 2151 if (rc) { ··· 2190 2208 aead_request_set_ad(req, assoc_data_len); 2191 2209 2192 2210 aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 2193 - cifs_crypt_complete, &result); 2211 + crypto_req_done, &wait); 2194 2212 2195 - rc = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); 2196 - 2197 - if (rc == -EINPROGRESS || rc == -EBUSY) { 2198 - wait_for_completion(&result.completion); 2199 - rc = result.err; 2200 - } 2213 + rc = crypto_wait_req(enc ? crypto_aead_encrypt(req) 2214 + : crypto_aead_decrypt(req), &wait); 2201 2215 2202 2216 if (!rc && enc) 2203 2217 memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
+4 -24
fs/crypto/crypto.c
··· 126 126 } 127 127 EXPORT_SYMBOL(fscrypt_get_ctx); 128 128 129 - /** 130 - * page_crypt_complete() - completion callback for page crypto 131 - * @req: The asynchronous cipher request context 132 - * @res: The result of the cipher operation 133 - */ 134 - static void page_crypt_complete(struct crypto_async_request *req, int res) 135 - { 136 - struct fscrypt_completion_result *ecr = req->data; 137 - 138 - if (res == -EINPROGRESS) 139 - return; 140 - ecr->res = res; 141 - complete(&ecr->completion); 142 - } 143 - 144 129 int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw, 145 130 u64 lblk_num, struct page *src_page, 146 131 struct page *dest_page, unsigned int len, ··· 136 151 u8 padding[FS_IV_SIZE - sizeof(__le64)]; 137 152 } iv; 138 153 struct skcipher_request *req = NULL; 139 - DECLARE_FS_COMPLETION_RESULT(ecr); 154 + DECLARE_CRYPTO_WAIT(wait); 140 155 struct scatterlist dst, src; 141 156 struct fscrypt_info *ci = inode->i_crypt_info; 142 157 struct crypto_skcipher *tfm = ci->ci_ctfm; ··· 164 179 165 180 skcipher_request_set_callback( 166 181 req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 167 - page_crypt_complete, &ecr); 182 + crypto_req_done, &wait); 168 183 169 184 sg_init_table(&dst, 1); 170 185 sg_set_page(&dst, dest_page, len, offs); ··· 172 187 sg_set_page(&src, src_page, len, offs); 173 188 skcipher_request_set_crypt(req, &src, &dst, len, &iv); 174 189 if (rw == FS_DECRYPT) 175 - res = crypto_skcipher_decrypt(req); 190 + res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait); 176 191 else 177 - res = crypto_skcipher_encrypt(req); 178 - if (res == -EINPROGRESS || res == -EBUSY) { 179 - BUG_ON(req->base.data != &ecr); 180 - wait_for_completion(&ecr.completion); 181 - res = ecr.res; 182 - } 192 + res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); 183 193 skcipher_request_free(req); 184 194 if (res) { 185 195 printk_ratelimited(KERN_ERR
+6 -30
fs/crypto/fname.c
··· 16 16 #include "fscrypt_private.h" 17 17 18 18 /** 19 - * fname_crypt_complete() - completion callback for filename crypto 20 - * @req: The asynchronous cipher request context 21 - * @res: The result of the cipher operation 22 - */ 23 - static void fname_crypt_complete(struct crypto_async_request *req, int res) 24 - { 25 - struct fscrypt_completion_result *ecr = req->data; 26 - 27 - if (res == -EINPROGRESS) 28 - return; 29 - ecr->res = res; 30 - complete(&ecr->completion); 31 - } 32 - 33 - /** 34 19 * fname_encrypt() - encrypt a filename 35 20 * 36 21 * The caller must have allocated sufficient memory for the @oname string. ··· 26 41 const struct qstr *iname, struct fscrypt_str *oname) 27 42 { 28 43 struct skcipher_request *req = NULL; 29 - DECLARE_FS_COMPLETION_RESULT(ecr); 44 + DECLARE_CRYPTO_WAIT(wait); 30 45 struct fscrypt_info *ci = inode->i_crypt_info; 31 46 struct crypto_skcipher *tfm = ci->ci_ctfm; 32 47 int res = 0; ··· 62 77 } 63 78 skcipher_request_set_callback(req, 64 79 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 65 - fname_crypt_complete, &ecr); 80 + crypto_req_done, &wait); 66 81 sg_init_one(&sg, oname->name, cryptlen); 67 82 skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv); 68 83 69 84 /* Do the encryption */ 70 - res = crypto_skcipher_encrypt(req); 71 - if (res == -EINPROGRESS || res == -EBUSY) { 72 - /* Request is being completed asynchronously; wait for it */ 73 - wait_for_completion(&ecr.completion); 74 - res = ecr.res; 75 - } 85 + res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); 76 86 skcipher_request_free(req); 77 87 if (res < 0) { 78 88 printk_ratelimited(KERN_ERR ··· 91 111 struct fscrypt_str *oname) 92 112 { 93 113 struct skcipher_request *req = NULL; 94 - DECLARE_FS_COMPLETION_RESULT(ecr); 114 + DECLARE_CRYPTO_WAIT(wait); 95 115 struct scatterlist src_sg, dst_sg; 96 116 struct fscrypt_info *ci = inode->i_crypt_info; 97 117 struct crypto_skcipher *tfm = ci->ci_ctfm; ··· 112 132 } 113 133 
skcipher_request_set_callback(req, 114 134 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 115 - fname_crypt_complete, &ecr); 135 + crypto_req_done, &wait); 116 136 117 137 /* Initialize IV */ 118 138 memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); ··· 121 141 sg_init_one(&src_sg, iname->name, iname->len); 122 142 sg_init_one(&dst_sg, oname->name, oname->len); 123 143 skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv); 124 - res = crypto_skcipher_decrypt(req); 125 - if (res == -EINPROGRESS || res == -EBUSY) { 126 - wait_for_completion(&ecr.completion); 127 - res = ecr.res; 128 - } 144 + res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait); 129 145 skcipher_request_free(req); 130 146 if (res < 0) { 131 147 printk_ratelimited(KERN_ERR
-10
fs/crypto/fscrypt_private.h
··· 70 70 #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 71 71 #define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002 72 72 73 - struct fscrypt_completion_result { 74 - struct completion completion; 75 - int res; 76 - }; 77 - 78 - #define DECLARE_FS_COMPLETION_RESULT(ecr) \ 79 - struct fscrypt_completion_result ecr = { \ 80 - COMPLETION_INITIALIZER_ONSTACK((ecr).completion), 0 } 81 - 82 - 83 73 /* crypto.c */ 84 74 extern int fscrypt_initialize(unsigned int cop_flags); 85 75 extern struct workqueue_struct *fscrypt_read_workqueue;
+3 -18
fs/crypto/keyinfo.c
··· 18 18 19 19 static struct crypto_shash *essiv_hash_tfm; 20 20 21 - static void derive_crypt_complete(struct crypto_async_request *req, int rc) 22 - { 23 - struct fscrypt_completion_result *ecr = req->data; 24 - 25 - if (rc == -EINPROGRESS) 26 - return; 27 - 28 - ecr->res = rc; 29 - complete(&ecr->completion); 30 - } 31 - 32 21 /** 33 22 * derive_key_aes() - Derive a key using AES-128-ECB 34 23 * @deriving_key: Encryption key used for derivation. ··· 32 43 { 33 44 int res = 0; 34 45 struct skcipher_request *req = NULL; 35 - DECLARE_FS_COMPLETION_RESULT(ecr); 46 + DECLARE_CRYPTO_WAIT(wait); 36 47 struct scatterlist src_sg, dst_sg; 37 48 struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); 38 49 ··· 49 60 } 50 61 skcipher_request_set_callback(req, 51 62 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 52 - derive_crypt_complete, &ecr); 63 + crypto_req_done, &wait); 53 64 res = crypto_skcipher_setkey(tfm, deriving_key, 54 65 FS_AES_128_ECB_KEY_SIZE); 55 66 if (res < 0) ··· 59 70 sg_init_one(&dst_sg, derived_raw_key, source_key->size); 60 71 skcipher_request_set_crypt(req, &src_sg, &dst_sg, source_key->size, 61 72 NULL); 62 - res = crypto_skcipher_encrypt(req); 63 - if (res == -EINPROGRESS || res == -EBUSY) { 64 - wait_for_completion(&ecr.completion); 65 - res = ecr.res; 66 - } 73 + res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); 67 74 out: 68 75 skcipher_request_free(req); 69 76 crypto_free_skcipher(tfm);
+1 -1
include/crypto/dh.h
··· 53 53 * 54 54 * Return: size of the key in bytes 55 55 */ 56 - int crypto_dh_key_len(const struct dh *params); 56 + unsigned int crypto_dh_key_len(const struct dh *params); 57 57 58 58 /** 59 59 * crypto_dh_encode_key() - encode the private key
+1 -2
include/crypto/drbg.h
··· 126 126 __u8 *ctr_null_value; /* CTR mode aligned zero buf */ 127 127 __u8 *outscratchpadbuf; /* CTR mode output scratchpad */ 128 128 __u8 *outscratchpad; /* CTR mode aligned outbuf */ 129 - struct completion ctr_completion; /* CTR mode async handler */ 130 - int ctr_async_err; /* CTR mode async error */ 129 + struct crypto_wait ctr_wait; /* CTR mode async wait obj */ 131 130 132 131 bool seeded; /* DRBG fully seeded? */ 133 132 bool pr; /* Prediction resistance enabled? */
+1 -1
include/crypto/ecdh.h
··· 54 54 * 55 55 * Return: size of the key in bytes 56 56 */ 57 - int crypto_ecdh_key_len(const struct ecdh *params); 57 + unsigned int crypto_ecdh_key_len(const struct ecdh *params); 58 58 59 59 /** 60 60 * crypto_ecdh_encode_key() - encode the private key
+8
include/crypto/gcm.h
··· 1 + #ifndef _CRYPTO_GCM_H 2 + #define _CRYPTO_GCM_H 3 + 4 + #define GCM_AES_IV_SIZE 12 5 + #define GCM_RFC4106_IV_SIZE 8 6 + #define GCM_RFC4543_IV_SIZE 8 7 + 8 + #endif
+1 -1
include/crypto/gf128mul.h
··· 227 227 struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g); 228 228 void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t); 229 229 void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t); 230 - 230 + void gf128mul_x8_ble(le128 *r, const le128 *x); 231 231 static inline void gf128mul_free_4k(struct gf128mul_4k *t) 232 232 { 233 233 kzfree(t);
+16 -12
include/crypto/hash.h
··· 75 75 * state of the HASH transformation at the beginning. This shall fill in 76 76 * the internal structures used during the entire duration of the whole 77 77 * transformation. No data processing happens at this point. 78 + * Note: mandatory. 78 79 * @update: Push a chunk of data into the driver for transformation. This 79 80 * function actually pushes blocks of data from upper layers into the 80 81 * driver, which then passes those to the hardware as seen fit. This ··· 85 84 * context, as this function may be called in parallel with the same 86 85 * transformation object. Data processing can happen synchronously 87 86 * [SHASH] or asynchronously [AHASH] at this point. 87 + * Note: mandatory. 88 88 * @final: Retrieve result from the driver. This function finalizes the 89 89 * transformation and retrieves the resulting hash from the driver and 90 90 * pushes it back to upper layers. No data processing happens at this 91 - * point. 91 + * point unless hardware requires it to finish the transformation 92 + * (then the data buffered by the device driver is processed). 93 + * Note: mandatory. 92 94 * @finup: Combination of @update and @final. This function is effectively a 93 95 * combination of @update and @final calls issued in sequence. As some 94 96 * hardware cannot do @update and @final separately, this callback was 95 97 * added to allow such hardware to be used at least by IPsec. Data 96 98 * processing can happen synchronously [SHASH] or asynchronously [AHASH] 97 99 * at this point. 100 + * Note: optional. 98 101 * @digest: Combination of @init and @update and @final. This function 99 102 * effectively behaves as the entire chain of operations, @init, 100 103 * @update and @final issued in sequence. Just like @finup, this was ··· 421 416 * needed to perform the cipher operation 422 417 * 423 418 * This function is a "short-hand" for the function calls of 424 - * crypto_ahash_update and crypto_shash_final. 
The parameters have the same 419 + * crypto_ahash_update and crypto_ahash_final. The parameters have the same 425 420 * meaning as discussed for those separate functions. 426 421 * 427 - * Return: 0 if the message digest creation was successful; < 0 if an error 428 - * occurred 422 + * Return: see crypto_ahash_final() 429 423 */ 430 424 int crypto_ahash_finup(struct ahash_request *req); 431 425 ··· 437 433 * based on all data added to the cipher handle. The message digest is placed 438 434 * into the output buffer registered with the ahash_request handle. 439 435 * 440 - * Return: 0 if the message digest creation was successful; < 0 if an error 441 - * occurred 436 + * Return: 437 + * 0 if the message digest was successfully calculated; 438 + * -EINPROGRESS if data is feeded into hardware (DMA) or queued for later; 439 + * -EBUSY if queue is full and request should be resubmitted later; 440 + * other < 0 if an error occurred 442 441 */ 443 442 int crypto_ahash_final(struct ahash_request *req); 444 443 ··· 454 447 * crypto_ahash_update and crypto_ahash_final. The parameters have the same 455 448 * meaning as discussed for those separate three functions. 456 449 * 457 - * Return: 0 if the message digest creation was successful; < 0 if an error 458 - * occurred 450 + * Return: see crypto_ahash_final() 459 451 */ 460 452 int crypto_ahash_digest(struct ahash_request *req); 461 453 ··· 499 493 * handle. Any potentially existing state created by previous operations is 500 494 * discarded. 
501 495 * 502 - * Return: 0 if the message digest initialization was successful; < 0 if an 503 - * error occurred 496 + * Return: see crypto_ahash_final() 504 497 */ 505 498 static inline int crypto_ahash_init(struct ahash_request *req) 506 499 { ··· 515 510 * is pointed to by the scatter/gather list registered in the &ahash_request 516 511 * handle 517 512 * 518 - * Return: 0 if the message digest update was successful; < 0 if an error 519 - * occurred 513 + * Return: see crypto_ahash_final() 520 514 */ 521 515 static inline int crypto_ahash_update(struct ahash_request *req) 522 516 {
+1 -14
include/crypto/if_alg.h
··· 40 40 void *private; 41 41 }; 42 42 43 - struct af_alg_completion { 44 - struct completion completion; 45 - int err; 46 - }; 47 - 48 43 struct af_alg_control { 49 44 struct af_alg_iv *iv; 50 45 int op; ··· 147 152 void *iv; 148 153 size_t aead_assoclen; 149 154 150 - struct af_alg_completion completion; 155 + struct crypto_wait wait; 151 156 152 157 size_t used; 153 158 size_t rcvused; ··· 172 177 173 178 int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con); 174 179 175 - int af_alg_wait_for_completion(int err, struct af_alg_completion *completion); 176 - void af_alg_complete(struct crypto_async_request *req, int err); 177 - 178 180 static inline struct alg_sock *alg_sk(struct sock *sk) 179 181 { 180 182 return (struct alg_sock *)sk; 181 - } 182 - 183 - static inline void af_alg_init_completion(struct af_alg_completion *completion) 184 - { 185 - init_completion(&completion->completion); 186 183 } 187 184 188 185 /**
+40
include/crypto/sm3.h
··· 1 + /* 2 + * Common values for SM3 algorithm 3 + */ 4 + 5 + #ifndef _CRYPTO_SM3_H 6 + #define _CRYPTO_SM3_H 7 + 8 + #include <linux/types.h> 9 + 10 + #define SM3_DIGEST_SIZE 32 11 + #define SM3_BLOCK_SIZE 64 12 + 13 + #define SM3_T1 0x79CC4519 14 + #define SM3_T2 0x7A879D8A 15 + 16 + #define SM3_IVA 0x7380166f 17 + #define SM3_IVB 0x4914b2b9 18 + #define SM3_IVC 0x172442d7 19 + #define SM3_IVD 0xda8a0600 20 + #define SM3_IVE 0xa96f30bc 21 + #define SM3_IVF 0x163138aa 22 + #define SM3_IVG 0xe38dee4d 23 + #define SM3_IVH 0xb0fb0e4e 24 + 25 + extern const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE]; 26 + 27 + struct sm3_state { 28 + u32 state[SM3_DIGEST_SIZE / 4]; 29 + u64 count; 30 + u8 buffer[SM3_BLOCK_SIZE]; 31 + }; 32 + 33 + struct shash_desc; 34 + 35 + extern int crypto_sm3_update(struct shash_desc *desc, const u8 *data, 36 + unsigned int len); 37 + 38 + extern int crypto_sm3_finup(struct shash_desc *desc, const u8 *data, 39 + unsigned int len, u8 *hash); 40 + #endif
+117
include/crypto/sm3_base.h
··· 1 + /* 2 + * sm3_base.h - core logic for SM3 implementations 3 + * 4 + * Copyright (C) 2017 ARM Limited or its affiliates. 5 + * Written by Gilad Ben-Yossef <gilad@benyossef.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 + */ 19 + 20 + #include <crypto/internal/hash.h> 21 + #include <crypto/sm3.h> 22 + #include <linux/crypto.h> 23 + #include <linux/module.h> 24 + #include <asm/unaligned.h> 25 + 26 + typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks); 27 + 28 + static inline int sm3_base_init(struct shash_desc *desc) 29 + { 30 + struct sm3_state *sctx = shash_desc_ctx(desc); 31 + 32 + sctx->state[0] = SM3_IVA; 33 + sctx->state[1] = SM3_IVB; 34 + sctx->state[2] = SM3_IVC; 35 + sctx->state[3] = SM3_IVD; 36 + sctx->state[4] = SM3_IVE; 37 + sctx->state[5] = SM3_IVF; 38 + sctx->state[6] = SM3_IVG; 39 + sctx->state[7] = SM3_IVH; 40 + sctx->count = 0; 41 + 42 + return 0; 43 + } 44 + 45 + static inline int sm3_base_do_update(struct shash_desc *desc, 46 + const u8 *data, 47 + unsigned int len, 48 + sm3_block_fn *block_fn) 49 + { 50 + struct sm3_state *sctx = shash_desc_ctx(desc); 51 + unsigned int partial = sctx->count % SM3_BLOCK_SIZE; 52 + 53 + sctx->count += len; 54 + 55 + if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) { 56 + int blocks; 57 + 58 + if (partial) { 59 + int p = SM3_BLOCK_SIZE - partial; 60 + 61 + memcpy(sctx->buffer + partial, data, p); 62 + data += 
p; 63 + len -= p; 64 + 65 + block_fn(sctx, sctx->buffer, 1); 66 + } 67 + 68 + blocks = len / SM3_BLOCK_SIZE; 69 + len %= SM3_BLOCK_SIZE; 70 + 71 + if (blocks) { 72 + block_fn(sctx, data, blocks); 73 + data += blocks * SM3_BLOCK_SIZE; 74 + } 75 + partial = 0; 76 + } 77 + if (len) 78 + memcpy(sctx->buffer + partial, data, len); 79 + 80 + return 0; 81 + } 82 + 83 + static inline int sm3_base_do_finalize(struct shash_desc *desc, 84 + sm3_block_fn *block_fn) 85 + { 86 + const int bit_offset = SM3_BLOCK_SIZE - sizeof(__be64); 87 + struct sm3_state *sctx = shash_desc_ctx(desc); 88 + __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); 89 + unsigned int partial = sctx->count % SM3_BLOCK_SIZE; 90 + 91 + sctx->buffer[partial++] = 0x80; 92 + if (partial > bit_offset) { 93 + memset(sctx->buffer + partial, 0x0, SM3_BLOCK_SIZE - partial); 94 + partial = 0; 95 + 96 + block_fn(sctx, sctx->buffer, 1); 97 + } 98 + 99 + memset(sctx->buffer + partial, 0x0, bit_offset - partial); 100 + *bits = cpu_to_be64(sctx->count << 3); 101 + block_fn(sctx, sctx->buffer, 1); 102 + 103 + return 0; 104 + } 105 + 106 + static inline int sm3_base_finish(struct shash_desc *desc, u8 *out) 107 + { 108 + struct sm3_state *sctx = shash_desc_ctx(desc); 109 + __be32 *digest = (__be32 *)out; 110 + int i; 111 + 112 + for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++) 113 + put_unaligned_be32(sctx->state[i], digest++); 114 + 115 + *sctx = (struct sm3_state){}; 116 + return 0; 117 + }
+40
include/linux/crypto.h
··· 24 24 #include <linux/slab.h> 25 25 #include <linux/string.h> 26 26 #include <linux/uaccess.h> 27 + #include <linux/completion.h> 27 28 28 29 /* 29 30 * Autoloaded crypto modules should only use a prefixed name to avoid allowing ··· 467 466 468 467 struct module *cra_module; 469 468 } CRYPTO_MINALIGN_ATTR; 469 + 470 + /* 471 + * A helper struct for waiting for completion of async crypto ops 472 + */ 473 + struct crypto_wait { 474 + struct completion completion; 475 + int err; 476 + }; 477 + 478 + /* 479 + * Macro for declaring a crypto op async wait object on stack 480 + */ 481 + #define DECLARE_CRYPTO_WAIT(_wait) \ 482 + struct crypto_wait _wait = { \ 483 + COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 } 484 + 485 + /* 486 + * Async ops completion helper functioons 487 + */ 488 + void crypto_req_done(struct crypto_async_request *req, int err); 489 + 490 + static inline int crypto_wait_req(int err, struct crypto_wait *wait) 491 + { 492 + switch (err) { 493 + case -EINPROGRESS: 494 + case -EBUSY: 495 + wait_for_completion(&wait->completion); 496 + reinit_completion(&wait->completion); 497 + err = wait->err; 498 + break; 499 + }; 500 + 501 + return err; 502 + } 503 + 504 + static inline void crypto_init_wait(struct crypto_wait *wait) 505 + { 506 + init_completion(&wait->completion); 507 + } 470 508 471 509 /* 472 510 * Algorithm registration interface.
+4
include/linux/padata.h
··· 37 37 * @list: List entry, to attach to the padata lists. 38 38 * @pd: Pointer to the internal control structure. 39 39 * @cb_cpu: Callback cpu for serializatioon. 40 + * @cpu: Cpu for parallelization. 40 41 * @seq_nr: Sequence number of the parallelized data object. 41 42 * @info: Used to pass information from the parallel to the serial function. 42 43 * @parallel: Parallel execution function. ··· 47 46 struct list_head list; 48 47 struct parallel_data *pd; 49 48 int cb_cpu; 49 + int cpu; 50 50 int info; 51 51 void (*parallel)(struct padata_priv *padata); 52 52 void (*serial)(struct padata_priv *padata); ··· 87 85 * @swork: work struct for serialization. 88 86 * @pd: Backpointer to the internal control structure. 89 87 * @work: work struct for parallelization. 88 + * @reorder_work: work struct for reordering. 90 89 * @num_obj: Number of objects that are processed by this cpu. 91 90 * @cpu_index: Index of the cpu. 92 91 */ ··· 96 93 struct padata_list reorder; 97 94 struct parallel_data *pd; 98 95 struct work_struct work; 96 + struct work_struct reorder_work; 99 97 atomic_t num_obj; 100 98 int cpu_index; 101 99 };
+68 -3
kernel/padata.c
··· 131 131 padata->cb_cpu = cb_cpu; 132 132 133 133 target_cpu = padata_cpu_hash(pd); 134 + padata->cpu = target_cpu; 134 135 queue = per_cpu_ptr(pd->pqueue, target_cpu); 135 136 136 137 spin_lock(&queue->parallel.lock); ··· 276 275 return; 277 276 } 278 277 278 + static void invoke_padata_reorder(struct work_struct *work) 279 + { 280 + struct padata_parallel_queue *pqueue; 281 + struct parallel_data *pd; 282 + 283 + local_bh_disable(); 284 + pqueue = container_of(work, struct padata_parallel_queue, reorder_work); 285 + pd = pqueue->pd; 286 + padata_reorder(pd); 287 + local_bh_enable(); 288 + } 289 + 279 290 static void padata_reorder_timer(unsigned long arg) 280 291 { 281 292 struct parallel_data *pd = (struct parallel_data *)arg; 293 + unsigned int weight; 294 + int target_cpu, cpu; 282 295 283 - padata_reorder(pd); 296 + cpu = get_cpu(); 297 + 298 + /* We don't lock pd here to not interfere with parallel processing 299 + * padata_reorder() calls on other CPUs. We just need any CPU out of 300 + * the cpumask.pcpu set. It would be nice if it's the right one but 301 + * it doesn't matter if we're off to the next one by using an outdated 302 + * pd->processed value. 303 + */ 304 + weight = cpumask_weight(pd->cpumask.pcpu); 305 + target_cpu = padata_index_to_cpu(pd, pd->processed % weight); 306 + 307 + /* ensure to call the reorder callback on the correct CPU */ 308 + if (cpu != target_cpu) { 309 + struct padata_parallel_queue *pqueue; 310 + struct padata_instance *pinst; 311 + 312 + /* The timer function is serialized wrt itself -- no locking 313 + * needed. 
314 + */ 315 + pinst = pd->pinst; 316 + pqueue = per_cpu_ptr(pd->pqueue, target_cpu); 317 + queue_work_on(target_cpu, pinst->wq, &pqueue->reorder_work); 318 + } else { 319 + padata_reorder(pd); 320 + } 321 + 322 + put_cpu(); 284 323 } 285 324 286 325 static void padata_serial_worker(struct work_struct *serial_work) ··· 364 323 int cpu; 365 324 struct padata_parallel_queue *pqueue; 366 325 struct parallel_data *pd; 326 + int reorder_via_wq = 0; 367 327 368 328 pd = padata->pd; 369 329 370 330 cpu = get_cpu(); 331 + 332 + /* We need to run on the same CPU padata_do_parallel(.., padata, ..) 333 + * was called on -- or, at least, enqueue the padata object into the 334 + * correct per-cpu queue. 335 + */ 336 + if (cpu != padata->cpu) { 337 + reorder_via_wq = 1; 338 + cpu = padata->cpu; 339 + } 340 + 371 341 pqueue = per_cpu_ptr(pd->pqueue, cpu); 372 342 373 343 spin_lock(&pqueue->reorder.lock); ··· 388 336 389 337 put_cpu(); 390 338 391 - padata_reorder(pd); 339 + /* If we're running on the wrong CPU, call padata_reorder() via a 340 + * kernel worker. 341 + */ 342 + if (reorder_via_wq) 343 + queue_work_on(cpu, pd->pinst->wq, &pqueue->reorder_work); 344 + else 345 + padata_reorder(pd); 392 346 } 393 347 EXPORT_SYMBOL(padata_do_serial); 394 348 ··· 442 384 struct padata_parallel_queue *pqueue; 443 385 444 386 cpu_index = 0; 445 - for_each_cpu(cpu, pd->cpumask.pcpu) { 387 + for_each_possible_cpu(cpu) { 446 388 pqueue = per_cpu_ptr(pd->pqueue, cpu); 389 + 390 + if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) { 391 + pqueue->cpu_index = -1; 392 + continue; 393 + } 394 + 447 395 pqueue->pd = pd; 448 396 pqueue->cpu_index = cpu_index; 449 397 cpu_index++; ··· 457 393 __padata_list_init(&pqueue->reorder); 458 394 __padata_list_init(&pqueue->parallel); 459 395 INIT_WORK(&pqueue->work, padata_parallel_worker); 396 + INIT_WORK(&pqueue->reorder_work, invoke_padata_reorder); 460 397 atomic_set(&pqueue->num_obj, 0); 461 398 } 462 399 }
+2
lib/mpi/mpi-pow.c
··· 26 26 * however I decided to publish this code under the plain GPL. 27 27 */ 28 28 29 + #include <linux/sched.h> 29 30 #include <linux/string.h> 30 31 #include "mpi-internal.h" 31 32 #include "longlong.h" ··· 257 256 } 258 257 e <<= 1; 259 258 c--; 259 + cond_resched(); 260 260 } 261 261 262 262 i--;
+1 -1
net/ipv4/ah4.c
··· 240 240 if (err == -EINPROGRESS) 241 241 goto out; 242 242 243 - if (err == -EBUSY) 243 + if (err == -ENOSPC) 244 244 err = NET_XMIT_DROP; 245 245 goto out_free; 246 246 }
+1 -1
net/ipv4/esp4.c
··· 432 432 case -EINPROGRESS: 433 433 goto error; 434 434 435 - case -EBUSY: 435 + case -ENOSPC: 436 436 err = NET_XMIT_DROP; 437 437 break; 438 438
+1 -1
net/ipv6/ah6.c
··· 443 443 if (err == -EINPROGRESS) 444 444 goto out; 445 445 446 - if (err == -EBUSY) 446 + if (err == -ENOSPC) 447 447 err = NET_XMIT_DROP; 448 448 goto out_free; 449 449 }
+1 -1
net/ipv6/esp6.c
··· 396 396 case -EINPROGRESS: 397 397 goto error; 398 398 399 - case -EBUSY: 399 + case -ENOSPC: 400 400 err = NET_XMIT_DROP; 401 401 break; 402 402
+17 -39
security/integrity/ima/ima_crypto.c
··· 27 27 28 28 #include "ima.h" 29 29 30 - struct ahash_completion { 31 - struct completion completion; 32 - int err; 33 - }; 34 - 35 30 /* minimum file size for ahash use */ 36 31 static unsigned long ima_ahash_minsize; 37 32 module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644); ··· 191 196 crypto_free_ahash(tfm); 192 197 } 193 198 194 - static void ahash_complete(struct crypto_async_request *req, int err) 199 + static inline int ahash_wait(int err, struct crypto_wait *wait) 195 200 { 196 - struct ahash_completion *res = req->data; 197 201 198 - if (err == -EINPROGRESS) 199 - return; 200 - res->err = err; 201 - complete(&res->completion); 202 - } 202 + err = crypto_wait_req(err, wait); 203 203 204 - static int ahash_wait(int err, struct ahash_completion *res) 205 - { 206 - switch (err) { 207 - case 0: 208 - break; 209 - case -EINPROGRESS: 210 - case -EBUSY: 211 - wait_for_completion(&res->completion); 212 - reinit_completion(&res->completion); 213 - err = res->err; 214 - /* fall through */ 215 - default: 204 + if (err) 216 205 pr_crit_ratelimited("ahash calculation failed: err: %d\n", err); 217 - } 218 206 219 207 return err; 220 208 } ··· 211 233 int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0; 212 234 struct ahash_request *req; 213 235 struct scatterlist sg[1]; 214 - struct ahash_completion res; 236 + struct crypto_wait wait; 215 237 size_t rbuf_size[2]; 216 238 217 239 hash->length = crypto_ahash_digestsize(tfm); ··· 220 242 if (!req) 221 243 return -ENOMEM; 222 244 223 - init_completion(&res.completion); 245 + crypto_init_wait(&wait); 224 246 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | 225 247 CRYPTO_TFM_REQ_MAY_SLEEP, 226 - ahash_complete, &res); 248 + crypto_req_done, &wait); 227 249 228 - rc = ahash_wait(crypto_ahash_init(req), &res); 250 + rc = ahash_wait(crypto_ahash_init(req), &wait); 229 251 if (rc) 230 252 goto out1; 231 253 ··· 266 288 * read/request, wait for the completion of the 267 289 * previous ahash_update() 
request. 268 290 */ 269 - rc = ahash_wait(ahash_rc, &res); 291 + rc = ahash_wait(ahash_rc, &wait); 270 292 if (rc) 271 293 goto out3; 272 294 } ··· 282 304 * read/request, wait for the completion of the 283 305 * previous ahash_update() request. 284 306 */ 285 - rc = ahash_wait(ahash_rc, &res); 307 + rc = ahash_wait(ahash_rc, &wait); 286 308 if (rc) 287 309 goto out3; 288 310 } ··· 296 318 active = !active; /* swap buffers, if we use two */ 297 319 } 298 320 /* wait for the last update request to complete */ 299 - rc = ahash_wait(ahash_rc, &res); 321 + rc = ahash_wait(ahash_rc, &wait); 300 322 out3: 301 323 if (read) 302 324 file->f_mode &= ~FMODE_READ; ··· 305 327 out2: 306 328 if (!rc) { 307 329 ahash_request_set_crypt(req, NULL, hash->digest, 0); 308 - rc = ahash_wait(crypto_ahash_final(req), &res); 330 + rc = ahash_wait(crypto_ahash_final(req), &wait); 309 331 } 310 332 out1: 311 333 ahash_request_free(req); ··· 515 537 { 516 538 struct ahash_request *req; 517 539 struct scatterlist sg; 518 - struct ahash_completion res; 540 + struct crypto_wait wait; 519 541 int rc, ahash_rc = 0; 520 542 521 543 hash->length = crypto_ahash_digestsize(tfm); ··· 524 546 if (!req) 525 547 return -ENOMEM; 526 548 527 - init_completion(&res.completion); 549 + crypto_init_wait(&wait); 528 550 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | 529 551 CRYPTO_TFM_REQ_MAY_SLEEP, 530 - ahash_complete, &res); 552 + crypto_req_done, &wait); 531 553 532 - rc = ahash_wait(crypto_ahash_init(req), &res); 554 + rc = ahash_wait(crypto_ahash_init(req), &wait); 533 555 if (rc) 534 556 goto out; 535 557 ··· 539 561 ahash_rc = crypto_ahash_update(req); 540 562 541 563 /* wait for the update request to complete */ 542 - rc = ahash_wait(ahash_rc, &res); 564 + rc = ahash_wait(ahash_rc, &wait); 543 565 if (!rc) { 544 566 ahash_request_set_crypt(req, NULL, hash->digest, 0); 545 - rc = ahash_wait(crypto_ahash_final(req), &res); 567 + rc = ahash_wait(crypto_ahash_final(req), &wait); 546 568 } 
547 569 out: 548 570 ahash_request_free(req);