Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: eip93 - Add Inside Secure SafeXcel EIP-93 crypto engine support

Add support for the Inside Secure SafeXcel EIP-93 Crypto Engine used on
Mediatek MT7621 SoC and new Airoha SoC.

EIP-93 IP supports AES/DES/3DES ciphers in ECB/CBC and CTR modes as well as
authenc(HMAC(x), cipher(y)) using HMAC MD5, SHA1, SHA224 and SHA256.

EIP-93 provides registers to signal support for specific ciphers, and the
driver dynamically registers only the ones supported by the chip.

Signed-off-by: Richard van Schagen <vschagen@icloud.com>
Co-developed-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Christian Marangi and committed by
Herbert Xu
9739f5f9 bbbbd1d1

+4056
+7
MAINTAINERS
··· 11466 11466 S: Maintained 11467 11467 F: drivers/crypto/inside-secure/ 11468 11468 11469 + INSIDE SECURE EIP93 CRYPTO DRIVER 11470 + M: Christian Marangi <ansuelsmth@gmail.com> 11471 + L: linux-crypto@vger.kernel.org 11472 + S: Maintained 11473 + F: Documentation/devicetree/bindings/crypto/inside-secure,safexcel-eip93.yaml 11474 + F: drivers/crypto/inside-secure/eip93/ 11475 + 11469 11476 INTEGRITY MEASUREMENT ARCHITECTURE (IMA) 11470 11477 M: Mimi Zohar <zohar@linux.ibm.com> 11471 11478 M: Roberto Sassu <roberto.sassu@huawei.com>
+1
drivers/crypto/Kconfig
··· 855 855 856 856 source "drivers/crypto/aspeed/Kconfig" 857 857 source "drivers/crypto/starfive/Kconfig" 858 + source "drivers/crypto/inside-secure/eip93/Kconfig" 858 859 859 860 endif # CRYPTO_HW
+1
drivers/crypto/Makefile
··· 50 50 obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/ 51 51 obj-y += intel/ 52 52 obj-y += starfive/ 53 + obj-y += inside-secure/eip93/
+20
drivers/crypto/inside-secure/eip93/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + config CRYPTO_DEV_EIP93 3 + tristate "Support for EIP93 crypto HW accelerators" 4 + depends on SOC_MT7621 || ARCH_AIROHA || COMPILE_TEST 5 + select CRYPTO_LIB_AES 6 + select CRYPTO_LIB_DES 7 + select CRYPTO_SKCIPHER 8 + select CRYPTO_AEAD 9 + select CRYPTO_AUTHENC 10 + select CRYPTO_MD5 11 + select CRYPTO_SHA1 12 + select CRYPTO_SHA256 13 + help 14 + EIP93 have various crypto HW accelerators. Select this if 15 + you want to use the EIP93 modules for any of the crypto algorithms. 16 + 17 + If the IP supports it, this provide offload for AES - ECB, CBC and 18 + CTR crypto. Also provide DES and 3DES ECB and CBC. 19 + 20 + Also provide AEAD authenc(hmac(x), cipher(y)) for supported algo.
+5
drivers/crypto/inside-secure/eip93/Makefile
··· 1 + obj-$(CONFIG_CRYPTO_DEV_EIP93) += crypto-hw-eip93.o 2 + 3 + crypto-hw-eip93-y += eip93-main.o eip93-common.o 4 + crypto-hw-eip93-y += eip93-cipher.o eip93-aead.o 5 + crypto-hw-eip93-y += eip93-hash.o
+711
drivers/crypto/inside-secure/eip93/eip93-aead.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2019 - 2021 4 + * 5 + * Richard van Schagen <vschagen@icloud.com> 6 + * Christian Marangi <ansuelsmth@gmail.com 7 + */ 8 + 9 + #include <crypto/aead.h> 10 + #include <crypto/aes.h> 11 + #include <crypto/authenc.h> 12 + #include <crypto/ctr.h> 13 + #include <crypto/hmac.h> 14 + #include <crypto/internal/aead.h> 15 + #include <crypto/md5.h> 16 + #include <crypto/null.h> 17 + #include <crypto/sha1.h> 18 + #include <crypto/sha2.h> 19 + 20 + #include <crypto/internal/des.h> 21 + 22 + #include <linux/crypto.h> 23 + #include <linux/dma-mapping.h> 24 + 25 + #include "eip93-aead.h" 26 + #include "eip93-cipher.h" 27 + #include "eip93-common.h" 28 + #include "eip93-regs.h" 29 + 30 + void eip93_aead_handle_result(struct crypto_async_request *async, int err) 31 + { 32 + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm); 33 + struct eip93_device *eip93 = ctx->eip93; 34 + struct aead_request *req = aead_request_cast(async); 35 + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); 36 + 37 + eip93_unmap_dma(eip93, rctx, req->src, req->dst); 38 + eip93_handle_result(eip93, rctx, req->iv); 39 + 40 + aead_request_complete(req, err); 41 + } 42 + 43 + static int eip93_aead_send_req(struct crypto_async_request *async) 44 + { 45 + struct aead_request *req = aead_request_cast(async); 46 + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); 47 + int err; 48 + 49 + err = check_valid_request(rctx); 50 + if (err) { 51 + aead_request_complete(req, err); 52 + return err; 53 + } 54 + 55 + return eip93_send_req(async, req->iv, rctx); 56 + } 57 + 58 + /* Crypto aead API functions */ 59 + static int eip93_aead_cra_init(struct crypto_tfm *tfm) 60 + { 61 + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); 62 + struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg, 63 + struct eip93_alg_template, alg.aead.base); 64 + 65 + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), 66 + sizeof(struct 
eip93_cipher_reqctx)); 67 + 68 + ctx->eip93 = tmpl->eip93; 69 + ctx->flags = tmpl->flags; 70 + ctx->type = tmpl->type; 71 + ctx->set_assoc = true; 72 + 73 + ctx->sa_record = kzalloc(sizeof(*ctx->sa_record), GFP_KERNEL); 74 + if (!ctx->sa_record) 75 + return -ENOMEM; 76 + 77 + return 0; 78 + } 79 + 80 + static void eip93_aead_cra_exit(struct crypto_tfm *tfm) 81 + { 82 + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); 83 + 84 + dma_unmap_single(ctx->eip93->dev, ctx->sa_record_base, 85 + sizeof(*ctx->sa_record), DMA_TO_DEVICE); 86 + kfree(ctx->sa_record); 87 + } 88 + 89 + static int eip93_aead_setkey(struct crypto_aead *ctfm, const u8 *key, 90 + unsigned int len) 91 + { 92 + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); 93 + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); 94 + struct crypto_authenc_keys keys; 95 + struct crypto_aes_ctx aes; 96 + struct sa_record *sa_record = ctx->sa_record; 97 + u32 nonce = 0; 98 + int ret; 99 + 100 + if (crypto_authenc_extractkeys(&keys, key, len)) 101 + return -EINVAL; 102 + 103 + if (IS_RFC3686(ctx->flags)) { 104 + if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE) 105 + return -EINVAL; 106 + 107 + keys.enckeylen -= CTR_RFC3686_NONCE_SIZE; 108 + memcpy(&nonce, keys.enckey + keys.enckeylen, 109 + CTR_RFC3686_NONCE_SIZE); 110 + } 111 + 112 + switch ((ctx->flags & EIP93_ALG_MASK)) { 113 + case EIP93_ALG_DES: 114 + ret = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen); 115 + if (ret) 116 + return ret; 117 + 118 + break; 119 + case EIP93_ALG_3DES: 120 + if (keys.enckeylen != DES3_EDE_KEY_SIZE) 121 + return -EINVAL; 122 + 123 + ret = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen); 124 + if (ret) 125 + return ret; 126 + 127 + break; 128 + case EIP93_ALG_AES: 129 + ret = aes_expandkey(&aes, keys.enckey, keys.enckeylen); 130 + if (ret) 131 + return ret; 132 + 133 + break; 134 + } 135 + 136 + ctx->blksize = crypto_aead_blocksize(ctfm); 137 + /* Encryption key */ 138 + eip93_set_sa_record(sa_record, keys.enckeylen, 
ctx->flags); 139 + sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE; 140 + sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE, 141 + EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC_HASH); 142 + sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH; 143 + sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 144 + ctx->authsize / sizeof(u32)); 145 + 146 + memcpy(sa_record->sa_key, keys.enckey, keys.enckeylen); 147 + ctx->sa_nonce = nonce; 148 + sa_record->sa_nonce = nonce; 149 + 150 + /* authentication key */ 151 + ret = eip93_hmac_setkey(ctx->flags, keys.authkey, keys.authkeylen, 152 + ctx->authsize, sa_record->sa_i_digest, 153 + sa_record->sa_o_digest, false); 154 + 155 + ctx->set_assoc = true; 156 + 157 + return ret; 158 + } 159 + 160 + static int eip93_aead_setauthsize(struct crypto_aead *ctfm, 161 + unsigned int authsize) 162 + { 163 + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); 164 + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); 165 + 166 + ctx->authsize = authsize; 167 + ctx->sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH; 168 + ctx->sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 169 + ctx->authsize / sizeof(u32)); 170 + 171 + return 0; 172 + } 173 + 174 + static void eip93_aead_setassoc(struct eip93_crypto_ctx *ctx, 175 + struct aead_request *req) 176 + { 177 + struct sa_record *sa_record = ctx->sa_record; 178 + 179 + sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HASH_CRYPT_OFFSET; 180 + sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_HASH_CRYPT_OFFSET, 181 + req->assoclen / sizeof(u32)); 182 + 183 + ctx->assoclen = req->assoclen; 184 + } 185 + 186 + static int eip93_aead_crypt(struct aead_request *req) 187 + { 188 + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); 189 + struct crypto_async_request *async = &req->base; 190 + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 191 + struct crypto_aead *aead = crypto_aead_reqtfm(req); 192 + int ret; 193 + 194 + ctx->sa_record_base = 
dma_map_single(ctx->eip93->dev, ctx->sa_record, 195 + sizeof(*ctx->sa_record), DMA_TO_DEVICE); 196 + ret = dma_mapping_error(ctx->eip93->dev, ctx->sa_record_base); 197 + if (ret) 198 + return ret; 199 + 200 + rctx->textsize = req->cryptlen; 201 + rctx->blksize = ctx->blksize; 202 + rctx->assoclen = req->assoclen; 203 + rctx->authsize = ctx->authsize; 204 + rctx->sg_src = req->src; 205 + rctx->sg_dst = req->dst; 206 + rctx->ivsize = crypto_aead_ivsize(aead); 207 + rctx->desc_flags = EIP93_DESC_AEAD; 208 + rctx->sa_record_base = ctx->sa_record_base; 209 + 210 + if (IS_DECRYPT(rctx->flags)) 211 + rctx->textsize -= rctx->authsize; 212 + 213 + return eip93_aead_send_req(async); 214 + } 215 + 216 + static int eip93_aead_encrypt(struct aead_request *req) 217 + { 218 + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 219 + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); 220 + 221 + rctx->flags = ctx->flags; 222 + rctx->flags |= EIP93_ENCRYPT; 223 + if (ctx->set_assoc) { 224 + eip93_aead_setassoc(ctx, req); 225 + ctx->set_assoc = false; 226 + } 227 + 228 + if (req->assoclen != ctx->assoclen) { 229 + dev_err(ctx->eip93->dev, "Request AAD length error\n"); 230 + return -EINVAL; 231 + } 232 + 233 + return eip93_aead_crypt(req); 234 + } 235 + 236 + static int eip93_aead_decrypt(struct aead_request *req) 237 + { 238 + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 239 + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); 240 + 241 + ctx->sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIRECTION_IN; 242 + ctx->sa_record->sa_cmd1_word &= ~(EIP93_SA_CMD_COPY_PAD | 243 + EIP93_SA_CMD_COPY_DIGEST); 244 + 245 + rctx->flags = ctx->flags; 246 + rctx->flags |= EIP93_DECRYPT; 247 + if (ctx->set_assoc) { 248 + eip93_aead_setassoc(ctx, req); 249 + ctx->set_assoc = false; 250 + } 251 + 252 + if (req->assoclen != ctx->assoclen) { 253 + dev_err(ctx->eip93->dev, "Request AAD length error\n"); 254 + return -EINVAL; 255 + } 256 + 257 + return 
eip93_aead_crypt(req); 258 + } 259 + 260 + /* Available authenc algorithms in this module */ 261 + struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_aes = { 262 + .type = EIP93_ALG_TYPE_AEAD, 263 + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_AES, 264 + .alg.aead = { 265 + .setkey = eip93_aead_setkey, 266 + .encrypt = eip93_aead_encrypt, 267 + .decrypt = eip93_aead_decrypt, 268 + .ivsize = AES_BLOCK_SIZE, 269 + .setauthsize = eip93_aead_setauthsize, 270 + .maxauthsize = MD5_DIGEST_SIZE, 271 + .base = { 272 + .cra_name = "authenc(hmac(md5),cbc(aes))", 273 + .cra_driver_name = 274 + "authenc(hmac(md5-eip93), cbc(aes-eip93))", 275 + .cra_priority = EIP93_CRA_PRIORITY, 276 + .cra_flags = CRYPTO_ALG_ASYNC | 277 + CRYPTO_ALG_KERN_DRIVER_ONLY | 278 + CRYPTO_ALG_ALLOCATES_MEMORY, 279 + .cra_blocksize = AES_BLOCK_SIZE, 280 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 281 + .cra_alignmask = 0, 282 + .cra_init = eip93_aead_cra_init, 283 + .cra_exit = eip93_aead_cra_exit, 284 + .cra_module = THIS_MODULE, 285 + }, 286 + }, 287 + }; 288 + 289 + struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_aes = { 290 + .type = EIP93_ALG_TYPE_AEAD, 291 + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_AES, 292 + .alg.aead = { 293 + .setkey = eip93_aead_setkey, 294 + .encrypt = eip93_aead_encrypt, 295 + .decrypt = eip93_aead_decrypt, 296 + .ivsize = AES_BLOCK_SIZE, 297 + .setauthsize = eip93_aead_setauthsize, 298 + .maxauthsize = SHA1_DIGEST_SIZE, 299 + .base = { 300 + .cra_name = "authenc(hmac(sha1),cbc(aes))", 301 + .cra_driver_name = 302 + "authenc(hmac(sha1-eip93),cbc(aes-eip93))", 303 + .cra_priority = EIP93_CRA_PRIORITY, 304 + .cra_flags = CRYPTO_ALG_ASYNC | 305 + CRYPTO_ALG_KERN_DRIVER_ONLY | 306 + CRYPTO_ALG_ALLOCATES_MEMORY, 307 + .cra_blocksize = AES_BLOCK_SIZE, 308 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 309 + .cra_alignmask = 0, 310 + .cra_init = eip93_aead_cra_init, 311 + .cra_exit = 
eip93_aead_cra_exit, 312 + .cra_module = THIS_MODULE, 313 + }, 314 + }, 315 + }; 316 + 317 + struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_aes = { 318 + .type = EIP93_ALG_TYPE_AEAD, 319 + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_AES, 320 + .alg.aead = { 321 + .setkey = eip93_aead_setkey, 322 + .encrypt = eip93_aead_encrypt, 323 + .decrypt = eip93_aead_decrypt, 324 + .ivsize = AES_BLOCK_SIZE, 325 + .setauthsize = eip93_aead_setauthsize, 326 + .maxauthsize = SHA224_DIGEST_SIZE, 327 + .base = { 328 + .cra_name = "authenc(hmac(sha224),cbc(aes))", 329 + .cra_driver_name = 330 + "authenc(hmac(sha224-eip93),cbc(aes-eip93))", 331 + .cra_priority = EIP93_CRA_PRIORITY, 332 + .cra_flags = CRYPTO_ALG_ASYNC | 333 + CRYPTO_ALG_KERN_DRIVER_ONLY | 334 + CRYPTO_ALG_ALLOCATES_MEMORY, 335 + .cra_blocksize = AES_BLOCK_SIZE, 336 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 337 + .cra_alignmask = 0, 338 + .cra_init = eip93_aead_cra_init, 339 + .cra_exit = eip93_aead_cra_exit, 340 + .cra_module = THIS_MODULE, 341 + }, 342 + }, 343 + }; 344 + 345 + struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_aes = { 346 + .type = EIP93_ALG_TYPE_AEAD, 347 + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_AES, 348 + .alg.aead = { 349 + .setkey = eip93_aead_setkey, 350 + .encrypt = eip93_aead_encrypt, 351 + .decrypt = eip93_aead_decrypt, 352 + .ivsize = AES_BLOCK_SIZE, 353 + .setauthsize = eip93_aead_setauthsize, 354 + .maxauthsize = SHA256_DIGEST_SIZE, 355 + .base = { 356 + .cra_name = "authenc(hmac(sha256),cbc(aes))", 357 + .cra_driver_name = 358 + "authenc(hmac(sha256-eip93),cbc(aes-eip93))", 359 + .cra_priority = EIP93_CRA_PRIORITY, 360 + .cra_flags = CRYPTO_ALG_ASYNC | 361 + CRYPTO_ALG_KERN_DRIVER_ONLY | 362 + CRYPTO_ALG_ALLOCATES_MEMORY, 363 + .cra_blocksize = AES_BLOCK_SIZE, 364 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 365 + .cra_alignmask = 0, 366 + .cra_init = eip93_aead_cra_init, 367 + .cra_exit = 
eip93_aead_cra_exit, 368 + .cra_module = THIS_MODULE, 369 + }, 370 + }, 371 + }; 372 + 373 + struct eip93_alg_template eip93_alg_authenc_hmac_md5_rfc3686_aes = { 374 + .type = EIP93_ALG_TYPE_AEAD, 375 + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | 376 + EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, 377 + .alg.aead = { 378 + .setkey = eip93_aead_setkey, 379 + .encrypt = eip93_aead_encrypt, 380 + .decrypt = eip93_aead_decrypt, 381 + .ivsize = CTR_RFC3686_IV_SIZE, 382 + .setauthsize = eip93_aead_setauthsize, 383 + .maxauthsize = MD5_DIGEST_SIZE, 384 + .base = { 385 + .cra_name = "authenc(hmac(md5),rfc3686(ctr(aes)))", 386 + .cra_driver_name = 387 + "authenc(hmac(md5-eip93),rfc3686(ctr(aes-eip93)))", 388 + .cra_priority = EIP93_CRA_PRIORITY, 389 + .cra_flags = CRYPTO_ALG_ASYNC | 390 + CRYPTO_ALG_KERN_DRIVER_ONLY | 391 + CRYPTO_ALG_ALLOCATES_MEMORY, 392 + .cra_blocksize = 1, 393 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 394 + .cra_alignmask = 0, 395 + .cra_init = eip93_aead_cra_init, 396 + .cra_exit = eip93_aead_cra_exit, 397 + .cra_module = THIS_MODULE, 398 + }, 399 + }, 400 + }; 401 + 402 + struct eip93_alg_template eip93_alg_authenc_hmac_sha1_rfc3686_aes = { 403 + .type = EIP93_ALG_TYPE_AEAD, 404 + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | 405 + EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, 406 + .alg.aead = { 407 + .setkey = eip93_aead_setkey, 408 + .encrypt = eip93_aead_encrypt, 409 + .decrypt = eip93_aead_decrypt, 410 + .ivsize = CTR_RFC3686_IV_SIZE, 411 + .setauthsize = eip93_aead_setauthsize, 412 + .maxauthsize = SHA1_DIGEST_SIZE, 413 + .base = { 414 + .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", 415 + .cra_driver_name = 416 + "authenc(hmac(sha1-eip93),rfc3686(ctr(aes-eip93)))", 417 + .cra_priority = EIP93_CRA_PRIORITY, 418 + .cra_flags = CRYPTO_ALG_ASYNC | 419 + CRYPTO_ALG_KERN_DRIVER_ONLY | 420 + CRYPTO_ALG_ALLOCATES_MEMORY, 421 + .cra_blocksize = 1, 422 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 423 + .cra_alignmask = 0, 
424 + .cra_init = eip93_aead_cra_init, 425 + .cra_exit = eip93_aead_cra_exit, 426 + .cra_module = THIS_MODULE, 427 + }, 428 + }, 429 + }; 430 + 431 + struct eip93_alg_template eip93_alg_authenc_hmac_sha224_rfc3686_aes = { 432 + .type = EIP93_ALG_TYPE_AEAD, 433 + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | 434 + EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, 435 + .alg.aead = { 436 + .setkey = eip93_aead_setkey, 437 + .encrypt = eip93_aead_encrypt, 438 + .decrypt = eip93_aead_decrypt, 439 + .ivsize = CTR_RFC3686_IV_SIZE, 440 + .setauthsize = eip93_aead_setauthsize, 441 + .maxauthsize = SHA224_DIGEST_SIZE, 442 + .base = { 443 + .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", 444 + .cra_driver_name = 445 + "authenc(hmac(sha224-eip93),rfc3686(ctr(aes-eip93)))", 446 + .cra_priority = EIP93_CRA_PRIORITY, 447 + .cra_flags = CRYPTO_ALG_ASYNC | 448 + CRYPTO_ALG_KERN_DRIVER_ONLY | 449 + CRYPTO_ALG_ALLOCATES_MEMORY, 450 + .cra_blocksize = 1, 451 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 452 + .cra_alignmask = 0, 453 + .cra_init = eip93_aead_cra_init, 454 + .cra_exit = eip93_aead_cra_exit, 455 + .cra_module = THIS_MODULE, 456 + }, 457 + }, 458 + }; 459 + 460 + struct eip93_alg_template eip93_alg_authenc_hmac_sha256_rfc3686_aes = { 461 + .type = EIP93_ALG_TYPE_AEAD, 462 + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | 463 + EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, 464 + .alg.aead = { 465 + .setkey = eip93_aead_setkey, 466 + .encrypt = eip93_aead_encrypt, 467 + .decrypt = eip93_aead_decrypt, 468 + .ivsize = CTR_RFC3686_IV_SIZE, 469 + .setauthsize = eip93_aead_setauthsize, 470 + .maxauthsize = SHA256_DIGEST_SIZE, 471 + .base = { 472 + .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", 473 + .cra_driver_name = 474 + "authenc(hmac(sha256-eip93),rfc3686(ctr(aes-eip93)))", 475 + .cra_priority = EIP93_CRA_PRIORITY, 476 + .cra_flags = CRYPTO_ALG_ASYNC | 477 + CRYPTO_ALG_KERN_DRIVER_ONLY | 478 + CRYPTO_ALG_ALLOCATES_MEMORY, 479 + .cra_blocksize = 
1, 480 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 481 + .cra_alignmask = 0, 482 + .cra_init = eip93_aead_cra_init, 483 + .cra_exit = eip93_aead_cra_exit, 484 + .cra_module = THIS_MODULE, 485 + }, 486 + }, 487 + }; 488 + 489 + struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des = { 490 + .type = EIP93_ALG_TYPE_AEAD, 491 + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_DES, 492 + .alg.aead = { 493 + .setkey = eip93_aead_setkey, 494 + .encrypt = eip93_aead_encrypt, 495 + .decrypt = eip93_aead_decrypt, 496 + .ivsize = DES_BLOCK_SIZE, 497 + .setauthsize = eip93_aead_setauthsize, 498 + .maxauthsize = MD5_DIGEST_SIZE, 499 + .base = { 500 + .cra_name = "authenc(hmac(md5),cbc(des))", 501 + .cra_driver_name = 502 + "authenc(hmac(md5-eip93),cbc(des-eip93))", 503 + .cra_priority = EIP93_CRA_PRIORITY, 504 + .cra_flags = CRYPTO_ALG_ASYNC | 505 + CRYPTO_ALG_KERN_DRIVER_ONLY | 506 + CRYPTO_ALG_ALLOCATES_MEMORY, 507 + .cra_blocksize = DES_BLOCK_SIZE, 508 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 509 + .cra_alignmask = 0, 510 + .cra_init = eip93_aead_cra_init, 511 + .cra_exit = eip93_aead_cra_exit, 512 + .cra_module = THIS_MODULE, 513 + }, 514 + }, 515 + }; 516 + 517 + struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des = { 518 + .type = EIP93_ALG_TYPE_AEAD, 519 + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_DES, 520 + .alg.aead = { 521 + .setkey = eip93_aead_setkey, 522 + .encrypt = eip93_aead_encrypt, 523 + .decrypt = eip93_aead_decrypt, 524 + .ivsize = DES_BLOCK_SIZE, 525 + .setauthsize = eip93_aead_setauthsize, 526 + .maxauthsize = SHA1_DIGEST_SIZE, 527 + .base = { 528 + .cra_name = "authenc(hmac(sha1),cbc(des))", 529 + .cra_driver_name = 530 + "authenc(hmac(sha1-eip93),cbc(des-eip93))", 531 + .cra_priority = EIP93_CRA_PRIORITY, 532 + .cra_flags = CRYPTO_ALG_ASYNC | 533 + CRYPTO_ALG_KERN_DRIVER_ONLY | 534 + CRYPTO_ALG_ALLOCATES_MEMORY, 535 + .cra_blocksize = DES_BLOCK_SIZE, 536 + .cra_ctxsize = 
sizeof(struct eip93_crypto_ctx), 537 + .cra_alignmask = 0, 538 + .cra_init = eip93_aead_cra_init, 539 + .cra_exit = eip93_aead_cra_exit, 540 + .cra_module = THIS_MODULE, 541 + }, 542 + }, 543 + }; 544 + 545 + struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des = { 546 + .type = EIP93_ALG_TYPE_AEAD, 547 + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_DES, 548 + .alg.aead = { 549 + .setkey = eip93_aead_setkey, 550 + .encrypt = eip93_aead_encrypt, 551 + .decrypt = eip93_aead_decrypt, 552 + .ivsize = DES_BLOCK_SIZE, 553 + .setauthsize = eip93_aead_setauthsize, 554 + .maxauthsize = SHA224_DIGEST_SIZE, 555 + .base = { 556 + .cra_name = "authenc(hmac(sha224),cbc(des))", 557 + .cra_driver_name = 558 + "authenc(hmac(sha224-eip93),cbc(des-eip93))", 559 + .cra_priority = EIP93_CRA_PRIORITY, 560 + .cra_flags = CRYPTO_ALG_ASYNC | 561 + CRYPTO_ALG_KERN_DRIVER_ONLY | 562 + CRYPTO_ALG_ALLOCATES_MEMORY, 563 + .cra_blocksize = DES_BLOCK_SIZE, 564 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 565 + .cra_alignmask = 0, 566 + .cra_init = eip93_aead_cra_init, 567 + .cra_exit = eip93_aead_cra_exit, 568 + .cra_module = THIS_MODULE, 569 + }, 570 + }, 571 + }; 572 + 573 + struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des = { 574 + .type = EIP93_ALG_TYPE_AEAD, 575 + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_DES, 576 + .alg.aead = { 577 + .setkey = eip93_aead_setkey, 578 + .encrypt = eip93_aead_encrypt, 579 + .decrypt = eip93_aead_decrypt, 580 + .ivsize = DES_BLOCK_SIZE, 581 + .setauthsize = eip93_aead_setauthsize, 582 + .maxauthsize = SHA256_DIGEST_SIZE, 583 + .base = { 584 + .cra_name = "authenc(hmac(sha256),cbc(des))", 585 + .cra_driver_name = 586 + "authenc(hmac(sha256-eip93),cbc(des-eip93))", 587 + .cra_priority = EIP93_CRA_PRIORITY, 588 + .cra_flags = CRYPTO_ALG_ASYNC | 589 + CRYPTO_ALG_KERN_DRIVER_ONLY | 590 + CRYPTO_ALG_ALLOCATES_MEMORY, 591 + .cra_blocksize = DES_BLOCK_SIZE, 592 + .cra_ctxsize = 
sizeof(struct eip93_crypto_ctx), 593 + .cra_alignmask = 0, 594 + .cra_init = eip93_aead_cra_init, 595 + .cra_exit = eip93_aead_cra_exit, 596 + .cra_module = THIS_MODULE, 597 + }, 598 + }, 599 + }; 600 + 601 + struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des3_ede = { 602 + .type = EIP93_ALG_TYPE_AEAD, 603 + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_3DES, 604 + .alg.aead = { 605 + .setkey = eip93_aead_setkey, 606 + .encrypt = eip93_aead_encrypt, 607 + .decrypt = eip93_aead_decrypt, 608 + .ivsize = DES3_EDE_BLOCK_SIZE, 609 + .setauthsize = eip93_aead_setauthsize, 610 + .maxauthsize = MD5_DIGEST_SIZE, 611 + .base = { 612 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", 613 + .cra_driver_name = 614 + "authenc(hmac(md5-eip93),cbc(des3_ede-eip93))", 615 + .cra_priority = EIP93_CRA_PRIORITY, 616 + .cra_flags = CRYPTO_ALG_ASYNC | 617 + CRYPTO_ALG_KERN_DRIVER_ONLY | 618 + CRYPTO_ALG_ALLOCATES_MEMORY, 619 + .cra_blocksize = DES3_EDE_BLOCK_SIZE, 620 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 621 + .cra_alignmask = 0x0, 622 + .cra_init = eip93_aead_cra_init, 623 + .cra_exit = eip93_aead_cra_exit, 624 + .cra_module = THIS_MODULE, 625 + }, 626 + }, 627 + }; 628 + 629 + struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des3_ede = { 630 + .type = EIP93_ALG_TYPE_AEAD, 631 + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_3DES, 632 + .alg.aead = { 633 + .setkey = eip93_aead_setkey, 634 + .encrypt = eip93_aead_encrypt, 635 + .decrypt = eip93_aead_decrypt, 636 + .ivsize = DES3_EDE_BLOCK_SIZE, 637 + .setauthsize = eip93_aead_setauthsize, 638 + .maxauthsize = SHA1_DIGEST_SIZE, 639 + .base = { 640 + .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", 641 + .cra_driver_name = 642 + "authenc(hmac(sha1-eip93),cbc(des3_ede-eip93))", 643 + .cra_priority = EIP93_CRA_PRIORITY, 644 + .cra_flags = CRYPTO_ALG_ASYNC | 645 + CRYPTO_ALG_KERN_DRIVER_ONLY | 646 + CRYPTO_ALG_ALLOCATES_MEMORY, 647 + .cra_blocksize = 
DES3_EDE_BLOCK_SIZE, 648 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 649 + .cra_alignmask = 0x0, 650 + .cra_init = eip93_aead_cra_init, 651 + .cra_exit = eip93_aead_cra_exit, 652 + .cra_module = THIS_MODULE, 653 + }, 654 + }, 655 + }; 656 + 657 + struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des3_ede = { 658 + .type = EIP93_ALG_TYPE_AEAD, 659 + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_3DES, 660 + .alg.aead = { 661 + .setkey = eip93_aead_setkey, 662 + .encrypt = eip93_aead_encrypt, 663 + .decrypt = eip93_aead_decrypt, 664 + .ivsize = DES3_EDE_BLOCK_SIZE, 665 + .setauthsize = eip93_aead_setauthsize, 666 + .maxauthsize = SHA224_DIGEST_SIZE, 667 + .base = { 668 + .cra_name = "authenc(hmac(sha224),cbc(des3_ede))", 669 + .cra_driver_name = 670 + "authenc(hmac(sha224-eip93),cbc(des3_ede-eip93))", 671 + .cra_priority = EIP93_CRA_PRIORITY, 672 + .cra_flags = CRYPTO_ALG_ASYNC | 673 + CRYPTO_ALG_KERN_DRIVER_ONLY | 674 + CRYPTO_ALG_ALLOCATES_MEMORY, 675 + .cra_blocksize = DES3_EDE_BLOCK_SIZE, 676 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 677 + .cra_alignmask = 0x0, 678 + .cra_init = eip93_aead_cra_init, 679 + .cra_exit = eip93_aead_cra_exit, 680 + .cra_module = THIS_MODULE, 681 + }, 682 + }, 683 + }; 684 + 685 + struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des3_ede = { 686 + .type = EIP93_ALG_TYPE_AEAD, 687 + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_3DES, 688 + .alg.aead = { 689 + .setkey = eip93_aead_setkey, 690 + .encrypt = eip93_aead_encrypt, 691 + .decrypt = eip93_aead_decrypt, 692 + .ivsize = DES3_EDE_BLOCK_SIZE, 693 + .setauthsize = eip93_aead_setauthsize, 694 + .maxauthsize = SHA256_DIGEST_SIZE, 695 + .base = { 696 + .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", 697 + .cra_driver_name = 698 + "authenc(hmac(sha256-eip93),cbc(des3_ede-eip93))", 699 + .cra_priority = EIP93_CRA_PRIORITY, 700 + .cra_flags = CRYPTO_ALG_ASYNC | 701 + CRYPTO_ALG_KERN_DRIVER_ONLY | 
702 + CRYPTO_ALG_ALLOCATES_MEMORY, 703 + .cra_blocksize = DES3_EDE_BLOCK_SIZE, 704 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 705 + .cra_alignmask = 0x0, 706 + .cra_init = eip93_aead_cra_init, 707 + .cra_exit = eip93_aead_cra_exit, 708 + .cra_module = THIS_MODULE, 709 + }, 710 + }, 711 + };
+38
drivers/crypto/inside-secure/eip93/eip93-aead.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 2 + * 3 + * Copyright (C) 2019 - 2021 4 + * 5 + * Richard van Schagen <vschagen@icloud.com> 6 + * Christian Marangi <ansuelsmth@gmail.com 7 + */ 8 + #ifndef _EIP93_AEAD_H_ 9 + #define _EIP93_AEAD_H_ 10 + 11 + extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_aes; 12 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_aes; 13 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_aes; 14 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_aes; 15 + extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_ctr_aes; 16 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_ctr_aes; 17 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_ctr_aes; 18 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_ctr_aes; 19 + extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_rfc3686_aes; 20 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_rfc3686_aes; 21 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_rfc3686_aes; 22 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_rfc3686_aes; 23 + extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des; 24 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des; 25 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des; 26 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des; 27 + extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des3_ede; 28 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des3_ede; 29 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des3_ede; 30 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des3_ede; 31 + extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_ecb_null; 32 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_ecb_null; 33 + extern struct eip93_alg_template 
eip93_alg_authenc_hmac_sha224_ecb_null; 34 + extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_ecb_null; 35 + 36 + void eip93_aead_handle_result(struct crypto_async_request *async, int err); 37 + 38 + #endif /* _EIP93_AEAD_H_ */
+16
drivers/crypto/inside-secure/eip93/eip93-aes.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 2 + * 3 + * Copyright (C) 2019 - 2021 4 + * 5 + * Richard van Schagen <vschagen@icloud.com> 6 + * Christian Marangi <ansuelsmth@gmail.com> 7 + */ 8 + #ifndef _EIP93_AES_H_ 9 + #define _EIP93_AES_H_ 10 + 11 + extern struct eip93_alg_template eip93_alg_ecb_aes; 12 + extern struct eip93_alg_template eip93_alg_cbc_aes; 13 + extern struct eip93_alg_template eip93_alg_ctr_aes; 14 + extern struct eip93_alg_template eip93_alg_rfc3686_aes; 15 + 16 + #endif /* _EIP93_AES_H_ */
+413
drivers/crypto/inside-secure/eip93/eip93-cipher.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2019 - 2021 4 + * 5 + * Richard van Schagen <vschagen@icloud.com> 6 + * Christian Marangi <ansuelsmth@gmail.com 7 + */ 8 + 9 + #include <crypto/aes.h> 10 + #include <crypto/ctr.h> 11 + #include <crypto/internal/des.h> 12 + #include <linux/dma-mapping.h> 13 + 14 + #include "eip93-aes.h" 15 + #include "eip93-cipher.h" 16 + #include "eip93-common.h" 17 + #include "eip93-des.h" 18 + #include "eip93-regs.h" 19 + 20 + void eip93_skcipher_handle_result(struct crypto_async_request *async, int err) 21 + { 22 + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm); 23 + struct eip93_device *eip93 = ctx->eip93; 24 + struct skcipher_request *req = skcipher_request_cast(async); 25 + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); 26 + 27 + eip93_unmap_dma(eip93, rctx, req->src, req->dst); 28 + eip93_handle_result(eip93, rctx, req->iv); 29 + 30 + skcipher_request_complete(req, err); 31 + } 32 + 33 + static int eip93_skcipher_send_req(struct crypto_async_request *async) 34 + { 35 + struct skcipher_request *req = skcipher_request_cast(async); 36 + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); 37 + int err; 38 + 39 + err = check_valid_request(rctx); 40 + 41 + if (err) { 42 + skcipher_request_complete(req, err); 43 + return err; 44 + } 45 + 46 + return eip93_send_req(async, req->iv, rctx); 47 + } 48 + 49 + /* Crypto skcipher API functions */ 50 + static int eip93_skcipher_cra_init(struct crypto_tfm *tfm) 51 + { 52 + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); 53 + struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg, 54 + struct eip93_alg_template, alg.skcipher.base); 55 + 56 + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), 57 + sizeof(struct eip93_cipher_reqctx)); 58 + 59 + memset(ctx, 0, sizeof(*ctx)); 60 + 61 + ctx->eip93 = tmpl->eip93; 62 + ctx->type = tmpl->type; 63 + 64 + ctx->sa_record = kzalloc(sizeof(*ctx->sa_record), GFP_KERNEL); 65 + if 
(!ctx->sa_record) 66 + return -ENOMEM; 67 + 68 + return 0; 69 + } 70 + 71 + static void eip93_skcipher_cra_exit(struct crypto_tfm *tfm) 72 + { 73 + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); 74 + 75 + dma_unmap_single(ctx->eip93->dev, ctx->sa_record_base, 76 + sizeof(*ctx->sa_record), DMA_TO_DEVICE); 77 + kfree(ctx->sa_record); 78 + } 79 + 80 + static int eip93_skcipher_setkey(struct crypto_skcipher *ctfm, const u8 *key, 81 + unsigned int len) 82 + { 83 + struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); 84 + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); 85 + struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg, 86 + struct eip93_alg_template, 87 + alg.skcipher.base); 88 + struct sa_record *sa_record = ctx->sa_record; 89 + unsigned int keylen = len; 90 + u32 flags = tmpl->flags; 91 + u32 nonce = 0; 92 + int ret; 93 + 94 + if (!key || !keylen) 95 + return -EINVAL; 96 + 97 + if (IS_RFC3686(flags)) { 98 + if (len < CTR_RFC3686_NONCE_SIZE) 99 + return -EINVAL; 100 + 101 + keylen = len - CTR_RFC3686_NONCE_SIZE; 102 + memcpy(&nonce, key + keylen, CTR_RFC3686_NONCE_SIZE); 103 + } 104 + 105 + if (flags & EIP93_ALG_DES) { 106 + ctx->blksize = DES_BLOCK_SIZE; 107 + ret = verify_skcipher_des_key(ctfm, key); 108 + if (ret) 109 + return ret; 110 + } 111 + if (flags & EIP93_ALG_3DES) { 112 + ctx->blksize = DES3_EDE_BLOCK_SIZE; 113 + ret = verify_skcipher_des3_key(ctfm, key); 114 + if (ret) 115 + return ret; 116 + } 117 + 118 + if (flags & EIP93_ALG_AES) { 119 + struct crypto_aes_ctx aes; 120 + 121 + ctx->blksize = AES_BLOCK_SIZE; 122 + ret = aes_expandkey(&aes, key, keylen); 123 + if (ret) 124 + return ret; 125 + } 126 + 127 + eip93_set_sa_record(sa_record, keylen, flags); 128 + 129 + memcpy(sa_record->sa_key, key, keylen); 130 + ctx->sa_nonce = nonce; 131 + sa_record->sa_nonce = nonce; 132 + 133 + return 0; 134 + } 135 + 136 + static int eip93_skcipher_crypt(struct skcipher_request *req) 137 + { 138 + struct eip93_cipher_reqctx *rctx = 
skcipher_request_ctx(req); 139 + struct crypto_async_request *async = &req->base; 140 + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 141 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 142 + int ret; 143 + 144 + if (!req->cryptlen) 145 + return 0; 146 + 147 + /* 148 + * ECB and CBC algorithms require message lengths to be 149 + * multiples of block size. 150 + */ 151 + if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags)) 152 + if (!IS_ALIGNED(req->cryptlen, 153 + crypto_skcipher_blocksize(skcipher))) 154 + return -EINVAL; 155 + 156 + ctx->sa_record_base = dma_map_single(ctx->eip93->dev, ctx->sa_record, 157 + sizeof(*ctx->sa_record), DMA_TO_DEVICE); 158 + ret = dma_mapping_error(ctx->eip93->dev, ctx->sa_record_base); 159 + if (ret) 160 + return ret; 161 + 162 + rctx->assoclen = 0; 163 + rctx->textsize = req->cryptlen; 164 + rctx->authsize = 0; 165 + rctx->sg_src = req->src; 166 + rctx->sg_dst = req->dst; 167 + rctx->ivsize = crypto_skcipher_ivsize(skcipher); 168 + rctx->blksize = ctx->blksize; 169 + rctx->desc_flags = EIP93_DESC_SKCIPHER; 170 + rctx->sa_record_base = ctx->sa_record_base; 171 + 172 + return eip93_skcipher_send_req(async); 173 + } 174 + 175 + static int eip93_skcipher_encrypt(struct skcipher_request *req) 176 + { 177 + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); 178 + struct eip93_alg_template *tmpl = container_of(req->base.tfm->__crt_alg, 179 + struct eip93_alg_template, alg.skcipher.base); 180 + 181 + rctx->flags = tmpl->flags; 182 + rctx->flags |= EIP93_ENCRYPT; 183 + 184 + return eip93_skcipher_crypt(req); 185 + } 186 + 187 + static int eip93_skcipher_decrypt(struct skcipher_request *req) 188 + { 189 + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 190 + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); 191 + struct eip93_alg_template *tmpl = container_of(req->base.tfm->__crt_alg, 192 + struct eip93_alg_template, alg.skcipher.base); 193 + 194 + ctx->sa_record->sa_cmd0_word 
|= EIP93_SA_CMD_DIRECTION_IN; 195 + 196 + rctx->flags = tmpl->flags; 197 + rctx->flags |= EIP93_DECRYPT; 198 + 199 + return eip93_skcipher_crypt(req); 200 + } 201 + 202 + /* Available algorithms in this module */ 203 + struct eip93_alg_template eip93_alg_ecb_aes = { 204 + .type = EIP93_ALG_TYPE_SKCIPHER, 205 + .flags = EIP93_MODE_ECB | EIP93_ALG_AES, 206 + .alg.skcipher = { 207 + .setkey = eip93_skcipher_setkey, 208 + .encrypt = eip93_skcipher_encrypt, 209 + .decrypt = eip93_skcipher_decrypt, 210 + .min_keysize = AES_MIN_KEY_SIZE, 211 + .max_keysize = AES_MAX_KEY_SIZE, 212 + .ivsize = 0, 213 + .base = { 214 + .cra_name = "ecb(aes)", 215 + .cra_driver_name = "ecb(aes-eip93)", 216 + .cra_priority = EIP93_CRA_PRIORITY, 217 + .cra_flags = CRYPTO_ALG_ASYNC | 218 + CRYPTO_ALG_NEED_FALLBACK | 219 + CRYPTO_ALG_KERN_DRIVER_ONLY, 220 + .cra_blocksize = AES_BLOCK_SIZE, 221 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 222 + .cra_alignmask = 0xf, 223 + .cra_init = eip93_skcipher_cra_init, 224 + .cra_exit = eip93_skcipher_cra_exit, 225 + .cra_module = THIS_MODULE, 226 + }, 227 + }, 228 + }; 229 + 230 + struct eip93_alg_template eip93_alg_cbc_aes = { 231 + .type = EIP93_ALG_TYPE_SKCIPHER, 232 + .flags = EIP93_MODE_CBC | EIP93_ALG_AES, 233 + .alg.skcipher = { 234 + .setkey = eip93_skcipher_setkey, 235 + .encrypt = eip93_skcipher_encrypt, 236 + .decrypt = eip93_skcipher_decrypt, 237 + .min_keysize = AES_MIN_KEY_SIZE, 238 + .max_keysize = AES_MAX_KEY_SIZE, 239 + .ivsize = AES_BLOCK_SIZE, 240 + .base = { 241 + .cra_name = "cbc(aes)", 242 + .cra_driver_name = "cbc(aes-eip93)", 243 + .cra_priority = EIP93_CRA_PRIORITY, 244 + .cra_flags = CRYPTO_ALG_ASYNC | 245 + CRYPTO_ALG_NEED_FALLBACK | 246 + CRYPTO_ALG_KERN_DRIVER_ONLY, 247 + .cra_blocksize = AES_BLOCK_SIZE, 248 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 249 + .cra_alignmask = 0xf, 250 + .cra_init = eip93_skcipher_cra_init, 251 + .cra_exit = eip93_skcipher_cra_exit, 252 + .cra_module = THIS_MODULE, 253 + }, 254 + }, 255 
+ }; 256 + 257 + struct eip93_alg_template eip93_alg_ctr_aes = { 258 + .type = EIP93_ALG_TYPE_SKCIPHER, 259 + .flags = EIP93_MODE_CTR | EIP93_ALG_AES, 260 + .alg.skcipher = { 261 + .setkey = eip93_skcipher_setkey, 262 + .encrypt = eip93_skcipher_encrypt, 263 + .decrypt = eip93_skcipher_decrypt, 264 + .min_keysize = AES_MIN_KEY_SIZE, 265 + .max_keysize = AES_MAX_KEY_SIZE, 266 + .ivsize = AES_BLOCK_SIZE, 267 + .base = { 268 + .cra_name = "ctr(aes)", 269 + .cra_driver_name = "ctr(aes-eip93)", 270 + .cra_priority = EIP93_CRA_PRIORITY, 271 + .cra_flags = CRYPTO_ALG_ASYNC | 272 + CRYPTO_ALG_NEED_FALLBACK | 273 + CRYPTO_ALG_KERN_DRIVER_ONLY, 274 + .cra_blocksize = 1, 275 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 276 + .cra_alignmask = 0xf, 277 + .cra_init = eip93_skcipher_cra_init, 278 + .cra_exit = eip93_skcipher_cra_exit, 279 + .cra_module = THIS_MODULE, 280 + }, 281 + }, 282 + }; 283 + 284 + struct eip93_alg_template eip93_alg_rfc3686_aes = { 285 + .type = EIP93_ALG_TYPE_SKCIPHER, 286 + .flags = EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, 287 + .alg.skcipher = { 288 + .setkey = eip93_skcipher_setkey, 289 + .encrypt = eip93_skcipher_encrypt, 290 + .decrypt = eip93_skcipher_decrypt, 291 + .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, 292 + .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, 293 + .ivsize = CTR_RFC3686_IV_SIZE, 294 + .base = { 295 + .cra_name = "rfc3686(ctr(aes))", 296 + .cra_driver_name = "rfc3686(ctr(aes-eip93))", 297 + .cra_priority = EIP93_CRA_PRIORITY, 298 + .cra_flags = CRYPTO_ALG_ASYNC | 299 + CRYPTO_ALG_NEED_FALLBACK | 300 + CRYPTO_ALG_KERN_DRIVER_ONLY, 301 + .cra_blocksize = 1, 302 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 303 + .cra_alignmask = 0xf, 304 + .cra_init = eip93_skcipher_cra_init, 305 + .cra_exit = eip93_skcipher_cra_exit, 306 + .cra_module = THIS_MODULE, 307 + }, 308 + }, 309 + }; 310 + 311 + struct eip93_alg_template eip93_alg_ecb_des = { 312 + .type = EIP93_ALG_TYPE_SKCIPHER, 313 + .flags 
= EIP93_MODE_ECB | EIP93_ALG_DES, 314 + .alg.skcipher = { 315 + .setkey = eip93_skcipher_setkey, 316 + .encrypt = eip93_skcipher_encrypt, 317 + .decrypt = eip93_skcipher_decrypt, 318 + .min_keysize = DES_KEY_SIZE, 319 + .max_keysize = DES_KEY_SIZE, 320 + .ivsize = 0, 321 + .base = { 322 + .cra_name = "ecb(des)", 323 + .cra_driver_name = "ebc(des-eip93)", 324 + .cra_priority = EIP93_CRA_PRIORITY, 325 + .cra_flags = CRYPTO_ALG_ASYNC | 326 + CRYPTO_ALG_KERN_DRIVER_ONLY, 327 + .cra_blocksize = DES_BLOCK_SIZE, 328 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 329 + .cra_alignmask = 0, 330 + .cra_init = eip93_skcipher_cra_init, 331 + .cra_exit = eip93_skcipher_cra_exit, 332 + .cra_module = THIS_MODULE, 333 + }, 334 + }, 335 + }; 336 + 337 + struct eip93_alg_template eip93_alg_cbc_des = { 338 + .type = EIP93_ALG_TYPE_SKCIPHER, 339 + .flags = EIP93_MODE_CBC | EIP93_ALG_DES, 340 + .alg.skcipher = { 341 + .setkey = eip93_skcipher_setkey, 342 + .encrypt = eip93_skcipher_encrypt, 343 + .decrypt = eip93_skcipher_decrypt, 344 + .min_keysize = DES_KEY_SIZE, 345 + .max_keysize = DES_KEY_SIZE, 346 + .ivsize = DES_BLOCK_SIZE, 347 + .base = { 348 + .cra_name = "cbc(des)", 349 + .cra_driver_name = "cbc(des-eip93)", 350 + .cra_priority = EIP93_CRA_PRIORITY, 351 + .cra_flags = CRYPTO_ALG_ASYNC | 352 + CRYPTO_ALG_KERN_DRIVER_ONLY, 353 + .cra_blocksize = DES_BLOCK_SIZE, 354 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 355 + .cra_alignmask = 0, 356 + .cra_init = eip93_skcipher_cra_init, 357 + .cra_exit = eip93_skcipher_cra_exit, 358 + .cra_module = THIS_MODULE, 359 + }, 360 + }, 361 + }; 362 + 363 + struct eip93_alg_template eip93_alg_ecb_des3_ede = { 364 + .type = EIP93_ALG_TYPE_SKCIPHER, 365 + .flags = EIP93_MODE_ECB | EIP93_ALG_3DES, 366 + .alg.skcipher = { 367 + .setkey = eip93_skcipher_setkey, 368 + .encrypt = eip93_skcipher_encrypt, 369 + .decrypt = eip93_skcipher_decrypt, 370 + .min_keysize = DES3_EDE_KEY_SIZE, 371 + .max_keysize = DES3_EDE_KEY_SIZE, 372 + .ivsize = 0, 373 
+ .base = { 374 + .cra_name = "ecb(des3_ede)", 375 + .cra_driver_name = "ecb(des3_ede-eip93)", 376 + .cra_priority = EIP93_CRA_PRIORITY, 377 + .cra_flags = CRYPTO_ALG_ASYNC | 378 + CRYPTO_ALG_KERN_DRIVER_ONLY, 379 + .cra_blocksize = DES3_EDE_BLOCK_SIZE, 380 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 381 + .cra_alignmask = 0, 382 + .cra_init = eip93_skcipher_cra_init, 383 + .cra_exit = eip93_skcipher_cra_exit, 384 + .cra_module = THIS_MODULE, 385 + }, 386 + }, 387 + }; 388 + 389 + struct eip93_alg_template eip93_alg_cbc_des3_ede = { 390 + .type = EIP93_ALG_TYPE_SKCIPHER, 391 + .flags = EIP93_MODE_CBC | EIP93_ALG_3DES, 392 + .alg.skcipher = { 393 + .setkey = eip93_skcipher_setkey, 394 + .encrypt = eip93_skcipher_encrypt, 395 + .decrypt = eip93_skcipher_decrypt, 396 + .min_keysize = DES3_EDE_KEY_SIZE, 397 + .max_keysize = DES3_EDE_KEY_SIZE, 398 + .ivsize = DES3_EDE_BLOCK_SIZE, 399 + .base = { 400 + .cra_name = "cbc(des3_ede)", 401 + .cra_driver_name = "cbc(des3_ede-eip93)", 402 + .cra_priority = EIP93_CRA_PRIORITY, 403 + .cra_flags = CRYPTO_ALG_ASYNC | 404 + CRYPTO_ALG_KERN_DRIVER_ONLY, 405 + .cra_blocksize = DES3_EDE_BLOCK_SIZE, 406 + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), 407 + .cra_alignmask = 0, 408 + .cra_init = eip93_skcipher_cra_init, 409 + .cra_exit = eip93_skcipher_cra_exit, 410 + .cra_module = THIS_MODULE, 411 + }, 412 + }, 413 + };
+60
drivers/crypto/inside-secure/eip93/eip93-cipher.h
/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2019 - 2021
 *
 * Richard van Schagen <vschagen@icloud.com>
 * Christian Marangi <ansuelsmth@gmail.com>
 */
#ifndef _EIP93_CIPHER_H_
#define _EIP93_CIPHER_H_

#include "eip93-main.h"

/*
 * Per-transform (tfm) context, allocated in cra_init and freed in cra_exit.
 * Holds the Security Association record handed to the EIP-93 engine.
 */
struct eip93_crypto_ctx {
	struct eip93_device *eip93;	/* owning device instance */
	u32 flags;			/* EIP93_ALG_* / EIP93_MODE_* template flags */
	struct sa_record *sa_record;	/* SA record written at setkey time */
	u32 sa_nonce;			/* rfc3686 nonce taken from the key tail */
	int blksize;			/* cipher block size selected at setkey */
	dma_addr_t sa_record_base;	/* DMA address of sa_record */
	/* AEAD specific */
	unsigned int authsize;
	unsigned int assoclen;
	bool set_assoc;
	enum eip93_alg_type type;
};

/*
 * Per-request context, carved out of the skcipher/aead request.
 * sg_src/sg_dst may point to internal bounce buffers when the caller's
 * scatterlists are not suitably aligned for the engine.
 */
struct eip93_cipher_reqctx {
	u16 desc_flags;			/* EIP93_DESC_* tag stored in the descriptor user_id */
	u16 flags;			/* template flags | EIP93_ENCRYPT/EIP93_DECRYPT */
	unsigned int blksize;
	unsigned int ivsize;
	unsigned int textsize;		/* payload length (cryptlen) */
	unsigned int assoclen;
	unsigned int authsize;
	dma_addr_t sa_record_base;
	struct sa_state *sa_state;	/* per-request IV/state block (non-ECB) */
	dma_addr_t sa_state_base;
	struct eip93_descriptor *cdesc;
	struct scatterlist *sg_src;
	struct scatterlist *sg_dst;
	int src_nents;
	int dst_nents;
	struct sa_state *sa_state_ctr;	/* second state used around 32-bit CTR wrap */
	dma_addr_t sa_state_ctr_base;
};

/* Validate request geometry and substitute bounce buffers if needed. */
int check_valid_request(struct eip93_cipher_reqctx *rctx);

/* Unmap request DMA and copy bounce-buffer results back to the caller. */
void eip93_unmap_dma(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
		     struct scatterlist *reqsrc, struct scatterlist *reqdst);

void eip93_skcipher_handle_result(struct crypto_async_request *async, int err);

/* Build sa_state, map DMA and queue descriptors; returns -EINPROGRESS. */
int eip93_send_req(struct crypto_async_request *async,
		   const u8 *reqiv, struct eip93_cipher_reqctx *rctx);

/* Post-completion teardown of sa_state mappings; copies back the IV. */
void eip93_handle_result(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
			 u8 *reqiv);

#endif /* _EIP93_CIPHER_H_ */
+809
drivers/crypto/inside-secure/eip93/eip93-common.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 - 2021
 *
 * Richard van Schagen <vschagen@icloud.com>
 * Christian Marangi <ansuelsmth@gmail.com>
 */

#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/hmac.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "eip93-cipher.h"
#include "eip93-hash.h"
#include "eip93-common.h"
#include "eip93-main.h"
#include "eip93-regs.h"

/*
 * Translate the PE control/status word of a result descriptor into an
 * errno.  Returns 0 for success and for sequence-number errors (which are
 * deliberately left to software to handle).
 */
int eip93_parse_ctrl_stat_err(struct eip93_device *eip93, int err)
{
	u32 ext_err;

	if (!err)
		return 0;

	switch (err & ~EIP93_PE_CTRL_PE_EXT_ERR_CODE) {
	case EIP93_PE_CTRL_PE_AUTH_ERR:
	case EIP93_PE_CTRL_PE_PAD_ERR:
		return -EBADMSG;
	/* let software handle anti-replay errors */
	case EIP93_PE_CTRL_PE_SEQNUM_ERR:
		return 0;
	case EIP93_PE_CTRL_PE_EXT_ERR:
		break;
	default:
		dev_err(eip93->dev, "Unhandled error 0x%08x\n", err);
		return -EINVAL;
	}

	/* Parse additional ext errors */
	ext_err = FIELD_GET(EIP93_PE_CTRL_PE_EXT_ERR_CODE, err);
	switch (ext_err) {
	case EIP93_PE_CTRL_PE_EXT_ERR_BUS:
	case EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING:
		return -EIO;
	case EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER:
		return -EACCES;
	case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP:
	case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO:
	case EIP93_PE_CTRL_PE_EXT_ERR_SPI:
		return -EINVAL;
	case EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH:
	case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH:
	case EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR:
		return -EBADMSG;
	default:
		dev_err(eip93->dev, "Unhandled ext error 0x%08x\n", ext_err);
		return -EINVAL;
	}
}

/*
 * Advance the ring write pointer and return the slot it pointed at, or
 * ERR_PTR(-ENOMEM) when the ring is full (write would catch up with read).
 * Caller must hold the ring write lock.
 */
static void *eip93_ring_next_wptr(struct eip93_device *eip93,
				  struct eip93_desc_ring *ring)
{
	void *ptr = ring->write;

	if ((ring->write == ring->read - ring->offset) ||
	    (ring->read == ring->base && ring->write == ring->base_end))
		return ERR_PTR(-ENOMEM);

	if (ring->write == ring->base_end)
		ring->write = ring->base;
	else
		ring->write += ring->offset;

	return ptr;
}

/*
 * Advance the ring read pointer and return the slot it pointed at, or
 * ERR_PTR(-ENOENT) when the ring is empty.
 */
static void *eip93_ring_next_rptr(struct eip93_device *eip93,
				  struct eip93_desc_ring *ring)
{
	void *ptr = ring->read;

	if (ring->write == ring->read)
		return ERR_PTR(-ENOENT);

	if (ring->read == ring->base_end)
		ring->read = ring->base;
	else
		ring->read += ring->offset;

	return ptr;
}

/*
 * Push one command descriptor: reserve a matching result-descriptor slot
 * (zeroed) and copy *desc into the command ring.  Returns -ENOENT when
 * either ring is full.
 */
int eip93_put_descriptor(struct eip93_device *eip93,
			 struct eip93_descriptor *desc)
{
	struct eip93_descriptor *cdesc;
	struct eip93_descriptor *rdesc;

	rdesc = eip93_ring_next_wptr(eip93, &eip93->ring->rdr);
	if (IS_ERR(rdesc))
		return -ENOENT;

	cdesc = eip93_ring_next_wptr(eip93, &eip93->ring->cdr);
	if (IS_ERR(cdesc))
		return -ENOENT;

	memset(rdesc, 0, sizeof(struct eip93_descriptor));

	memcpy(cdesc, desc, sizeof(struct eip93_descriptor));

	return 0;
}

/*
 * Pop one completed descriptor pair: clear the consumed command slot and
 * return the corresponding result descriptor, or ERR_PTR(-ENOENT) when
 * nothing is pending.
 */
void *eip93_get_descriptor(struct eip93_device *eip93)
{
	struct eip93_descriptor *cdesc;
	void *ptr;

	cdesc = eip93_ring_next_rptr(eip93, &eip93->ring->cdr);
	if (IS_ERR(cdesc))
		return ERR_PTR(-ENOENT);

	memset(cdesc, 0, sizeof(struct eip93_descriptor));

	ptr = eip93_ring_next_rptr(eip93, &eip93->ring->rdr);
	if (IS_ERR(ptr))
		return ERR_PTR(-ENOENT);

	return ptr;
}

/* Free a single-entry bounce scatterlist created by eip93_make_sg_copy(). */
static void eip93_free_sg_copy(const int len, struct scatterlist **sg)
{
	if (!*sg || !len)
		return;

	free_pages((unsigned long)sg_virt(*sg), get_order(len));
	kfree(*sg);
	*sg = NULL;
}

/*
 * Allocate a contiguous DMA-able bounce buffer wrapped in a one-entry
 * scatterlist; optionally copy the source data into it (copy=false is used
 * for the destination side, which the engine will overwrite).
 */
static int eip93_make_sg_copy(struct scatterlist *src, struct scatterlist **dst,
			      const u32 len, const bool copy)
{
	void *pages;

	*dst = kmalloc(sizeof(**dst), GFP_KERNEL);
	if (!*dst)
		return -ENOMEM;

	pages = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA,
					 get_order(len));
	if (!pages) {
		kfree(*dst);
		*dst = NULL;
		return -ENOMEM;
	}

	sg_init_table(*dst, 1);
	sg_set_buf(*dst, pages, len);

	/* copy only as requested */
	if (copy)
		sg_copy_to_buffer(src, sg_nents(src), pages, len);

	return 0;
}

/*
 * Check that every segment covering @len bytes is 32-bit aligned and a
 * multiple of @blksize (except the last, which only needs @len itself
 * block-aligned).  Returns false when the list is too short for @len.
 */
static bool eip93_is_sg_aligned(struct scatterlist *sg, u32 len,
				const int blksize)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, 4))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, blksize))
				return false;

			return true;
		}

		if (!IS_ALIGNED(sg->length, blksize))
			return false;

		len -= sg->length;
	}
	return false;
}

/*
 * Validate request geometry and substitute bounce buffers for any side
 * whose scatterlist the engine cannot consume directly.  On success
 * rctx->sg_src/sg_dst and the nents counts are ready for DMA mapping.
 */
int check_valid_request(struct eip93_cipher_reqctx *rctx)
{
	struct scatterlist *src = rctx->sg_src;
	struct scatterlist *dst = rctx->sg_dst;
	u32 src_nents, dst_nents;
	u32 textsize = rctx->textsize;
	u32 authsize = rctx->authsize;
	u32 blksize = rctx->blksize;
	u32 totlen_src = rctx->assoclen + rctx->textsize;
	u32 totlen_dst = rctx->assoclen + rctx->textsize;
	u32 copy_len;
	bool src_align, dst_align;
	int err = -EINVAL;

	if (!IS_CTR(rctx->flags)) {
		if (!IS_ALIGNED(textsize, blksize))
			return err;
	}

	/* The auth tag is appended: to dst on encrypt, read from src on decrypt. */
	if (authsize) {
		if (IS_ENCRYPT(rctx->flags))
			totlen_dst += authsize;
		else
			totlen_src += authsize;
	}

	/*
	 * NOTE(review): src_nents/dst_nents are u32 but sg_nents_for_len()
	 * returns a negative errno on short lists; a negative value wraps to
	 * a huge unsigned and the "<= 0" checks below only catch 0 — confirm
	 * whether these should be int.
	 */
	src_nents = sg_nents_for_len(src, totlen_src);
	dst_nents = sg_nents_for_len(dst, totlen_dst);

	if (src == dst) {
		src_nents = max(src_nents, dst_nents);
		dst_nents = src_nents;
		if (unlikely((totlen_src || totlen_dst) && src_nents <= 0))
			return err;

	} else {
		if (unlikely(totlen_src && src_nents <= 0))
			return err;

		if (unlikely(totlen_dst && dst_nents <= 0))
			return err;
	}

	/* AEAD requests only avoid bouncing for single-entry lists. */
	if (authsize) {
		if (dst_nents == 1 && src_nents == 1) {
			src_align = eip93_is_sg_aligned(src, totlen_src, blksize);
			if (src == dst)
				dst_align = src_align;
			else
				dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize);
		} else {
			src_align = false;
			dst_align = false;
		}
	} else {
		src_align = eip93_is_sg_aligned(src, totlen_src, blksize);
		if (src == dst)
			dst_align = src_align;
		else
			dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize);
	}

	copy_len = max(totlen_src, totlen_dst);
	if (!src_align) {
		err = eip93_make_sg_copy(src, &rctx->sg_src, copy_len, true);
		if (err)
			return err;
	}

	if (!dst_align) {
		err = eip93_make_sg_copy(dst, &rctx->sg_dst, copy_len, false);
		if (err)
			return err;
	}

	rctx->src_nents = sg_nents_for_len(rctx->sg_src, totlen_src);
	rctx->dst_nents = sg_nents_for_len(rctx->sg_dst, totlen_dst);

	return 0;
}

/*
 * Set sa_record function:
 * Even sa_record is set to "0", keep " = 0" for readability.
 */
void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen,
			 const u32 flags)
{
	/* Reset cmd word */
	sa_record->sa_cmd0_word = 0;
	sa_record->sa_cmd1_word = 0;

	sa_record->sa_cmd0_word |= EIP93_SA_CMD_IV_FROM_STATE;
	if (!IS_ECB(flags))
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_IV;

	sa_record->sa_cmd0_word |= EIP93_SA_CMD_OP_BASIC;

	/* Cipher selection; AES additionally encodes key length in words. */
	switch ((flags & EIP93_ALG_MASK)) {
	case EIP93_ALG_AES:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_AES;
		sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH,
						      keylen >> 3);
		break;
	case EIP93_ALG_3DES:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_3DES;
		break;
	case EIP93_ALG_DES:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_DES;
		break;
	default:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_NULL;
	}

	switch ((flags & EIP93_HASH_MASK)) {
	case EIP93_HASH_SHA256:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA256;
		break;
	case EIP93_HASH_SHA224:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA224;
		break;
	case EIP93_HASH_SHA1:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA1;
		break;
	case EIP93_HASH_MD5:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_MD5;
		break;
	default:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_NULL;
	}

	sa_record->sa_cmd0_word |= EIP93_SA_CMD_PAD_ZERO;

	switch ((flags & EIP93_MODE_MASK)) {
	case EIP93_MODE_CBC:
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CBC;
		break;
	case EIP93_MODE_CTR:
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CTR;
		break;
	case EIP93_MODE_ECB:
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_ECB;
		break;
	}

	sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIGEST_3WORD;
	if (IS_HASH(flags)) {
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_PAD;
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_DIGEST;
	}

	if (IS_HMAC(flags)) {
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_HMAC;
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_HEADER;
	}

	sa_record->sa_spi = 0x0;
	sa_record->sa_seqmum_mask[0] = 0xFFFFFFFF;
	sa_record->sa_seqmum_mask[1] = 0x0;
}

/*
 * Poor mans Scatter/gather function:
 * Create a Descriptor for every segment to avoid copying buffers.
 * For performance better to wait for hardware to perform multiple DMA
 *
 * @split marks the byte at which the CTR counter wraps; data before it is
 * processed with sa_state_ctr (original IV), data after with sa_state
 * (manually incremented IV).  When split == datalen there is no wrap.
 */
static int eip93_scatter_combine(struct eip93_device *eip93,
				 struct eip93_cipher_reqctx *rctx,
				 u32 datalen, u32 split, int offsetin)
{
	struct eip93_descriptor *cdesc = rctx->cdesc;
	struct scatterlist *sgsrc = rctx->sg_src;
	struct scatterlist *sgdst = rctx->sg_dst;
	unsigned int remainin = sg_dma_len(sgsrc);
	unsigned int remainout = sg_dma_len(sgdst);
	dma_addr_t saddr = sg_dma_address(sgsrc);
	dma_addr_t daddr = sg_dma_address(sgdst);
	dma_addr_t state_addr;
	u32 src_addr, dst_addr, len, n;
	bool nextin = false;
	bool nextout = false;
	int offsetout = 0;
	int err;

	if (IS_ECB(rctx->flags))
		rctx->sa_state_base = 0;

	if (split < datalen) {
		state_addr = rctx->sa_state_ctr_base;
		n = split;
	} else {
		state_addr = rctx->sa_state_base;
		n = datalen;
	}

	do {
		/* Walk src/dst segment lists in lockstep. */
		if (nextin) {
			sgsrc = sg_next(sgsrc);
			remainin = sg_dma_len(sgsrc);
			if (remainin == 0)
				continue;

			saddr = sg_dma_address(sgsrc);
			offsetin = 0;
			nextin = false;
		}

		if (nextout) {
			sgdst = sg_next(sgdst);
			remainout = sg_dma_len(sgdst);
			if (remainout == 0)
				continue;

			daddr = sg_dma_address(sgdst);
			offsetout = 0;
			nextout = false;
		}
		src_addr = saddr + offsetin;
		dst_addr = daddr + offsetout;

		/* len = min(remainin, remainout, n); advance whichever ran out. */
		if (remainin == remainout) {
			len = remainin;
			if (len > n) {
				len = n;
				remainin -= n;
				remainout -= n;
				offsetin += n;
				offsetout += n;
			} else {
				nextin = true;
				nextout = true;
			}
		} else if (remainin < remainout) {
			len = remainin;
			if (len > n) {
				len = n;
				remainin -= n;
				remainout -= n;
				offsetin += n;
				offsetout += n;
			} else {
				offsetout += len;
				remainout -= len;
				nextin = true;
			}
		} else {
			len = remainout;
			if (len > n) {
				len = n;
				remainin -= n;
				remainout -= n;
				offsetin += n;
				offsetout += n;
			} else {
				offsetin += len;
				remainin -= len;
				nextout = true;
			}
		}
		n -= len;

		cdesc->src_addr = src_addr;
		cdesc->dst_addr = dst_addr;
		cdesc->state_addr = state_addr;
		cdesc->pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY,
						   EIP93_PE_LENGTH_HOST_READY);
		cdesc->pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH, len);

		/* Pre-wrap chunk done: continue with the post-wrap state. */
		if (n == 0) {
			n = datalen - split;
			split = datalen;
			state_addr = rctx->sa_state_base;
		}

		/* Still zero: this was the final chunk of the request. */
		if (n == 0)
			cdesc->user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS,
						     EIP93_DESC_LAST);

		/*
		 * Loop - Delay - No need to rollback
		 * Maybe refine by slowing down at EIP93_RING_BUSY
		 */
again:
		scoped_guard(spinlock_irqsave, &eip93->ring->write_lock)
			err = eip93_put_descriptor(eip93, cdesc);
		if (err) {
			usleep_range(EIP93_RING_BUSY_DELAY,
				     EIP93_RING_BUSY_DELAY * 2);
			goto again;
		}
		/* Writing new descriptor count starts DMA action */
		writel(1, eip93->base + EIP93_REG_PE_CD_COUNT);
	} while (n);

	return -EINPROGRESS;
}

/*
 * Build the per-request sa_state (IV handling, rfc3686 nonce, 32-bit CTR
 * wrap splitting), DMA-map everything and emit descriptors.  Returns
 * -EINPROGRESS on successful submission.
 */
int eip93_send_req(struct crypto_async_request *async,
		   const u8 *reqiv, struct eip93_cipher_reqctx *rctx)
{
	struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
	struct eip93_device *eip93 = ctx->eip93;
	struct scatterlist *src = rctx->sg_src;
	struct scatterlist *dst = rctx->sg_dst;
	struct sa_state *sa_state;
	struct eip93_descriptor cdesc;
	u32 flags = rctx->flags;
	int offsetin = 0, err;
	u32 datalen = rctx->assoclen + rctx->textsize;
	u32 split = datalen;
	u32 start, end, ctr, blocks;
	u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
	int crypto_async_idr;

	rctx->sa_state_ctr = NULL;
	rctx->sa_state = NULL;

	if (IS_ECB(flags))
		goto skip_iv;

	memcpy(iv, reqiv, rctx->ivsize);

	rctx->sa_state = kzalloc(sizeof(*rctx->sa_state), GFP_KERNEL);
	if (!rctx->sa_state)
		return -ENOMEM;

	sa_state = rctx->sa_state;

	memcpy(sa_state->state_iv, iv, rctx->ivsize);
	if (IS_RFC3686(flags)) {
		/* rfc3686 counter block: nonce || IV || 1 */
		sa_state->state_iv[0] = ctx->sa_nonce;
		sa_state->state_iv[1] = iv[0];
		sa_state->state_iv[2] = iv[1];
		sa_state->state_iv[3] = (u32 __force)cpu_to_be32(0x1);
	} else if (!IS_HMAC(flags) && IS_CTR(flags)) {
		/* Compute data length. */
		blocks = DIV_ROUND_UP(rctx->textsize, AES_BLOCK_SIZE);
		ctr = be32_to_cpu((__be32 __force)iv[3]);
		/* Check 32bit counter overflow. */
		start = ctr;
		end = start + blocks - 1;
		if (end < start) {
			/* Bytes processable before the counter wraps. */
			split = AES_BLOCK_SIZE * -start;
			/*
			 * Increment the counter manually to cope with
			 * the hardware counter overflow.
			 */
			iv[3] = 0xffffffff;
			crypto_inc((u8 *)iv, AES_BLOCK_SIZE);

			rctx->sa_state_ctr = kzalloc(sizeof(*rctx->sa_state_ctr),
						     GFP_KERNEL);
			if (!rctx->sa_state_ctr) {
				err = -ENOMEM;
				goto free_sa_state;
			}

			/* ctr state = original IV; main state = wrapped IV. */
			memcpy(rctx->sa_state_ctr->state_iv, reqiv, rctx->ivsize);
			memcpy(sa_state->state_iv, iv, rctx->ivsize);

			rctx->sa_state_ctr_base = dma_map_single(eip93->dev, rctx->sa_state_ctr,
								 sizeof(*rctx->sa_state_ctr),
								 DMA_TO_DEVICE);
			err = dma_mapping_error(eip93->dev, rctx->sa_state_ctr_base);
			if (err)
				goto free_sa_state_ctr;
		}
	}

	/*
	 * NOTE(review): on the error paths below, a goto to
	 * free_sa_state_ctr (ctr mapping failure) or free_sa_state_ctr_dma
	 * (this mapping failed) falls through to dma_unmap_single() on
	 * sa_state_base even though it was never successfully mapped —
	 * confirm the label ordering is intended.
	 */
	rctx->sa_state_base = dma_map_single(eip93->dev, rctx->sa_state,
					     sizeof(*rctx->sa_state), DMA_TO_DEVICE);
	err = dma_mapping_error(eip93->dev, rctx->sa_state_base);
	if (err)
		goto free_sa_state_ctr_dma;

skip_iv:

	cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN,
					     EIP93_PE_CTRL_HOST_READY);
	cdesc.sa_addr = rctx->sa_record_base;
	cdesc.arc4_addr = 0;

	/* Stash the async request in the IDR; the ID rides in user_id. */
	scoped_guard(spinlock_bh, &eip93->ring->idr_lock)
		crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0,
					     EIP93_RING_NUM - 1, GFP_ATOMIC);

	cdesc.user_id = FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) |
			FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, rctx->desc_flags);

	rctx->cdesc = &cdesc;

	/* map DMA_BIDIRECTIONAL to invalidate cache on destination
	 * implies __dma_cache_wback_inv
	 */
	if (!dma_map_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL)) {
		err = -ENOMEM;
		goto free_sa_state_ctr_dma;
	}

	if (src != dst &&
	    !dma_map_sg(eip93->dev, src, rctx->src_nents, DMA_TO_DEVICE)) {
		err = -ENOMEM;
		goto free_sg_dma;
	}

	return eip93_scatter_combine(eip93, rctx, datalen, split, offsetin);

free_sg_dma:
	dma_unmap_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL);
free_sa_state_ctr_dma:
	if (rctx->sa_state_ctr)
		dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base,
				 sizeof(*rctx->sa_state_ctr),
				 DMA_TO_DEVICE);
free_sa_state_ctr:
	kfree(rctx->sa_state_ctr);
	if (rctx->sa_state)
		dma_unmap_single(eip93->dev, rctx->sa_state_base,
				 sizeof(*rctx->sa_state),
				 DMA_TO_DEVICE);
free_sa_state:
	kfree(rctx->sa_state);

	return err;
}

/*
 * Unmap the request scatterlists; if bounce buffers were used, byte-swap
 * the auth tag (SHA family is big-endian) and copy results back into the
 * caller's destination list, then free the bounce buffers.
 */
void eip93_unmap_dma(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
		     struct scatterlist *reqsrc, struct scatterlist *reqdst)
{
	u32 len = rctx->assoclen + rctx->textsize;
	u32 authsize = rctx->authsize;
	u32 flags = rctx->flags;
	u32 *otag;
	int i;

	if (rctx->sg_src == rctx->sg_dst) {
		dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents,
			     DMA_BIDIRECTIONAL);
		goto process_tag;
	}

	dma_unmap_sg(eip93->dev, rctx->sg_src, rctx->src_nents,
		     DMA_TO_DEVICE);

	if (rctx->sg_src != reqsrc)
		eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_src);

	dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents,
		     DMA_BIDIRECTIONAL);

	/* SHA tags need conversion from net-to-host */
process_tag:
	if (IS_DECRYPT(flags))
		authsize = 0;

	if (authsize) {
		if (!IS_HASH_MD5(flags)) {
			otag = sg_virt(rctx->sg_dst) + len;
			for (i = 0; i < (authsize / 4); i++)
				otag[i] = be32_to_cpu((__be32 __force)otag[i]);
		}
	}

	if (rctx->sg_dst != reqdst) {
		sg_copy_from_buffer(reqdst, sg_nents(reqdst),
				    sg_virt(rctx->sg_dst), len + authsize);
		eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_dst);
	}
}

/*
 * Tear down the per-request sa_state mappings, copy the engine-updated IV
 * back to the request (non-ECB) and free the state buffers.
 *
 * NOTE(review): these buffers were mapped with DMA_TO_DEVICE in
 * eip93_send_req() but are unmapped here with DMA_FROM_DEVICE — the
 * engine does write the saved IV back, but the mismatched direction
 * violates the DMA API contract; confirm against Documentation/core-api.
 */
void eip93_handle_result(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
			 u8 *reqiv)
{
	if (rctx->sa_state_ctr)
		dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base,
				 sizeof(*rctx->sa_state_ctr),
				 DMA_FROM_DEVICE);

	if (rctx->sa_state)
		dma_unmap_single(eip93->dev, rctx->sa_state_base,
				 sizeof(*rctx->sa_state),
				 DMA_FROM_DEVICE);

	if (!IS_ECB(rctx->flags))
		memcpy(reqiv, rctx->sa_state->state_iv, rctx->ivsize);

	kfree(rctx->sa_state_ctr);
	kfree(rctx->sa_state);
}

/*
 * Precompute the HMAC inner/outer pads using the engine's own hash
 * implementation (selected via ctx_flags).  Keys longer than the block
 * size are first digested.  The partial (unfinalized) hashes of
 * ipad/opad are written to dest_ipad/dest_opad; with skip_ipad the raw
 * XORed ipad is copied instead of hashed.  Digest words are converted to
 * big-endian for the SHA family (MD5 stays little-endian).
 */
int eip93_hmac_setkey(u32 ctx_flags, const u8 *key, unsigned int keylen,
		      unsigned int hashlen, u8 *dest_ipad, u8 *dest_opad,
		      bool skip_ipad)
{
	u8 ipad[SHA256_BLOCK_SIZE], opad[SHA256_BLOCK_SIZE];
	struct crypto_ahash *ahash_tfm;
	struct eip93_hash_reqctx *rctx;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg[1];
	const char *alg_name;
	int i, ret;

	switch (ctx_flags & EIP93_HASH_MASK) {
	case EIP93_HASH_SHA256:
		alg_name = "sha256-eip93";
		break;
	case EIP93_HASH_SHA224:
		alg_name = "sha224-eip93";
		break;
	case EIP93_HASH_SHA1:
		alg_name = "sha1-eip93";
		break;
	case EIP93_HASH_MD5:
		alg_name = "md5-eip93";
		break;
	default: /* Impossible */
		return -EINVAL;
	}

	ahash_tfm = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_ATOMIC);
	if (!req) {
		ret = -ENOMEM;
		goto err_ahash;
	}

	rctx = ahash_request_ctx_dma(req);
	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* Hash the key if > SHA256_BLOCK_SIZE */
	if (keylen > SHA256_BLOCK_SIZE) {
		sg_init_one(&sg[0], key, keylen);

		ahash_request_set_crypt(req, sg, ipad, keylen);
		ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
		if (ret)
			goto err_req;

		keylen = hashlen;
	} else {
		memcpy(ipad, key, keylen);
	}

	/* Copy to opad */
	memset(ipad + keylen, 0, SHA256_BLOCK_SIZE - keylen);
	memcpy(opad, ipad, SHA256_BLOCK_SIZE);

	/* Pad with HMAC constants */
	for (i = 0; i < SHA256_BLOCK_SIZE; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	if (skip_ipad) {
		memcpy(dest_ipad, ipad, SHA256_BLOCK_SIZE);
	} else {
		/* Hash ipad */
		sg_init_one(&sg[0], ipad, SHA256_BLOCK_SIZE);
		ahash_request_set_crypt(req, sg, dest_ipad, SHA256_BLOCK_SIZE);
		ret = crypto_ahash_init(req);
		if (ret)
			goto err_req;

		/* Disable HASH_FINALIZE for ipad hash */
		rctx->partial_hash = true;

		ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
		if (ret)
			goto err_req;
	}

	/* Hash opad */
	sg_init_one(&sg[0], opad, SHA256_BLOCK_SIZE);
	ahash_request_set_crypt(req, sg, dest_opad, SHA256_BLOCK_SIZE);
	ret = crypto_ahash_init(req);
	if (ret)
		goto err_req;

	/* Disable HASH_FINALIZE for opad hash */
	rctx->partial_hash = true;

	ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
	if (ret)
		goto err_req;

	if (!IS_HASH_MD5(ctx_flags)) {
		for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) {
			u32 *ipad_hash = (u32 *)dest_ipad;
			u32 *opad_hash = (u32 *)dest_opad;

			if (!skip_ipad)
				ipad_hash[i] = (u32 __force)cpu_to_be32(ipad_hash[i]);
			opad_hash[i] = (u32 __force)cpu_to_be32(opad_hash[i]);
		}
	}

err_req:
	ahash_request_free(req);
err_ahash:
	crypto_free_ahash(ahash_tfm);

	return ret;
}
+24
drivers/crypto/inside-secure/eip93/eip93-common.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 2 + * 3 + * Copyright (C) 2019 - 2021 4 + * 5 + * Richard van Schagen <vschagen@icloud.com> 6 + * Christian Marangi <ansuelsmth@gmail.com 7 + */ 8 + 9 + #ifndef _EIP93_COMMON_H_ 10 + #define _EIP93_COMMON_H_ 11 + 12 + void *eip93_get_descriptor(struct eip93_device *eip93); 13 + int eip93_put_descriptor(struct eip93_device *eip93, struct eip93_descriptor *desc); 14 + 15 + void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen, 16 + const u32 flags); 17 + 18 + int eip93_parse_ctrl_stat_err(struct eip93_device *eip93, int err); 19 + 20 + int eip93_hmac_setkey(u32 ctx_flags, const u8 *key, unsigned int keylen, 21 + unsigned int hashlen, u8 *ipad, u8 *opad, 22 + bool skip_ipad); 23 + 24 + #endif /* _EIP93_COMMON_H_ */
+16
drivers/crypto/inside-secure/eip93/eip93-des.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 2 + * 3 + * Copyright (C) 2019 - 2021 4 + * 5 + * Richard van Schagen <vschagen@icloud.com> 6 + * Christian Marangi <ansuelsmth@gmail.com 7 + */ 8 + #ifndef _EIP93_DES_H_ 9 + #define _EIP93_DES_H_ 10 + 11 + extern struct eip93_alg_template eip93_alg_ecb_des; 12 + extern struct eip93_alg_template eip93_alg_cbc_des; 13 + extern struct eip93_alg_template eip93_alg_ecb_des3_ede; 14 + extern struct eip93_alg_template eip93_alg_cbc_des3_ede; 15 + 16 + #endif /* _EIP93_DES_H_ */
+866
drivers/crypto/inside-secure/eip93/eip93-hash.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2024 4 + * 5 + * Christian Marangi <ansuelsmth@gmail.com 6 + */ 7 + 8 + #include <crypto/sha1.h> 9 + #include <crypto/sha2.h> 10 + #include <crypto/md5.h> 11 + #include <crypto/hmac.h> 12 + #include <linux/dma-mapping.h> 13 + #include <linux/delay.h> 14 + 15 + #include "eip93-cipher.h" 16 + #include "eip93-hash.h" 17 + #include "eip93-main.h" 18 + #include "eip93-common.h" 19 + #include "eip93-regs.h" 20 + 21 + static void eip93_hash_free_data_blocks(struct ahash_request *req) 22 + { 23 + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); 24 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 25 + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); 26 + struct eip93_device *eip93 = ctx->eip93; 27 + struct mkt_hash_block *block, *tmp; 28 + 29 + list_for_each_entry_safe(block, tmp, &rctx->blocks, list) { 30 + dma_unmap_single(eip93->dev, block->data_dma, 31 + SHA256_BLOCK_SIZE, DMA_TO_DEVICE); 32 + kfree(block); 33 + } 34 + if (!list_empty(&rctx->blocks)) 35 + INIT_LIST_HEAD(&rctx->blocks); 36 + 37 + if (rctx->finalize) 38 + dma_unmap_single(eip93->dev, rctx->data_dma, 39 + rctx->data_used, 40 + DMA_TO_DEVICE); 41 + } 42 + 43 + static void eip93_hash_free_sa_record(struct ahash_request *req) 44 + { 45 + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); 46 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 47 + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); 48 + struct eip93_device *eip93 = ctx->eip93; 49 + 50 + if (IS_HMAC(ctx->flags)) 51 + dma_unmap_single(eip93->dev, rctx->sa_record_hmac_base, 52 + sizeof(rctx->sa_record_hmac), DMA_TO_DEVICE); 53 + 54 + dma_unmap_single(eip93->dev, rctx->sa_record_base, 55 + sizeof(rctx->sa_record), DMA_TO_DEVICE); 56 + } 57 + 58 + void eip93_hash_handle_result(struct crypto_async_request *async, int err) 59 + { 60 + struct ahash_request *req = ahash_request_cast(async); 61 + struct eip93_hash_reqctx *rctx = 
ahash_request_ctx_dma(req); 62 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 63 + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); 64 + int digestsize = crypto_ahash_digestsize(ahash); 65 + struct sa_state *sa_state = &rctx->sa_state; 66 + struct eip93_device *eip93 = ctx->eip93; 67 + int i; 68 + 69 + dma_unmap_single(eip93->dev, rctx->sa_state_base, 70 + sizeof(*sa_state), DMA_FROM_DEVICE); 71 + 72 + /* 73 + * With partial_hash assume SHA256_DIGEST_SIZE buffer is passed. 74 + * This is to handle SHA224 that have a 32 byte intermediate digest. 75 + */ 76 + if (rctx->partial_hash) 77 + digestsize = SHA256_DIGEST_SIZE; 78 + 79 + if (rctx->finalize || rctx->partial_hash) { 80 + /* bytes needs to be swapped for req->result */ 81 + if (!IS_HASH_MD5(ctx->flags)) { 82 + for (i = 0; i < digestsize / sizeof(u32); i++) { 83 + u32 *digest = (u32 *)sa_state->state_i_digest; 84 + 85 + digest[i] = be32_to_cpu((__be32 __force)digest[i]); 86 + } 87 + } 88 + 89 + memcpy(req->result, sa_state->state_i_digest, digestsize); 90 + } 91 + 92 + eip93_hash_free_sa_record(req); 93 + eip93_hash_free_data_blocks(req); 94 + 95 + ahash_request_complete(req, err); 96 + } 97 + 98 + static void eip93_hash_init_sa_state_digest(u32 hash, u8 *digest) 99 + { 100 + u32 sha256_init[] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, 101 + SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 }; 102 + u32 sha224_init[] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, 103 + SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 }; 104 + u32 sha1_init[] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }; 105 + u32 md5_init[] = { MD5_H0, MD5_H1, MD5_H2, MD5_H3 }; 106 + 107 + /* Init HASH constant */ 108 + switch (hash) { 109 + case EIP93_HASH_SHA256: 110 + memcpy(digest, sha256_init, sizeof(sha256_init)); 111 + return; 112 + case EIP93_HASH_SHA224: 113 + memcpy(digest, sha224_init, sizeof(sha224_init)); 114 + return; 115 + case EIP93_HASH_SHA1: 116 + memcpy(digest, sha1_init, sizeof(sha1_init)); 117 + return; 118 + 
case EIP93_HASH_MD5: 119 + memcpy(digest, md5_init, sizeof(md5_init)); 120 + return; 121 + default: /* Impossible */ 122 + return; 123 + } 124 + } 125 + 126 + static void eip93_hash_export_sa_state(struct ahash_request *req, 127 + struct eip93_hash_export_state *state) 128 + { 129 + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); 130 + struct sa_state *sa_state = &rctx->sa_state; 131 + 132 + /* 133 + * EIP93 have special handling for state_byte_cnt in sa_state. 134 + * Even if a zero packet is passed (and a BADMSG is returned), 135 + * state_byte_cnt is incremented to the digest handled (with the hash 136 + * primitive). This is problematic with export/import as EIP93 137 + * expect 0 state_byte_cnt for the very first iteration. 138 + */ 139 + if (!rctx->len) 140 + memset(state->state_len, 0, sizeof(u32) * 2); 141 + else 142 + memcpy(state->state_len, sa_state->state_byte_cnt, 143 + sizeof(u32) * 2); 144 + memcpy(state->state_hash, sa_state->state_i_digest, 145 + SHA256_DIGEST_SIZE); 146 + state->len = rctx->len; 147 + state->data_used = rctx->data_used; 148 + } 149 + 150 + static void __eip93_hash_init(struct ahash_request *req) 151 + { 152 + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); 153 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 154 + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); 155 + struct sa_record *sa_record = &rctx->sa_record; 156 + int digestsize; 157 + 158 + digestsize = crypto_ahash_digestsize(ahash); 159 + 160 + eip93_set_sa_record(sa_record, 0, ctx->flags); 161 + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_FROM_STATE; 162 + sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_HASH; 163 + sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE; 164 + sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE, 165 + EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH); 166 + sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH; 167 + sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 168 + digestsize / sizeof(u32)); 
169 + 170 + /* 171 + * HMAC special handling 172 + * Enabling CMD_HMAC force the inner hash to be always finalized. 173 + * This cause problems on handling message > 64 byte as we 174 + * need to produce intermediate inner hash on sending intermediate 175 + * 64 bytes blocks. 176 + * 177 + * To handle this, enable CMD_HMAC only on the last block. 178 + * We make a duplicate of sa_record and on the last descriptor, 179 + * we pass a dedicated sa_record with CMD_HMAC enabled to make 180 + * EIP93 apply the outer hash. 181 + */ 182 + if (IS_HMAC(ctx->flags)) { 183 + struct sa_record *sa_record_hmac = &rctx->sa_record_hmac; 184 + 185 + memcpy(sa_record_hmac, sa_record, sizeof(*sa_record)); 186 + /* Copy pre-hashed opad for HMAC */ 187 + memcpy(sa_record_hmac->sa_o_digest, ctx->opad, SHA256_DIGEST_SIZE); 188 + 189 + /* Disable HMAC for hash normal sa_record */ 190 + sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HMAC; 191 + } 192 + 193 + rctx->len = 0; 194 + rctx->data_used = 0; 195 + rctx->partial_hash = false; 196 + rctx->finalize = false; 197 + INIT_LIST_HEAD(&rctx->blocks); 198 + } 199 + 200 + static int eip93_send_hash_req(struct crypto_async_request *async, u8 *data, 201 + dma_addr_t *data_dma, u32 len, bool last) 202 + { 203 + struct ahash_request *req = ahash_request_cast(async); 204 + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); 205 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 206 + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); 207 + struct eip93_device *eip93 = ctx->eip93; 208 + struct eip93_descriptor cdesc = { }; 209 + dma_addr_t src_addr; 210 + int ret; 211 + 212 + /* Map block data to DMA */ 213 + src_addr = dma_map_single(eip93->dev, data, len, DMA_TO_DEVICE); 214 + ret = dma_mapping_error(eip93->dev, src_addr); 215 + if (ret) 216 + return ret; 217 + 218 + cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN, 219 + EIP93_PE_CTRL_HOST_READY); 220 + cdesc.sa_addr = rctx->sa_record_base; 221 + 
cdesc.arc4_addr = 0; 222 + 223 + cdesc.state_addr = rctx->sa_state_base; 224 + cdesc.src_addr = src_addr; 225 + cdesc.pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY, 226 + EIP93_PE_LENGTH_HOST_READY); 227 + cdesc.pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH, 228 + len); 229 + 230 + cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_HASH); 231 + 232 + if (last) { 233 + int crypto_async_idr; 234 + 235 + if (rctx->finalize && !rctx->partial_hash) { 236 + /* For last block, pass sa_record with CMD_HMAC enabled */ 237 + if (IS_HMAC(ctx->flags)) { 238 + struct sa_record *sa_record_hmac = &rctx->sa_record_hmac; 239 + 240 + rctx->sa_record_hmac_base = dma_map_single(eip93->dev, 241 + sa_record_hmac, 242 + sizeof(*sa_record_hmac), 243 + DMA_TO_DEVICE); 244 + ret = dma_mapping_error(eip93->dev, rctx->sa_record_hmac_base); 245 + if (ret) 246 + return ret; 247 + 248 + cdesc.sa_addr = rctx->sa_record_hmac_base; 249 + } 250 + 251 + cdesc.pe_ctrl_stat_word |= EIP93_PE_CTRL_PE_HASH_FINAL; 252 + } 253 + 254 + scoped_guard(spinlock_bh, &eip93->ring->idr_lock) 255 + crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0, 256 + EIP93_RING_NUM - 1, GFP_ATOMIC); 257 + 258 + cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) | 259 + FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_LAST); 260 + } 261 + 262 + again: 263 + ret = eip93_put_descriptor(eip93, &cdesc); 264 + if (ret) { 265 + usleep_range(EIP93_RING_BUSY_DELAY, 266 + EIP93_RING_BUSY_DELAY * 2); 267 + goto again; 268 + } 269 + 270 + /* Writing new descriptor count starts DMA action */ 271 + writel(1, eip93->base + EIP93_REG_PE_CD_COUNT); 272 + 273 + *data_dma = src_addr; 274 + return 0; 275 + } 276 + 277 + static int eip93_hash_init(struct ahash_request *req) 278 + { 279 + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); 280 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 281 + struct eip93_hash_ctx *ctx = 
crypto_ahash_ctx(ahash); 282 + struct sa_state *sa_state = &rctx->sa_state; 283 + 284 + memset(sa_state->state_byte_cnt, 0, sizeof(u32) * 2); 285 + eip93_hash_init_sa_state_digest(ctx->flags & EIP93_HASH_MASK, 286 + sa_state->state_i_digest); 287 + 288 + __eip93_hash_init(req); 289 + 290 + /* For HMAC setup the initial block for ipad */ 291 + if (IS_HMAC(ctx->flags)) { 292 + memcpy(rctx->data, ctx->ipad, SHA256_BLOCK_SIZE); 293 + 294 + rctx->data_used = SHA256_BLOCK_SIZE; 295 + rctx->len += SHA256_BLOCK_SIZE; 296 + } 297 + 298 + return 0; 299 + } 300 + 301 + /* 302 + * With complete_req true, we wait for the engine to consume all the block in list, 303 + * else we just queue the block to the engine as final() will wait. This is useful 304 + * for finup(). 305 + */ 306 + static int __eip93_hash_update(struct ahash_request *req, bool complete_req) 307 + { 308 + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); 309 + struct crypto_async_request *async = &req->base; 310 + unsigned int read, to_consume = req->nbytes; 311 + unsigned int max_read, consumed = 0; 312 + struct mkt_hash_block *block; 313 + bool wait_req = false; 314 + int offset; 315 + int ret; 316 + 317 + /* Get the offset and available space to fill req data */ 318 + offset = rctx->data_used; 319 + max_read = SHA256_BLOCK_SIZE - offset; 320 + 321 + /* Consume req in block of SHA256_BLOCK_SIZE. 322 + * to_read is initially set to space available in the req data 323 + * and then reset to SHA256_BLOCK_SIZE. 324 + */ 325 + while (to_consume > max_read) { 326 + block = kzalloc(sizeof(*block), GFP_ATOMIC); 327 + if (!block) { 328 + ret = -ENOMEM; 329 + goto free_blocks; 330 + } 331 + 332 + read = sg_pcopy_to_buffer(req->src, sg_nents(req->src), 333 + block->data + offset, 334 + max_read, consumed); 335 + 336 + /* 337 + * For first iteration only, copy req data to block 338 + * and reset offset and max_read for next iteration. 
339 + */ 340 + if (offset > 0) { 341 + memcpy(block->data, rctx->data, offset); 342 + offset = 0; 343 + max_read = SHA256_BLOCK_SIZE; 344 + } 345 + 346 + list_add(&block->list, &rctx->blocks); 347 + to_consume -= read; 348 + consumed += read; 349 + } 350 + 351 + /* Write the remaining data to req data */ 352 + read = sg_pcopy_to_buffer(req->src, sg_nents(req->src), 353 + rctx->data + offset, to_consume, 354 + consumed); 355 + rctx->data_used = offset + read; 356 + 357 + /* Update counter with processed bytes */ 358 + rctx->len += read + consumed; 359 + 360 + /* Consume all the block added to list */ 361 + list_for_each_entry_reverse(block, &rctx->blocks, list) { 362 + wait_req = complete_req && 363 + list_is_first(&block->list, &rctx->blocks); 364 + 365 + ret = eip93_send_hash_req(async, block->data, 366 + &block->data_dma, 367 + SHA256_BLOCK_SIZE, wait_req); 368 + if (ret) 369 + goto free_blocks; 370 + } 371 + 372 + return wait_req ? -EINPROGRESS : 0; 373 + 374 + free_blocks: 375 + eip93_hash_free_data_blocks(req); 376 + 377 + return ret; 378 + } 379 + 380 + static int eip93_hash_update(struct ahash_request *req) 381 + { 382 + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); 383 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 384 + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); 385 + struct sa_record *sa_record = &rctx->sa_record; 386 + struct sa_state *sa_state = &rctx->sa_state; 387 + struct eip93_device *eip93 = ctx->eip93; 388 + int ret; 389 + 390 + if (!req->nbytes) 391 + return 0; 392 + 393 + rctx->sa_state_base = dma_map_single(eip93->dev, sa_state, 394 + sizeof(*sa_state), 395 + DMA_TO_DEVICE); 396 + ret = dma_mapping_error(eip93->dev, rctx->sa_state_base); 397 + if (ret) 398 + return ret; 399 + 400 + rctx->sa_record_base = dma_map_single(eip93->dev, sa_record, 401 + sizeof(*sa_record), 402 + DMA_TO_DEVICE); 403 + ret = dma_mapping_error(eip93->dev, rctx->sa_record_base); 404 + if (ret) 405 + goto free_sa_state; 406 + 407 + ret 
= __eip93_hash_update(req, true); 408 + if (ret && ret != -EINPROGRESS) 409 + goto free_sa_record; 410 + 411 + return ret; 412 + 413 + free_sa_record: 414 + dma_unmap_single(eip93->dev, rctx->sa_record_base, 415 + sizeof(*sa_record), DMA_TO_DEVICE); 416 + 417 + free_sa_state: 418 + dma_unmap_single(eip93->dev, rctx->sa_state_base, 419 + sizeof(*sa_state), DMA_TO_DEVICE); 420 + 421 + return ret; 422 + } 423 + 424 + /* 425 + * With map_data true, we map the sa_record and sa_state. This is needed 426 + * for finup() as the they are mapped before calling update() 427 + */ 428 + static int __eip93_hash_final(struct ahash_request *req, bool map_dma) 429 + { 430 + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); 431 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 432 + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); 433 + struct crypto_async_request *async = &req->base; 434 + struct sa_record *sa_record = &rctx->sa_record; 435 + struct sa_state *sa_state = &rctx->sa_state; 436 + struct eip93_device *eip93 = ctx->eip93; 437 + int ret; 438 + 439 + /* EIP93 can't handle zero bytes hash */ 440 + if (!rctx->len && !IS_HMAC(ctx->flags)) { 441 + switch ((ctx->flags & EIP93_HASH_MASK)) { 442 + case EIP93_HASH_SHA256: 443 + memcpy(req->result, sha256_zero_message_hash, 444 + SHA256_DIGEST_SIZE); 445 + break; 446 + case EIP93_HASH_SHA224: 447 + memcpy(req->result, sha224_zero_message_hash, 448 + SHA224_DIGEST_SIZE); 449 + break; 450 + case EIP93_HASH_SHA1: 451 + memcpy(req->result, sha1_zero_message_hash, 452 + SHA1_DIGEST_SIZE); 453 + break; 454 + case EIP93_HASH_MD5: 455 + memcpy(req->result, md5_zero_message_hash, 456 + MD5_DIGEST_SIZE); 457 + break; 458 + default: /* Impossible */ 459 + return -EINVAL; 460 + } 461 + 462 + return 0; 463 + } 464 + 465 + /* Signal interrupt from engine is for last block */ 466 + rctx->finalize = true; 467 + 468 + if (map_dma) { 469 + rctx->sa_state_base = dma_map_single(eip93->dev, sa_state, 470 + sizeof(*sa_state), 471 
+ DMA_TO_DEVICE); 472 + ret = dma_mapping_error(eip93->dev, rctx->sa_state_base); 473 + if (ret) 474 + return ret; 475 + 476 + rctx->sa_record_base = dma_map_single(eip93->dev, sa_record, 477 + sizeof(*sa_record), 478 + DMA_TO_DEVICE); 479 + ret = dma_mapping_error(eip93->dev, rctx->sa_record_base); 480 + if (ret) 481 + goto free_sa_state; 482 + } 483 + 484 + /* Send last block */ 485 + ret = eip93_send_hash_req(async, rctx->data, &rctx->data_dma, 486 + rctx->data_used, true); 487 + if (ret) 488 + goto free_blocks; 489 + 490 + return -EINPROGRESS; 491 + 492 + free_blocks: 493 + eip93_hash_free_data_blocks(req); 494 + 495 + dma_unmap_single(eip93->dev, rctx->sa_record_base, 496 + sizeof(*sa_record), DMA_TO_DEVICE); 497 + 498 + free_sa_state: 499 + dma_unmap_single(eip93->dev, rctx->sa_state_base, 500 + sizeof(*sa_state), DMA_TO_DEVICE); 501 + 502 + return ret; 503 + } 504 + 505 + static int eip93_hash_final(struct ahash_request *req) 506 + { 507 + return __eip93_hash_final(req, true); 508 + } 509 + 510 + static int eip93_hash_finup(struct ahash_request *req) 511 + { 512 + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); 513 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 514 + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); 515 + struct sa_record *sa_record = &rctx->sa_record; 516 + struct sa_state *sa_state = &rctx->sa_state; 517 + struct eip93_device *eip93 = ctx->eip93; 518 + int ret; 519 + 520 + if (rctx->len + req->nbytes || IS_HMAC(ctx->flags)) { 521 + rctx->sa_state_base = dma_map_single(eip93->dev, sa_state, 522 + sizeof(*sa_state), 523 + DMA_TO_DEVICE); 524 + ret = dma_mapping_error(eip93->dev, rctx->sa_state_base); 525 + if (ret) 526 + return ret; 527 + 528 + rctx->sa_record_base = dma_map_single(eip93->dev, sa_record, 529 + sizeof(*sa_record), 530 + DMA_TO_DEVICE); 531 + ret = dma_mapping_error(eip93->dev, rctx->sa_record_base); 532 + if (ret) 533 + goto free_sa_state; 534 + 535 + ret = __eip93_hash_update(req, false); 536 + if 
(ret) 537 + goto free_sa_record; 538 + } 539 + 540 + return __eip93_hash_final(req, false); 541 + 542 + free_sa_record: 543 + dma_unmap_single(eip93->dev, rctx->sa_record_base, 544 + sizeof(*sa_record), DMA_TO_DEVICE); 545 + free_sa_state: 546 + dma_unmap_single(eip93->dev, rctx->sa_state_base, 547 + sizeof(*sa_state), DMA_TO_DEVICE); 548 + 549 + return ret; 550 + } 551 + 552 + static int eip93_hash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, 553 + u32 keylen) 554 + { 555 + unsigned int digestsize = crypto_ahash_digestsize(ahash); 556 + struct crypto_tfm *tfm = crypto_ahash_tfm(ahash); 557 + struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm); 558 + 559 + return eip93_hmac_setkey(ctx->flags, key, keylen, digestsize, 560 + ctx->ipad, ctx->opad, true); 561 + } 562 + 563 + static int eip93_hash_cra_init(struct crypto_tfm *tfm) 564 + { 565 + struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm); 566 + struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg, 567 + struct eip93_alg_template, alg.ahash.halg.base); 568 + 569 + crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm), 570 + sizeof(struct eip93_hash_reqctx)); 571 + 572 + ctx->eip93 = tmpl->eip93; 573 + ctx->flags = tmpl->flags; 574 + 575 + return 0; 576 + } 577 + 578 + static int eip93_hash_digest(struct ahash_request *req) 579 + { 580 + int ret; 581 + 582 + ret = eip93_hash_init(req); 583 + if (ret) 584 + return ret; 585 + 586 + return eip93_hash_finup(req); 587 + } 588 + 589 + static int eip93_hash_import(struct ahash_request *req, const void *in) 590 + { 591 + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); 592 + const struct eip93_hash_export_state *state = in; 593 + struct sa_state *sa_state = &rctx->sa_state; 594 + 595 + memcpy(sa_state->state_byte_cnt, state->state_len, sizeof(u32) * 2); 596 + memcpy(sa_state->state_i_digest, state->state_hash, SHA256_DIGEST_SIZE); 597 + 598 + __eip93_hash_init(req); 599 + 600 + rctx->len = state->len; 601 + rctx->data_used = state->data_used; 602 + 
603 + /* Skip copying data if we have nothing to copy */ 604 + if (rctx->len) 605 + memcpy(rctx->data, state->data, rctx->data_used); 606 + 607 + return 0; 608 + } 609 + 610 + static int eip93_hash_export(struct ahash_request *req, void *out) 611 + { 612 + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); 613 + struct eip93_hash_export_state *state = out; 614 + 615 + /* Save the first block in state data */ 616 + if (rctx->len) 617 + memcpy(state->data, rctx->data, rctx->data_used); 618 + 619 + eip93_hash_export_sa_state(req, state); 620 + 621 + return 0; 622 + } 623 + 624 + struct eip93_alg_template eip93_alg_md5 = { 625 + .type = EIP93_ALG_TYPE_HASH, 626 + .flags = EIP93_HASH_MD5, 627 + .alg.ahash = { 628 + .init = eip93_hash_init, 629 + .update = eip93_hash_update, 630 + .final = eip93_hash_final, 631 + .finup = eip93_hash_finup, 632 + .digest = eip93_hash_digest, 633 + .export = eip93_hash_export, 634 + .import = eip93_hash_import, 635 + .halg = { 636 + .digestsize = MD5_DIGEST_SIZE, 637 + .statesize = sizeof(struct eip93_hash_export_state), 638 + .base = { 639 + .cra_name = "md5", 640 + .cra_driver_name = "md5-eip93", 641 + .cra_priority = 300, 642 + .cra_flags = CRYPTO_ALG_ASYNC | 643 + CRYPTO_ALG_KERN_DRIVER_ONLY | 644 + CRYPTO_ALG_ALLOCATES_MEMORY, 645 + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, 646 + .cra_ctxsize = sizeof(struct eip93_hash_ctx), 647 + .cra_init = eip93_hash_cra_init, 648 + .cra_module = THIS_MODULE, 649 + }, 650 + }, 651 + }, 652 + }; 653 + 654 + struct eip93_alg_template eip93_alg_sha1 = { 655 + .type = EIP93_ALG_TYPE_HASH, 656 + .flags = EIP93_HASH_SHA1, 657 + .alg.ahash = { 658 + .init = eip93_hash_init, 659 + .update = eip93_hash_update, 660 + .final = eip93_hash_final, 661 + .finup = eip93_hash_finup, 662 + .digest = eip93_hash_digest, 663 + .export = eip93_hash_export, 664 + .import = eip93_hash_import, 665 + .halg = { 666 + .digestsize = SHA1_DIGEST_SIZE, 667 + .statesize = sizeof(struct eip93_hash_export_state), 668 + .base 
= { 669 + .cra_name = "sha1", 670 + .cra_driver_name = "sha1-eip93", 671 + .cra_priority = 300, 672 + .cra_flags = CRYPTO_ALG_ASYNC | 673 + CRYPTO_ALG_KERN_DRIVER_ONLY | 674 + CRYPTO_ALG_ALLOCATES_MEMORY, 675 + .cra_blocksize = SHA1_BLOCK_SIZE, 676 + .cra_ctxsize = sizeof(struct eip93_hash_ctx), 677 + .cra_init = eip93_hash_cra_init, 678 + .cra_module = THIS_MODULE, 679 + }, 680 + }, 681 + }, 682 + }; 683 + 684 + struct eip93_alg_template eip93_alg_sha224 = { 685 + .type = EIP93_ALG_TYPE_HASH, 686 + .flags = EIP93_HASH_SHA224, 687 + .alg.ahash = { 688 + .init = eip93_hash_init, 689 + .update = eip93_hash_update, 690 + .final = eip93_hash_final, 691 + .finup = eip93_hash_finup, 692 + .digest = eip93_hash_digest, 693 + .export = eip93_hash_export, 694 + .import = eip93_hash_import, 695 + .halg = { 696 + .digestsize = SHA224_DIGEST_SIZE, 697 + .statesize = sizeof(struct eip93_hash_export_state), 698 + .base = { 699 + .cra_name = "sha224", 700 + .cra_driver_name = "sha224-eip93", 701 + .cra_priority = 300, 702 + .cra_flags = CRYPTO_ALG_ASYNC | 703 + CRYPTO_ALG_KERN_DRIVER_ONLY | 704 + CRYPTO_ALG_ALLOCATES_MEMORY, 705 + .cra_blocksize = SHA224_BLOCK_SIZE, 706 + .cra_ctxsize = sizeof(struct eip93_hash_ctx), 707 + .cra_init = eip93_hash_cra_init, 708 + .cra_module = THIS_MODULE, 709 + }, 710 + }, 711 + }, 712 + }; 713 + 714 + struct eip93_alg_template eip93_alg_sha256 = { 715 + .type = EIP93_ALG_TYPE_HASH, 716 + .flags = EIP93_HASH_SHA256, 717 + .alg.ahash = { 718 + .init = eip93_hash_init, 719 + .update = eip93_hash_update, 720 + .final = eip93_hash_final, 721 + .finup = eip93_hash_finup, 722 + .digest = eip93_hash_digest, 723 + .export = eip93_hash_export, 724 + .import = eip93_hash_import, 725 + .halg = { 726 + .digestsize = SHA256_DIGEST_SIZE, 727 + .statesize = sizeof(struct eip93_hash_export_state), 728 + .base = { 729 + .cra_name = "sha256", 730 + .cra_driver_name = "sha256-eip93", 731 + .cra_priority = 300, 732 + .cra_flags = CRYPTO_ALG_ASYNC | 733 + 
CRYPTO_ALG_KERN_DRIVER_ONLY | 734 + CRYPTO_ALG_ALLOCATES_MEMORY, 735 + .cra_blocksize = SHA256_BLOCK_SIZE, 736 + .cra_ctxsize = sizeof(struct eip93_hash_ctx), 737 + .cra_init = eip93_hash_cra_init, 738 + .cra_module = THIS_MODULE, 739 + }, 740 + }, 741 + }, 742 + }; 743 + 744 + struct eip93_alg_template eip93_alg_hmac_md5 = { 745 + .type = EIP93_ALG_TYPE_HASH, 746 + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5, 747 + .alg.ahash = { 748 + .init = eip93_hash_init, 749 + .update = eip93_hash_update, 750 + .final = eip93_hash_final, 751 + .finup = eip93_hash_finup, 752 + .digest = eip93_hash_digest, 753 + .setkey = eip93_hash_hmac_setkey, 754 + .export = eip93_hash_export, 755 + .import = eip93_hash_import, 756 + .halg = { 757 + .digestsize = MD5_DIGEST_SIZE, 758 + .statesize = sizeof(struct eip93_hash_export_state), 759 + .base = { 760 + .cra_name = "hmac(md5)", 761 + .cra_driver_name = "hmac(md5-eip93)", 762 + .cra_priority = 300, 763 + .cra_flags = CRYPTO_ALG_ASYNC | 764 + CRYPTO_ALG_KERN_DRIVER_ONLY | 765 + CRYPTO_ALG_ALLOCATES_MEMORY, 766 + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, 767 + .cra_ctxsize = sizeof(struct eip93_hash_ctx), 768 + .cra_init = eip93_hash_cra_init, 769 + .cra_module = THIS_MODULE, 770 + }, 771 + }, 772 + }, 773 + }; 774 + 775 + struct eip93_alg_template eip93_alg_hmac_sha1 = { 776 + .type = EIP93_ALG_TYPE_HASH, 777 + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1, 778 + .alg.ahash = { 779 + .init = eip93_hash_init, 780 + .update = eip93_hash_update, 781 + .final = eip93_hash_final, 782 + .finup = eip93_hash_finup, 783 + .digest = eip93_hash_digest, 784 + .setkey = eip93_hash_hmac_setkey, 785 + .export = eip93_hash_export, 786 + .import = eip93_hash_import, 787 + .halg = { 788 + .digestsize = SHA1_DIGEST_SIZE, 789 + .statesize = sizeof(struct eip93_hash_export_state), 790 + .base = { 791 + .cra_name = "hmac(sha1)", 792 + .cra_driver_name = "hmac(sha1-eip93)", 793 + .cra_priority = 300, 794 + .cra_flags = CRYPTO_ALG_ASYNC | 795 + 
CRYPTO_ALG_KERN_DRIVER_ONLY | 796 + CRYPTO_ALG_ALLOCATES_MEMORY, 797 + .cra_blocksize = SHA1_BLOCK_SIZE, 798 + .cra_ctxsize = sizeof(struct eip93_hash_ctx), 799 + .cra_init = eip93_hash_cra_init, 800 + .cra_module = THIS_MODULE, 801 + }, 802 + }, 803 + }, 804 + }; 805 + 806 + struct eip93_alg_template eip93_alg_hmac_sha224 = { 807 + .type = EIP93_ALG_TYPE_HASH, 808 + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224, 809 + .alg.ahash = { 810 + .init = eip93_hash_init, 811 + .update = eip93_hash_update, 812 + .final = eip93_hash_final, 813 + .finup = eip93_hash_finup, 814 + .digest = eip93_hash_digest, 815 + .setkey = eip93_hash_hmac_setkey, 816 + .export = eip93_hash_export, 817 + .import = eip93_hash_import, 818 + .halg = { 819 + .digestsize = SHA224_DIGEST_SIZE, 820 + .statesize = sizeof(struct eip93_hash_export_state), 821 + .base = { 822 + .cra_name = "hmac(sha224)", 823 + .cra_driver_name = "hmac(sha224-eip93)", 824 + .cra_priority = 300, 825 + .cra_flags = CRYPTO_ALG_ASYNC | 826 + CRYPTO_ALG_KERN_DRIVER_ONLY | 827 + CRYPTO_ALG_ALLOCATES_MEMORY, 828 + .cra_blocksize = SHA224_BLOCK_SIZE, 829 + .cra_ctxsize = sizeof(struct eip93_hash_ctx), 830 + .cra_init = eip93_hash_cra_init, 831 + .cra_module = THIS_MODULE, 832 + }, 833 + }, 834 + }, 835 + }; 836 + 837 + struct eip93_alg_template eip93_alg_hmac_sha256 = { 838 + .type = EIP93_ALG_TYPE_HASH, 839 + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256, 840 + .alg.ahash = { 841 + .init = eip93_hash_init, 842 + .update = eip93_hash_update, 843 + .final = eip93_hash_final, 844 + .finup = eip93_hash_finup, 845 + .digest = eip93_hash_digest, 846 + .setkey = eip93_hash_hmac_setkey, 847 + .export = eip93_hash_export, 848 + .import = eip93_hash_import, 849 + .halg = { 850 + .digestsize = SHA256_DIGEST_SIZE, 851 + .statesize = sizeof(struct eip93_hash_export_state), 852 + .base = { 853 + .cra_name = "hmac(sha256)", 854 + .cra_driver_name = "hmac(sha256-eip93)", 855 + .cra_priority = 300, 856 + .cra_flags = CRYPTO_ALG_ASYNC | 857 + 
CRYPTO_ALG_KERN_DRIVER_ONLY | 858 + CRYPTO_ALG_ALLOCATES_MEMORY, 859 + .cra_blocksize = SHA256_BLOCK_SIZE, 860 + .cra_ctxsize = sizeof(struct eip93_hash_ctx), 861 + .cra_init = eip93_hash_cra_init, 862 + .cra_module = THIS_MODULE, 863 + }, 864 + }, 865 + }, 866 + };
+82
drivers/crypto/inside-secure/eip93/eip93-hash.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 2 + * 3 + * Copyright (C) 2019 - 2021 4 + * 5 + * Richard van Schagen <vschagen@icloud.com> 6 + * Christian Marangi <ansuelsmth@gmail.com 7 + */ 8 + #ifndef _EIP93_HASH_H_ 9 + #define _EIP93_HASH_H_ 10 + 11 + #include <crypto/sha2.h> 12 + 13 + #include "eip93-main.h" 14 + #include "eip93-regs.h" 15 + 16 + struct eip93_hash_ctx { 17 + struct eip93_device *eip93; 18 + u32 flags; 19 + 20 + u8 ipad[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); 21 + u8 opad[SHA256_DIGEST_SIZE] __aligned(sizeof(u32)); 22 + }; 23 + 24 + struct eip93_hash_reqctx { 25 + /* Placement is important for DMA align */ 26 + struct { 27 + struct sa_record sa_record; 28 + struct sa_record sa_record_hmac; 29 + struct sa_state sa_state; 30 + } __aligned(CRYPTO_DMA_ALIGN); 31 + 32 + dma_addr_t sa_record_base; 33 + dma_addr_t sa_record_hmac_base; 34 + dma_addr_t sa_state_base; 35 + 36 + /* Don't enable HASH_FINALIZE when last block is sent */ 37 + bool partial_hash; 38 + 39 + /* Set to signal interrupt is for final packet */ 40 + bool finalize; 41 + 42 + /* 43 + * EIP93 requires data to be accumulated in block of 64 bytes 44 + * for intermediate hash calculation. 
45 + */ 46 + u64 len; 47 + u32 data_used; 48 + 49 + u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); 50 + dma_addr_t data_dma; 51 + 52 + struct list_head blocks; 53 + }; 54 + 55 + struct mkt_hash_block { 56 + struct list_head list; 57 + u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); 58 + dma_addr_t data_dma; 59 + }; 60 + 61 + struct eip93_hash_export_state { 62 + u64 len; 63 + u32 data_used; 64 + 65 + u32 state_len[2]; 66 + u8 state_hash[SHA256_DIGEST_SIZE] __aligned(sizeof(u32)); 67 + 68 + u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); 69 + }; 70 + 71 + void eip93_hash_handle_result(struct crypto_async_request *async, int err); 72 + 73 + extern struct eip93_alg_template eip93_alg_md5; 74 + extern struct eip93_alg_template eip93_alg_sha1; 75 + extern struct eip93_alg_template eip93_alg_sha224; 76 + extern struct eip93_alg_template eip93_alg_sha256; 77 + extern struct eip93_alg_template eip93_alg_hmac_md5; 78 + extern struct eip93_alg_template eip93_alg_hmac_sha1; 79 + extern struct eip93_alg_template eip93_alg_hmac_sha224; 80 + extern struct eip93_alg_template eip93_alg_hmac_sha256; 81 + 82 + #endif /* _EIP93_HASH_H_ */
+501
drivers/crypto/inside-secure/eip93/eip93-main.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2019 - 2021 4 + * 5 + * Richard van Schagen <vschagen@icloud.com> 6 + * Christian Marangi <ansuelsmth@gmail.com 7 + */ 8 + 9 + #include <linux/atomic.h> 10 + #include <linux/clk.h> 11 + #include <linux/delay.h> 12 + #include <linux/dma-mapping.h> 13 + #include <linux/interrupt.h> 14 + #include <linux/module.h> 15 + #include <linux/of.h> 16 + #include <linux/platform_device.h> 17 + #include <linux/spinlock.h> 18 + #include <crypto/aes.h> 19 + #include <crypto/ctr.h> 20 + 21 + #include "eip93-main.h" 22 + #include "eip93-regs.h" 23 + #include "eip93-common.h" 24 + #include "eip93-cipher.h" 25 + #include "eip93-aes.h" 26 + #include "eip93-des.h" 27 + #include "eip93-aead.h" 28 + #include "eip93-hash.h" 29 + 30 + static struct eip93_alg_template *eip93_algs[] = { 31 + &eip93_alg_ecb_des, 32 + &eip93_alg_cbc_des, 33 + &eip93_alg_ecb_des3_ede, 34 + &eip93_alg_cbc_des3_ede, 35 + &eip93_alg_ecb_aes, 36 + &eip93_alg_cbc_aes, 37 + &eip93_alg_ctr_aes, 38 + &eip93_alg_rfc3686_aes, 39 + &eip93_alg_authenc_hmac_md5_cbc_des, 40 + &eip93_alg_authenc_hmac_sha1_cbc_des, 41 + &eip93_alg_authenc_hmac_sha224_cbc_des, 42 + &eip93_alg_authenc_hmac_sha256_cbc_des, 43 + &eip93_alg_authenc_hmac_md5_cbc_des3_ede, 44 + &eip93_alg_authenc_hmac_sha1_cbc_des3_ede, 45 + &eip93_alg_authenc_hmac_sha224_cbc_des3_ede, 46 + &eip93_alg_authenc_hmac_sha256_cbc_des3_ede, 47 + &eip93_alg_authenc_hmac_md5_cbc_aes, 48 + &eip93_alg_authenc_hmac_sha1_cbc_aes, 49 + &eip93_alg_authenc_hmac_sha224_cbc_aes, 50 + &eip93_alg_authenc_hmac_sha256_cbc_aes, 51 + &eip93_alg_authenc_hmac_md5_rfc3686_aes, 52 + &eip93_alg_authenc_hmac_sha1_rfc3686_aes, 53 + &eip93_alg_authenc_hmac_sha224_rfc3686_aes, 54 + &eip93_alg_authenc_hmac_sha256_rfc3686_aes, 55 + &eip93_alg_md5, 56 + &eip93_alg_sha1, 57 + &eip93_alg_sha224, 58 + &eip93_alg_sha256, 59 + &eip93_alg_hmac_md5, 60 + &eip93_alg_hmac_sha1, 61 + &eip93_alg_hmac_sha224, 62 + &eip93_alg_hmac_sha256, 63 + 
}; 64 + 65 + inline void eip93_irq_disable(struct eip93_device *eip93, u32 mask) 66 + { 67 + __raw_writel(mask, eip93->base + EIP93_REG_MASK_DISABLE); 68 + } 69 + 70 + inline void eip93_irq_enable(struct eip93_device *eip93, u32 mask) 71 + { 72 + __raw_writel(mask, eip93->base + EIP93_REG_MASK_ENABLE); 73 + } 74 + 75 + inline void eip93_irq_clear(struct eip93_device *eip93, u32 mask) 76 + { 77 + __raw_writel(mask, eip93->base + EIP93_REG_INT_CLR); 78 + } 79 + 80 + static void eip93_unregister_algs(unsigned int i) 81 + { 82 + unsigned int j; 83 + 84 + for (j = 0; j < i; j++) { 85 + switch (eip93_algs[j]->type) { 86 + case EIP93_ALG_TYPE_SKCIPHER: 87 + crypto_unregister_skcipher(&eip93_algs[j]->alg.skcipher); 88 + break; 89 + case EIP93_ALG_TYPE_AEAD: 90 + crypto_unregister_aead(&eip93_algs[j]->alg.aead); 91 + break; 92 + case EIP93_ALG_TYPE_HASH: 93 + crypto_unregister_ahash(&eip93_algs[i]->alg.ahash); 94 + break; 95 + } 96 + } 97 + } 98 + 99 + static int eip93_register_algs(struct eip93_device *eip93, u32 supported_algo_flags) 100 + { 101 + unsigned int i; 102 + int ret = 0; 103 + 104 + for (i = 0; i < ARRAY_SIZE(eip93_algs); i++) { 105 + u32 alg_flags = eip93_algs[i]->flags; 106 + 107 + eip93_algs[i]->eip93 = eip93; 108 + 109 + if ((IS_DES(alg_flags) || IS_3DES(alg_flags)) && 110 + !(supported_algo_flags & EIP93_PE_OPTION_TDES)) 111 + continue; 112 + 113 + if (IS_AES(alg_flags)) { 114 + if (!(supported_algo_flags & EIP93_PE_OPTION_AES)) 115 + continue; 116 + 117 + if (!IS_HMAC(alg_flags)) { 118 + if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY128) 119 + eip93_algs[i]->alg.skcipher.max_keysize = 120 + AES_KEYSIZE_128; 121 + 122 + if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY192) 123 + eip93_algs[i]->alg.skcipher.max_keysize = 124 + AES_KEYSIZE_192; 125 + 126 + if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY256) 127 + eip93_algs[i]->alg.skcipher.max_keysize = 128 + AES_KEYSIZE_256; 129 + 130 + if (IS_RFC3686(alg_flags)) 131 + 
eip93_algs[i]->alg.skcipher.max_keysize += 132 + CTR_RFC3686_NONCE_SIZE; 133 + } 134 + } 135 + 136 + if (IS_HASH_MD5(alg_flags) && 137 + !(supported_algo_flags & EIP93_PE_OPTION_MD5)) 138 + continue; 139 + 140 + if (IS_HASH_SHA1(alg_flags) && 141 + !(supported_algo_flags & EIP93_PE_OPTION_SHA_1)) 142 + continue; 143 + 144 + if (IS_HASH_SHA224(alg_flags) && 145 + !(supported_algo_flags & EIP93_PE_OPTION_SHA_224)) 146 + continue; 147 + 148 + if (IS_HASH_SHA256(alg_flags) && 149 + !(supported_algo_flags & EIP93_PE_OPTION_SHA_256)) 150 + continue; 151 + 152 + switch (eip93_algs[i]->type) { 153 + case EIP93_ALG_TYPE_SKCIPHER: 154 + ret = crypto_register_skcipher(&eip93_algs[i]->alg.skcipher); 155 + break; 156 + case EIP93_ALG_TYPE_AEAD: 157 + ret = crypto_register_aead(&eip93_algs[i]->alg.aead); 158 + break; 159 + case EIP93_ALG_TYPE_HASH: 160 + ret = crypto_register_ahash(&eip93_algs[i]->alg.ahash); 161 + break; 162 + } 163 + if (ret) 164 + goto fail; 165 + } 166 + 167 + return 0; 168 + 169 + fail: 170 + eip93_unregister_algs(i); 171 + 172 + return ret; 173 + } 174 + 175 + static void eip93_handle_result_descriptor(struct eip93_device *eip93) 176 + { 177 + struct crypto_async_request *async; 178 + struct eip93_descriptor *rdesc; 179 + u16 desc_flags, crypto_idr; 180 + bool last_entry; 181 + int handled, left, err; 182 + u32 pe_ctrl_stat; 183 + u32 pe_length; 184 + 185 + get_more: 186 + handled = 0; 187 + 188 + left = readl(eip93->base + EIP93_REG_PE_RD_COUNT) & EIP93_PE_RD_COUNT; 189 + 190 + if (!left) { 191 + eip93_irq_clear(eip93, EIP93_INT_RDR_THRESH); 192 + eip93_irq_enable(eip93, EIP93_INT_RDR_THRESH); 193 + return; 194 + } 195 + 196 + last_entry = false; 197 + 198 + while (left) { 199 + scoped_guard(spinlock_irqsave, &eip93->ring->read_lock) 200 + rdesc = eip93_get_descriptor(eip93); 201 + if (IS_ERR(rdesc)) { 202 + dev_err(eip93->dev, "Ndesc: %d nreq: %d\n", 203 + handled, left); 204 + err = -EIO; 205 + break; 206 + } 207 + /* make sure DMA is finished writing 
*/ 208 + do { 209 + pe_ctrl_stat = READ_ONCE(rdesc->pe_ctrl_stat_word); 210 + pe_length = READ_ONCE(rdesc->pe_length_word); 211 + } while (FIELD_GET(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN, pe_ctrl_stat) != 212 + EIP93_PE_CTRL_PE_READY || 213 + FIELD_GET(EIP93_PE_LENGTH_HOST_PE_READY, pe_length) != 214 + EIP93_PE_LENGTH_PE_READY); 215 + 216 + err = rdesc->pe_ctrl_stat_word & (EIP93_PE_CTRL_PE_EXT_ERR_CODE | 217 + EIP93_PE_CTRL_PE_EXT_ERR | 218 + EIP93_PE_CTRL_PE_SEQNUM_ERR | 219 + EIP93_PE_CTRL_PE_PAD_ERR | 220 + EIP93_PE_CTRL_PE_AUTH_ERR); 221 + 222 + desc_flags = FIELD_GET(EIP93_PE_USER_ID_DESC_FLAGS, rdesc->user_id); 223 + crypto_idr = FIELD_GET(EIP93_PE_USER_ID_CRYPTO_IDR, rdesc->user_id); 224 + 225 + writel(1, eip93->base + EIP93_REG_PE_RD_COUNT); 226 + eip93_irq_clear(eip93, EIP93_INT_RDR_THRESH); 227 + 228 + handled++; 229 + left--; 230 + 231 + if (desc_flags & EIP93_DESC_LAST) { 232 + last_entry = true; 233 + break; 234 + } 235 + } 236 + 237 + if (!last_entry) 238 + goto get_more; 239 + 240 + /* Get crypto async ref only for last descriptor */ 241 + scoped_guard(spinlock_bh, &eip93->ring->idr_lock) { 242 + async = idr_find(&eip93->ring->crypto_async_idr, crypto_idr); 243 + idr_remove(&eip93->ring->crypto_async_idr, crypto_idr); 244 + } 245 + 246 + /* Parse error in ctrl stat word */ 247 + err = eip93_parse_ctrl_stat_err(eip93, err); 248 + 249 + if (desc_flags & EIP93_DESC_SKCIPHER) 250 + eip93_skcipher_handle_result(async, err); 251 + 252 + if (desc_flags & EIP93_DESC_AEAD) 253 + eip93_aead_handle_result(async, err); 254 + 255 + if (desc_flags & EIP93_DESC_HASH) 256 + eip93_hash_handle_result(async, err); 257 + 258 + goto get_more; 259 + } 260 + 261 + static void eip93_done_task(unsigned long data) 262 + { 263 + struct eip93_device *eip93 = (struct eip93_device *)data; 264 + 265 + eip93_handle_result_descriptor(eip93); 266 + } 267 + 268 + static irqreturn_t eip93_irq_handler(int irq, void *data) 269 + { 270 + struct eip93_device *eip93 = data; 271 + u32 
irq_status; 272 + 273 + irq_status = readl(eip93->base + EIP93_REG_INT_MASK_STAT); 274 + if (FIELD_GET(EIP93_INT_RDR_THRESH, irq_status)) { 275 + eip93_irq_disable(eip93, EIP93_INT_RDR_THRESH); 276 + tasklet_schedule(&eip93->ring->done_task); 277 + return IRQ_HANDLED; 278 + } 279 + 280 + /* Ignore errors in AUTO mode, handled by the RDR */ 281 + eip93_irq_clear(eip93, irq_status); 282 + if (irq_status) 283 + eip93_irq_disable(eip93, irq_status); 284 + 285 + return IRQ_NONE; 286 + } 287 + 288 + static void eip93_initialize(struct eip93_device *eip93, u32 supported_algo_flags) 289 + { 290 + u32 val; 291 + 292 + /* Reset PE and rings */ 293 + val = EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING; 294 + val |= EIP93_PE_TARGET_AUTO_RING_MODE; 295 + /* For Auto more, update the CDR ring owner after processing */ 296 + val |= EIP93_PE_CONFIG_EN_CDR_UPDATE; 297 + writel(val, eip93->base + EIP93_REG_PE_CONFIG); 298 + 299 + /* Wait for PE and ring to reset */ 300 + usleep_range(10, 20); 301 + 302 + /* Release PE and ring reset */ 303 + val = readl(eip93->base + EIP93_REG_PE_CONFIG); 304 + val &= ~(EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING); 305 + writel(val, eip93->base + EIP93_REG_PE_CONFIG); 306 + 307 + /* Config Clocks */ 308 + val = EIP93_PE_CLOCK_EN_PE_CLK; 309 + if (supported_algo_flags & EIP93_PE_OPTION_TDES) 310 + val |= EIP93_PE_CLOCK_EN_DES_CLK; 311 + if (supported_algo_flags & EIP93_PE_OPTION_AES) 312 + val |= EIP93_PE_CLOCK_EN_AES_CLK; 313 + if (supported_algo_flags & 314 + (EIP93_PE_OPTION_MD5 | EIP93_PE_OPTION_SHA_1 | EIP93_PE_OPTION_SHA_224 | 315 + EIP93_PE_OPTION_SHA_256)) 316 + val |= EIP93_PE_CLOCK_EN_HASH_CLK; 317 + writel(val, eip93->base + EIP93_REG_PE_CLOCK_CTRL); 318 + 319 + /* Config DMA thresholds */ 320 + val = FIELD_PREP(EIP93_PE_OUTBUF_THRESH, 128) | 321 + FIELD_PREP(EIP93_PE_INBUF_THRESH, 128); 322 + writel(val, eip93->base + EIP93_REG_PE_BUF_THRESH); 323 + 324 + /* Clear/ack all interrupts before disable all */ 325 + 
eip93_irq_clear(eip93, EIP93_INT_ALL); 326 + eip93_irq_disable(eip93, EIP93_INT_ALL); 327 + 328 + /* Setup CRD threshold to trigger interrupt */ 329 + val = FIELD_PREP(EIPR93_PE_CDR_THRESH, EIP93_RING_NUM - EIP93_RING_BUSY); 330 + /* 331 + * Configure RDR interrupt to be triggered if RD counter is not 0 332 + * for more than 2^(N+10) system clocks. 333 + */ 334 + val |= FIELD_PREP(EIPR93_PE_RD_TIMEOUT, 5) | EIPR93_PE_TIMEROUT_EN; 335 + writel(val, eip93->base + EIP93_REG_PE_RING_THRESH); 336 + } 337 + 338 + static void eip93_desc_free(struct eip93_device *eip93) 339 + { 340 + writel(0, eip93->base + EIP93_REG_PE_RING_CONFIG); 341 + writel(0, eip93->base + EIP93_REG_PE_CDR_BASE); 342 + writel(0, eip93->base + EIP93_REG_PE_RDR_BASE); 343 + } 344 + 345 + static int eip93_set_ring(struct eip93_device *eip93, struct eip93_desc_ring *ring) 346 + { 347 + ring->offset = sizeof(struct eip93_descriptor); 348 + ring->base = dmam_alloc_coherent(eip93->dev, 349 + sizeof(struct eip93_descriptor) * EIP93_RING_NUM, 350 + &ring->base_dma, GFP_KERNEL); 351 + if (!ring->base) 352 + return -ENOMEM; 353 + 354 + ring->write = ring->base; 355 + ring->base_end = ring->base + sizeof(struct eip93_descriptor) * (EIP93_RING_NUM - 1); 356 + ring->read = ring->base; 357 + 358 + return 0; 359 + } 360 + 361 + static int eip93_desc_init(struct eip93_device *eip93) 362 + { 363 + struct eip93_desc_ring *cdr = &eip93->ring->cdr; 364 + struct eip93_desc_ring *rdr = &eip93->ring->rdr; 365 + int ret; 366 + u32 val; 367 + 368 + ret = eip93_set_ring(eip93, cdr); 369 + if (ret) 370 + return ret; 371 + 372 + ret = eip93_set_ring(eip93, rdr); 373 + if (ret) 374 + return ret; 375 + 376 + writel((u32 __force)cdr->base_dma, eip93->base + EIP93_REG_PE_CDR_BASE); 377 + writel((u32 __force)rdr->base_dma, eip93->base + EIP93_REG_PE_RDR_BASE); 378 + 379 + val = FIELD_PREP(EIP93_PE_RING_SIZE, EIP93_RING_NUM - 1); 380 + writel(val, eip93->base + EIP93_REG_PE_RING_CONFIG); 381 + 382 + return 0; 383 + } 384 + 385 + 
static void eip93_cleanup(struct eip93_device *eip93) 386 + { 387 + tasklet_kill(&eip93->ring->done_task); 388 + 389 + /* Clear/ack all interrupts before disable all */ 390 + eip93_irq_clear(eip93, EIP93_INT_ALL); 391 + eip93_irq_disable(eip93, EIP93_INT_ALL); 392 + 393 + writel(0, eip93->base + EIP93_REG_PE_CLOCK_CTRL); 394 + 395 + eip93_desc_free(eip93); 396 + 397 + idr_destroy(&eip93->ring->crypto_async_idr); 398 + } 399 + 400 + static int eip93_crypto_probe(struct platform_device *pdev) 401 + { 402 + struct device *dev = &pdev->dev; 403 + struct eip93_device *eip93; 404 + u32 ver, algo_flags; 405 + int ret; 406 + 407 + eip93 = devm_kzalloc(dev, sizeof(*eip93), GFP_KERNEL); 408 + if (!eip93) 409 + return -ENOMEM; 410 + 411 + eip93->dev = dev; 412 + platform_set_drvdata(pdev, eip93); 413 + 414 + eip93->base = devm_platform_ioremap_resource(pdev, 0); 415 + if (IS_ERR(eip93->base)) 416 + return PTR_ERR(eip93->base); 417 + 418 + eip93->irq = platform_get_irq(pdev, 0); 419 + if (eip93->irq < 0) 420 + return eip93->irq; 421 + 422 + ret = devm_request_threaded_irq(eip93->dev, eip93->irq, eip93_irq_handler, 423 + NULL, IRQF_ONESHOT, 424 + dev_name(eip93->dev), eip93); 425 + 426 + eip93->ring = devm_kcalloc(eip93->dev, 1, sizeof(*eip93->ring), GFP_KERNEL); 427 + if (!eip93->ring) 428 + return -ENOMEM; 429 + 430 + ret = eip93_desc_init(eip93); 431 + 432 + if (ret) 433 + return ret; 434 + 435 + tasklet_init(&eip93->ring->done_task, eip93_done_task, (unsigned long)eip93); 436 + 437 + spin_lock_init(&eip93->ring->read_lock); 438 + spin_lock_init(&eip93->ring->write_lock); 439 + 440 + spin_lock_init(&eip93->ring->idr_lock); 441 + idr_init(&eip93->ring->crypto_async_idr); 442 + 443 + algo_flags = readl(eip93->base + EIP93_REG_PE_OPTION_1); 444 + 445 + eip93_initialize(eip93, algo_flags); 446 + 447 + /* Init finished, enable RDR interrupt */ 448 + eip93_irq_enable(eip93, EIP93_INT_RDR_THRESH); 449 + 450 + ret = eip93_register_algs(eip93, algo_flags); 451 + if (ret) { 452 + 
eip93_cleanup(eip93); 453 + return ret; 454 + } 455 + 456 + ver = readl(eip93->base + EIP93_REG_PE_REVISION); 457 + /* EIP_EIP_NO:MAJOR_HW_REV:MINOR_HW_REV:HW_PATCH,PE(ALGO_FLAGS) */ 458 + dev_info(eip93->dev, "EIP%lu:%lx:%lx:%lx,PE(0x%x:0x%x)\n", 459 + FIELD_GET(EIP93_PE_REVISION_EIP_NO, ver), 460 + FIELD_GET(EIP93_PE_REVISION_MAJ_HW_REV, ver), 461 + FIELD_GET(EIP93_PE_REVISION_MIN_HW_REV, ver), 462 + FIELD_GET(EIP93_PE_REVISION_HW_PATCH, ver), 463 + algo_flags, 464 + readl(eip93->base + EIP93_REG_PE_OPTION_0)); 465 + 466 + return 0; 467 + } 468 + 469 + static void eip93_crypto_remove(struct platform_device *pdev) 470 + { 471 + struct eip93_device *eip93 = platform_get_drvdata(pdev); 472 + 473 + eip93_unregister_algs(ARRAY_SIZE(eip93_algs)); 474 + eip93_cleanup(eip93); 475 + } 476 + 477 + static const struct of_device_id eip93_crypto_of_match[] = { 478 + { .compatible = "inside-secure,safexcel-eip93i", }, 479 + { .compatible = "inside-secure,safexcel-eip93ie", }, 480 + { .compatible = "inside-secure,safexcel-eip93is", }, 481 + { .compatible = "inside-secure,safexcel-eip93ies", }, 482 + /* IW not supported currently, missing AES-XCB-MAC/AES-CCM */ 483 + /* { .compatible = "inside-secure,safexcel-eip93iw", }, */ 484 + {} 485 + }; 486 + MODULE_DEVICE_TABLE(of, eip93_crypto_of_match); 487 + 488 + static struct platform_driver eip93_crypto_driver = { 489 + .probe = eip93_crypto_probe, 490 + .remove = eip93_crypto_remove, 491 + .driver = { 492 + .name = "inside-secure-eip93", 493 + .of_match_table = eip93_crypto_of_match, 494 + }, 495 + }; 496 + module_platform_driver(eip93_crypto_driver); 497 + 498 + MODULE_AUTHOR("Richard van Schagen <vschagen@cs.com>"); 499 + MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>"); 500 + MODULE_DESCRIPTION("Mediatek EIP-93 crypto engine driver"); 501 + MODULE_LICENSE("GPL");
+151
drivers/crypto/inside-secure/eip93/eip93-main.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 2 + * 3 + * Copyright (C) 2019 - 2021 4 + * 5 + * Richard van Schagen <vschagen@icloud.com> 6 + * Christian Marangi <ansuelsmth@gmail.com 7 + */ 8 + #ifndef _EIP93_MAIN_H_ 9 + #define _EIP93_MAIN_H_ 10 + 11 + #include <crypto/internal/aead.h> 12 + #include <crypto/internal/hash.h> 13 + #include <crypto/internal/skcipher.h> 14 + #include <linux/bitfield.h> 15 + #include <linux/interrupt.h> 16 + 17 + #define EIP93_RING_BUSY_DELAY 500 18 + 19 + #define EIP93_RING_NUM 512 20 + #define EIP93_RING_BUSY 32 21 + #define EIP93_CRA_PRIORITY 1500 22 + 23 + #define EIP93_RING_SA_STATE_ADDR(base, idx) ((base) + (idx)) 24 + #define EIP93_RING_SA_STATE_DMA(dma_base, idx) ((u32 __force)(dma_base) + \ 25 + ((idx) * sizeof(struct sa_state))) 26 + 27 + /* cipher algorithms */ 28 + #define EIP93_ALG_DES BIT(0) 29 + #define EIP93_ALG_3DES BIT(1) 30 + #define EIP93_ALG_AES BIT(2) 31 + #define EIP93_ALG_MASK GENMASK(2, 0) 32 + /* hash and hmac algorithms */ 33 + #define EIP93_HASH_MD5 BIT(3) 34 + #define EIP93_HASH_SHA1 BIT(4) 35 + #define EIP93_HASH_SHA224 BIT(5) 36 + #define EIP93_HASH_SHA256 BIT(6) 37 + #define EIP93_HASH_HMAC BIT(7) 38 + #define EIP93_HASH_MASK GENMASK(6, 3) 39 + /* cipher modes */ 40 + #define EIP93_MODE_CBC BIT(8) 41 + #define EIP93_MODE_ECB BIT(9) 42 + #define EIP93_MODE_CTR BIT(10) 43 + #define EIP93_MODE_RFC3686 BIT(11) 44 + #define EIP93_MODE_MASK GENMASK(10, 8) 45 + 46 + /* cipher encryption/decryption operations */ 47 + #define EIP93_ENCRYPT BIT(12) 48 + #define EIP93_DECRYPT BIT(13) 49 + 50 + #define EIP93_BUSY BIT(14) 51 + 52 + /* descriptor flags */ 53 + #define EIP93_DESC_DMA_IV BIT(0) 54 + #define EIP93_DESC_IPSEC BIT(1) 55 + #define EIP93_DESC_FINISH BIT(2) 56 + #define EIP93_DESC_LAST BIT(3) 57 + #define EIP93_DESC_FAKE_HMAC BIT(4) 58 + #define EIP93_DESC_PRNG BIT(5) 59 + #define EIP93_DESC_HASH BIT(6) 60 + #define EIP93_DESC_AEAD BIT(7) 61 + #define EIP93_DESC_SKCIPHER BIT(8) 62 + #define EIP93_DESC_ASYNC 
BIT(9) 63 + 64 + #define IS_DMA_IV(desc_flags) ((desc_flags) & EIP93_DESC_DMA_IV) 65 + 66 + #define IS_DES(flags) ((flags) & EIP93_ALG_DES) 67 + #define IS_3DES(flags) ((flags) & EIP93_ALG_3DES) 68 + #define IS_AES(flags) ((flags) & EIP93_ALG_AES) 69 + 70 + #define IS_HASH_MD5(flags) ((flags) & EIP93_HASH_MD5) 71 + #define IS_HASH_SHA1(flags) ((flags) & EIP93_HASH_SHA1) 72 + #define IS_HASH_SHA224(flags) ((flags) & EIP93_HASH_SHA224) 73 + #define IS_HASH_SHA256(flags) ((flags) & EIP93_HASH_SHA256) 74 + #define IS_HMAC(flags) ((flags) & EIP93_HASH_HMAC) 75 + 76 + #define IS_CBC(mode) ((mode) & EIP93_MODE_CBC) 77 + #define IS_ECB(mode) ((mode) & EIP93_MODE_ECB) 78 + #define IS_CTR(mode) ((mode) & EIP93_MODE_CTR) 79 + #define IS_RFC3686(mode) ((mode) & EIP93_MODE_RFC3686) 80 + 81 + #define IS_BUSY(flags) ((flags) & EIP93_BUSY) 82 + 83 + #define IS_ENCRYPT(dir) ((dir) & EIP93_ENCRYPT) 84 + #define IS_DECRYPT(dir) ((dir) & EIP93_DECRYPT) 85 + 86 + #define IS_CIPHER(flags) ((flags) & (EIP93_ALG_DES | \ 87 + EIP93_ALG_3DES | \ 88 + EIP93_ALG_AES)) 89 + 90 + #define IS_HASH(flags) ((flags) & (EIP93_HASH_MD5 | \ 91 + EIP93_HASH_SHA1 | \ 92 + EIP93_HASH_SHA224 | \ 93 + EIP93_HASH_SHA256)) 94 + 95 + /** 96 + * struct eip93_device - crypto engine device structure 97 + */ 98 + struct eip93_device { 99 + void __iomem *base; 100 + struct device *dev; 101 + struct clk *clk; 102 + int irq; 103 + struct eip93_ring *ring; 104 + }; 105 + 106 + struct eip93_desc_ring { 107 + void *base; 108 + void *base_end; 109 + dma_addr_t base_dma; 110 + /* write and read pointers */ 111 + void *read; 112 + void *write; 113 + /* descriptor element offset */ 114 + u32 offset; 115 + }; 116 + 117 + struct eip93_state_pool { 118 + void *base; 119 + dma_addr_t base_dma; 120 + }; 121 + 122 + struct eip93_ring { 123 + struct tasklet_struct done_task; 124 + /* command/result rings */ 125 + struct eip93_desc_ring cdr; 126 + struct eip93_desc_ring rdr; 127 + spinlock_t write_lock; 128 + spinlock_t read_lock; 
129 + /* aync idr */ 130 + spinlock_t idr_lock; 131 + struct idr crypto_async_idr; 132 + }; 133 + 134 + enum eip93_alg_type { 135 + EIP93_ALG_TYPE_AEAD, 136 + EIP93_ALG_TYPE_SKCIPHER, 137 + EIP93_ALG_TYPE_HASH, 138 + }; 139 + 140 + struct eip93_alg_template { 141 + struct eip93_device *eip93; 142 + enum eip93_alg_type type; 143 + u32 flags; 144 + union { 145 + struct aead_alg aead; 146 + struct skcipher_alg skcipher; 147 + struct ahash_alg ahash; 148 + } alg; 149 + }; 150 + 151 + #endif /* _EIP93_MAIN_H_ */
+335
drivers/crypto/inside-secure/eip93/eip93-regs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (C) 2019 - 2021 4 + * 5 + * Richard van Schagen <vschagen@icloud.com> 6 + * Christian Marangi <ansuelsmth@gmail.com 7 + */ 8 + #ifndef REG_EIP93_H 9 + #define REG_EIP93_H 10 + 11 + #define EIP93_REG_PE_CTRL_STAT 0x0 12 + #define EIP93_PE_CTRL_PE_PAD_CTRL_STAT GENMASK(31, 24) 13 + #define EIP93_PE_CTRL_PE_EXT_ERR_CODE GENMASK(23, 20) 14 + #define EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING 0x8 15 + #define EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR 0x7 16 + #define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH 0x6 17 + #define EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH 0x5 18 + #define EIP93_PE_CTRL_PE_EXT_ERR_SPI 0x4 19 + #define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO 0x3 20 + #define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP 0x2 21 + #define EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER 0x1 22 + #define EIP93_PE_CTRL_PE_EXT_ERR_BUS 0x0 23 + #define EIP93_PE_CTRL_PE_EXT_ERR BIT(19) 24 + #define EIP93_PE_CTRL_PE_SEQNUM_ERR BIT(18) 25 + #define EIP93_PE_CTRL_PE_PAD_ERR BIT(17) 26 + #define EIP93_PE_CTRL_PE_AUTH_ERR BIT(16) 27 + #define EIP93_PE_CTRL_PE_PAD_VALUE GENMASK(15, 8) 28 + #define EIP93_PE_CTRL_PE_PRNG_MODE GENMASK(7, 6) 29 + #define EIP93_PE_CTRL_PE_HASH_FINAL BIT(4) 30 + #define EIP93_PE_CTRL_PE_INIT_ARC4 BIT(3) 31 + #define EIP93_PE_CTRL_PE_READY_DES_TRING_OWN GENMASK(1, 0) 32 + #define EIP93_PE_CTRL_PE_READY 0x2 33 + #define EIP93_PE_CTRL_HOST_READY 0x1 34 + #define EIP93_REG_PE_SOURCE_ADDR 0x4 35 + #define EIP93_REG_PE_DEST_ADDR 0x8 36 + #define EIP93_REG_PE_SA_ADDR 0xc 37 + #define EIP93_REG_PE_ADDR 0x10 /* STATE_ADDR */ 38 + /* 39 + * Special implementation for user ID 40 + * user_id in eip93_descriptor is used to identify the 41 + * descriptor and is opaque and can be used by the driver 42 + * in custom way. 43 + * 44 + * The usage of this should be to put an address to the crypto 45 + * request struct from the kernel but this can't work in 64bit 46 + * world. 
47 + * 48 + * Also it's required to put some flags to identify the last 49 + * descriptor. 50 + * 51 + * To handle this, split the u32 in 2 part: 52 + * - 31:16 descriptor flags 53 + * - 15:0 IDR to connect the crypto request address 54 + */ 55 + #define EIP93_REG_PE_USER_ID 0x18 56 + #define EIP93_PE_USER_ID_DESC_FLAGS GENMASK(31, 16) 57 + #define EIP93_PE_USER_ID_CRYPTO_IDR GENMASK(15, 0) 58 + #define EIP93_REG_PE_LENGTH 0x1c 59 + #define EIP93_PE_LENGTH_BYPASS GENMASK(31, 24) 60 + #define EIP93_PE_LENGTH_HOST_PE_READY GENMASK(23, 22) 61 + #define EIP93_PE_LENGTH_PE_READY 0x2 62 + #define EIP93_PE_LENGTH_HOST_READY 0x1 63 + #define EIP93_PE_LENGTH_LENGTH GENMASK(19, 0) 64 + 65 + /* PACKET ENGINE RING configuration registers */ 66 + #define EIP93_REG_PE_CDR_BASE 0x80 67 + #define EIP93_REG_PE_RDR_BASE 0x84 68 + #define EIP93_REG_PE_RING_CONFIG 0x88 69 + #define EIP93_PE_EN_EXT_TRIG BIT(31) 70 + /* Absent in later revision of eip93 */ 71 + /* #define EIP93_PE_RING_OFFSET GENMASK(23, 15) */ 72 + #define EIP93_PE_RING_SIZE GENMASK(9, 0) 73 + #define EIP93_REG_PE_RING_THRESH 0x8c 74 + #define EIPR93_PE_TIMEROUT_EN BIT(31) 75 + #define EIPR93_PE_RD_TIMEOUT GENMASK(29, 26) 76 + #define EIPR93_PE_RDR_THRESH GENMASK(25, 16) 77 + #define EIPR93_PE_CDR_THRESH GENMASK(9, 0) 78 + #define EIP93_REG_PE_CD_COUNT 0x90 79 + #define EIP93_PE_CD_COUNT GENMASK(10, 0) 80 + /* 81 + * In the same register, writing a value in GENMASK(7, 0) will 82 + * increment the descriptor count and start DMA action. 83 + */ 84 + #define EIP93_PE_CD_COUNT_INCR GENMASK(7, 0) 85 + #define EIP93_REG_PE_RD_COUNT 0x94 86 + #define EIP93_PE_RD_COUNT GENMASK(10, 0) 87 + /* 88 + * In the same register, writing a value in GENMASK(7, 0) will 89 + * increment the descriptor count and start DMA action. 
90 + */ 91 + #define EIP93_PE_RD_COUNT_INCR GENMASK(7, 0) 92 + #define EIP93_REG_PE_RING_RW_PNTR 0x98 /* RING_PNTR */ 93 + 94 + /* PACKET ENGINE configuration registers */ 95 + #define EIP93_REG_PE_CONFIG 0x100 96 + #define EIP93_PE_CONFIG_SWAP_TARGET BIT(20) 97 + #define EIP93_PE_CONFIG_SWAP_DATA BIT(18) 98 + #define EIP93_PE_CONFIG_SWAP_SA BIT(17) 99 + #define EIP93_PE_CONFIG_SWAP_CDRD BIT(16) 100 + #define EIP93_PE_CONFIG_EN_CDR_UPDATE BIT(10) 101 + #define EIP93_PE_CONFIG_PE_MODE GENMASK(9, 8) 102 + #define EIP93_PE_TARGET_AUTO_RING_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x3) 103 + #define EIP93_PE_TARGET_COMMAND_NO_RDR_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x2) 104 + #define EIP93_PE_TARGET_COMMAND_WITH_RDR_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x1) 105 + #define EIP93_PE_DIRECT_HOST_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x0) 106 + #define EIP93_PE_CONFIG_RST_RING BIT(2) 107 + #define EIP93_PE_CONFIG_RST_PE BIT(0) 108 + #define EIP93_REG_PE_STATUS 0x104 109 + #define EIP93_REG_PE_BUF_THRESH 0x10c 110 + #define EIP93_PE_OUTBUF_THRESH GENMASK(23, 16) 111 + #define EIP93_PE_INBUF_THRESH GENMASK(7, 0) 112 + #define EIP93_REG_PE_INBUF_COUNT 0x100 113 + #define EIP93_REG_PE_OUTBUF_COUNT 0x114 114 + #define EIP93_REG_PE_BUF_RW_PNTR 0x118 /* BUF_PNTR */ 115 + 116 + /* PACKET ENGINE endian config */ 117 + #define EIP93_REG_PE_ENDIAN_CONFIG 0x1cc 118 + #define EIP93_AIROHA_REG_PE_ENDIAN_CONFIG 0x1d0 119 + #define EIP93_PE_ENDIAN_TARGET_BYTE_SWAP GENMASK(23, 16) 120 + #define EIP93_PE_ENDIAN_MASTER_BYTE_SWAP GENMASK(7, 0) 121 + /* 122 + * Byte goes 2 and 2 and are referenced by ID 123 + * Split GENMASK(7, 0) in 4 part, one for each byte. 
124 + * Example LITTLE ENDIAN: Example BIG ENDIAN 125 + * GENMASK(7, 6) 0x3 GENMASK(7, 6) 0x0 126 + * GENMASK(5, 4) 0x2 GENMASK(7, 6) 0x1 127 + * GENMASK(3, 2) 0x1 GENMASK(3, 2) 0x2 128 + * GENMASK(1, 0) 0x0 GENMASK(1, 0) 0x3 129 + */ 130 + #define EIP93_PE_ENDIAN_BYTE0 0x0 131 + #define EIP93_PE_ENDIAN_BYTE1 0x1 132 + #define EIP93_PE_ENDIAN_BYTE2 0x2 133 + #define EIP93_PE_ENDIAN_BYTE3 0x3 134 + 135 + /* EIP93 CLOCK control registers */ 136 + #define EIP93_REG_PE_CLOCK_CTRL 0x1e8 137 + #define EIP93_PE_CLOCK_EN_HASH_CLK BIT(4) 138 + #define EIP93_PE_CLOCK_EN_ARC4_CLK BIT(3) 139 + #define EIP93_PE_CLOCK_EN_AES_CLK BIT(2) 140 + #define EIP93_PE_CLOCK_EN_DES_CLK BIT(1) 141 + #define EIP93_PE_CLOCK_EN_PE_CLK BIT(0) 142 + 143 + /* EIP93 Device Option and Revision Register */ 144 + #define EIP93_REG_PE_OPTION_1 0x1f4 145 + #define EIP93_PE_OPTION_MAC_KEY256 BIT(31) 146 + #define EIP93_PE_OPTION_MAC_KEY192 BIT(30) 147 + #define EIP93_PE_OPTION_MAC_KEY128 BIT(29) 148 + #define EIP93_PE_OPTION_AES_CBC_MAC BIT(28) 149 + #define EIP93_PE_OPTION_AES_XCBX BIT(23) 150 + #define EIP93_PE_OPTION_SHA_256 BIT(19) 151 + #define EIP93_PE_OPTION_SHA_224 BIT(18) 152 + #define EIP93_PE_OPTION_SHA_1 BIT(17) 153 + #define EIP93_PE_OPTION_MD5 BIT(16) 154 + #define EIP93_PE_OPTION_AES_KEY256 BIT(15) 155 + #define EIP93_PE_OPTION_AES_KEY192 BIT(14) 156 + #define EIP93_PE_OPTION_AES_KEY128 BIT(13) 157 + #define EIP93_PE_OPTION_AES BIT(2) 158 + #define EIP93_PE_OPTION_ARC4 BIT(1) 159 + #define EIP93_PE_OPTION_TDES BIT(0) /* DES and TDES */ 160 + #define EIP93_REG_PE_OPTION_0 0x1f8 161 + #define EIP93_REG_PE_REVISION 0x1fc 162 + #define EIP93_PE_REVISION_MAJ_HW_REV GENMASK(27, 24) 163 + #define EIP93_PE_REVISION_MIN_HW_REV GENMASK(23, 20) 164 + #define EIP93_PE_REVISION_HW_PATCH GENMASK(19, 16) 165 + #define EIP93_PE_REVISION_EIP_NO GENMASK(7, 0) 166 + 167 + /* EIP93 Interrupt Control Register */ 168 + #define EIP93_REG_INT_UNMASK_STAT 0x200 169 + #define EIP93_REG_INT_MASK_STAT 0x204 170 + 
/* Same offset as EIP93_REG_INT_MASK_STAT above (0x204) — presumably a
 * write-to-clear alias; NOTE(review): confirm against the EIP-93 datasheet.
 */
#define EIP93_REG_INT_CLR			0x204
#define EIP93_REG_INT_MASK			0x208 /* INT_EN */
/* Each INT register has the same bitmap */
#define EIP93_INT_INTERFACE_ERR			BIT(18)
#define EIP93_INT_RPOC_ERR			BIT(17)
#define EIP93_INT_PE_RING_ERR			BIT(16)
#define EIP93_INT_HALT				BIT(15)
#define EIP93_INT_OUTBUF_THRESH			BIT(11)
#define EIP93_INT_INBUF_THRESH			BIT(10)
#define EIP93_INT_OPERATION_DONE		BIT(9)
#define EIP93_INT_RDR_THRESH			BIT(1)
#define EIP93_INT_CDR_THRESH			BIT(0)
#define EIP93_INT_ALL				(EIP93_INT_INTERFACE_ERR | \
						 EIP93_INT_RPOC_ERR | \
						 EIP93_INT_PE_RING_ERR | \
						 EIP93_INT_HALT | \
						 EIP93_INT_OUTBUF_THRESH | \
						 EIP93_INT_INBUF_THRESH | \
						 EIP93_INT_OPERATION_DONE | \
						 EIP93_INT_RDR_THRESH | \
						 EIP93_INT_CDR_THRESH)

#define EIP93_REG_INT_CFG			0x20c
#define EIP93_INT_TYPE_PULSE			BIT(0)
#define EIP93_REG_MASK_ENABLE			0x210
#define EIP93_REG_MASK_DISABLE			0x214

/*
 * EIP93 SA Record register
 *
 * SA_CMD_0 selects the basic operation: opcode/direction, cipher, hash,
 * padding scheme, digest length and where IV/hash state are sourced from.
 */
#define EIP93_REG_SA_CMD_0			0x400
#define EIP93_SA_CMD_SAVE_HASH			BIT(29)
#define EIP93_SA_CMD_SAVE_IV			BIT(28)
#define EIP93_SA_CMD_HASH_SOURCE		GENMASK(27, 26)
#define EIP93_SA_CMD_HASH_NO_LOAD		FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x3)
#define EIP93_SA_CMD_HASH_FROM_STATE		FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x2)
#define EIP93_SA_CMD_HASH_FROM_SA		FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x0)
#define EIP93_SA_CMD_IV_SOURCE			GENMASK(25, 24)
#define EIP93_SA_CMD_IV_FROM_PRNG		FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x3)
#define EIP93_SA_CMD_IV_FROM_STATE		FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x2)
#define EIP93_SA_CMD_IV_FROM_INPUT		FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x1)
#define EIP93_SA_CMD_IV_NO_LOAD			FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x0)
/* Digest length in 32-bit words */
#define EIP93_SA_CMD_DIGEST_LENGTH		GENMASK(23, 20)
#define EIP93_SA_CMD_DIGEST_10WORD		FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0xa) /* SRTP and TLS */
#define EIP93_SA_CMD_DIGEST_8WORD		FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x8) /* SHA-256 */
#define EIP93_SA_CMD_DIGEST_7WORD		FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x7) /* SHA-224 */
#define EIP93_SA_CMD_DIGEST_6WORD		FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x6)
#define EIP93_SA_CMD_DIGEST_5WORD		FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x5) /* SHA1 */
#define EIP93_SA_CMD_DIGEST_4WORD		FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x4) /* MD5 and AES-based */
#define EIP93_SA_CMD_DIGEST_3WORD_IPSEC		FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x3) /* IPSEC */
#define EIP93_SA_CMD_DIGEST_2WORD		FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x2)
#define EIP93_SA_CMD_DIGEST_1WORD		FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x1)
#define EIP93_SA_CMD_DIGEST_3WORD		FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x0) /* 96bit output */
#define EIP93_SA_CMD_HDR_PROC			BIT(19)
#define EIP93_SA_CMD_EXT_PAD			BIT(18)
#define EIP93_SA_CMD_SCPAD			BIT(17)
#define EIP93_SA_CMD_HASH			GENMASK(15, 12)
#define EIP93_SA_CMD_HASH_NULL			FIELD_PREP(EIP93_SA_CMD_HASH, 0xf)
#define EIP93_SA_CMD_HASH_SHA256		FIELD_PREP(EIP93_SA_CMD_HASH, 0x3)
#define EIP93_SA_CMD_HASH_SHA224		FIELD_PREP(EIP93_SA_CMD_HASH, 0x2)
#define EIP93_SA_CMD_HASH_SHA1			FIELD_PREP(EIP93_SA_CMD_HASH, 0x1)
#define EIP93_SA_CMD_HASH_MD5			FIELD_PREP(EIP93_SA_CMD_HASH, 0x0)
#define EIP93_SA_CMD_CIPHER			GENMASK(11, 8)
#define EIP93_SA_CMD_CIPHER_NULL		FIELD_PREP(EIP93_SA_CMD_CIPHER, 0xf)
#define EIP93_SA_CMD_CIPHER_AES			FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x3)
#define EIP93_SA_CMD_CIPHER_ARC4		FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x2)
#define EIP93_SA_CMD_CIPHER_3DES		FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x1)
#define EIP93_SA_CMD_CIPHER_DES			FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x0)
/*
 * NOTE(review): PAD_TYPE is a 2-bit field but the SSL (0x6) and TLS/DTLS
 * (0x5) values need 3 bits; FIELD_PREP() would trip its compile-time field
 * check if these unused macros were ever referenced. Verify the field width
 * against the datasheet before using them.
 */
#define EIP93_SA_CMD_PAD_TYPE			GENMASK(7, 6)
#define EIP93_SA_CMD_PAD_CONST_SSL		FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x6)
#define EIP93_SA_CMD_PAD_TLS_DTLS		FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x5)
#define EIP93_SA_CMD_PAD_ZERO			FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x3)
#define EIP93_SA_CMD_PAD_CONST			FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x2)
#define EIP93_SA_CMD_PAD_PKCS7			FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x1)
#define EIP93_SA_CMD_PAD_IPSEC			FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x0)
/* Operation group: selects which OPCODE table below applies */
#define EIP93_SA_CMD_OPGROUP			GENMASK(5, 4)
#define EIP93_SA_CMD_OP_EXT			FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x2)
#define EIP93_SA_CMD_OP_PROTOCOL		FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x1)
#define EIP93_SA_CMD_OP_BASIC			FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x0)
#define EIP93_SA_CMD_DIRECTION_IN		BIT(3) /* 0: outbound 1: inbound */
/* Opcode meaning depends on OPGROUP and DIRECTION */
#define EIP93_SA_CMD_OPCODE			GENMASK(2, 0)
#define EIP93_SA_CMD_OPCODE_BASIC_OUT_PRNG	0x7
#define EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH	0x3
#define EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC_HASH	0x1
#define EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC	0x0
#define EIP93_SA_CMD_OPCODE_BASIC_IN_HASH	0x3
#define EIP93_SA_CMD_OPCODE_BASIC_IN_HASH_DEC	0x1
#define EIP93_SA_CMD_OPCODE_BASIC_IN_DEC	0x0
#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_ESP	0x0
#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_SSL	0x4
#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_TLS	0x5
#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_SRTP	0x7
#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_ESP	0x0
#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_SSL	0x2
#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_TLS	0x3
#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_SRTP	0x7
#define EIP93_SA_CMD_OPCODE_EXT_OUT_DTSL	0x1
#define EIP93_SA_CMD_OPCODE_EXT_OUT_SSL		0x4
#define EIP93_SA_CMD_OPCODE_EXT_OUT_TLSV10	0x5
#define EIP93_SA_CMD_OPCODE_EXT_OUT_TLSV11	0x6
#define EIP93_SA_CMD_OPCODE_EXT_IN_DTSL		0x1
#define EIP93_SA_CMD_OPCODE_EXT_IN_SSL		0x4
#define EIP93_SA_CMD_OPCODE_EXT_IN_TLSV10	0x5
#define EIP93_SA_CMD_OPCODE_EXT_IN_TLSV11	0x6
#define EIP93_REG_SA_CMD_1			0x404
/* SA_CMD_1: key length, crypt offset and cipher-mode details */
#define EIP93_SA_CMD_EN_SEQNUM_CHK		BIT(29)
/* This mask can be either used for ARC4 or AES */
#define EIP93_SA_CMD_ARC4_KEY_LENGHT		GENMASK(28, 24) /* sic: "LENGHT" spelling kept, callers use it */
#define EIP93_SA_CMD_AES_DEC_KEY		BIT(28) /* 0: encrypt key 1: decrypt key */
#define EIP93_SA_CMD_AES_KEY_LENGTH		GENMASK(26, 24)
#define EIP93_SA_CMD_AES_KEY_256BIT		FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x4)
#define EIP93_SA_CMD_AES_KEY_192BIT		FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x3)
#define EIP93_SA_CMD_AES_KEY_128BIT		FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x2)
#define EIP93_SA_CMD_HASH_CRYPT_OFFSET		GENMASK(23, 16)
#define EIP93_SA_CMD_BYTE_OFFSET		BIT(13) /* 0: CRYPT_OFFSET in 32bit word 1: CRYPT_OFFSET in 8bit bytes */
/* BIT(12) is shared: presumably HMAC vs SSL-MAC depends on the selected opcode group — verify */
#define EIP93_SA_CMD_HMAC			BIT(12)
#define EIP93_SA_CMD_SSL_MAC			BIT(12)
/* This mask can be either used for ARC4 or AES */
#define EIP93_SA_CMD_CHIPER_MODE		GENMASK(9, 8)
/* AES or DES operations */
#define EIP93_SA_CMD_CHIPER_MODE_ICM		FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x3)
#define EIP93_SA_CMD_CHIPER_MODE_CTR		FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x2)
#define EIP93_SA_CMD_CHIPER_MODE_CBC		FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x1)
#define EIP93_SA_CMD_CHIPER_MODE_ECB		FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x0)
/* ARC4 operations */
#define EIP93_SA_CMD_CHIPER_MODE_STATEFULL	FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x1)
#define EIP93_SA_CMD_CHIPER_MODE_STATELESS	FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x0)
#define EIP93_SA_CMD_COPY_PAD			BIT(3)
#define EIP93_SA_CMD_COPY_PAYLOAD		BIT(2)
#define EIP93_SA_CMD_COPY_HEADER		BIT(1)
#define EIP93_SA_CMD_COPY_DIGEST		BIT(0) /* With this enabled, COPY_PAD is required */

/* State save register */
#define EIP93_REG_STATE_IV_0			0x500
#define EIP93_REG_STATE_IV_1			0x504

#define EIP93_REG_PE_ARC4STATE			0x700

/*
 * Security Association record: per-request command words, cipher key and
 * HMAC inner/outer digests. __packed because the layout is consumed by the
 * engine (its address is placed in eip93_descriptor.sa_addr) and must match
 * the hardware exactly.
 */
struct sa_record {
	u32 sa_cmd0_word;	/* EIP93_SA_CMD_0 image */
	u32 sa_cmd1_word;	/* EIP93_SA_CMD_1 image */
	u32 sa_key[8];		/* cipher key, up to 256 bits */
	u8 sa_i_digest[32];	/* HMAC inner digest */
	u8 sa_o_digest[32];	/* HMAC outer digest */
	u32 sa_spi;
	u32 sa_seqnum[2];
	u32 sa_seqmum_mask[2];	/* sic: "seqmum" spelling kept, callers use it */
	u32 sa_nonce;
} __packed;

/*
 * Per-request saved state: IV, processed-byte counters and intermediate
 * hash digest. Fixed hardware layout (referenced via
 * eip93_descriptor.state_addr).
 */
struct sa_state {
	u32 state_iv[4];
	u32 state_byte_cnt[2];
	u8 state_i_digest[32];
} __packed;

/*
 * Command/result ring descriptor. The *_addr fields hold bus addresses of
 * the data buffers, SA record, saved state and ARC4 state; user_id is
 * returned untouched so the driver can match results to requests.
 */
struct eip93_descriptor {
	u32 pe_ctrl_stat_word;
	u32 src_addr;
	u32 dst_addr;
	u32 sa_addr;
	u32 state_addr;
	u32 arc4_addr;
	u32 user_id;
	u32 pe_length_word;
} __packed;

#endif