Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: cavium/nitrox - Added AEAD cipher support

Added support to offload AEAD ciphers to NITROX. The only AEAD cipher
currently supported is 'gcm(aes)'.

Signed-off-by: Nagadheeraj Rottela <rnagadheeraj@marvell.com>
Reviewed-by: Srikanth Jampala <jsrikanth@marvell.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Nagadheeraj Rottela and committed by
Herbert Xu
c9613335 2326828e

+1107 -609
+3 -1
drivers/crypto/cavium/nitrox/Makefile
··· 7 7 nitrox_hal.o \ 8 8 nitrox_reqmgr.o \ 9 9 nitrox_algs.o \ 10 - nitrox_mbx.o 10 + nitrox_mbx.o \ 11 + nitrox_skcipher.o \ 12 + nitrox_aead.o 11 13 12 14 n5pf-$(CONFIG_PCI_IOV) += nitrox_sriov.o 13 15 n5pf-$(CONFIG_DEBUG_FS) += nitrox_debugfs.o
+364
drivers/crypto/cavium/nitrox/nitrox_aead.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/kernel.h> 3 + #include <linux/printk.h> 4 + #include <linux/crypto.h> 5 + #include <linux/rtnetlink.h> 6 + 7 + #include <crypto/aead.h> 8 + #include <crypto/authenc.h> 9 + #include <crypto/des.h> 10 + #include <crypto/sha.h> 11 + #include <crypto/internal/aead.h> 12 + #include <crypto/scatterwalk.h> 13 + #include <crypto/gcm.h> 14 + 15 + #include "nitrox_dev.h" 16 + #include "nitrox_common.h" 17 + #include "nitrox_req.h" 18 + 19 + #define GCM_AES_SALT_SIZE 4 20 + 21 + /** 22 + * struct nitrox_crypt_params - Params to set nitrox crypto request. 23 + * @cryptlen: Encryption/Decryption data length 24 + * @authlen: Assoc data length + Cryptlen 25 + * @srclen: Input buffer length 26 + * @dstlen: Output buffer length 27 + * @iv: IV data 28 + * @ivsize: IV data length 29 + * @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT) 30 + */ 31 + struct nitrox_crypt_params { 32 + unsigned int cryptlen; 33 + unsigned int authlen; 34 + unsigned int srclen; 35 + unsigned int dstlen; 36 + u8 *iv; 37 + int ivsize; 38 + u8 ctrl_arg; 39 + }; 40 + 41 + union gph_p3 { 42 + struct { 43 + #ifdef __BIG_ENDIAN_BITFIELD 44 + u16 iv_offset : 8; 45 + u16 auth_offset : 8; 46 + #else 47 + u16 auth_offset : 8; 48 + u16 iv_offset : 8; 49 + #endif 50 + }; 51 + u16 param; 52 + }; 53 + 54 + static int nitrox_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, 55 + unsigned int keylen) 56 + { 57 + int aes_keylen; 58 + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); 59 + struct flexi_crypto_context *fctx; 60 + union fc_ctx_flags flags; 61 + 62 + aes_keylen = flexi_aes_keylen(keylen); 63 + if (aes_keylen < 0) { 64 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); 65 + return -EINVAL; 66 + } 67 + 68 + /* fill crypto context */ 69 + fctx = nctx->u.fctx; 70 + flags.f = be64_to_cpu(fctx->flags.f); 71 + flags.w0.aes_keylen = aes_keylen; 72 + fctx->flags.f = cpu_to_be64(flags.f); 73 + 74 + /* copy enc key to context */ 75 
+ memset(&fctx->crypto, 0, sizeof(fctx->crypto)); 76 + memcpy(fctx->crypto.u.key, key, keylen); 77 + 78 + return 0; 79 + } 80 + 81 + static int nitrox_aead_setauthsize(struct crypto_aead *aead, 82 + unsigned int authsize) 83 + { 84 + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); 85 + struct flexi_crypto_context *fctx = nctx->u.fctx; 86 + union fc_ctx_flags flags; 87 + 88 + flags.f = be64_to_cpu(fctx->flags.f); 89 + flags.w0.mac_len = authsize; 90 + fctx->flags.f = cpu_to_be64(flags.f); 91 + 92 + aead->authsize = authsize; 93 + 94 + return 0; 95 + } 96 + 97 + static int alloc_src_sglist(struct aead_request *areq, char *iv, int ivsize, 98 + int buflen) 99 + { 100 + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); 101 + int nents = sg_nents_for_len(areq->src, buflen) + 1; 102 + int ret; 103 + 104 + if (nents < 0) 105 + return nents; 106 + 107 + /* Allocate buffer to hold IV and input scatterlist array */ 108 + ret = alloc_src_req_buf(nkreq, nents, ivsize); 109 + if (ret) 110 + return ret; 111 + 112 + nitrox_creq_copy_iv(nkreq->src, iv, ivsize); 113 + nitrox_creq_set_src_sg(nkreq, nents, ivsize, areq->src, buflen); 114 + 115 + return 0; 116 + } 117 + 118 + static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen) 119 + { 120 + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); 121 + int nents = sg_nents_for_len(areq->dst, buflen) + 3; 122 + int ret; 123 + 124 + if (nents < 0) 125 + return nents; 126 + 127 + /* Allocate buffer to hold ORH, COMPLETION and output scatterlist 128 + * array 129 + */ 130 + ret = alloc_dst_req_buf(nkreq, nents); 131 + if (ret) 132 + return ret; 133 + 134 + nitrox_creq_set_orh(nkreq); 135 + nitrox_creq_set_comp(nkreq); 136 + nitrox_creq_set_dst_sg(nkreq, nents, ivsize, areq->dst, buflen); 137 + 138 + return 0; 139 + } 140 + 141 + static void free_src_sglist(struct aead_request *areq) 142 + { 143 + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); 144 + 145 + kfree(nkreq->src); 
146 + } 147 + 148 + static void free_dst_sglist(struct aead_request *areq) 149 + { 150 + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); 151 + 152 + kfree(nkreq->dst); 153 + } 154 + 155 + static int nitrox_set_creq(struct aead_request *areq, 156 + struct nitrox_crypt_params *params) 157 + { 158 + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); 159 + struct se_crypto_request *creq = &nkreq->creq; 160 + struct crypto_aead *aead = crypto_aead_reqtfm(areq); 161 + union gph_p3 param3; 162 + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); 163 + int ret; 164 + 165 + creq->flags = areq->base.flags; 166 + creq->gfp = (areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 167 + GFP_KERNEL : GFP_ATOMIC; 168 + 169 + creq->ctrl.value = 0; 170 + creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC; 171 + creq->ctrl.s.arg = params->ctrl_arg; 172 + 173 + creq->gph.param0 = cpu_to_be16(params->cryptlen); 174 + creq->gph.param1 = cpu_to_be16(params->authlen); 175 + creq->gph.param2 = cpu_to_be16(params->ivsize + areq->assoclen); 176 + param3.iv_offset = 0; 177 + param3.auth_offset = params->ivsize; 178 + creq->gph.param3 = cpu_to_be16(param3.param); 179 + 180 + creq->ctx_handle = nctx->u.ctx_handle; 181 + creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context); 182 + 183 + ret = alloc_src_sglist(areq, params->iv, params->ivsize, 184 + params->srclen); 185 + if (ret) 186 + return ret; 187 + 188 + ret = alloc_dst_sglist(areq, params->ivsize, params->dstlen); 189 + if (ret) { 190 + free_src_sglist(areq); 191 + return ret; 192 + } 193 + 194 + return 0; 195 + } 196 + 197 + static void nitrox_aead_callback(void *arg, int err) 198 + { 199 + struct aead_request *areq = arg; 200 + 201 + free_src_sglist(areq); 202 + free_dst_sglist(areq); 203 + if (err) { 204 + pr_err_ratelimited("request failed status 0x%0x\n", err); 205 + err = -EINVAL; 206 + } 207 + 208 + areq->base.complete(&areq->base, err); 209 + } 210 + 211 + static int nitrox_aes_gcm_enc(struct aead_request *areq) 212 
+ { 213 + struct crypto_aead *aead = crypto_aead_reqtfm(areq); 214 + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); 215 + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); 216 + struct se_crypto_request *creq = &nkreq->creq; 217 + struct flexi_crypto_context *fctx = nctx->u.fctx; 218 + struct nitrox_crypt_params params; 219 + int ret; 220 + 221 + memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE); 222 + 223 + memset(&params, 0, sizeof(params)); 224 + params.cryptlen = areq->cryptlen; 225 + params.authlen = areq->assoclen + params.cryptlen; 226 + params.srclen = params.authlen; 227 + params.dstlen = params.srclen + aead->authsize; 228 + params.iv = &areq->iv[GCM_AES_SALT_SIZE]; 229 + params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE; 230 + params.ctrl_arg = ENCRYPT; 231 + ret = nitrox_set_creq(areq, &params); 232 + if (ret) 233 + return ret; 234 + 235 + /* send the crypto request */ 236 + return nitrox_process_se_request(nctx->ndev, creq, nitrox_aead_callback, 237 + areq); 238 + } 239 + 240 + static int nitrox_aes_gcm_dec(struct aead_request *areq) 241 + { 242 + struct crypto_aead *aead = crypto_aead_reqtfm(areq); 243 + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); 244 + struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq); 245 + struct se_crypto_request *creq = &nkreq->creq; 246 + struct flexi_crypto_context *fctx = nctx->u.fctx; 247 + struct nitrox_crypt_params params; 248 + int ret; 249 + 250 + memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE); 251 + 252 + memset(&params, 0, sizeof(params)); 253 + params.cryptlen = areq->cryptlen - aead->authsize; 254 + params.authlen = areq->assoclen + params.cryptlen; 255 + params.srclen = areq->cryptlen + areq->assoclen; 256 + params.dstlen = params.srclen - aead->authsize; 257 + params.iv = &areq->iv[GCM_AES_SALT_SIZE]; 258 + params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE; 259 + params.ctrl_arg = DECRYPT; 260 + ret = nitrox_set_creq(areq, &params); 261 + if (ret) 262 + return 
ret; 263 + 264 + /* send the crypto request */ 265 + return nitrox_process_se_request(nctx->ndev, creq, nitrox_aead_callback, 266 + areq); 267 + } 268 + 269 + static int nitrox_aead_init(struct crypto_aead *aead) 270 + { 271 + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); 272 + struct crypto_ctx_hdr *chdr; 273 + 274 + /* get the first device */ 275 + nctx->ndev = nitrox_get_first_device(); 276 + if (!nctx->ndev) 277 + return -ENODEV; 278 + 279 + /* allocate nitrox crypto context */ 280 + chdr = crypto_alloc_context(nctx->ndev); 281 + if (!chdr) { 282 + nitrox_put_device(nctx->ndev); 283 + return -ENOMEM; 284 + } 285 + nctx->chdr = chdr; 286 + nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr + 287 + sizeof(struct ctx_hdr)); 288 + nctx->u.fctx->flags.f = 0; 289 + 290 + return 0; 291 + } 292 + 293 + static int nitrox_aes_gcm_init(struct crypto_aead *aead) 294 + { 295 + int ret; 296 + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); 297 + union fc_ctx_flags *flags; 298 + 299 + ret = nitrox_aead_init(aead); 300 + if (ret) 301 + return ret; 302 + 303 + flags = &nctx->u.fctx->flags; 304 + flags->w0.cipher_type = CIPHER_AES_GCM; 305 + flags->w0.hash_type = AUTH_NULL; 306 + flags->w0.iv_source = IV_FROM_DPTR; 307 + /* ask microcode to calculate ipad/opad */ 308 + flags->w0.auth_input_type = 1; 309 + flags->f = be64_to_cpu(flags->f); 310 + 311 + crypto_aead_set_reqsize(aead, sizeof(struct aead_request) + 312 + sizeof(struct nitrox_kcrypt_request)); 313 + 314 + return 0; 315 + } 316 + 317 + static void nitrox_aead_exit(struct crypto_aead *aead) 318 + { 319 + struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); 320 + 321 + /* free the nitrox crypto context */ 322 + if (nctx->u.ctx_handle) { 323 + struct flexi_crypto_context *fctx = nctx->u.fctx; 324 + 325 + memzero_explicit(&fctx->crypto, sizeof(struct crypto_keys)); 326 + memzero_explicit(&fctx->auth, sizeof(struct auth_keys)); 327 + crypto_free_context((void *)nctx->chdr); 328 + } 329 + 
nitrox_put_device(nctx->ndev); 330 + 331 + nctx->u.ctx_handle = 0; 332 + nctx->ndev = NULL; 333 + } 334 + 335 + static struct aead_alg nitrox_aeads[] = { { 336 + .base = { 337 + .cra_name = "gcm(aes)", 338 + .cra_driver_name = "n5_aes_gcm", 339 + .cra_priority = PRIO, 340 + .cra_flags = CRYPTO_ALG_ASYNC, 341 + .cra_blocksize = AES_BLOCK_SIZE, 342 + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 343 + .cra_alignmask = 0, 344 + .cra_module = THIS_MODULE, 345 + }, 346 + .setkey = nitrox_aes_gcm_setkey, 347 + .setauthsize = nitrox_aead_setauthsize, 348 + .encrypt = nitrox_aes_gcm_enc, 349 + .decrypt = nitrox_aes_gcm_dec, 350 + .init = nitrox_aes_gcm_init, 351 + .exit = nitrox_aead_exit, 352 + .ivsize = GCM_AES_IV_SIZE, 353 + .maxauthsize = AES_BLOCK_SIZE, 354 + } }; 355 + 356 + int nitrox_register_aeads(void) 357 + { 358 + return crypto_register_aeads(nitrox_aeads, ARRAY_SIZE(nitrox_aeads)); 359 + } 360 + 361 + void nitrox_unregister_aeads(void) 362 + { 363 + crypto_unregister_aeads(nitrox_aeads, ARRAY_SIZE(nitrox_aeads)); 364 + }
+15 -552
drivers/crypto/cavium/nitrox/nitrox_algs.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - #include <linux/crypto.h> 3 - #include <linux/kernel.h> 4 - #include <linux/module.h> 5 - #include <linux/printk.h> 6 - 7 - #include <crypto/aes.h> 8 - #include <crypto/skcipher.h> 9 - #include <crypto/ctr.h> 10 - #include <crypto/des.h> 11 - #include <crypto/xts.h> 12 - 13 - #include "nitrox_dev.h" 14 1 #include "nitrox_common.h" 15 - #include "nitrox_req.h" 16 - 17 - #define PRIO 4001 18 - 19 - struct nitrox_cipher { 20 - const char *name; 21 - enum flexi_cipher value; 22 - }; 23 - 24 - /** 25 - * supported cipher list 26 - */ 27 - static const struct nitrox_cipher flexi_cipher_table[] = { 28 - { "null", CIPHER_NULL }, 29 - { "cbc(des3_ede)", CIPHER_3DES_CBC }, 30 - { "ecb(des3_ede)", CIPHER_3DES_ECB }, 31 - { "cbc(aes)", CIPHER_AES_CBC }, 32 - { "ecb(aes)", CIPHER_AES_ECB }, 33 - { "cfb(aes)", CIPHER_AES_CFB }, 34 - { "rfc3686(ctr(aes))", CIPHER_AES_CTR }, 35 - { "xts(aes)", CIPHER_AES_XTS }, 36 - { "cts(cbc(aes))", CIPHER_AES_CBC_CTS }, 37 - { NULL, CIPHER_INVALID } 38 - }; 39 - 40 - static enum flexi_cipher flexi_cipher_type(const char *name) 41 - { 42 - const struct nitrox_cipher *cipher = flexi_cipher_table; 43 - 44 - while (cipher->name) { 45 - if (!strcmp(cipher->name, name)) 46 - break; 47 - cipher++; 48 - } 49 - return cipher->value; 50 - } 51 - 52 - static int flexi_aes_keylen(int keylen) 53 - { 54 - int aes_keylen; 55 - 56 - switch (keylen) { 57 - case AES_KEYSIZE_128: 58 - aes_keylen = 1; 59 - break; 60 - case AES_KEYSIZE_192: 61 - aes_keylen = 2; 62 - break; 63 - case AES_KEYSIZE_256: 64 - aes_keylen = 3; 65 - break; 66 - default: 67 - aes_keylen = -EINVAL; 68 - break; 69 - } 70 - return aes_keylen; 71 - } 72 - 73 - static int nitrox_skcipher_init(struct crypto_skcipher *tfm) 74 - { 75 - struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); 76 - struct crypto_ctx_hdr *chdr; 77 - 78 - /* get the first device */ 79 - nctx->ndev = nitrox_get_first_device(); 80 - if (!nctx->ndev) 81 - return -ENODEV; 
82 - 83 - /* allocate nitrox crypto context */ 84 - chdr = crypto_alloc_context(nctx->ndev); 85 - if (!chdr) { 86 - nitrox_put_device(nctx->ndev); 87 - return -ENOMEM; 88 - } 89 - nctx->chdr = chdr; 90 - nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr + 91 - sizeof(struct ctx_hdr)); 92 - crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) + 93 - sizeof(struct nitrox_kcrypt_request)); 94 - return 0; 95 - } 96 - 97 - static void nitrox_skcipher_exit(struct crypto_skcipher *tfm) 98 - { 99 - struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); 100 - 101 - /* free the nitrox crypto context */ 102 - if (nctx->u.ctx_handle) { 103 - struct flexi_crypto_context *fctx = nctx->u.fctx; 104 - 105 - memset(&fctx->crypto, 0, sizeof(struct crypto_keys)); 106 - memset(&fctx->auth, 0, sizeof(struct auth_keys)); 107 - crypto_free_context((void *)nctx->chdr); 108 - } 109 - nitrox_put_device(nctx->ndev); 110 - 111 - nctx->u.ctx_handle = 0; 112 - nctx->ndev = NULL; 113 - } 114 - 115 - static inline int nitrox_skcipher_setkey(struct crypto_skcipher *cipher, 116 - int aes_keylen, const u8 *key, 117 - unsigned int keylen) 118 - { 119 - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); 120 - struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); 121 - struct flexi_crypto_context *fctx; 122 - enum flexi_cipher cipher_type; 123 - const char *name; 124 - 125 - name = crypto_tfm_alg_name(tfm); 126 - cipher_type = flexi_cipher_type(name); 127 - if (unlikely(cipher_type == CIPHER_INVALID)) { 128 - pr_err("unsupported cipher: %s\n", name); 129 - return -EINVAL; 130 - } 131 - 132 - /* fill crypto context */ 133 - fctx = nctx->u.fctx; 134 - fctx->flags = 0; 135 - fctx->w0.cipher_type = cipher_type; 136 - fctx->w0.aes_keylen = aes_keylen; 137 - fctx->w0.iv_source = IV_FROM_DPTR; 138 - fctx->flags = cpu_to_be64(*(u64 *)&fctx->w0); 139 - /* copy the key to context */ 140 - memcpy(fctx->crypto.u.key, key, keylen); 141 - 142 - return 0; 143 - } 144 - 145 - static int 
nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, 146 - unsigned int keylen) 147 - { 148 - int aes_keylen; 149 - 150 - aes_keylen = flexi_aes_keylen(keylen); 151 - if (aes_keylen < 0) { 152 - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 153 - return -EINVAL; 154 - } 155 - return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); 156 - } 157 - 158 - static int alloc_src_sglist(struct skcipher_request *skreq, int ivsize) 159 - { 160 - struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); 161 - int nents = sg_nents(skreq->src) + 1; 162 - struct se_crypto_request *creq = &nkreq->creq; 163 - char *iv; 164 - struct scatterlist *sg; 165 - 166 - /* Allocate buffer to hold IV and input scatterlist array */ 167 - nkreq->src = alloc_req_buf(nents, ivsize, creq->gfp); 168 - if (!nkreq->src) 169 - return -ENOMEM; 170 - 171 - /* copy iv */ 172 - iv = nkreq->src; 173 - memcpy(iv, skreq->iv, ivsize); 174 - 175 - sg = (struct scatterlist *)(iv + ivsize); 176 - creq->src = sg; 177 - sg_init_table(sg, nents); 178 - 179 - /* Input format: 180 - * +----+----------------+ 181 - * | IV | SRC sg entries | 182 - * +----+----------------+ 183 - */ 184 - 185 - /* IV */ 186 - sg = create_single_sg(sg, iv, ivsize); 187 - /* SRC entries */ 188 - create_multi_sg(sg, skreq->src); 189 - 190 - return 0; 191 - } 192 - 193 - static int alloc_dst_sglist(struct skcipher_request *skreq, int ivsize) 194 - { 195 - struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); 196 - int nents = sg_nents(skreq->dst) + 3; 197 - int extralen = ORH_HLEN + COMP_HLEN; 198 - struct se_crypto_request *creq = &nkreq->creq; 199 - struct scatterlist *sg; 200 - char *iv = nkreq->src; 201 - 202 - /* Allocate buffer to hold ORH, COMPLETION and output scatterlist 203 - * array 204 - */ 205 - nkreq->dst = alloc_req_buf(nents, extralen, creq->gfp); 206 - if (!nkreq->dst) 207 - return -ENOMEM; 208 - 209 - creq->orh = (u64 *)(nkreq->dst); 210 - set_orh_value(creq->orh); 
211 - 212 - creq->comp = (u64 *)(nkreq->dst + ORH_HLEN); 213 - set_comp_value(creq->comp); 214 - 215 - sg = (struct scatterlist *)(nkreq->dst + ORH_HLEN + COMP_HLEN); 216 - creq->dst = sg; 217 - sg_init_table(sg, nents); 218 - 219 - /* Output format: 220 - * +-----+----+----------------+-----------------+ 221 - * | ORH | IV | DST sg entries | COMPLETION Bytes| 222 - * +-----+----+----------------+-----------------+ 223 - */ 224 - 225 - /* ORH */ 226 - sg = create_single_sg(sg, creq->orh, ORH_HLEN); 227 - /* IV */ 228 - sg = create_single_sg(sg, iv, ivsize); 229 - /* DST entries */ 230 - sg = create_multi_sg(sg, skreq->dst); 231 - /* COMPLETION Bytes */ 232 - create_single_sg(sg, creq->comp, COMP_HLEN); 233 - 234 - return 0; 235 - } 236 - 237 - static void free_src_sglist(struct skcipher_request *skreq) 238 - { 239 - struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); 240 - 241 - kfree(nkreq->src); 242 - } 243 - 244 - static void free_dst_sglist(struct skcipher_request *skreq) 245 - { 246 - struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); 247 - 248 - kfree(nkreq->dst); 249 - } 250 - 251 - static void nitrox_skcipher_callback(struct skcipher_request *skreq, 252 - int err) 253 - { 254 - free_src_sglist(skreq); 255 - free_dst_sglist(skreq); 256 - if (err) { 257 - pr_err_ratelimited("request failed status 0x%0x\n", err); 258 - err = -EINVAL; 259 - } 260 - 261 - skcipher_request_complete(skreq, err); 262 - } 263 - 264 - static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc) 265 - { 266 - struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq); 267 - struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher); 268 - struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); 269 - int ivsize = crypto_skcipher_ivsize(cipher); 270 - struct se_crypto_request *creq; 271 - int ret; 272 - 273 - creq = &nkreq->creq; 274 - creq->flags = skreq->base.flags; 275 - creq->gfp = (skreq->base.flags & 
CRYPTO_TFM_REQ_MAY_SLEEP) ? 276 - GFP_KERNEL : GFP_ATOMIC; 277 - 278 - /* fill the request */ 279 - creq->ctrl.value = 0; 280 - creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC; 281 - creq->ctrl.s.arg = (enc ? ENCRYPT : DECRYPT); 282 - /* param0: length of the data to be encrypted */ 283 - creq->gph.param0 = cpu_to_be16(skreq->cryptlen); 284 - creq->gph.param1 = 0; 285 - /* param2: encryption data offset */ 286 - creq->gph.param2 = cpu_to_be16(ivsize); 287 - creq->gph.param3 = 0; 288 - 289 - creq->ctx_handle = nctx->u.ctx_handle; 290 - creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context); 291 - 292 - ret = alloc_src_sglist(skreq, ivsize); 293 - if (ret) 294 - return ret; 295 - 296 - ret = alloc_dst_sglist(skreq, ivsize); 297 - if (ret) { 298 - free_src_sglist(skreq); 299 - return ret; 300 - } 301 - 302 - nkreq->nctx = nctx; 303 - nkreq->skreq = skreq; 304 - 305 - /* send the crypto request */ 306 - return nitrox_process_se_request(nctx->ndev, creq, 307 - nitrox_skcipher_callback, skreq); 308 - } 309 - 310 - static int nitrox_aes_encrypt(struct skcipher_request *skreq) 311 - { 312 - return nitrox_skcipher_crypt(skreq, true); 313 - } 314 - 315 - static int nitrox_aes_decrypt(struct skcipher_request *skreq) 316 - { 317 - return nitrox_skcipher_crypt(skreq, false); 318 - } 319 - 320 - static int nitrox_3des_setkey(struct crypto_skcipher *cipher, 321 - const u8 *key, unsigned int keylen) 322 - { 323 - if (keylen != DES3_EDE_KEY_SIZE) { 324 - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 325 - return -EINVAL; 326 - } 327 - 328 - return nitrox_skcipher_setkey(cipher, 0, key, keylen); 329 - } 330 - 331 - static int nitrox_3des_encrypt(struct skcipher_request *skreq) 332 - { 333 - return nitrox_skcipher_crypt(skreq, true); 334 - } 335 - 336 - static int nitrox_3des_decrypt(struct skcipher_request *skreq) 337 - { 338 - return nitrox_skcipher_crypt(skreq, false); 339 - } 340 - 341 - static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher, 342 - const u8 
*key, unsigned int keylen) 343 - { 344 - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); 345 - struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); 346 - struct flexi_crypto_context *fctx; 347 - int aes_keylen, ret; 348 - 349 - ret = xts_check_key(tfm, key, keylen); 350 - if (ret) 351 - return ret; 352 - 353 - keylen /= 2; 354 - 355 - aes_keylen = flexi_aes_keylen(keylen); 356 - if (aes_keylen < 0) { 357 - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 358 - return -EINVAL; 359 - } 360 - 361 - fctx = nctx->u.fctx; 362 - /* copy KEY2 */ 363 - memcpy(fctx->auth.u.key2, (key + keylen), keylen); 364 - 365 - return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); 366 - } 367 - 368 - static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher, 369 - const u8 *key, unsigned int keylen) 370 - { 371 - struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); 372 - struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); 373 - struct flexi_crypto_context *fctx; 374 - int aes_keylen; 375 - 376 - if (keylen < CTR_RFC3686_NONCE_SIZE) 377 - return -EINVAL; 378 - 379 - fctx = nctx->u.fctx; 380 - 381 - memcpy(fctx->crypto.iv, key + (keylen - CTR_RFC3686_NONCE_SIZE), 382 - CTR_RFC3686_NONCE_SIZE); 383 - 384 - keylen -= CTR_RFC3686_NONCE_SIZE; 385 - 386 - aes_keylen = flexi_aes_keylen(keylen); 387 - if (aes_keylen < 0) { 388 - crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 389 - return -EINVAL; 390 - } 391 - return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); 392 - } 393 - 394 - static struct skcipher_alg nitrox_skciphers[] = { { 395 - .base = { 396 - .cra_name = "cbc(aes)", 397 - .cra_driver_name = "n5_cbc(aes)", 398 - .cra_priority = PRIO, 399 - .cra_flags = CRYPTO_ALG_ASYNC, 400 - .cra_blocksize = AES_BLOCK_SIZE, 401 - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 402 - .cra_alignmask = 0, 403 - .cra_module = THIS_MODULE, 404 - }, 405 - .min_keysize = AES_MIN_KEY_SIZE, 406 - .max_keysize = AES_MAX_KEY_SIZE, 407 - 
.ivsize = AES_BLOCK_SIZE, 408 - .setkey = nitrox_aes_setkey, 409 - .encrypt = nitrox_aes_encrypt, 410 - .decrypt = nitrox_aes_decrypt, 411 - .init = nitrox_skcipher_init, 412 - .exit = nitrox_skcipher_exit, 413 - }, { 414 - .base = { 415 - .cra_name = "ecb(aes)", 416 - .cra_driver_name = "n5_ecb(aes)", 417 - .cra_priority = PRIO, 418 - .cra_flags = CRYPTO_ALG_ASYNC, 419 - .cra_blocksize = AES_BLOCK_SIZE, 420 - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 421 - .cra_alignmask = 0, 422 - .cra_module = THIS_MODULE, 423 - }, 424 - .min_keysize = AES_MIN_KEY_SIZE, 425 - .max_keysize = AES_MAX_KEY_SIZE, 426 - .ivsize = AES_BLOCK_SIZE, 427 - .setkey = nitrox_aes_setkey, 428 - .encrypt = nitrox_aes_encrypt, 429 - .decrypt = nitrox_aes_decrypt, 430 - .init = nitrox_skcipher_init, 431 - .exit = nitrox_skcipher_exit, 432 - }, { 433 - .base = { 434 - .cra_name = "cfb(aes)", 435 - .cra_driver_name = "n5_cfb(aes)", 436 - .cra_priority = PRIO, 437 - .cra_flags = CRYPTO_ALG_ASYNC, 438 - .cra_blocksize = AES_BLOCK_SIZE, 439 - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 440 - .cra_alignmask = 0, 441 - .cra_module = THIS_MODULE, 442 - }, 443 - .min_keysize = AES_MIN_KEY_SIZE, 444 - .max_keysize = AES_MAX_KEY_SIZE, 445 - .ivsize = AES_BLOCK_SIZE, 446 - .setkey = nitrox_aes_setkey, 447 - .encrypt = nitrox_aes_encrypt, 448 - .decrypt = nitrox_aes_decrypt, 449 - .init = nitrox_skcipher_init, 450 - .exit = nitrox_skcipher_exit, 451 - }, { 452 - .base = { 453 - .cra_name = "xts(aes)", 454 - .cra_driver_name = "n5_xts(aes)", 455 - .cra_priority = PRIO, 456 - .cra_flags = CRYPTO_ALG_ASYNC, 457 - .cra_blocksize = AES_BLOCK_SIZE, 458 - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 459 - .cra_alignmask = 0, 460 - .cra_module = THIS_MODULE, 461 - }, 462 - .min_keysize = 2 * AES_MIN_KEY_SIZE, 463 - .max_keysize = 2 * AES_MAX_KEY_SIZE, 464 - .ivsize = AES_BLOCK_SIZE, 465 - .setkey = nitrox_aes_xts_setkey, 466 - .encrypt = nitrox_aes_encrypt, 467 - .decrypt = nitrox_aes_decrypt, 468 - 
.init = nitrox_skcipher_init, 469 - .exit = nitrox_skcipher_exit, 470 - }, { 471 - .base = { 472 - .cra_name = "rfc3686(ctr(aes))", 473 - .cra_driver_name = "n5_rfc3686(ctr(aes))", 474 - .cra_priority = PRIO, 475 - .cra_flags = CRYPTO_ALG_ASYNC, 476 - .cra_blocksize = 1, 477 - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 478 - .cra_alignmask = 0, 479 - .cra_module = THIS_MODULE, 480 - }, 481 - .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, 482 - .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, 483 - .ivsize = CTR_RFC3686_IV_SIZE, 484 - .init = nitrox_skcipher_init, 485 - .exit = nitrox_skcipher_exit, 486 - .setkey = nitrox_aes_ctr_rfc3686_setkey, 487 - .encrypt = nitrox_aes_encrypt, 488 - .decrypt = nitrox_aes_decrypt, 489 - }, { 490 - .base = { 491 - .cra_name = "cts(cbc(aes))", 492 - .cra_driver_name = "n5_cts(cbc(aes))", 493 - .cra_priority = PRIO, 494 - .cra_flags = CRYPTO_ALG_ASYNC, 495 - .cra_blocksize = AES_BLOCK_SIZE, 496 - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 497 - .cra_alignmask = 0, 498 - .cra_type = &crypto_ablkcipher_type, 499 - .cra_module = THIS_MODULE, 500 - }, 501 - .min_keysize = AES_MIN_KEY_SIZE, 502 - .max_keysize = AES_MAX_KEY_SIZE, 503 - .ivsize = AES_BLOCK_SIZE, 504 - .setkey = nitrox_aes_setkey, 505 - .encrypt = nitrox_aes_encrypt, 506 - .decrypt = nitrox_aes_decrypt, 507 - .init = nitrox_skcipher_init, 508 - .exit = nitrox_skcipher_exit, 509 - }, { 510 - .base = { 511 - .cra_name = "cbc(des3_ede)", 512 - .cra_driver_name = "n5_cbc(des3_ede)", 513 - .cra_priority = PRIO, 514 - .cra_flags = CRYPTO_ALG_ASYNC, 515 - .cra_blocksize = DES3_EDE_BLOCK_SIZE, 516 - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 517 - .cra_alignmask = 0, 518 - .cra_module = THIS_MODULE, 519 - }, 520 - .min_keysize = DES3_EDE_KEY_SIZE, 521 - .max_keysize = DES3_EDE_KEY_SIZE, 522 - .ivsize = DES3_EDE_BLOCK_SIZE, 523 - .setkey = nitrox_3des_setkey, 524 - .encrypt = nitrox_3des_encrypt, 525 - .decrypt = nitrox_3des_decrypt, 526 - 
.init = nitrox_skcipher_init, 527 - .exit = nitrox_skcipher_exit, 528 - }, { 529 - .base = { 530 - .cra_name = "ecb(des3_ede)", 531 - .cra_driver_name = "n5_ecb(des3_ede)", 532 - .cra_priority = PRIO, 533 - .cra_flags = CRYPTO_ALG_ASYNC, 534 - .cra_blocksize = DES3_EDE_BLOCK_SIZE, 535 - .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 536 - .cra_alignmask = 0, 537 - .cra_module = THIS_MODULE, 538 - }, 539 - .min_keysize = DES3_EDE_KEY_SIZE, 540 - .max_keysize = DES3_EDE_KEY_SIZE, 541 - .ivsize = DES3_EDE_BLOCK_SIZE, 542 - .setkey = nitrox_3des_setkey, 543 - .encrypt = nitrox_3des_encrypt, 544 - .decrypt = nitrox_3des_decrypt, 545 - .init = nitrox_skcipher_init, 546 - .exit = nitrox_skcipher_exit, 547 - } 548 - 549 - }; 550 2 551 3 int nitrox_crypto_register(void) 552 4 { 553 - return crypto_register_skciphers(nitrox_skciphers, 554 - ARRAY_SIZE(nitrox_skciphers)); 5 + int err; 6 + 7 + err = nitrox_register_skciphers(); 8 + if (err) 9 + return err; 10 + 11 + err = nitrox_register_aeads(); 12 + if (err) { 13 + nitrox_unregister_skciphers(); 14 + return err; 15 + } 16 + 17 + return 0; 555 18 } 556 19 557 20 void nitrox_crypto_unregister(void) 558 21 { 559 - crypto_unregister_skciphers(nitrox_skciphers, 560 - ARRAY_SIZE(nitrox_skciphers)); 22 + nitrox_unregister_aeads(); 23 + nitrox_unregister_skciphers(); 561 24 }
+5 -1
drivers/crypto/cavium/nitrox/nitrox_common.h
··· 7 7 8 8 int nitrox_crypto_register(void); 9 9 void nitrox_crypto_unregister(void); 10 + int nitrox_register_aeads(void); 11 + void nitrox_unregister_aeads(void); 12 + int nitrox_register_skciphers(void); 13 + void nitrox_unregister_skciphers(void); 10 14 void *crypto_alloc_context(struct nitrox_device *ndev); 11 15 void crypto_free_context(void *ctx); 12 16 struct nitrox_device *nitrox_get_first_device(void); ··· 23 19 int nitrox_process_se_request(struct nitrox_device *ndev, 24 20 struct se_crypto_request *req, 25 21 completion_t cb, 26 - struct skcipher_request *skreq); 22 + void *cb_arg); 27 23 void backlog_qflush_work(struct work_struct *work); 28 24 29 25
+201 -38
drivers/crypto/cavium/nitrox/nitrox_req.h
··· 8 8 #include "nitrox_dev.h" 9 9 10 10 #define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL 11 + #define PRIO 4001 11 12 12 13 /** 13 14 * struct gphdr - General purpose Header ··· 107 106 CIPHER_INVALID 108 107 }; 109 108 109 + enum flexi_auth { 110 + AUTH_NULL = 0, 111 + AUTH_MD5, 112 + AUTH_SHA1, 113 + AUTH_SHA2_SHA224, 114 + AUTH_SHA2_SHA256, 115 + AUTH_SHA2_SHA384, 116 + AUTH_SHA2_SHA512, 117 + AUTH_GMAC, 118 + AUTH_INVALID 119 + }; 120 + 110 121 /** 111 122 * struct crypto_keys - Crypto keys 112 123 * @key: Encryption key or KEY1 for AES-XTS ··· 145 132 u8 opad[64]; 146 133 }; 147 134 135 + union fc_ctx_flags { 136 + __be64 f; 137 + struct { 138 + #if defined(__BIG_ENDIAN_BITFIELD) 139 + u64 cipher_type : 4; 140 + u64 reserved_59 : 1; 141 + u64 aes_keylen : 2; 142 + u64 iv_source : 1; 143 + u64 hash_type : 4; 144 + u64 reserved_49_51 : 3; 145 + u64 auth_input_type: 1; 146 + u64 mac_len : 8; 147 + u64 reserved_0_39 : 40; 148 + #else 149 + u64 reserved_0_39 : 40; 150 + u64 mac_len : 8; 151 + u64 auth_input_type: 1; 152 + u64 reserved_49_51 : 3; 153 + u64 hash_type : 4; 154 + u64 iv_source : 1; 155 + u64 aes_keylen : 2; 156 + u64 reserved_59 : 1; 157 + u64 cipher_type : 4; 158 + #endif 159 + } w0; 160 + }; 148 161 /** 149 162 * struct flexi_crypto_context - Crypto context 150 163 * @cipher_type: Encryption cipher type ··· 185 146 * @auth: Authentication keys 186 147 */ 187 148 struct flexi_crypto_context { 188 - union { 189 - __be64 flags; 190 - struct { 191 - #if defined(__BIG_ENDIAN_BITFIELD) 192 - u64 cipher_type : 4; 193 - u64 reserved_59 : 1; 194 - u64 aes_keylen : 2; 195 - u64 iv_source : 1; 196 - u64 hash_type : 4; 197 - u64 reserved_49_51 : 3; 198 - u64 auth_input_type: 1; 199 - u64 mac_len : 8; 200 - u64 reserved_0_39 : 40; 201 - #else 202 - u64 reserved_0_39 : 40; 203 - u64 mac_len : 8; 204 - u64 auth_input_type: 1; 205 - u64 reserved_49_51 : 3; 206 - u64 hash_type : 4; 207 - u64 iv_source : 1; 208 - u64 aes_keylen : 2; 209 - u64 reserved_59 : 1; 210 - u64 
cipher_type : 4; 211 - #endif 212 - } w0; 213 - }; 214 - 149 + union fc_ctx_flags flags; 215 150 struct crypto_keys crypto; 216 151 struct auth_keys auth; 217 152 }; ··· 207 194 208 195 struct nitrox_kcrypt_request { 209 196 struct se_crypto_request creq; 210 - struct nitrox_crypto_ctx *nctx; 211 - struct skcipher_request *skreq; 212 197 u8 *src; 213 198 u8 *dst; 214 199 }; ··· 411 400 u64 *completion; 412 401 }; 413 402 414 - typedef void (*completion_t)(struct skcipher_request *skreq, int err); 403 + typedef void (*completion_t)(void *arg, int err); 415 404 416 405 /** 417 406 * struct nitrox_softreq - Represents the NIROX Request. ··· 446 435 unsigned long tstamp; 447 436 448 437 completion_t callback; 449 - struct skcipher_request *skreq; 438 + void *cb_arg; 450 439 }; 440 + 441 + static inline int flexi_aes_keylen(int keylen) 442 + { 443 + int aes_keylen; 444 + 445 + switch (keylen) { 446 + case AES_KEYSIZE_128: 447 + aes_keylen = 1; 448 + break; 449 + case AES_KEYSIZE_192: 450 + aes_keylen = 2; 451 + break; 452 + case AES_KEYSIZE_256: 453 + aes_keylen = 3; 454 + break; 455 + default: 456 + aes_keylen = -EINVAL; 457 + break; 458 + } 459 + return aes_keylen; 460 + } 451 461 452 462 static inline void *alloc_req_buf(int nents, int extralen, gfp_t gfp) 453 463 { ··· 480 448 return kzalloc(size, gfp); 481 449 } 482 450 451 + /** 452 + * create_single_sg - Point SG entry to the data 453 + * @sg: Destination SG list 454 + * @buf: Data 455 + * @buflen: Data length 456 + * 457 + * Returns next free entry in the destination SG list 458 + **/ 483 459 static inline struct scatterlist *create_single_sg(struct scatterlist *sg, 484 460 void *buf, int buflen) 485 461 { ··· 496 456 return sg; 497 457 } 498 458 459 + /** 460 + * create_multi_sg - Create multiple sg entries with buflen data length from 461 + * source sglist 462 + * @to_sg: Destination SG list 463 + * @from_sg: Source SG list 464 + * @buflen: Data length 465 + * 466 + * Returns next free entry in the destination 
SG list 467 + **/ 499 468 static inline struct scatterlist *create_multi_sg(struct scatterlist *to_sg, 500 - struct scatterlist *from_sg) 469 + struct scatterlist *from_sg, 470 + int buflen) 501 471 { 502 - struct scatterlist *sg; 503 - int i; 472 + struct scatterlist *sg = to_sg; 473 + unsigned int sglen; 504 474 505 - for_each_sg(from_sg, sg, sg_nents(from_sg), i) { 506 - sg_set_buf(to_sg, sg_virt(sg), sg->length); 507 - to_sg++; 475 + for (; buflen; buflen -= sglen) { 476 + sglen = from_sg->length; 477 + if (sglen > buflen) 478 + sglen = buflen; 479 + 480 + sg_set_buf(sg, sg_virt(from_sg), sglen); 481 + from_sg = sg_next(from_sg); 482 + sg++; 508 483 } 509 484 510 - return to_sg; 485 + return sg; 511 486 } 512 487 513 488 static inline void set_orh_value(u64 *orh) ··· 533 478 static inline void set_comp_value(u64 *comp) 534 479 { 535 480 WRITE_ONCE(*comp, PENDING_SIG); 481 + } 482 + 483 + static inline int alloc_src_req_buf(struct nitrox_kcrypt_request *nkreq, 484 + int nents, int ivsize) 485 + { 486 + struct se_crypto_request *creq = &nkreq->creq; 487 + 488 + nkreq->src = alloc_req_buf(nents, ivsize, creq->gfp); 489 + if (!nkreq->src) 490 + return -ENOMEM; 491 + 492 + return 0; 493 + } 494 + 495 + static inline void nitrox_creq_copy_iv(char *dst, char *src, int size) 496 + { 497 + memcpy(dst, src, size); 498 + } 499 + 500 + static inline struct scatterlist *nitrox_creq_src_sg(char *iv, int ivsize) 501 + { 502 + return (struct scatterlist *)(iv + ivsize); 503 + } 504 + 505 + static inline void nitrox_creq_set_src_sg(struct nitrox_kcrypt_request *nkreq, 506 + int nents, int ivsize, 507 + struct scatterlist *src, int buflen) 508 + { 509 + char *iv = nkreq->src; 510 + struct scatterlist *sg; 511 + struct se_crypto_request *creq = &nkreq->creq; 512 + 513 + creq->src = nitrox_creq_src_sg(iv, ivsize); 514 + sg = creq->src; 515 + sg_init_table(sg, nents); 516 + 517 + /* Input format: 518 + * +----+----------------+ 519 + * | IV | SRC sg entries | 520 + * 
+----+----------------+ 521 + */ 522 + 523 + /* IV */ 524 + sg = create_single_sg(sg, iv, ivsize); 525 + /* SRC entries */ 526 + create_multi_sg(sg, src, buflen); 527 + } 528 + 529 + static inline int alloc_dst_req_buf(struct nitrox_kcrypt_request *nkreq, 530 + int nents) 531 + { 532 + int extralen = ORH_HLEN + COMP_HLEN; 533 + struct se_crypto_request *creq = &nkreq->creq; 534 + 535 + nkreq->dst = alloc_req_buf(nents, extralen, creq->gfp); 536 + if (!nkreq->dst) 537 + return -ENOMEM; 538 + 539 + return 0; 540 + } 541 + 542 + static inline void nitrox_creq_set_orh(struct nitrox_kcrypt_request *nkreq) 543 + { 544 + struct se_crypto_request *creq = &nkreq->creq; 545 + 546 + creq->orh = (u64 *)(nkreq->dst); 547 + set_orh_value(creq->orh); 548 + } 549 + 550 + static inline void nitrox_creq_set_comp(struct nitrox_kcrypt_request *nkreq) 551 + { 552 + struct se_crypto_request *creq = &nkreq->creq; 553 + 554 + creq->comp = (u64 *)(nkreq->dst + ORH_HLEN); 555 + set_comp_value(creq->comp); 556 + } 557 + 558 + static inline struct scatterlist *nitrox_creq_dst_sg(char *dst) 559 + { 560 + return (struct scatterlist *)(dst + ORH_HLEN + COMP_HLEN); 561 + } 562 + 563 + static inline void nitrox_creq_set_dst_sg(struct nitrox_kcrypt_request *nkreq, 564 + int nents, int ivsize, 565 + struct scatterlist *dst, int buflen) 566 + { 567 + struct se_crypto_request *creq = &nkreq->creq; 568 + struct scatterlist *sg; 569 + char *iv = nkreq->src; 570 + 571 + creq->dst = nitrox_creq_dst_sg(nkreq->dst); 572 + sg = creq->dst; 573 + sg_init_table(sg, nents); 574 + 575 + /* Output format: 576 + * +-----+----+----------------+-----------------+ 577 + * | ORH | IV | DST sg entries | COMPLETION Bytes| 578 + * +-----+----+----------------+-----------------+ 579 + */ 580 + 581 + /* ORH */ 582 + sg = create_single_sg(sg, creq->orh, ORH_HLEN); 583 + /* IV */ 584 + sg = create_single_sg(sg, iv, ivsize); 585 + /* DST entries */ 586 + sg = create_multi_sg(sg, dst, buflen); 587 + /* COMPLETION Bytes */ 588 + 
create_single_sg(sg, creq->comp, COMP_HLEN); 536 589 } 537 590 538 591 #endif /* __NITROX_REQ_H */
+21 -17
drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
··· 269 269 smp_mb__after_atomic(); 270 270 return true; 271 271 } 272 + /* sync with other cpus */ 273 + smp_mb__after_atomic(); 272 274 return false; 273 275 } 274 276 ··· 326 324 spin_lock_bh(&cmdq->backlog_qlock); 327 325 328 326 list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { 329 - struct skcipher_request *skreq; 330 - 331 327 /* submit until space available */ 332 328 if (unlikely(cmdq_full(cmdq, ndev->qlen))) { 333 329 ret = -ENOSPC; ··· 337 337 /* sync with other cpus */ 338 338 smp_mb__after_atomic(); 339 339 340 - skreq = sr->skreq; 341 340 /* post the command */ 342 341 post_se_instr(sr, cmdq); 343 - 344 - /* backlog requests are posted, wakeup with -EINPROGRESS */ 345 - skcipher_request_complete(skreq, -EINPROGRESS); 346 342 } 347 343 spin_unlock_bh(&cmdq->backlog_qlock); 348 344 ··· 361 365 } 362 366 /* add to backlog list */ 363 367 backlog_list_add(sr, cmdq); 364 - return -EBUSY; 368 + return -EINPROGRESS; 365 369 } 366 370 post_se_instr(sr, cmdq); 367 371 ··· 378 382 int nitrox_process_se_request(struct nitrox_device *ndev, 379 383 struct se_crypto_request *req, 380 384 completion_t callback, 381 - struct skcipher_request *skreq) 385 + void *cb_arg) 382 386 { 383 387 struct nitrox_softreq *sr; 384 388 dma_addr_t ctx_handle = 0; ··· 395 399 sr->flags = req->flags; 396 400 sr->gfp = req->gfp; 397 401 sr->callback = callback; 398 - sr->skreq = skreq; 402 + sr->cb_arg = cb_arg; 399 403 400 404 atomic_set(&sr->status, REQ_NOT_POSTED); 401 405 ··· 509 513 510 514 static bool sr_completed(struct nitrox_softreq *sr) 511 515 { 512 - return (READ_ONCE(*sr->resp.orh) != READ_ONCE(*sr->resp.completion)); 516 + u64 orh = READ_ONCE(*sr->resp.orh); 517 + unsigned long timeout = jiffies + msecs_to_jiffies(1); 518 + 519 + if ((orh != PENDING_SIG) && (orh & 0xff)) 520 + return true; 521 + 522 + while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) { 523 + if (time_after(jiffies, timeout)) { 524 + pr_err("comp not done\n"); 525 + return false; 526 + 
} 527 + } 528 + 529 + return true; 513 530 } 514 531 515 532 /** ··· 536 527 { 537 528 struct nitrox_device *ndev = cmdq->ndev; 538 529 struct nitrox_softreq *sr; 539 - struct skcipher_request *skreq; 540 - completion_t callback; 541 530 int req_completed = 0, err = 0, budget; 542 531 543 532 /* check all pending requests */ ··· 565 558 /* remove from response list */ 566 559 response_list_del(sr, cmdq); 567 560 568 - callback = sr->callback; 569 - skreq = sr->skreq; 570 - 571 561 /* ORH error code */ 572 562 err = READ_ONCE(*sr->resp.orh) & 0xff; 573 563 softreq_destroy(sr); 574 564 575 - if (callback) 576 - callback(skreq, err); 565 + if (sr->callback) 566 + sr->callback(sr->cb_arg, err); 577 567 578 568 req_completed++; 579 569 }
+498
drivers/crypto/cavium/nitrox/nitrox_skcipher.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/crypto.h> 3 + #include <linux/kernel.h> 4 + #include <linux/module.h> 5 + #include <linux/printk.h> 6 + 7 + #include <crypto/aes.h> 8 + #include <crypto/skcipher.h> 9 + #include <crypto/ctr.h> 10 + #include <crypto/des.h> 11 + #include <crypto/xts.h> 12 + 13 + #include "nitrox_dev.h" 14 + #include "nitrox_common.h" 15 + #include "nitrox_req.h" 16 + 17 + struct nitrox_cipher { 18 + const char *name; 19 + enum flexi_cipher value; 20 + }; 21 + 22 + /** 23 + * supported cipher list 24 + */ 25 + static const struct nitrox_cipher flexi_cipher_table[] = { 26 + { "null", CIPHER_NULL }, 27 + { "cbc(des3_ede)", CIPHER_3DES_CBC }, 28 + { "ecb(des3_ede)", CIPHER_3DES_ECB }, 29 + { "cbc(aes)", CIPHER_AES_CBC }, 30 + { "ecb(aes)", CIPHER_AES_ECB }, 31 + { "cfb(aes)", CIPHER_AES_CFB }, 32 + { "rfc3686(ctr(aes))", CIPHER_AES_CTR }, 33 + { "xts(aes)", CIPHER_AES_XTS }, 34 + { "cts(cbc(aes))", CIPHER_AES_CBC_CTS }, 35 + { NULL, CIPHER_INVALID } 36 + }; 37 + 38 + static enum flexi_cipher flexi_cipher_type(const char *name) 39 + { 40 + const struct nitrox_cipher *cipher = flexi_cipher_table; 41 + 42 + while (cipher->name) { 43 + if (!strcmp(cipher->name, name)) 44 + break; 45 + cipher++; 46 + } 47 + return cipher->value; 48 + } 49 + 50 + static int nitrox_skcipher_init(struct crypto_skcipher *tfm) 51 + { 52 + struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); 53 + struct crypto_ctx_hdr *chdr; 54 + 55 + /* get the first device */ 56 + nctx->ndev = nitrox_get_first_device(); 57 + if (!nctx->ndev) 58 + return -ENODEV; 59 + 60 + /* allocate nitrox crypto context */ 61 + chdr = crypto_alloc_context(nctx->ndev); 62 + if (!chdr) { 63 + nitrox_put_device(nctx->ndev); 64 + return -ENOMEM; 65 + } 66 + nctx->chdr = chdr; 67 + nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr + 68 + sizeof(struct ctx_hdr)); 69 + crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) + 70 + sizeof(struct nitrox_kcrypt_request)); 71 + 
return 0; 72 + } 73 + 74 + static void nitrox_skcipher_exit(struct crypto_skcipher *tfm) 75 + { 76 + struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); 77 + 78 + /* free the nitrox crypto context */ 79 + if (nctx->u.ctx_handle) { 80 + struct flexi_crypto_context *fctx = nctx->u.fctx; 81 + 82 + memzero_explicit(&fctx->crypto, sizeof(struct crypto_keys)); 83 + memzero_explicit(&fctx->auth, sizeof(struct auth_keys)); 84 + crypto_free_context((void *)nctx->chdr); 85 + } 86 + nitrox_put_device(nctx->ndev); 87 + 88 + nctx->u.ctx_handle = 0; 89 + nctx->ndev = NULL; 90 + } 91 + 92 + static inline int nitrox_skcipher_setkey(struct crypto_skcipher *cipher, 93 + int aes_keylen, const u8 *key, 94 + unsigned int keylen) 95 + { 96 + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); 97 + struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); 98 + struct flexi_crypto_context *fctx; 99 + union fc_ctx_flags *flags; 100 + enum flexi_cipher cipher_type; 101 + const char *name; 102 + 103 + name = crypto_tfm_alg_name(tfm); 104 + cipher_type = flexi_cipher_type(name); 105 + if (unlikely(cipher_type == CIPHER_INVALID)) { 106 + pr_err("unsupported cipher: %s\n", name); 107 + return -EINVAL; 108 + } 109 + 110 + /* fill crypto context */ 111 + fctx = nctx->u.fctx; 112 + flags = &fctx->flags; 113 + flags->f = 0; 114 + flags->w0.cipher_type = cipher_type; 115 + flags->w0.aes_keylen = aes_keylen; 116 + flags->w0.iv_source = IV_FROM_DPTR; 117 + flags->f = cpu_to_be64(*(u64 *)&flags->w0); 118 + /* copy the key to context */ 119 + memcpy(fctx->crypto.u.key, key, keylen); 120 + 121 + return 0; 122 + } 123 + 124 + static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, 125 + unsigned int keylen) 126 + { 127 + int aes_keylen; 128 + 129 + aes_keylen = flexi_aes_keylen(keylen); 130 + if (aes_keylen < 0) { 131 + crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 132 + return -EINVAL; 133 + } 134 + return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); 
135 + } 136 + 137 + static int alloc_src_sglist(struct skcipher_request *skreq, int ivsize) 138 + { 139 + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); 140 + int nents = sg_nents(skreq->src) + 1; 141 + int ret; 142 + 143 + /* Allocate buffer to hold IV and input scatterlist array */ 144 + ret = alloc_src_req_buf(nkreq, nents, ivsize); 145 + if (ret) 146 + return ret; 147 + 148 + nitrox_creq_copy_iv(nkreq->src, skreq->iv, ivsize); 149 + nitrox_creq_set_src_sg(nkreq, nents, ivsize, skreq->src, 150 + skreq->cryptlen); 151 + 152 + return 0; 153 + } 154 + 155 + static int alloc_dst_sglist(struct skcipher_request *skreq, int ivsize) 156 + { 157 + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); 158 + int nents = sg_nents(skreq->dst) + 3; 159 + int ret; 160 + 161 + /* Allocate buffer to hold ORH, COMPLETION and output scatterlist 162 + * array 163 + */ 164 + ret = alloc_dst_req_buf(nkreq, nents); 165 + if (ret) 166 + return ret; 167 + 168 + nitrox_creq_set_orh(nkreq); 169 + nitrox_creq_set_comp(nkreq); 170 + nitrox_creq_set_dst_sg(nkreq, nents, ivsize, skreq->dst, 171 + skreq->cryptlen); 172 + 173 + return 0; 174 + } 175 + 176 + static void free_src_sglist(struct skcipher_request *skreq) 177 + { 178 + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); 179 + 180 + kfree(nkreq->src); 181 + } 182 + 183 + static void free_dst_sglist(struct skcipher_request *skreq) 184 + { 185 + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); 186 + 187 + kfree(nkreq->dst); 188 + } 189 + 190 + static void nitrox_skcipher_callback(void *arg, int err) 191 + { 192 + struct skcipher_request *skreq = arg; 193 + 194 + free_src_sglist(skreq); 195 + free_dst_sglist(skreq); 196 + if (err) { 197 + pr_err_ratelimited("request failed status 0x%0x\n", err); 198 + err = -EINVAL; 199 + } 200 + 201 + skcipher_request_complete(skreq, err); 202 + } 203 + 204 + static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc) 205 + 
{ 206 + struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq); 207 + struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher); 208 + struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); 209 + int ivsize = crypto_skcipher_ivsize(cipher); 210 + struct se_crypto_request *creq; 211 + int ret; 212 + 213 + creq = &nkreq->creq; 214 + creq->flags = skreq->base.flags; 215 + creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 216 + GFP_KERNEL : GFP_ATOMIC; 217 + 218 + /* fill the request */ 219 + creq->ctrl.value = 0; 220 + creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC; 221 + creq->ctrl.s.arg = (enc ? ENCRYPT : DECRYPT); 222 + /* param0: length of the data to be encrypted */ 223 + creq->gph.param0 = cpu_to_be16(skreq->cryptlen); 224 + creq->gph.param1 = 0; 225 + /* param2: encryption data offset */ 226 + creq->gph.param2 = cpu_to_be16(ivsize); 227 + creq->gph.param3 = 0; 228 + 229 + creq->ctx_handle = nctx->u.ctx_handle; 230 + creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context); 231 + 232 + ret = alloc_src_sglist(skreq, ivsize); 233 + if (ret) 234 + return ret; 235 + 236 + ret = alloc_dst_sglist(skreq, ivsize); 237 + if (ret) { 238 + free_src_sglist(skreq); 239 + return ret; 240 + } 241 + 242 + /* send the crypto request */ 243 + return nitrox_process_se_request(nctx->ndev, creq, 244 + nitrox_skcipher_callback, skreq); 245 + } 246 + 247 + static int nitrox_aes_encrypt(struct skcipher_request *skreq) 248 + { 249 + return nitrox_skcipher_crypt(skreq, true); 250 + } 251 + 252 + static int nitrox_aes_decrypt(struct skcipher_request *skreq) 253 + { 254 + return nitrox_skcipher_crypt(skreq, false); 255 + } 256 + 257 + static int nitrox_3des_setkey(struct crypto_skcipher *cipher, 258 + const u8 *key, unsigned int keylen) 259 + { 260 + if (keylen != DES3_EDE_KEY_SIZE) { 261 + crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 262 + return -EINVAL; 263 + } 264 + 265 + return nitrox_skcipher_setkey(cipher, 0, key, keylen); 266 + } 267 + 
268 + static int nitrox_3des_encrypt(struct skcipher_request *skreq) 269 + { 270 + return nitrox_skcipher_crypt(skreq, true); 271 + } 272 + 273 + static int nitrox_3des_decrypt(struct skcipher_request *skreq) 274 + { 275 + return nitrox_skcipher_crypt(skreq, false); 276 + } 277 + 278 + static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher, 279 + const u8 *key, unsigned int keylen) 280 + { 281 + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); 282 + struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); 283 + struct flexi_crypto_context *fctx; 284 + int aes_keylen, ret; 285 + 286 + ret = xts_check_key(tfm, key, keylen); 287 + if (ret) 288 + return ret; 289 + 290 + keylen /= 2; 291 + 292 + aes_keylen = flexi_aes_keylen(keylen); 293 + if (aes_keylen < 0) { 294 + crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 295 + return -EINVAL; 296 + } 297 + 298 + fctx = nctx->u.fctx; 299 + /* copy KEY2 */ 300 + memcpy(fctx->auth.u.key2, (key + keylen), keylen); 301 + 302 + return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); 303 + } 304 + 305 + static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher, 306 + const u8 *key, unsigned int keylen) 307 + { 308 + struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); 309 + struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); 310 + struct flexi_crypto_context *fctx; 311 + int aes_keylen; 312 + 313 + if (keylen < CTR_RFC3686_NONCE_SIZE) 314 + return -EINVAL; 315 + 316 + fctx = nctx->u.fctx; 317 + 318 + memcpy(fctx->crypto.iv, key + (keylen - CTR_RFC3686_NONCE_SIZE), 319 + CTR_RFC3686_NONCE_SIZE); 320 + 321 + keylen -= CTR_RFC3686_NONCE_SIZE; 322 + 323 + aes_keylen = flexi_aes_keylen(keylen); 324 + if (aes_keylen < 0) { 325 + crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 326 + return -EINVAL; 327 + } 328 + return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); 329 + } 330 + 331 + static struct skcipher_alg nitrox_skciphers[] = { { 332 + .base = { 333 + 
.cra_name = "cbc(aes)", 334 + .cra_driver_name = "n5_cbc(aes)", 335 + .cra_priority = PRIO, 336 + .cra_flags = CRYPTO_ALG_ASYNC, 337 + .cra_blocksize = AES_BLOCK_SIZE, 338 + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 339 + .cra_alignmask = 0, 340 + .cra_module = THIS_MODULE, 341 + }, 342 + .min_keysize = AES_MIN_KEY_SIZE, 343 + .max_keysize = AES_MAX_KEY_SIZE, 344 + .ivsize = AES_BLOCK_SIZE, 345 + .setkey = nitrox_aes_setkey, 346 + .encrypt = nitrox_aes_encrypt, 347 + .decrypt = nitrox_aes_decrypt, 348 + .init = nitrox_skcipher_init, 349 + .exit = nitrox_skcipher_exit, 350 + }, { 351 + .base = { 352 + .cra_name = "ecb(aes)", 353 + .cra_driver_name = "n5_ecb(aes)", 354 + .cra_priority = PRIO, 355 + .cra_flags = CRYPTO_ALG_ASYNC, 356 + .cra_blocksize = AES_BLOCK_SIZE, 357 + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 358 + .cra_alignmask = 0, 359 + .cra_module = THIS_MODULE, 360 + }, 361 + .min_keysize = AES_MIN_KEY_SIZE, 362 + .max_keysize = AES_MAX_KEY_SIZE, 363 + .ivsize = AES_BLOCK_SIZE, 364 + .setkey = nitrox_aes_setkey, 365 + .encrypt = nitrox_aes_encrypt, 366 + .decrypt = nitrox_aes_decrypt, 367 + .init = nitrox_skcipher_init, 368 + .exit = nitrox_skcipher_exit, 369 + }, { 370 + .base = { 371 + .cra_name = "cfb(aes)", 372 + .cra_driver_name = "n5_cfb(aes)", 373 + .cra_priority = PRIO, 374 + .cra_flags = CRYPTO_ALG_ASYNC, 375 + .cra_blocksize = AES_BLOCK_SIZE, 376 + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 377 + .cra_alignmask = 0, 378 + .cra_module = THIS_MODULE, 379 + }, 380 + .min_keysize = AES_MIN_KEY_SIZE, 381 + .max_keysize = AES_MAX_KEY_SIZE, 382 + .ivsize = AES_BLOCK_SIZE, 383 + .setkey = nitrox_aes_setkey, 384 + .encrypt = nitrox_aes_encrypt, 385 + .decrypt = nitrox_aes_decrypt, 386 + .init = nitrox_skcipher_init, 387 + .exit = nitrox_skcipher_exit, 388 + }, { 389 + .base = { 390 + .cra_name = "xts(aes)", 391 + .cra_driver_name = "n5_xts(aes)", 392 + .cra_priority = PRIO, 393 + .cra_flags = CRYPTO_ALG_ASYNC, 394 + .cra_blocksize = 
AES_BLOCK_SIZE, 395 + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 396 + .cra_alignmask = 0, 397 + .cra_module = THIS_MODULE, 398 + }, 399 + .min_keysize = 2 * AES_MIN_KEY_SIZE, 400 + .max_keysize = 2 * AES_MAX_KEY_SIZE, 401 + .ivsize = AES_BLOCK_SIZE, 402 + .setkey = nitrox_aes_xts_setkey, 403 + .encrypt = nitrox_aes_encrypt, 404 + .decrypt = nitrox_aes_decrypt, 405 + .init = nitrox_skcipher_init, 406 + .exit = nitrox_skcipher_exit, 407 + }, { 408 + .base = { 409 + .cra_name = "rfc3686(ctr(aes))", 410 + .cra_driver_name = "n5_rfc3686(ctr(aes))", 411 + .cra_priority = PRIO, 412 + .cra_flags = CRYPTO_ALG_ASYNC, 413 + .cra_blocksize = 1, 414 + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 415 + .cra_alignmask = 0, 416 + .cra_module = THIS_MODULE, 417 + }, 418 + .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, 419 + .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, 420 + .ivsize = CTR_RFC3686_IV_SIZE, 421 + .init = nitrox_skcipher_init, 422 + .exit = nitrox_skcipher_exit, 423 + .setkey = nitrox_aes_ctr_rfc3686_setkey, 424 + .encrypt = nitrox_aes_encrypt, 425 + .decrypt = nitrox_aes_decrypt, 426 + }, { 427 + .base = { 428 + .cra_name = "cts(cbc(aes))", 429 + .cra_driver_name = "n5_cts(cbc(aes))", 430 + .cra_priority = PRIO, 431 + .cra_flags = CRYPTO_ALG_ASYNC, 432 + .cra_blocksize = AES_BLOCK_SIZE, 433 + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 434 + .cra_alignmask = 0, 435 + .cra_type = &crypto_ablkcipher_type, 436 + .cra_module = THIS_MODULE, 437 + }, 438 + .min_keysize = AES_MIN_KEY_SIZE, 439 + .max_keysize = AES_MAX_KEY_SIZE, 440 + .ivsize = AES_BLOCK_SIZE, 441 + .setkey = nitrox_aes_setkey, 442 + .encrypt = nitrox_aes_encrypt, 443 + .decrypt = nitrox_aes_decrypt, 444 + .init = nitrox_skcipher_init, 445 + .exit = nitrox_skcipher_exit, 446 + }, { 447 + .base = { 448 + .cra_name = "cbc(des3_ede)", 449 + .cra_driver_name = "n5_cbc(des3_ede)", 450 + .cra_priority = PRIO, 451 + .cra_flags = CRYPTO_ALG_ASYNC, 452 + .cra_blocksize = 
DES3_EDE_BLOCK_SIZE, 453 + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 454 + .cra_alignmask = 0, 455 + .cra_module = THIS_MODULE, 456 + }, 457 + .min_keysize = DES3_EDE_KEY_SIZE, 458 + .max_keysize = DES3_EDE_KEY_SIZE, 459 + .ivsize = DES3_EDE_BLOCK_SIZE, 460 + .setkey = nitrox_3des_setkey, 461 + .encrypt = nitrox_3des_encrypt, 462 + .decrypt = nitrox_3des_decrypt, 463 + .init = nitrox_skcipher_init, 464 + .exit = nitrox_skcipher_exit, 465 + }, { 466 + .base = { 467 + .cra_name = "ecb(des3_ede)", 468 + .cra_driver_name = "n5_ecb(des3_ede)", 469 + .cra_priority = PRIO, 470 + .cra_flags = CRYPTO_ALG_ASYNC, 471 + .cra_blocksize = DES3_EDE_BLOCK_SIZE, 472 + .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), 473 + .cra_alignmask = 0, 474 + .cra_module = THIS_MODULE, 475 + }, 476 + .min_keysize = DES3_EDE_KEY_SIZE, 477 + .max_keysize = DES3_EDE_KEY_SIZE, 478 + .ivsize = DES3_EDE_BLOCK_SIZE, 479 + .setkey = nitrox_3des_setkey, 480 + .encrypt = nitrox_3des_encrypt, 481 + .decrypt = nitrox_3des_decrypt, 482 + .init = nitrox_skcipher_init, 483 + .exit = nitrox_skcipher_exit, 484 + } 485 + 486 + }; 487 + 488 + int nitrox_register_skciphers(void) 489 + { 490 + return crypto_register_skciphers(nitrox_skciphers, 491 + ARRAY_SIZE(nitrox_skciphers)); 492 + } 493 + 494 + void nitrox_unregister_skciphers(void) 495 + { 496 + crypto_unregister_skciphers(nitrox_skciphers, 497 + ARRAY_SIZE(nitrox_skciphers)); 498 + }