Linux kernel mirror — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/crypto/ixp4xx_crypto.c at v5.9 (1486 lines, 37 kB)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY	0x01
#define NPE_OP_CCM_ENABLE	0x04
#define NPE_OP_CRYPT_ENABLE	0x08
#define NPE_OP_HASH_ENABLE	0x10
#define NPE_OP_NOT_IN_PLACE	0x20
#define NPE_OP_HMAC_DISABLE	0x40
#define NPE_OP_CRYPT_ENCRYPT	0x80

#define NPE_OP_CCM_GEN_MIC	0xcc
#define NPE_OP_HASH_GEN_ICV	0x50
#define NPE_OP_ENC_GEN_KEY	0xc9

#define MOD_ECB			0x0000
#define MOD_CTR			0x1000
#define MOD_CBC_ENC		0x2000
#define MOD_CBC_DEC		0x3000
#define MOD_CCM_ENC		0x4000
#define MOD_CCM_DEC		0x5000

#define KEYLEN_128		4
#define KEYLEN_192		6
#define KEYLEN_256		8

#define CIPH_DECR		0x0000
#define CIPH_ENCR		0x0400

#define MOD_DES			0x0000
#define MOD_TDEA2		0x0100
#define MOD_3DES		0x0200
#define MOD_AES			0x0800
#define MOD_AES128		(0x0800 | KEYLEN_128)
#define MOD_AES192		(0x0900 | KEYLEN_192)
#define MOD_AES256		(0x0a00 | KEYLEN_256)

#define MAX_IVLEN		16
#define NPE_ID			2  /* NPE C */
#define NPE_QLEN		16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL		64

#define SEND_QID		29
#define RECV_QID		30

#define CTL_FLAG_UNUSED		0x0000
#define CTL_FLAG_USED		0x1000
#define CTL_FLAG_PERFORM_ABLK	0x0001
#define CTL_FLAG_GEN_ICV	0x0002
#define CTL_FLAG_GEN_REVAES	0x0004
#define CTL_FLAG_PERFORM_AEAD	0x0008
#define CTL_FLAG_MASK		0x000f

#define HMAC_PAD_BLOCKLEN	SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE		16

struct buffer_desc {
	u32 phys_next;
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	u16 pkt_len;
	u16 buf_len;
#endif
	dma_addr_t phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
	enum dma_data_direction dir;
};

struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;		/* NPE_OP_* operation mode */
	u8 init_len;
	u16 reserved;
#else
	u16 reserved;
	u8 init_len;
	u8 mode;		/* NPE_OP_* operation mode */
#endif
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	dma_addr_t icv_rev_aes;	/* icv or rev aes */
	dma_addr_t src_buf;
	dma_addr_t dst_buf;
#ifdef __ARMEB__
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
#else
	u16 auth_len;		/* Authentication data length */
	u16 auth_offs;		/* Authentication start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 crypt_offs;		/* Cryption start offset */
#endif
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes*/
	unsigned ctl_flags;
	union {
		struct skcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
};

struct aead_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;
	struct completion completion;
};

struct ixp_alg {
	struct skcipher_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

struct ixp_aead_alg {
	struct aead_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;
static struct dma_pool *buffer_pool = NULL;
static struct dma_pool *ctx_pool = NULL;

static struct crypt_ctl *crypt_virt = NULL;
static dma_addr_t crypt_phys;

static int support_aes = 1;

#define DRIVER_NAME "ixp4xx_crypto"

static struct platform_device *pdev;

static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg,crypto.base)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg,crypto.base)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
}

static int setup_crypt_desc(void)
{
	struct device *dev = &pdev->dev;
	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	crypt_virt = dma_alloc_coherent(dev,
					NPE_QLEN * sizeof(struct crypt_ctl),
					&crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	return 0;
}

static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt +i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt +i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}

static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
			   dma_addr_t phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->assoclen + req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
					 req->dst, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

static void one_packet(dma_addr_t phys)
{
	struct device *dev = &pdev->dev;
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		if (req_ctx->hmac_virt) {
			finish_scattered_hmac(crypt);
		}
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct skcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = skcipher_request_ctx(req);

		if (req_ctx->dst) {
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		}
		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
				crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
	int i;

	for(i=0; i<4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}

static int init_ixp_crypto(struct device *dev)
{
	int ret = -ENODEV;
	u32 msg[2] = { 0, 0 };

	if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
				IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret)
			goto npe_release;
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	} else {
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	}

	switch ((msg[1]>>16) & 0xff) {
	case 3:
		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
				npe_name(npe_c));
		support_aes = 0;
		break;
	case 4:
	case 5:
		support_aes = 1;
		break;
	default:
		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
				npe_name(npe_c));
		ret = -ENODEV;
		goto npe_release;
	}
	/* buffer_pool will also be used to sometimes store the hmac,
	 * so assure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
			sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool) {
		goto err;
	}
	ctx_pool = dma_pool_create("context", dev,
			NPE_CTX_LEN, 16, 0);
	if (!ctx_pool) {
		goto err;
	}
	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;

npe_error:
	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
	ret = -EIO;
err:
	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);
npe_release:
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(struct device *dev)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
			NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
			crypt_virt, crypt_phys);
	}
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx) {
		return -ENOMEM;
	}
	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret) {
		free_sa_dir(&ctx->encrypt);
	}
	return ret;
}

static int init_tfm_ablk(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx));
	return init_tfm(crypto_skcipher_tfm(tfm));
}

static int init_tfm_aead(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
	return init_tfm(crypto_aead_tfm(tfm));
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

static void exit_tfm_ablk(struct crypto_skcipher *tfm)
{
	exit_tfm(crypto_skcipher_tfm(tfm));
}

static void exit_tfm_aead(struct crypto_aead *tfm)
{
	exit_tfm(crypto_aead_tfm(tfm));
}

static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
		int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	dma_addr_t pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
		pad[i] ^= xpad;
	}

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = 0;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | ( authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
	*(u32*)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
				+ sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
			init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
			init_len, npe_ctx_addr, key, key_len);
}

static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		return -EAGAIN;
	}
	*(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
		const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128; break;
		case 24: keylen_cfg = MOD_AES192; break;
		case 32: keylen_cfg = MOD_AES256; break;
		default:
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else {
		err = crypto_des_verify_key(tfm, key);
		if (err)
			return err;
	}
	/* write cfg word to cryptinfo */
	*(u32*)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE -key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt) {
		return gen_rev_aes_key(tfm);
	}
	return 0;
}

static struct buffer_desc *chainup_buffers(struct device *dev,
		struct scatterlist *sg,	unsigned nbytes,
		struct buffer_desc *buf, gfp_t flags,
		enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = sg_next(sg)) {
		unsigned len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		dma_addr_t next_buf_phys;
		void *ptr;

		nbytes -= len;
		ptr = sg_virt(sg);
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			buf = NULL;
			break;
		}
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	buf->next = NULL;
	buf->phys_next = 0;
	return buf;
}

static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
		       unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int key_len)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       ablk_setkey(tfm, key, key_len);
}

static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
			CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}

static int ablk_perform(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned ivsize = crypto_skcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->cryptlen;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
	struct buffer_desc src_hook;
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;
		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		req_ctx->dst = NULL;
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
					flags, DMA_FROM_DEVICE))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
	} else {
		req_ctx->dst = NULL;
	}
	req_ctx->src = NULL;
	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
				flags, src_direction))
		goto free_buf_src;

	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst) {
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int ablk_encrypt(struct skcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct skcipher_request *req)
{
	return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->iv;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->iv = iv;
	ret = ablk_perform(req, 1);
	req->iv = info;
	return ret;
}

static int aead_perform(struct aead_request *req, int encrypt,
		int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	unsigned int lastlen;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen -authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	buf = chainup_buffers(dev, req->src, crypt->auth_len,
			      &src_hook, flags, src_direction);
	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto free_buf_src;

	lastlen = buf->buf_len;
	if (lastlen >= authsize)
		crypt->icv_rev_aes = buf->phys_addr +
				     buf->buf_len - authsize;

	req_ctx->dst = NULL;

	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		src_direction = DMA_TO_DEVICE;

		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
				      &dst_hook, flags, DMA_FROM_DEVICE);
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;

		if (!buf)
			goto free_buf_dst;

		if (encrypt) {
			lastlen = buf->buf_len;
			if (lastlen >= authsize)
				crypt->icv_rev_aes = buf->phys_addr +
						     buf->buf_len - authsize;
		}
	}

	if (unlikely(lastlen < authsize)) {
		/* The 12 hmac bytes are scattered,
		 * we need to copy them into a safe buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
				&crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_buf_dst;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
				req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_dst:
	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned digest_len = crypto_aead_maxauthsize(tfm);
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize,  ctx->authkey,
			ctx->authkey_len, digest_len);
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_maxauthsize(tfm) >> 2;

	if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	if (keys.enckeylen > sizeof(ctx->enckey))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
	if (err)
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}

static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto	= {
		.base.cra_name		= "cbc(des)",
		.base.cra_blocksize	= DES_BLOCK_SIZE,

		.min_keysize		= DES_KEY_SIZE,
		.max_keysize		= DES_KEY_SIZE,
		.ivsize			= DES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
	.crypto	= {
		.base.cra_name		= "ecb(des)",
		.base.cra_blocksize	= DES_BLOCK_SIZE,
		.min_keysize		= DES_KEY_SIZE,
		.max_keysize		= DES_KEY_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.base.cra_name		= "cbc(des3_ede)",
		.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,

		.min_keysize		= DES3_EDE_KEY_SIZE,
		.max_keysize		= DES3_EDE_KEY_SIZE,
		.ivsize			= DES3_EDE_BLOCK_SIZE,
		.setkey			= ablk_des3_setkey,
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base.cra_name		= "ecb(des3_ede)",
		.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,

		.min_keysize		= DES3_EDE_KEY_SIZE,
		.max_keysize		= DES3_EDE_KEY_SIZE,
		.setkey			= ablk_des3_setkey,
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.base.cra_name		= "cbc(aes)",
		.base.cra_blocksize	= AES_BLOCK_SIZE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.base.cra_name		= "ecb(aes)",
		.base.cra_blocksize	= AES_BLOCK_SIZE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto	= {
		.base.cra_name		= "ctr(aes)",
		.base.cra_blocksize	= 1,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.base.cra_name		= "rfc3686(ctr(aes))",
		.base.cra_blocksize	= 1,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
		.setkey			= ablk_rfc3686_setkey,
		.encrypt		= ablk_rfc3686_crypt,
		.decrypt		= ablk_rfc3686_crypt,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
} };

static struct ixp_aead_alg ixp4xx_aeads[] = {
{
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
		.setkey		= des3_aead_setkey,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
		.setkey		= des3_aead_setkey,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"

static const struct platform_device_info ixp_dev_info __initdata = {
	.name		= DRIVER_NAME,
	.id		= 0,
	.dma_mask	= DMA_BIT_MASK(32),
};

static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	pdev = platform_device_register_full(&ixp_dev_info);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto(&pdev->dev);
	if (err) {
		platform_device_unregister(pdev);
		return err;
	}
	for (i=0; i< num; i++) {
		struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			"%s"IXP_POSTFIX, cra->base.cra_name) >=
			CRYPTO_MAX_ALG_NAME)
		{
			continue;
		}
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
			continue;
		}

		/* block ciphers */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC |
				      CRYPTO_ALG_ALLOCATES_MEMORY;
		if (!cra->setkey)
			cra->setkey = ablk_setkey;
		if (!cra->encrypt)
			cra->encrypt = ablk_encrypt;
		if (!cra->decrypt)
			cra->decrypt = ablk_decrypt;
		cra->init = init_tfm_ablk;
		cra->exit = exit_tfm_ablk;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;
		if (crypto_register_skcipher(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->base.cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
			continue;

		/* authenc */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC |
				      CRYPTO_ALG_ALLOCATES_MEMORY;
		cra->setkey = cra->setkey ?: aead_setkey;
		cra->setauthsize = aead_setauthsize;
		cra->encrypt = aead_encrypt;
		cra->decrypt = aead_decrypt;
		cra->init = init_tfm_aead;
		cra->exit = exit_tfm_aead;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;

		if (crypto_register_aead(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->base.cra_driver_name);
		else
			ixp4xx_aeads[i].registered = 1;
	}
	return 0;
}

static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		if (ixp4xx_aeads[i].registered)
			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
	}

	for (i=0; i< num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto(&pdev->dev);
	platform_device_unregister(pdev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");
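
For context, the sketch below shows how a kernel-side caller might exercise one of the skcipher algorithms this driver registers, going through the generic kernel crypto API rather than calling the driver directly. This example is not part of the driver: the demo module, the all-zero key/IV, and the single-block buffer are illustrative assumptions; only the crypto API calls (crypto_alloc_skcipher, crypto_wait_req, etc.) are standard. On IXP4xx hardware with this module loaded, "cbc(aes)" can resolve to the "cbc(aes)-ixp4xx" implementation registered above; elsewhere a software fallback is used.

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Minimal demo (hypothetical, not from the driver): encrypt one AES
 * block in place via the generic async skcipher API. */
static int __init cbc_aes_demo_init(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 key[16] = { 0 };	/* illustrative all-zero key */
	u8 iv[16] = { 0 };	/* illustrative IV */
	u8 *buf = NULL;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);	/* one block, DMA-able */
	if (!req || !buf) {
		err = -ENOMEM;
		goto out_free;
	}

	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* ablk_perform() above returns -EINPROGRESS and completes from
	 * the driver's tasklet; crypto_wait_req() hides that asynchrony. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	pr_info("cbc_aes_demo: encrypt returned %d\n", err);

out_free:
	kfree(buf);
	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
module_init(cbc_aes_demo_init);
MODULE_LICENSE("GPL");

Note how the caller never names the driver: the priority-300, CRYPTO_ALG_ASYNC registration in ixp_module_init() is what lets the crypto core pick the hardware implementation over the generic one when both are present.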