Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/crypto/ixp4xx_crypto.c at v4.9-rc6 (1497 lines, 37 kB):

/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <mach/npe.h>
#include <mach/qmgr.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY	0x01
#define NPE_OP_CCM_ENABLE	0x04
#define NPE_OP_CRYPT_ENABLE	0x08
#define NPE_OP_HASH_ENABLE	0x10
#define NPE_OP_NOT_IN_PLACE	0x20
#define NPE_OP_HMAC_DISABLE	0x40
#define NPE_OP_CRYPT_ENCRYPT	0x80

#define NPE_OP_CCM_GEN_MIC	0xcc
#define NPE_OP_HASH_GEN_ICV	0x50
#define NPE_OP_ENC_GEN_KEY	0xc9

#define MOD_ECB		0x0000
#define MOD_CTR		0x1000
#define MOD_CBC_ENC	0x2000
#define MOD_CBC_DEC	0x3000
#define MOD_CCM_ENC	0x4000
#define MOD_CCM_DEC	0x5000

#define KEYLEN_128	4
#define KEYLEN_192	6
#define KEYLEN_256	8

#define CIPH_DECR	0x0000
#define CIPH_ENCR	0x0400

#define MOD_DES		0x0000
#define MOD_TDEA2	0x0100
#define MOD_3DES	0x0200
#define MOD_AES		0x0800
#define MOD_AES128	(0x0800 | KEYLEN_128)
#define MOD_AES192	(0x0900 | KEYLEN_192)
#define MOD_AES256	(0x0a00 | KEYLEN_256)

#define MAX_IVLEN	16
#define NPE_ID		2  /* NPE C */
#define NPE_QLEN	16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL	64

#define SEND_QID	29
#define RECV_QID	30

#define CTL_FLAG_UNUSED		0x0000
#define CTL_FLAG_USED		0x1000
#define CTL_FLAG_PERFORM_ABLK	0x0001
#define CTL_FLAG_GEN_ICV	0x0002
#define CTL_FLAG_GEN_REVAES	0x0004
#define CTL_FLAG_PERFORM_AEAD	0x0008
#define CTL_FLAG_MASK		0x000f

#define HMAC_IPAD_VALUE		0x36
#define HMAC_OPAD_VALUE		0x5C
#define HMAC_PAD_BLOCKLEN	SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE		16

struct buffer_desc {
	u32 phys_next;
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	u16 pkt_len;
	u16 buf_len;
#endif
	u32 phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
	enum dma_data_direction dir;
};
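/*
 * One 64-byte crypt_ctl descriptor is handed to NPE-C per operation via the
 * queue manager.  The first 48 bytes (mode through crypto_ctx) are the layout
 * the NPE firmware expects; the trailing 16 host-only bytes track the request
 * so the completion path can unmap buffers and invoke the right callback.
 */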
struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;		/* NPE_OP_* operation mode */
	u8 init_len;
	u16 reserved;
#else
	u16 reserved;
	u8 init_len;
	u8 mode;		/* NPE_OP_* operation mode */
#endif
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	u32 icv_rev_aes;	/* icv or rev aes */
	u32 src_buf;
	u32 dst_buf;
#ifdef __ARMEB__
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
#else
	u16 auth_len;		/* Authentication data length */
	u16 auth_offs;		/* Authentication start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 crypt_offs;		/* Cryption start offset */
#endif
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes */
	unsigned ctl_flags;
	union {
		struct ablkcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
};

struct aead_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;
	struct completion completion;
};

struct ixp_alg {
	struct crypto_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

struct ixp_aead_alg {
	struct aead_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;
static struct dma_pool *buffer_pool;
static struct dma_pool *ctx_pool;

static struct crypt_ctl *crypt_virt;
static dma_addr_t crypt_phys;

static int support_aes = 1;

#define DRIVER_NAME "ixp4xx_crypto"

static struct platform_device *pdev;

static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}

static int setup_crypt_desc(void)
{
	struct device *dev = &pdev->dev;

	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	/* allocate the full ring, including the emergency descriptors
	 * handed out by get_crypt_desc_emerg(); release_ixp_crypto()
	 * frees the same NPE_QLEN_TOTAL entries */
	crypt_virt = dma_alloc_coherent(dev,
					NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
					&crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	memset(crypt_virt, 0, NPE_QLEN_TOTAL * sizeof(struct crypt_ctl));
	return 0;
}
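/*
 * Descriptor allocation: get_crypt_desc() hands out slots 0..NPE_QLEN-1 for
 * normal requests; get_crypt_desc_emerg() falls back to slots
 * NPE_QLEN..NPE_QLEN_TOTAL-1, reserved for the small control operations
 * (HMAC ICV and reverse-AES key generation) that must not starve behind a
 * full request queue.  A slot is free when its ctl_flags == CTL_FLAG_UNUSED.
 */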
static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}

static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
			   u32 phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		/* unmap the data buffer this descriptor points at */
		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;
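/*
 * Completion path: the queue manager raises an interrupt when RECV_QID is
 * not empty, irqhandler() schedules crypto_done_tasklet, and
 * crypto_done_action() drains up to four descriptors per run before
 * rescheduling itself.  Bit 0 of the physical address returned by the NPE
 * carries the authentication-failure flag.
 */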
static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->assoclen + req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
					 req->dst, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

static void one_packet(dma_addr_t phys)
{
	struct device *dev = &pdev->dev;
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		if (req_ctx->hmac_virt) {
			finish_scattered_hmac(crypt);
		}
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct ablkcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);

		if (req_ctx->dst) {
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		}
		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
			      crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}
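/*
 * Bring-up: check the expansion-bus feature bits for crypto hardware, claim
 * NPE-C, load (or query) its firmware, and read back the status word whose
 * bits 16-23 report the feature set (3: DES/3DES only, 4 or 5: AES as well).
 * Then create the DMA pools and the two hardware queues.
 */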
static int init_ixp_crypto(struct device *dev)
{
	int ret = -ENODEV;
	u32 msg[2] = { 0, 0 };

	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
			IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret)
			goto npe_release;
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	} else {
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	}

	switch ((msg[1] >> 16) & 0xff) {
	case 3:
		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
		       npe_name(npe_c));
		support_aes = 0;
		break;
	case 4:
	case 5:
		support_aes = 1;
		break;
	default:
		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
		       npe_name(npe_c));
		ret = -ENODEV;
		goto npe_release;
	}
	/* buffer_pool will also be used to sometimes store the hmac,
	 * so assure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
				      sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool) {
		goto err;
	}
	ctx_pool = dma_pool_create("context", dev,
				   NPE_CTX_LEN, 16, 0);
	if (!ctx_pool) {
		goto err;
	}
	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;

npe_error:
	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
	ret = -EIO;
err:
	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);
npe_release:
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(struct device *dev)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
				  NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
				  crypt_virt, crypt_phys);
	}
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx) {
		return -ENOMEM;
	}
	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret) {
		free_sa_dir(&ctx->encrypt);
	}
	return ret;
}
static int init_tfm_ablk(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
	return init_tfm(tfm);
}

static int init_tfm_aead(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
	return init_tfm(crypto_aead_tfm(tfm));
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);

	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

static void exit_tfm_aead(struct crypto_aead *tfm)
{
	exit_tfm(crypto_aead_tfm(tfm));
}
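/*
 * HMAC key setup.  Instead of hashing the ipad/opad blocks on every request,
 * the key XORed with HMAC_IPAD_VALUE (0x36) resp. HMAC_OPAD_VALUE (0x5C) is
 * run through the NPE once with NPE_OP_HASH_GEN_ICV, and the resulting
 * chaining variables are written back into the per-direction context at
 * 'target' (inner digest directly after the cfgword, outer digest right
 * behind it).  Completion is signalled asynchronously via CTL_FLAG_GEN_ICV.
 */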
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
			      int init_len, u32 ctx_addr, const u8 *key,
			      int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	u32 pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
		pad[i] ^= xpad;
	}

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = 0;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		      const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
				+ sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
				 init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
				  init_len, npe_ctx_addr, key, key_len);
}

static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		return -EAGAIN;
	}
	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}
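/*
 * The first word of the NPE context is the cipher configuration, composed
 * from the CIPH_*, MOD_* and KEYLEN_* values above: direction | algorithm |
 * block mode | key length.  For AES decryption the NPE additionally needs
 * the reverse key schedule, generated once with NPE_OP_ENC_GEN_KEY in
 * gen_rev_aes_key() above and patched back to decrypt mode on completion.
 */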
static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
			const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128; break;
		case 24: keylen_cfg = MOD_AES192; break;
		case 32: keylen_cfg = MOD_AES256; break;
		default:
			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else if (cipher_cfg & MOD_3DES) {
		const u32 *K = (const u32 *)key;
		/* reject 3DES keys whose halves degenerate to single DES */
		if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
			     !((K[2] ^ K[4]) | (K[3] ^ K[5])))) {
			*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
			return -EINVAL;
		}
	} else {
		u32 tmp[DES_EXPKEY_WORDS];
		if (des_ekey(tmp, key) == 0) {
			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
	/* write cfg word to cryptinfo */
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt) {
		return gen_rev_aes_key(tfm);
	}
	return 0;
}

static struct buffer_desc *chainup_buffers(struct device *dev,
		struct scatterlist *sg, unsigned nbytes,
		struct buffer_desc *buf, gfp_t flags,
		enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = sg_next(sg)) {
		unsigned len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		u32 next_buf_phys;
		void *ptr;

		nbytes -= len;
		ptr = page_address(sg_page(sg)) + sg->offset;
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			buf = NULL;
			break;
		}
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	if (!buf)	/* a pool allocation failed mid-chain */
		return NULL;
	buf->next = NULL;
	buf->phys_next = 0;
	return buf;
}

static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		       unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}
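/*
 * RFC 3686 CTR mode: the last four key bytes are a per-key nonce, and each
 * request supplies an 8-byte IV.  The 16-byte AES counter block is then
 *
 *	| nonce (4) | per-request IV (8) | counter (4, big-endian, = 1) |
 *
 * assembled in ablk_rfc3686_crypt() below before the request is queued.
 */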
static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}

static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->nbytes;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct buffer_desc src_hook;
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->info);
	memcpy(crypt->iv, req->info, ivsize);
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		req_ctx->dst = NULL;
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
				     flags, DMA_FROM_DEVICE))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
	} else {
		req_ctx->dst = NULL;
	}
	req_ctx->src = NULL;
	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
			     flags, src_direction))
		goto free_buf_src;

	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst) {
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->info;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->info = iv;
	ret = ablk_perform(req, 1);
	req->info = info;
	return ret;
}
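/*
 * AEAD requests hash assoclen + cryptlen bytes and en/decrypt the trailing
 * cryptlen bytes (minus the ICV when decrypting).  icv_rev_aes tells the
 * NPE where to read or write the ICV; when the ICV straddles scatterlist
 * entries it is staged in a buffer_pool allocation (hmac_virt) instead:
 * filled from req->src before submission on decrypt, and copied out to
 * req->dst by finish_scattered_hmac() after encrypt.
 */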
static int aead_perform(struct aead_request *req, int encrypt,
			int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	unsigned int lastlen;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	buf = chainup_buffers(dev, req->src, crypt->auth_len,
			      &src_hook, flags, src_direction);
	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto free_buf_src;

	lastlen = buf->buf_len;
	if (lastlen >= authsize)
		crypt->icv_rev_aes = buf->phys_addr +
				     buf->buf_len - authsize;

	req_ctx->dst = NULL;

	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		src_direction = DMA_TO_DEVICE;

		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
				      &dst_hook, flags, DMA_FROM_DEVICE);
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;

		if (!buf)
			goto free_buf_dst;

		if (encrypt) {
			lastlen = buf->buf_len;
			if (lastlen >= authsize)
				crypt->icv_rev_aes = buf->phys_addr +
						     buf->buf_len - authsize;
		}
	}

	if (unlikely(lastlen < authsize)) {
		/* The 12 hmac bytes are scattered,
		 * we need to copy them into a safe buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
						    &crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_buf_dst;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
						 req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

	/* free the destination chain first, so that a failure after both
	 * chains were built falls through and releases both of them */
free_buf_dst:
	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}
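/*
 * (Re)program both NPE contexts whenever the key or the authentication size
 * changes.  The NPE encodes the truncated ICV length in units of four bytes
 * (see the cfgword in setup_auth()), hence aead_setauthsize() below only
 * accepts non-zero multiples of four up to the full digest size.
 */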
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	unsigned digest_len = crypto_aead_maxauthsize(tfm);
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			 ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			 ctx->authkey_len, digest_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
			goto out;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_maxauthsize(tfm) >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
		       unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	if (keys.enckeylen > sizeof(ctx->enckey))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}
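/*
 * Algorithm templates.  Only cra_name, block size, key/IV sizes and the NPE
 * configuration words are spelled out here; ixp_module_init() fills in the
 * common defaults (driver name, setkey/encrypt/decrypt callbacks, context
 * size, priority) before registering each entry.
 */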
static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto	= {
		.cra_name	= "cbc(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.ivsize		= DES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "ecb(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "ecb(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.cra_name	= "ecb(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto	= {
		.cra_name	= "ctr(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	/* CTR mode is symmetric: both directions use the encrypt config */
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.cra_name	= "rfc3686(ctr(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			.setkey		= ablk_rfc3686_setkey,
			.encrypt	= ablk_rfc3686_crypt,
			.decrypt	= ablk_rfc3686_crypt }
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
} };
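/* authenc() templates combining HMAC-MD5 or HMAC-SHA1 with the CBC ciphers
 * above */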
static struct ixp_aead_alg ixp4xx_aeads[] = {
{
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"

static const struct platform_device_info ixp_dev_info __initdata = {
	.name		= DRIVER_NAME,
	.id		= 0,
	.dma_mask	= DMA_BIT_MASK(32),
};

static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	pdev = platform_device_register_full(&ixp_dev_info);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto(&pdev->dev);
	if (err) {
		platform_device_unregister(pdev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->cra_name) >=
		    CRYPTO_MAX_ALG_NAME) {
			continue;
		}
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
			continue;
		}

		/* block ciphers */
		cra->cra_type = &crypto_ablkcipher_type;
		cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				 CRYPTO_ALG_KERN_DRIVER_ONLY |
				 CRYPTO_ALG_ASYNC;
		if (!cra->cra_ablkcipher.setkey)
			cra->cra_ablkcipher.setkey = ablk_setkey;
		if (!cra->cra_ablkcipher.encrypt)
			cra->cra_ablkcipher.encrypt = ablk_encrypt;
		if (!cra->cra_ablkcipher.decrypt)
			cra->cra_ablkcipher.decrypt = ablk_decrypt;
		cra->cra_init = init_tfm_ablk;

		cra->cra_ctxsize = sizeof(struct ixp_ctx);
		cra->cra_module = THIS_MODULE;
		cra->cra_alignmask = 3;
		cra->cra_priority = 300;
		cra->cra_exit = exit_tfm;
		if (crypto_register_alg(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
			       cra->cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;
		if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
			continue;

		/* authenc */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC;
		cra->setkey = aead_setkey;
		cra->setauthsize = aead_setauthsize;
		cra->encrypt = aead_encrypt;
		cra->decrypt = aead_decrypt;
		cra->init = init_tfm_aead;
		cra->exit = exit_tfm_aead;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;

		if (crypto_register_aead(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
			       cra->base.cra_driver_name);
		else
			ixp4xx_aeads[i].registered = 1;
	}
	return 0;
}
static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		if (ixp4xx_aeads[i].registered)
			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
	}

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto(&pdev->dev);
	platform_device_unregister(pdev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");