Linux kernel mirror: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

staging: ccree: add ahash support

Add CryptoCell async. hash and HMAC support.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Gilad Ben-Yossef, committed by Greg Kroah-Hartman
50cfbbb7 abefd674

+3263 -14
+6
drivers/staging/ccree/Kconfig
···
  	tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators"
  	depends on CRYPTO_HW && OF && HAS_DMA
  	default n
+ 	select CRYPTO_HASH
+ 	select CRYPTO_SHA1
+ 	select CRYPTO_MD5
+ 	select CRYPTO_SHA256
+ 	select CRYPTO_SHA512
+ 	select CRYPTO_HMAC
  	help
  	  Say 'Y' to enable a driver for the Arm TrustZone CryptoCell
  	  C7xx. Currently only the CryptoCell 712 REE is supported.
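For context, a hedged sketch of what the new select lines mean for a build: enabling the driver now also pulls in the generic hash support it plugs into. The fragment below is illustrative only; whether the selected symbols end up =y or =m depends on the rest of the .config (a tristate selected from an =m symbol is only forced to at least m):

  CONFIG_CRYPTO_HW=y
  CONFIG_CRYPTO_DEV_CCREE=m
  # forced on via the new 'select' lines above:
  CONFIG_CRYPTO_HASH=y
  CONFIG_CRYPTO_SHA1=y
  CONFIG_CRYPTO_MD5=y
  CONFIG_CRYPTO_SHA256=y
  CONFIG_CRYPTO_SHA512=y
  CONFIG_CRYPTO_HMAC=y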
+1 -1
drivers/staging/ccree/Makefile
···
  obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
- ccree-y := ssi_driver.o ssi_sysfs.o ssi_buffer_mgr.o ssi_request_mgr.o ssi_sram_mgr.o ssi_pm.o ssi_pm_ext.o
+ ccree-y := ssi_driver.o ssi_sysfs.o ssi_buffer_mgr.o ssi_request_mgr.o ssi_hash.o ssi_sram_mgr.o ssi_pm.o ssi_pm_ext.o
+22
drivers/staging/ccree/cc_crypto_ctx.h
···
  } __attribute__((__may_alias__));


+ struct drv_ctx_hash {
+ 	enum drv_crypto_alg alg; /* DRV_CRYPTO_ALG_HASH */
+ 	enum drv_hash_mode mode;
+ 	uint8_t digest[CC_DIGEST_SIZE_MAX];
+ 	/* reserve to end of allocated context size */
+ 	uint8_t reserved[CC_CTX_SIZE - 2 * sizeof(uint32_t) -
+ 			 CC_DIGEST_SIZE_MAX];
+ };
+
+ /* NOTE: drv_ctx_hmac must have the same layout as drv_ctx_hash,
+    except for the k0 and k0_size fields */
+ struct drv_ctx_hmac {
+ 	enum drv_crypto_alg alg; /* DRV_CRYPTO_ALG_HMAC */
+ 	enum drv_hash_mode mode;
+ 	uint8_t digest[CC_DIGEST_SIZE_MAX];
+ 	uint32_t k0[CC_HMAC_BLOCK_SIZE_MAX / sizeof(uint32_t)];
+ 	uint32_t k0_size;
+ 	/* reserve to end of allocated context size */
+ 	uint8_t reserved[CC_CTX_SIZE - 3 * sizeof(uint32_t) -
+ 			 CC_DIGEST_SIZE_MAX - CC_HMAC_BLOCK_SIZE_MAX];
+ };
+
  /*******************************************************************/
  /***************** MESSAGE BASED CONTEXTS **************************/
  /*******************************************************************/
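Since the reserved[] arithmetic above is meant to pad each context out to exactly CC_CTX_SIZE, a compile-time check makes the intent explicit. This is a minimal sketch, not part of the patch; it assumes the two enums are 32-bit, which the 2 * sizeof(uint32_t) / 3 * sizeof(uint32_t) terms already rely on:

  #include <linux/bug.h>
  #include "cc_crypto_ctx.h"

  /* Both contexts must fill the allocated context size exactly,
   * otherwise the reserved[] math above is off. */
  static inline void cc_ctx_layout_check(void)
  {
  	BUILD_BUG_ON(sizeof(struct drv_ctx_hash) != CC_CTX_SIZE);
  	BUILD_BUG_ON(sizeof(struct drv_ctx_hmac) != CC_CTX_SIZE);
  }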
+78
drivers/staging/ccree/hash_defs.h
···
+ /*
+  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 as
+  * published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+  * GNU General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
+  */
+
+ #ifndef _HASH_DEFS_H__
+ #define _HASH_DEFS_H__
+
+ #include "cc_crypto_ctx.h"
+
+ /* this file provides definitions required by hash engine drivers */
+ #ifndef CC_CONFIG_HASH_SHA_512_SUPPORTED
+ #define SEP_HASH_LENGTH_WORDS 2
+ #else
+ #define SEP_HASH_LENGTH_WORDS 4
+ #endif
+
+ #ifdef BIG__ENDIAN
+ #define OPAD_CURRENT_LENGTH 0x40000000, 0x00000000, 0x00000000, 0x00000000
+ #define HASH_LARVAL_MD5  0x76543210, 0xFEDCBA98, 0x89ABCDEF, 0x01234567
+ #define HASH_LARVAL_SHA1 0xF0E1D2C3, 0x76543210, 0xFEDCBA98, 0x89ABCDEF, 0x01234567
+ #define HASH_LARVAL_SHA224 0XA44FFABE, 0XA78FF964, 0X11155868, 0X310BC0FF, 0X39590EF7, 0X17DD7030, 0X07D57C36, 0XD89E05C1
+ #define HASH_LARVAL_SHA256 0X19CDE05B, 0XABD9831F, 0X8C68059B, 0X7F520E51, 0X3AF54FA5, 0X72F36E3C, 0X85AE67BB, 0X67E6096A
+ #define HASH_LARVAL_SHA384 0X1D48B547, 0XA44FFABE, 0X0D2E0CDB, 0XA78FF964, 0X874AB48E, 0X11155868, 0X67263367, 0X310BC0FF, 0XD8EC2F15, 0X39590EF7, 0X5A015991, 0X17DD7030, 0X2A299A62, 0X07D57C36, 0X5D9DBBCB, 0XD89E05C1
+ #define HASH_LARVAL_SHA512 0X19CDE05B, 0X79217E13, 0XABD9831F, 0X6BBD41FB, 0X8C68059B, 0X1F6C3E2B, 0X7F520E51, 0XD182E6AD, 0X3AF54FA5, 0XF1361D5F, 0X72F36E3C, 0X2BF894FE, 0X85AE67BB, 0X3BA7CA84, 0X67E6096A, 0X08C9BCF3
+ #else
+ #define OPAD_CURRENT_LENGTH 0x00000040, 0x00000000, 0x00000000, 0x00000000
+ #define HASH_LARVAL_MD5  0x10325476, 0x98BADCFE, 0xEFCDAB89, 0x67452301
+ #define HASH_LARVAL_SHA1 0xC3D2E1F0, 0x10325476, 0x98BADCFE, 0xEFCDAB89, 0x67452301
+ #define HASH_LARVAL_SHA224 0xbefa4fa4, 0x64f98fa7, 0x68581511, 0xffc00b31, 0xf70e5939, 0x3070dd17, 0x367cd507, 0xc1059ed8
+ #define HASH_LARVAL_SHA256 0x5be0cd19, 0x1f83d9ab, 0x9b05688c, 0x510e527f, 0xa54ff53a, 0x3c6ef372, 0xbb67ae85, 0x6a09e667
+ #define HASH_LARVAL_SHA384 0X47B5481D, 0XBEFA4FA4, 0XDB0C2E0D, 0X64F98FA7, 0X8EB44A87, 0X68581511, 0X67332667, 0XFFC00B31, 0X152FECD8, 0XF70E5939, 0X9159015A, 0X3070DD17, 0X629A292A, 0X367CD507, 0XCBBB9D5D, 0XC1059ED8
+ #define HASH_LARVAL_SHA512 0x5be0cd19, 0x137e2179, 0x1f83d9ab, 0xfb41bd6b, 0x9b05688c, 0x2b3e6c1f, 0x510e527f, 0xade682d1, 0xa54ff53a, 0x5f1d36f1, 0x3c6ef372, 0xfe94f82b, 0xbb67ae85, 0x84caa73b, 0x6a09e667, 0xf3bcc908
+ #endif
+
+ enum HashConfig1Padding {
+ 	HASH_PADDING_DISABLED = 0,
+ 	HASH_PADDING_ENABLED = 1,
+ 	HASH_DIGEST_RESULT_LITTLE_ENDIAN = 2,
+ 	HASH_CONFIG1_PADDING_RESERVE32 = INT32_MAX,
+ };
+
+ enum HashCipherDoPadding {
+ 	DO_NOT_PAD = 0,
+ 	DO_PAD = 1,
+ 	HASH_CIPHER_DO_PADDING_RESERVE32 = INT32_MAX,
+ };
+
+ typedef struct SepHashPrivateContext {
+ 	/* The current length is placed at the end of the context buffer
+ 	 * because the hash context is also used for all HMAC operations,
+ 	 * and the HMAC context includes a 64-byte K0 field.
+ 	 * The reserved field of struct drv_ctx_hash is 88 or 184 bytes,
+ 	 * depending on whether SHA-512 is supported (in which case the
+ 	 * context size is 256 bytes).
+ 	 * This structure, excluding its own reserved field, is therefore
+ 	 * 20 bytes when SHA-512 is not supported (SEP_HASH_LENGTH_WORDS
+ 	 * defined to 2) and 28 bytes when it is (SEP_HASH_LENGTH_WORDS
+ 	 * defined to 4). */
+ 	uint32_t reserved[(sizeof(struct drv_ctx_hash) / sizeof(uint32_t)) - SEP_HASH_LENGTH_WORDS - 3];
+ 	uint32_t CurrentDigestedLength[SEP_HASH_LENGTH_WORDS];
+ 	uint32_t KeyType;
+ 	uint32_t dataCompleted;
+ 	uint32_t hmacFinalization;
+ 	/* no space left */
+ } SepHashPrivateContext_s;
+
+ #endif /*_HASH_DEFS_H__*/
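The comment's arithmetic can also be pinned down at compile time: the reserved words plus SEP_HASH_LENGTH_WORDS plus the three trailing uint32_t fields must add back up to the size of struct drv_ctx_hash, since the private context overlays it. A hedged sketch, not part of the patch:

  #include <linux/bug.h>
  #include "hash_defs.h"

  /* SepHashPrivateContext overlays drv_ctx_hash, so their sizes must
   * match; e.g. with a 256-byte context and SHA-512 support, reserved
   * is 64 - 4 - 3 = 57 words and the live tail is 7 words (28 bytes). */
  static inline void sep_hash_ctx_layout_check(void)
  {
  	BUILD_BUG_ON(sizeof(struct SepHashPrivateContext) !=
  		     sizeof(struct drv_ctx_hash));
  }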
+301 -10
drivers/staging/ccree/ssi_buffer_mgr.c
···
  #include <linux/crypto.h>
  #include <linux/version.h>
  #include <crypto/algapi.h>
+ #include <crypto/hash.h>
  #include <crypto/authenc.h>
  #include <crypto/scatterwalk.h>
  #include <linux/dmapool.h>
···

  #include "ssi_buffer_mgr.h"
  #include "cc_lli_defs.h"
+ #include "ssi_hash.h"

  #define LLI_MAX_NUM_OF_DATA_ENTRIES 128
  #define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
···
  	return 0;
  }

- static int ssi_buffer_mgr_generate_mlli (
- 	struct device *dev,
- 	struct buffer_array *sg_data,
- 	struct mlli_params *mlli_params) __maybe_unused;
-
  static int ssi_buffer_mgr_generate_mlli(
  	struct device *dev,
  	struct buffer_array *sg_data,
···
  	return 0;
  }

- static int ssi_buffer_mgr_map_scatterlist (struct device *dev,
- 	struct scatterlist *sg, unsigned int nbytes, int direction,
- 	uint32_t *nents, uint32_t max_sg_nents, uint32_t *lbytes,
- 	uint32_t *mapped_nents) __maybe_unused;
-
  static int ssi_buffer_mgr_map_scatterlist(
  	struct device *dev, struct scatterlist *sg,
  	unsigned int nbytes, int direction,
···
  	}

  	return 0;
+ }
+
+ static inline int ssi_ahash_handle_curr_buf(struct device *dev,
+ 					    struct ahash_req_ctx *areq_ctx,
+ 					    uint8_t *curr_buff,
+ 					    uint32_t curr_buff_cnt,
+ 					    struct buffer_array *sg_data)
+ {
+ 	SSI_LOG_DEBUG(" handle curr buff %x set to DLLI\n", curr_buff_cnt);
+ 	/* create sg for the current buffer */
+ 	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
+ 	if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
+ 				DMA_TO_DEVICE) != 1)) {
+ 		SSI_LOG_ERR("dma_map_sg() src buffer failed\n");
+ 		return -ENOMEM;
+ 	}
+ 	SSI_LOG_DEBUG("Mapped curr_buff: dma_address=0x%llX "
+ 		      "page_link=0x%08lX addr=%pK "
+ 		      "offset=%u length=%u\n",
+ 		      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
+ 		      areq_ctx->buff_sg->page_link,
+ 		      sg_virt(areq_ctx->buff_sg),
+ 		      areq_ctx->buff_sg->offset,
+ 		      areq_ctx->buff_sg->length);
+ 	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
+ 	areq_ctx->curr_sg = areq_ctx->buff_sg;
+ 	areq_ctx->in_nents = 0;
+ 	/* prepare for case of MLLI */
+ 	ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
+ 					     curr_buff_cnt, 0, false, NULL);
+ 	return 0;
+ }
+
+ int ssi_buffer_mgr_map_hash_request_final(
+ 	struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src,
+ 	unsigned int nbytes, bool do_update)
+ {
+ 	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ 	struct device *dev = &drvdata->plat_dev->dev;
+ 	uint8_t *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
+ 			areq_ctx->buff0;
+ 	uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
+ 			&areq_ctx->buff0_cnt;
+ 	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ 	struct buffer_array sg_data;
+ 	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ 	uint32_t dummy = 0;
+ 	uint32_t mapped_nents = 0;
+
+ 	SSI_LOG_DEBUG(" final params : curr_buff=%pK "
+ 		      "curr_buff_cnt=0x%X nbytes = 0x%X "
+ 		      "src=%pK curr_index=%u\n",
+ 		      curr_buff, *curr_buff_cnt, nbytes,
+ 		      src, areq_ctx->buff_index);
+ 	/* Init the type of the dma buffer */
+ 	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
+ 	mlli_params->curr_pool = NULL;
+ 	sg_data.num_of_buffers = 0;
+ 	areq_ctx->in_nents = 0;
+
+ 	if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
+ 		/* nothing to do */
+ 		return 0;
+ 	}
+
+ 	/* TODO: copy data in case that buffer is enough for operation */
+ 	/* map the previous buffer */
+ 	if (*curr_buff_cnt != 0) {
+ 		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
+ 					      *curr_buff_cnt, &sg_data) != 0) {
+ 			return -ENOMEM;
+ 		}
+ 	}
+
+ 	if (src && (nbytes > 0) && do_update) {
+ 		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
+ 					nbytes,
+ 					DMA_TO_DEVICE,
+ 					&areq_ctx->in_nents,
+ 					LLI_MAX_NUM_OF_DATA_ENTRIES,
+ 					&dummy, &mapped_nents))) {
+ 			goto unmap_curr_buff;
+ 		}
+ 		if (src && (mapped_nents == 1) &&
+ 		    (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
+ 			memcpy(areq_ctx->buff_sg, src,
+ 			       sizeof(struct scatterlist));
+ 			areq_ctx->buff_sg->length = nbytes;
+ 			areq_ctx->curr_sg = areq_ctx->buff_sg;
+ 			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
+ 		} else {
+ 			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
+ 		}
+ 	}
+
+ 	/* build mlli */
+ 	if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ 		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ 		/* add the src data to the sg_data */
+ 		ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ 						     areq_ctx->in_nents,
+ 						     src,
+ 						     nbytes, 0,
+ 						     true, &areq_ctx->mlli_nents);
+ 		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
+ 							  mlli_params) != 0)) {
+ 			goto fail_unmap_din;
+ 		}
+ 	}
+ 	/* change the buffer index for the unmap function */
+ 	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
+ 	SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
+ 		      GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
+ 	return 0;
+
+ fail_unmap_din:
+ 	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
+
+ unmap_curr_buff:
+ 	if (*curr_buff_cnt != 0) {
+ 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+ 	}
+ 	return -ENOMEM;
+ }
+
+ int ssi_buffer_mgr_map_hash_request_update(
+ 	struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src,
+ 	unsigned int nbytes, unsigned int block_size)
+ {
+ 	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ 	struct device *dev = &drvdata->plat_dev->dev;
+ 	uint8_t *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
+ 			areq_ctx->buff0;
+ 	uint32_t *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
+ 			&areq_ctx->buff0_cnt;
+ 	uint8_t *next_buff = areq_ctx->buff_index ? areq_ctx->buff0 :
+ 			areq_ctx->buff1;
+ 	uint32_t *next_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
+ 			&areq_ctx->buff1_cnt;
+ 	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+ 	unsigned int update_data_len;
+ 	uint32_t total_in_len = nbytes + *curr_buff_cnt;
+ 	struct buffer_array sg_data;
+ 	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+ 	unsigned int swap_index = 0;
+ 	uint32_t dummy = 0;
+ 	uint32_t mapped_nents = 0;
+
+ 	SSI_LOG_DEBUG(" update params : curr_buff=%pK "
+ 		      "curr_buff_cnt=0x%X nbytes=0x%X "
+ 		      "src=%pK curr_index=%u\n",
+ 		      curr_buff, *curr_buff_cnt, nbytes,
+ 		      src, areq_ctx->buff_index);
+ 	/* Init the type of the dma buffer */
+ 	areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
+ 	mlli_params->curr_pool = NULL;
+ 	areq_ctx->curr_sg = NULL;
+ 	sg_data.num_of_buffers = 0;
+ 	areq_ctx->in_nents = 0;
+
+ 	if (unlikely(total_in_len < block_size)) {
+ 		SSI_LOG_DEBUG(" less than one block: curr_buff=%pK "
+ 			      "*curr_buff_cnt=0x%X copy_to=%pK\n",
+ 			      curr_buff, *curr_buff_cnt,
+ 			      &curr_buff[*curr_buff_cnt]);
+ 		areq_ctx->in_nents =
+ 			ssi_buffer_mgr_get_sgl_nents(src,
+ 						     nbytes,
+ 						     &dummy, NULL);
+ 		sg_copy_to_buffer(src, areq_ctx->in_nents,
+ 				  &curr_buff[*curr_buff_cnt], nbytes);
+ 		*curr_buff_cnt += nbytes;
+ 		return 1;
+ 	}
+
+ 	/* Calculate the residue size */
+ 	*next_buff_cnt = total_in_len & (block_size - 1);
+ 	/* update data len */
+ 	update_data_len = total_in_len - *next_buff_cnt;
+
+ 	SSI_LOG_DEBUG(" temp length : *next_buff_cnt=0x%X "
+ 		      "update_data_len=0x%X\n",
+ 		      *next_buff_cnt, update_data_len);
+
+ 	/* Copy the new residue to next buffer */
+ 	if (*next_buff_cnt != 0) {
+ 		SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u"
+ 			      " residue %u\n", next_buff,
+ 			      (update_data_len - *curr_buff_cnt),
+ 			      *next_buff_cnt);
+ 		ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
+ 				(update_data_len - *curr_buff_cnt),
+ 				nbytes, SSI_SG_TO_BUF);
+ 		/* change the buffer index for next operation */
+ 		swap_index = 1;
+ 	}
+
+ 	if (*curr_buff_cnt != 0) {
+ 		if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
+ 					      *curr_buff_cnt, &sg_data) != 0) {
+ 			return -ENOMEM;
+ 		}
+ 		/* change the buffer index for next operation */
+ 		swap_index = 1;
+ 	}
+
+ 	if (update_data_len > *curr_buff_cnt) {
+ 		if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
+ 					(update_data_len - *curr_buff_cnt),
+ 					DMA_TO_DEVICE,
+ 					&areq_ctx->in_nents,
+ 					LLI_MAX_NUM_OF_DATA_ENTRIES,
+ 					&dummy, &mapped_nents))) {
+ 			goto unmap_curr_buff;
+ 		}
+ 		if ((mapped_nents == 1) &&
+ 		    (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
+ 			/* only one entry in the SG and no previous data */
+ 			memcpy(areq_ctx->buff_sg, src,
+ 			       sizeof(struct scatterlist));
+ 			areq_ctx->buff_sg->length = update_data_len;
+ 			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
+ 			areq_ctx->curr_sg = areq_ctx->buff_sg;
+ 		} else {
+ 			areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
+ 		}
+ 	}
+
+ 	if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ 		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+ 		/* add the src data to the sg_data */
+ 		ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ 						     areq_ctx->in_nents,
+ 						     src,
+ 						     (update_data_len - *curr_buff_cnt), 0,
+ 						     true, &areq_ctx->mlli_nents);
+ 		if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
+ 							  mlli_params) != 0)) {
+ 			goto fail_unmap_din;
+ 		}
+ 	}
+ 	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
+
+ 	return 0;
+
+ fail_unmap_din:
+ 	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
+
+ unmap_curr_buff:
+ 	if (*curr_buff_cnt != 0) {
+ 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+ 	}
+ 	return -ENOMEM;
+ }
+
+ void ssi_buffer_mgr_unmap_hash_request(
+ 	struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
+ {
+ 	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+ 	uint32_t *prev_len = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
+ 			&areq_ctx->buff1_cnt;
+
+ 	/* In case a pool was set, a table was
+ 	   allocated and should be released */
+ 	if (areq_ctx->mlli_params.curr_pool != NULL) {
+ 		SSI_LOG_DEBUG("free MLLI buffer: dma=0x%llX virt=%pK\n",
+ 			      (unsigned long long)areq_ctx->mlli_params.mlli_dma_addr,
+ 			      areq_ctx->mlli_params.mlli_virt_addr);
+ 		SSI_RESTORE_DMA_ADDR_TO_48BIT(areq_ctx->mlli_params.mlli_dma_addr);
+ 		dma_pool_free(areq_ctx->mlli_params.curr_pool,
+ 			      areq_ctx->mlli_params.mlli_virt_addr,
+ 			      areq_ctx->mlli_params.mlli_dma_addr);
+ 	}
+
+ 	if ((src) && likely(areq_ctx->in_nents != 0)) {
+ 		SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=0x%llX len=0x%X\n",
+ 			      sg_virt(src),
+ 			      (unsigned long long)sg_dma_address(src),
+ 			      sg_dma_len(src));
+ 		SSI_RESTORE_DMA_ADDR_TO_48BIT(sg_dma_address(src));
+ 		dma_unmap_sg(dev, src,
+ 			     areq_ctx->in_nents, DMA_TO_DEVICE);
+ 	}
+
+ 	if (*prev_len != 0) {
+ 		SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK "
+ 			      "dma=0x%llX len 0x%X\n",
+ 			      sg_virt(areq_ctx->buff_sg),
+ 			      (unsigned long long)sg_dma_address(areq_ctx->buff_sg),
+ 			      sg_dma_len(areq_ctx->buff_sg));
+ 		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+ 		if (!do_revert) {
+ 			/* clean the previous data length for update operation */
+ 			*prev_len = 0;
+ 		} else {
+ 			areq_ctx->buff_index ^= 1;
+ 		}
+ 	}
  }

  int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
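To make the update path above easier to follow, here is a minimal standalone sketch (not driver code) of its residue arithmetic: anything short of a whole block is parked in the inactive one of the two staging buffers and prepended to the next update. It assumes block_size is a power of two, exactly as the driver's '& (block_size - 1)' does; struct residue and split_update are illustrative names.

  #include <stdint.h>

  /* Two staging buffers; the driver keeps them in areq_ctx->buff0/buff1
   * and tracks the active one via buff_index. */
  struct residue {
  	uint32_t cnt[2];   /* bytes currently parked per buffer */
  	unsigned int idx;  /* buffer holding the residue this round */
  };

  /* Returns the number of bytes the HW should hash this round (0 means
   * "buffer only", the case where the driver returns 1 to its caller). */
  static uint32_t split_update(struct residue *r, uint32_t nbytes,
  			     uint32_t block_size)
  {
  	uint32_t total = nbytes + r->cnt[r->idx];
  	uint32_t next_cnt, update_len;

  	if (total < block_size) {
  		r->cnt[r->idx] = total;      /* park everything */
  		return 0;
  	}
  	next_cnt = total & (block_size - 1); /* residue for the next call */
  	update_len = total - next_cnt;       /* whole blocks go to the HW */
  	r->cnt[r->idx ^ 1] = next_cnt;
  	r->cnt[r->idx] = 0;
  	r->idx ^= 1; /* the driver's swap_index; a no-op if both are empty */
  	return update_len;
  }

This also mirrors why ssi_buffer_mgr_unmap_hash_request() flips buff_index back on the do_revert path: a failed request must leave the parked residue addressable again.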
+6
drivers/staging/ccree/ssi_buffer_mgr.h
···

  int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata);

+ int ssi_buffer_mgr_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update);
+
+ int ssi_buffer_mgr_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size);
+
+ void ssi_buffer_mgr_unmap_hash_request(struct device *dev, void *ctx, struct scatterlist *src, bool do_revert);
+
  void ssi_buffer_mgr_copy_scatterlist_portion(u8 *dest, struct scatterlist *sg, uint32_t to_skip, uint32_t end, enum ssi_sg_cpy_direct direct);

  void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len);
+9 -2
drivers/staging/ccree/ssi_driver.c
···
  #include "ssi_request_mgr.h"
  #include "ssi_buffer_mgr.h"
  #include "ssi_sysfs.h"
+ #include "ssi_hash.h"
  #include "ssi_sram_mgr.h"
  #include "ssi_pm.h"
···
  		goto init_cc_res_err;
  	}

- 	new_drvdata->inflight_counter = 0;
-
  	dev_set_drvdata(&plat_dev->dev, new_drvdata);
  	/* Get device resources */
  	/* First CC registers space */
···
  		goto init_cc_res_err;
  	}

+ 	rc = ssi_hash_alloc(new_drvdata);
+ 	if (unlikely(rc != 0)) {
+ 		SSI_LOG_ERR("ssi_hash_alloc failed\n");
+ 		goto init_cc_res_err;
+ 	}
+
  	return 0;

  init_cc_res_err:
  	SSI_LOG_ERR("Freeing CC HW resources!\n");

  	if (new_drvdata != NULL) {
+ 		ssi_hash_free(new_drvdata);
  		ssi_power_mgr_fini(new_drvdata);
  		ssi_buffer_mgr_fini(new_drvdata);
  		request_mgr_fini(new_drvdata);
···
  	struct ssi_drvdata *drvdata =
  		(struct ssi_drvdata *)dev_get_drvdata(&plat_dev->dev);

+ 	ssi_hash_free(drvdata);
  	ssi_power_mgr_fini(drvdata);
  	ssi_buffer_mgr_fini(drvdata);
  	request_mgr_fini(drvdata);
+3 -1
drivers/staging/ccree/ssi_driver.h
···
  #include <crypto/aes.h>
  #include <crypto/sha.h>
  #include <crypto/authenc.h>
+ #include <crypto/hash.h>
  #include <linux/version.h>

  #ifndef INT32_MAX /* Missing in Linux kernel */
···
  #define CC_SUPPORT_SHA DX_DEV_SHA_MAX
  #include "cc_crypto_ctx.h"
  #include "ssi_sysfs.h"
+ #include "hash_defs.h"

  #define DRV_MODULE_VERSION "3.0"
···
  	ssi_sram_addr_t mlli_sram_addr;
  	struct completion icache_setup_completion;
  	void *buff_mgr_handle;
+ 	void *hash_handle;
  	void *request_mgr_handle;
  	void *sram_mgr_handle;

  #ifdef ENABLE_CYCLE_COUNT
  	cycles_t isr_exit_cycles; /* Save for isr-to-tasklet latency */
  #endif
- 	uint32_t inflight_counter;

  };
+2732
drivers/staging/ccree/ssi_hash.c
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + #include <linux/kernel.h> 18 + #include <linux/module.h> 19 + #include <linux/platform_device.h> 20 + #include <crypto/algapi.h> 21 + #include <crypto/hash.h> 22 + #include <crypto/sha.h> 23 + #include <crypto/md5.h> 24 + #include <crypto/internal/hash.h> 25 + 26 + #include "ssi_config.h" 27 + #include "ssi_driver.h" 28 + #include "ssi_request_mgr.h" 29 + #include "ssi_buffer_mgr.h" 30 + #include "ssi_sysfs.h" 31 + #include "ssi_hash.h" 32 + #include "ssi_sram_mgr.h" 33 + 34 + #define SSI_MAX_AHASH_SEQ_LEN 12 35 + #define SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE MAX(SSI_MAX_HASH_BLCK_SIZE, 3 * AES_BLOCK_SIZE) 36 + 37 + struct ssi_hash_handle { 38 + ssi_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/ 39 + ssi_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */ 40 + struct list_head hash_list; 41 + struct completion init_comp; 42 + }; 43 + 44 + static const uint32_t digest_len_init[] = { 45 + 0x00000040, 0x00000000, 0x00000000, 0x00000000 }; 46 + static const uint32_t md5_init[] = { 47 + SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 }; 48 + static const uint32_t sha1_init[] = { 49 + SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 }; 50 + static const uint32_t sha224_init[] = { 51 + SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4, 52 + SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 }; 53 + static const uint32_t sha256_init[] = { 54 + SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4, 55 + SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 }; 56 + #if (DX_DEV_SHA_MAX > 256) 57 + static const uint32_t digest_len_sha512_init[] = { 58 + 0x00000080, 0x00000000, 0x00000000, 0x00000000 }; 59 + static const uint64_t sha384_init[] = { 60 + SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4, 61 + SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 }; 62 + static const uint64_t sha512_init[] = { 63 + SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4, 64 + SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 }; 65 + #endif 66 + 67 + static void ssi_hash_create_xcbc_setup( 68 + struct ahash_request *areq, 69 + HwDesc_s desc[], 70 + unsigned int *seq_size); 71 + 72 + static void ssi_hash_create_cmac_setup(struct ahash_request *areq, 73 + HwDesc_s desc[], 74 + unsigned int *seq_size); 75 + 76 + struct ssi_hash_alg { 77 + struct list_head entry; 78 + bool synchronize; 79 + int hash_mode; 80 + int hw_mode; 81 + int inter_digestsize; 82 + struct ssi_drvdata *drvdata; 83 + union { 84 + struct ahash_alg ahash_alg; 85 + struct shash_alg shash_alg; 86 + }; 87 + }; 88 + 89 + 90 + struct hash_key_req_ctx { 91 + uint32_t keylen; 92 + dma_addr_t key_dma_addr; 93 + }; 94 + 95 + /* hash per-session context */ 96 + struct ssi_hash_ctx { 97 + struct ssi_drvdata *drvdata; 98 + /* holds the origin digest; the digest after "setkey" if HMAC,* 99 + the initial digest if HASH. 
*/ 100 + uint8_t digest_buff[SSI_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned; 101 + uint8_t opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE] ____cacheline_aligned; 102 + dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned; 103 + dma_addr_t digest_buff_dma_addr; 104 + /* use for hmac with key large then mode block size */ 105 + struct hash_key_req_ctx key_params; 106 + int hash_mode; 107 + int hw_mode; 108 + int inter_digestsize; 109 + struct completion setkey_comp; 110 + bool is_hmac; 111 + }; 112 + 113 + static const struct crypto_type crypto_shash_type; 114 + 115 + static void ssi_hash_create_data_desc( 116 + struct ahash_req_ctx *areq_ctx, 117 + struct ssi_hash_ctx *ctx, 118 + unsigned int flow_mode,HwDesc_s desc[], 119 + bool is_not_last_data, 120 + unsigned int *seq_size); 121 + 122 + static inline void ssi_set_hash_endianity(uint32_t mode, HwDesc_s *desc) 123 + { 124 + if (unlikely((mode == DRV_HASH_MD5) || 125 + (mode == DRV_HASH_SHA384) || 126 + (mode == DRV_HASH_SHA512))) { 127 + HW_DESC_SET_BYTES_SWAP(desc, 1); 128 + } else { 129 + HW_DESC_SET_CIPHER_CONFIG0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN); 130 + } 131 + } 132 + 133 + static int ssi_hash_map_result(struct device *dev, 134 + struct ahash_req_ctx *state, 135 + unsigned int digestsize) 136 + { 137 + state->digest_result_dma_addr = 138 + dma_map_single(dev, (void *)state->digest_result_buff, 139 + digestsize, 140 + DMA_BIDIRECTIONAL); 141 + if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) { 142 + SSI_LOG_ERR("Mapping digest result buffer %u B for DMA failed\n", 143 + digestsize); 144 + return -ENOMEM; 145 + } 146 + SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_result_dma_addr, 147 + digestsize); 148 + SSI_LOG_DEBUG("Mapped digest result buffer %u B " 149 + "at va=%pK to dma=0x%llX\n", 150 + digestsize, state->digest_result_buff, 151 + (unsigned long long)state->digest_result_dma_addr); 152 + 153 + return 0; 154 + } 155 + 156 + static int ssi_hash_map_request(struct device *dev, 157 + struct ahash_req_ctx *state, 158 + struct ssi_hash_ctx *ctx) 159 + { 160 + bool is_hmac = ctx->is_hmac; 161 + ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr( 162 + ctx->drvdata, ctx->hash_mode); 163 + struct ssi_crypto_req ssi_req = {}; 164 + HwDesc_s desc; 165 + int rc = -ENOMEM; 166 + 167 + state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE ,GFP_KERNEL|GFP_DMA); 168 + if (!state->buff0) { 169 + SSI_LOG_ERR("Allocating buff0 in context failed\n"); 170 + goto fail0; 171 + } 172 + state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE ,GFP_KERNEL|GFP_DMA); 173 + if (!state->buff1) { 174 + SSI_LOG_ERR("Allocating buff1 in context failed\n"); 175 + goto fail_buff0; 176 + } 177 + state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE ,GFP_KERNEL|GFP_DMA); 178 + if (!state->digest_result_buff) { 179 + SSI_LOG_ERR("Allocating digest_result_buff in context failed\n"); 180 + goto fail_buff1; 181 + } 182 + state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL|GFP_DMA); 183 + if (!state->digest_buff) { 184 + SSI_LOG_ERR("Allocating digest-buffer in context failed\n"); 185 + goto fail_digest_result_buff; 186 + } 187 + 188 + SSI_LOG_DEBUG("Allocated digest-buffer in context ctx->digest_buff=@%p\n", state->digest_buff); 189 + if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) { 190 + state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL|GFP_DMA); 191 + if (!state->digest_bytes_len) { 192 + SSI_LOG_ERR("Allocating digest-bytes-len in context failed\n"); 193 + goto fail1; 194 + } 195 + SSI_LOG_DEBUG("Allocated 
digest-bytes-len in context state->>digest_bytes_len=@%p\n", state->digest_bytes_len); 196 + } else { 197 + state->digest_bytes_len = NULL; 198 + } 199 + 200 + state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL|GFP_DMA); 201 + if (!state->opad_digest_buff) { 202 + SSI_LOG_ERR("Allocating opad-digest-buffer in context failed\n"); 203 + goto fail2; 204 + } 205 + SSI_LOG_DEBUG("Allocated opad-digest-buffer in context state->digest_bytes_len=@%p\n", state->opad_digest_buff); 206 + 207 + state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL); 208 + if (dma_mapping_error(dev, state->digest_buff_dma_addr)) { 209 + SSI_LOG_ERR("Mapping digest len %d B at va=%pK for DMA failed\n", 210 + ctx->inter_digestsize, state->digest_buff); 211 + goto fail3; 212 + } 213 + SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr, 214 + ctx->inter_digestsize); 215 + SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=0x%llX\n", 216 + ctx->inter_digestsize, state->digest_buff, 217 + (unsigned long long)state->digest_buff_dma_addr); 218 + 219 + if (is_hmac) { 220 + SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr); 221 + dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL); 222 + SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr, 223 + ctx->inter_digestsize); 224 + if ((ctx->hw_mode == DRV_CIPHER_XCBC_MAC) || (ctx->hw_mode == DRV_CIPHER_CMAC)) { 225 + memset(state->digest_buff, 0, ctx->inter_digestsize); 226 + } else { /*sha*/ 227 + memcpy(state->digest_buff, ctx->digest_buff, ctx->inter_digestsize); 228 + #if (DX_DEV_SHA_MAX > 256) 229 + if (unlikely((ctx->hash_mode == DRV_HASH_SHA512) || (ctx->hash_mode == DRV_HASH_SHA384))) { 230 + memcpy(state->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE); 231 + } else { 232 + memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE); 233 + } 234 + #else 235 + memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE); 236 + #endif 237 + } 238 + SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr); 239 + dma_sync_single_for_device(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL); 240 + SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr, 241 + ctx->inter_digestsize); 242 + 243 + if (ctx->hash_mode != DRV_HASH_NULL) { 244 + SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr); 245 + dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL); 246 + memcpy(state->opad_digest_buff, ctx->opad_tmp_keys_buff, ctx->inter_digestsize); 247 + SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr, 248 + ctx->inter_digestsize); 249 + } 250 + } else { /*hash*/ 251 + /* Copy the initial digests if hash flow. 
The SRAM contains the 252 + initial digests in the expected order for all SHA* */ 253 + HW_DESC_INIT(&desc); 254 + HW_DESC_SET_DIN_SRAM(&desc, larval_digest_addr, ctx->inter_digestsize); 255 + HW_DESC_SET_DOUT_DLLI(&desc, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 0); 256 + HW_DESC_SET_FLOW_MODE(&desc, BYPASS); 257 + 258 + rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0); 259 + if (unlikely(rc != 0)) { 260 + SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc); 261 + goto fail4; 262 + } 263 + } 264 + 265 + if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) { 266 + state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL); 267 + if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) { 268 + SSI_LOG_ERR("Mapping digest len %u B at va=%pK for DMA failed\n", 269 + HASH_LEN_SIZE, state->digest_bytes_len); 270 + goto fail4; 271 + } 272 + SSI_UPDATE_DMA_ADDR_TO_48BIT(state->digest_bytes_len_dma_addr, 273 + HASH_LEN_SIZE); 274 + SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=0x%llX\n", 275 + HASH_LEN_SIZE, state->digest_bytes_len, 276 + (unsigned long long)state->digest_bytes_len_dma_addr); 277 + } else { 278 + state->digest_bytes_len_dma_addr = 0; 279 + } 280 + 281 + if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) { 282 + state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL); 283 + if (dma_mapping_error(dev, state->opad_digest_dma_addr)) { 284 + SSI_LOG_ERR("Mapping opad digest %d B at va=%pK for DMA failed\n", 285 + ctx->inter_digestsize, state->opad_digest_buff); 286 + goto fail5; 287 + } 288 + SSI_UPDATE_DMA_ADDR_TO_48BIT(state->opad_digest_dma_addr, 289 + ctx->inter_digestsize); 290 + SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=0x%llX\n", 291 + ctx->inter_digestsize, state->opad_digest_buff, 292 + (unsigned long long)state->opad_digest_dma_addr); 293 + } else { 294 + state->opad_digest_dma_addr = 0; 295 + } 296 + state->buff0_cnt = 0; 297 + state->buff1_cnt = 0; 298 + state->buff_index = 0; 299 + state->mlli_params.curr_pool = NULL; 300 + 301 + return 0; 302 + 303 + fail5: 304 + if (state->digest_bytes_len_dma_addr != 0) { 305 + SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_bytes_len_dma_addr); 306 + dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, DMA_BIDIRECTIONAL); 307 + state->digest_bytes_len_dma_addr = 0; 308 + } 309 + fail4: 310 + if (state->digest_buff_dma_addr != 0) { 311 + SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr); 312 + dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL); 313 + state->digest_buff_dma_addr = 0; 314 + } 315 + fail3: 316 + if (state->opad_digest_buff != NULL) 317 + kfree(state->opad_digest_buff); 318 + fail2: 319 + if (state->digest_bytes_len != NULL) 320 + kfree(state->digest_bytes_len); 321 + fail1: 322 + if (state->digest_buff != NULL) 323 + kfree(state->digest_buff); 324 + fail_digest_result_buff: 325 + if (state->digest_result_buff != NULL) { 326 + kfree(state->digest_result_buff); 327 + state->digest_result_buff = NULL; 328 + } 329 + fail_buff1: 330 + if (state->buff1 != NULL) { 331 + kfree(state->buff1); 332 + state->buff1 = NULL; 333 + } 334 + fail_buff0: 335 + if (state->buff0 != NULL) { 336 + kfree(state->buff0); 337 + state->buff0 = NULL; 338 + } 339 + fail0: 340 + return rc; 341 + } 342 + 343 + static void ssi_hash_unmap_request(struct device *dev, 344 + struct ahash_req_ctx *state, 345 + struct ssi_hash_ctx 
*ctx) 346 + { 347 + if (state->digest_buff_dma_addr != 0) { 348 + SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_buff_dma_addr); 349 + dma_unmap_single(dev, state->digest_buff_dma_addr, 350 + ctx->inter_digestsize, DMA_BIDIRECTIONAL); 351 + SSI_LOG_DEBUG("Unmapped digest-buffer: digest_buff_dma_addr=0x%llX\n", 352 + (unsigned long long)state->digest_buff_dma_addr); 353 + state->digest_buff_dma_addr = 0; 354 + } 355 + if (state->digest_bytes_len_dma_addr != 0) { 356 + SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_bytes_len_dma_addr); 357 + dma_unmap_single(dev, state->digest_bytes_len_dma_addr, 358 + HASH_LEN_SIZE, DMA_BIDIRECTIONAL); 359 + SSI_LOG_DEBUG("Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=0x%llX\n", 360 + (unsigned long long)state->digest_bytes_len_dma_addr); 361 + state->digest_bytes_len_dma_addr = 0; 362 + } 363 + if (state->opad_digest_dma_addr != 0) { 364 + SSI_RESTORE_DMA_ADDR_TO_48BIT(state->opad_digest_dma_addr); 365 + dma_unmap_single(dev, state->opad_digest_dma_addr, 366 + ctx->inter_digestsize, DMA_BIDIRECTIONAL); 367 + SSI_LOG_DEBUG("Unmapped opad-digest: opad_digest_dma_addr=0x%llX\n", 368 + (unsigned long long)state->opad_digest_dma_addr); 369 + state->opad_digest_dma_addr = 0; 370 + } 371 + 372 + if (state->opad_digest_buff != NULL) 373 + kfree(state->opad_digest_buff); 374 + if (state->digest_bytes_len != NULL) 375 + kfree(state->digest_bytes_len); 376 + if (state->digest_buff != NULL) 377 + kfree(state->digest_buff); 378 + if (state->digest_result_buff != NULL) 379 + kfree(state->digest_result_buff); 380 + if (state->buff1 != NULL) 381 + kfree(state->buff1); 382 + if (state->buff0 != NULL) 383 + kfree(state->buff0); 384 + } 385 + 386 + static void ssi_hash_unmap_result(struct device *dev, 387 + struct ahash_req_ctx *state, 388 + unsigned int digestsize, u8 *result) 389 + { 390 + if (state->digest_result_dma_addr != 0) { 391 + SSI_RESTORE_DMA_ADDR_TO_48BIT(state->digest_result_dma_addr); 392 + dma_unmap_single(dev, 393 + state->digest_result_dma_addr, 394 + digestsize, 395 + DMA_BIDIRECTIONAL); 396 + SSI_LOG_DEBUG("unmpa digest result buffer " 397 + "va (%pK) pa (%llx) len %u\n", 398 + state->digest_result_buff, 399 + (unsigned long long)state->digest_result_dma_addr, 400 + digestsize); 401 + memcpy(result, 402 + state->digest_result_buff, 403 + digestsize); 404 + } 405 + state->digest_result_dma_addr = 0; 406 + } 407 + 408 + static void ssi_hash_update_complete(struct device *dev, void *ssi_req, void __iomem *cc_base) 409 + { 410 + struct ahash_request *req = (struct ahash_request *)ssi_req; 411 + struct ahash_req_ctx *state = ahash_request_ctx(req); 412 + 413 + SSI_LOG_DEBUG("req=%pK\n", req); 414 + 415 + ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false); 416 + req->base.complete(&req->base, 0); 417 + } 418 + 419 + static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __iomem *cc_base) 420 + { 421 + struct ahash_request *req = (struct ahash_request *)ssi_req; 422 + struct ahash_req_ctx *state = ahash_request_ctx(req); 423 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 424 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm); 425 + uint32_t digestsize = crypto_ahash_digestsize(tfm); 426 + 427 + SSI_LOG_DEBUG("req=%pK\n", req); 428 + 429 + ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false); 430 + ssi_hash_unmap_result(dev, state, digestsize, req->result); 431 + ssi_hash_unmap_request(dev, state, ctx); 432 + req->base.complete(&req->base, 0); 433 + } 434 + 435 + static void ssi_hash_complete(struct 
device *dev, void *ssi_req, void __iomem *cc_base) 436 + { 437 + struct ahash_request *req = (struct ahash_request *)ssi_req; 438 + struct ahash_req_ctx *state = ahash_request_ctx(req); 439 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 440 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm); 441 + uint32_t digestsize = crypto_ahash_digestsize(tfm); 442 + 443 + SSI_LOG_DEBUG("req=%pK\n", req); 444 + 445 + ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false); 446 + ssi_hash_unmap_result(dev, state, digestsize, req->result); 447 + ssi_hash_unmap_request(dev, state, ctx); 448 + req->base.complete(&req->base, 0); 449 + } 450 + 451 + static int ssi_hash_digest(struct ahash_req_ctx *state, 452 + struct ssi_hash_ctx *ctx, 453 + unsigned int digestsize, 454 + struct scatterlist *src, 455 + unsigned int nbytes, u8 *result, 456 + void *async_req) 457 + { 458 + struct device *dev = &ctx->drvdata->plat_dev->dev; 459 + bool is_hmac = ctx->is_hmac; 460 + struct ssi_crypto_req ssi_req = {}; 461 + HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN]; 462 + ssi_sram_addr_t larval_digest_addr = ssi_ahash_get_larval_digest_sram_addr( 463 + ctx->drvdata, ctx->hash_mode); 464 + int idx = 0; 465 + int rc = 0; 466 + 467 + 468 + SSI_LOG_DEBUG("===== %s-digest (%d) ====\n", is_hmac?"hmac":"hash", nbytes); 469 + 470 + if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) { 471 + SSI_LOG_ERR("map_ahash_source() failed\n"); 472 + return -ENOMEM; 473 + } 474 + 475 + if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) { 476 + SSI_LOG_ERR("map_ahash_digest() failed\n"); 477 + return -ENOMEM; 478 + } 479 + 480 + if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) { 481 + SSI_LOG_ERR("map_ahash_request_final() failed\n"); 482 + return -ENOMEM; 483 + } 484 + 485 + if (async_req) { 486 + /* Setup DX request structure */ 487 + ssi_req.user_cb = (void *)ssi_hash_digest_complete; 488 + ssi_req.user_arg = (void *)async_req; 489 + #ifdef ENABLE_CYCLE_COUNT 490 + ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */ 491 + #endif 492 + } 493 + 494 + /* If HMAC then load hash IPAD xor key, if HASH then load initial digest */ 495 + HW_DESC_INIT(&desc[idx]); 496 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 497 + if (is_hmac) { 498 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT); 499 + } else { 500 + HW_DESC_SET_DIN_SRAM(&desc[idx], larval_digest_addr, ctx->inter_digestsize); 501 + } 502 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 503 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0); 504 + idx++; 505 + 506 + /* Load the hash current length */ 507 + HW_DESC_INIT(&desc[idx]); 508 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 509 + 510 + if (is_hmac) { 511 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT); 512 + } else { 513 + HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE); 514 + if (likely(nbytes != 0)) { 515 + HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED); 516 + } else { 517 + HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD); 518 + } 519 + } 520 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 521 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0); 522 + idx++; 523 + 524 + ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx); 525 + 526 + if (is_hmac) { 527 + /* HW last hash block padding (aka. 
"DO_PAD") */ 528 + HW_DESC_INIT(&desc[idx]); 529 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 530 + HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, HASH_LEN_SIZE, NS_BIT, 0); 531 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT); 532 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1); 533 + HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD); 534 + idx++; 535 + 536 + /* store the hash digest result in the context */ 537 + HW_DESC_INIT(&desc[idx]); 538 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 539 + HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, digestsize, NS_BIT, 0); 540 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT); 541 + ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]); 542 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0); 543 + idx++; 544 + 545 + /* Loading hash opad xor key state */ 546 + HW_DESC_INIT(&desc[idx]); 547 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 548 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr, ctx->inter_digestsize, NS_BIT); 549 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 550 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0); 551 + idx++; 552 + 553 + /* Load the hash current length */ 554 + HW_DESC_INIT(&desc[idx]); 555 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 556 + HW_DESC_SET_DIN_SRAM(&desc[idx], ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE); 557 + HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED); 558 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 559 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0); 560 + idx++; 561 + 562 + /* Memory Barrier: wait for IPAD/OPAD axi write to complete */ 563 + HW_DESC_INIT(&desc[idx]); 564 + HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0); 565 + HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1); 566 + idx++; 567 + 568 + /* Perform HASH update */ 569 + HW_DESC_INIT(&desc[idx]); 570 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, digestsize, NS_BIT); 571 + HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH); 572 + idx++; 573 + } 574 + 575 + /* Get final MAC result */ 576 + HW_DESC_INIT(&desc[idx]); 577 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 578 + HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, async_req? 
1:0); /*TODO*/ 579 + if (async_req) { 580 + HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]); 581 + } 582 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT); 583 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0); 584 + HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED); 585 + ssi_set_hash_endianity(ctx->hash_mode, &desc[idx]); 586 + idx++; 587 + 588 + if (async_req) { 589 + rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1); 590 + if (unlikely(rc != -EINPROGRESS)) { 591 + SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc); 592 + ssi_buffer_mgr_unmap_hash_request(dev, state, src, true); 593 + ssi_hash_unmap_result(dev, state, digestsize, result); 594 + ssi_hash_unmap_request(dev, state, ctx); 595 + } 596 + } else { 597 + rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0); 598 + if (rc != 0) { 599 + SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc); 600 + ssi_buffer_mgr_unmap_hash_request(dev, state, src, true); 601 + } else { 602 + ssi_buffer_mgr_unmap_hash_request(dev, state, src, false); 603 + } 604 + ssi_hash_unmap_result(dev, state, digestsize, result); 605 + ssi_hash_unmap_request(dev, state, ctx); 606 + } 607 + return rc; 608 + } 609 + 610 + static int ssi_hash_update(struct ahash_req_ctx *state, 611 + struct ssi_hash_ctx *ctx, 612 + unsigned int block_size, 613 + struct scatterlist *src, 614 + unsigned int nbytes, 615 + void *async_req) 616 + { 617 + struct device *dev = &ctx->drvdata->plat_dev->dev; 618 + struct ssi_crypto_req ssi_req = {}; 619 + HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN]; 620 + uint32_t idx = 0; 621 + int rc; 622 + 623 + SSI_LOG_DEBUG("===== %s-update (%d) ====\n", ctx->is_hmac ? 624 + "hmac":"hash", nbytes); 625 + 626 + if (nbytes == 0) { 627 + /* no real updates required */ 628 + return 0; 629 + } 630 + 631 + if (unlikely(rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size))) { 632 + if (rc == 1) { 633 + SSI_LOG_DEBUG(" data size not require HW update %x\n", 634 + nbytes); 635 + /* No hardware updates are required */ 636 + return 0; 637 + } 638 + SSI_LOG_ERR("map_ahash_request_update() failed\n"); 639 + return -ENOMEM; 640 + } 641 + 642 + if (async_req) { 643 + /* Setup DX request structure */ 644 + ssi_req.user_cb = (void *)ssi_hash_update_complete; 645 + ssi_req.user_arg = async_req; 646 + #ifdef ENABLE_CYCLE_COUNT 647 + ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */ 648 + #endif 649 + } 650 + 651 + /* Restore hash digest */ 652 + HW_DESC_INIT(&desc[idx]); 653 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 654 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT); 655 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 656 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0); 657 + idx++; 658 + /* Restore hash current length */ 659 + HW_DESC_INIT(&desc[idx]); 660 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 661 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT); 662 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 663 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0); 664 + idx++; 665 + 666 + ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx); 667 + 668 + /* store the hash digest result in context */ 669 + HW_DESC_INIT(&desc[idx]); 670 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 671 + HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 0); 672 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT); 673 + 
HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0); 674 + idx++; 675 + 676 + /* store current hash length in context */ 677 + HW_DESC_INIT(&desc[idx]); 678 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 679 + HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT, async_req? 1:0); 680 + if (async_req) { 681 + HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]); 682 + } 683 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT); 684 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1); 685 + idx++; 686 + 687 + if (async_req) { 688 + rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1); 689 + if (unlikely(rc != -EINPROGRESS)) { 690 + SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc); 691 + ssi_buffer_mgr_unmap_hash_request(dev, state, src, true); 692 + } 693 + } else { 694 + rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0); 695 + if (rc != 0) { 696 + SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc); 697 + ssi_buffer_mgr_unmap_hash_request(dev, state, src, true); 698 + } else { 699 + ssi_buffer_mgr_unmap_hash_request(dev, state, src, false); 700 + } 701 + } 702 + return rc; 703 + } 704 + 705 + static int ssi_hash_finup(struct ahash_req_ctx *state, 706 + struct ssi_hash_ctx *ctx, 707 + unsigned int digestsize, 708 + struct scatterlist *src, 709 + unsigned int nbytes, 710 + u8 *result, 711 + void *async_req) 712 + { 713 + struct device *dev = &ctx->drvdata->plat_dev->dev; 714 + bool is_hmac = ctx->is_hmac; 715 + struct ssi_crypto_req ssi_req = {}; 716 + HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN]; 717 + int idx = 0; 718 + int rc; 719 + 720 + SSI_LOG_DEBUG("===== %s-finup (%d) ====\n", is_hmac?"hmac":"hash", nbytes); 721 + 722 + if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src , nbytes, 1) != 0)) { 723 + SSI_LOG_ERR("map_ahash_request_final() failed\n"); 724 + return -ENOMEM; 725 + } 726 + if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) { 727 + SSI_LOG_ERR("map_ahash_digest() failed\n"); 728 + return -ENOMEM; 729 + } 730 + 731 + if (async_req) { 732 + /* Setup DX request structure */ 733 + ssi_req.user_cb = (void *)ssi_hash_complete; 734 + ssi_req.user_arg = async_req; 735 + #ifdef ENABLE_CYCLE_COUNT 736 + ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */ 737 + #endif 738 + } 739 + 740 + /* Restore hash digest */ 741 + HW_DESC_INIT(&desc[idx]); 742 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 743 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT); 744 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 745 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0); 746 + idx++; 747 + 748 + /* Restore hash current length */ 749 + HW_DESC_INIT(&desc[idx]); 750 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 751 + HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED); 752 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT); 753 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 754 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0); 755 + idx++; 756 + 757 + ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx); 758 + 759 + if (is_hmac) { 760 + /* Store the hash digest result in the context */ 761 + HW_DESC_INIT(&desc[idx]); 762 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 763 + HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, digestsize, NS_BIT, 0); 764 + ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]); 765 + HW_DESC_SET_FLOW_MODE(&desc[idx], 
S_HASH_to_DOUT); 766 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0); 767 + idx++; 768 + 769 + /* Loading hash OPAD xor key state */ 770 + HW_DESC_INIT(&desc[idx]); 771 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 772 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr, ctx->inter_digestsize, NS_BIT); 773 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 774 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0); 775 + idx++; 776 + 777 + /* Load the hash current length */ 778 + HW_DESC_INIT(&desc[idx]); 779 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 780 + HW_DESC_SET_DIN_SRAM(&desc[idx], ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE); 781 + HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED); 782 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 783 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0); 784 + idx++; 785 + 786 + /* Memory Barrier: wait for IPAD/OPAD axi write to complete */ 787 + HW_DESC_INIT(&desc[idx]); 788 + HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0); 789 + HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1); 790 + idx++; 791 + 792 + /* Perform HASH update on last digest */ 793 + HW_DESC_INIT(&desc[idx]); 794 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, digestsize, NS_BIT); 795 + HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH); 796 + idx++; 797 + } 798 + 799 + /* Get final MAC result */ 800 + HW_DESC_INIT(&desc[idx]); 801 + HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, async_req? 1:0); /*TODO*/ 802 + if (async_req) { 803 + HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]); 804 + } 805 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT); 806 + HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED); 807 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0); 808 + ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]); 809 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 810 + idx++; 811 + 812 + if (async_req) { 813 + rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1); 814 + if (unlikely(rc != -EINPROGRESS)) { 815 + SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc); 816 + ssi_buffer_mgr_unmap_hash_request(dev, state, src, true); 817 + ssi_hash_unmap_result(dev, state, digestsize, result); 818 + } 819 + } else { 820 + rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0); 821 + if (rc != 0) { 822 + SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc); 823 + ssi_buffer_mgr_unmap_hash_request(dev, state, src, true); 824 + ssi_hash_unmap_result(dev, state, digestsize, result); 825 + } else { 826 + ssi_buffer_mgr_unmap_hash_request(dev, state, src, false); 827 + ssi_hash_unmap_result(dev, state, digestsize, result); 828 + ssi_hash_unmap_request(dev, state, ctx); 829 + } 830 + } 831 + return rc; 832 + } 833 + 834 + static int ssi_hash_final(struct ahash_req_ctx *state, 835 + struct ssi_hash_ctx *ctx, 836 + unsigned int digestsize, 837 + struct scatterlist *src, 838 + unsigned int nbytes, 839 + u8 *result, 840 + void *async_req) 841 + { 842 + struct device *dev = &ctx->drvdata->plat_dev->dev; 843 + bool is_hmac = ctx->is_hmac; 844 + struct ssi_crypto_req ssi_req = {}; 845 + HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN]; 846 + int idx = 0; 847 + int rc; 848 + 849 + SSI_LOG_DEBUG("===== %s-final (%d) ====\n", is_hmac?"hmac":"hash", nbytes); 850 + 851 + if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) { 852 + SSI_LOG_ERR("map_ahash_request_final() failed\n"); 853 + return -ENOMEM; 
854 + } 855 + 856 + if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) { 857 + SSI_LOG_ERR("map_ahash_digest() failed\n"); 858 + return -ENOMEM; 859 + } 860 + 861 + if (async_req) { 862 + /* Setup DX request structure */ 863 + ssi_req.user_cb = (void *)ssi_hash_complete; 864 + ssi_req.user_arg = async_req; 865 + #ifdef ENABLE_CYCLE_COUNT 866 + ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */ 867 + #endif 868 + } 869 + 870 + /* Restore hash digest */ 871 + HW_DESC_INIT(&desc[idx]); 872 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 873 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT); 874 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 875 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0); 876 + idx++; 877 + 878 + /* Restore hash current length */ 879 + HW_DESC_INIT(&desc[idx]); 880 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 881 + HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED); 882 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT); 883 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 884 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0); 885 + idx++; 886 + 887 + ssi_hash_create_data_desc(state, ctx, DIN_HASH, desc, false, &idx); 888 + 889 + /* "DO-PAD" must be enabled only when writing current length to HW */ 890 + HW_DESC_INIT(&desc[idx]); 891 + HW_DESC_SET_CIPHER_DO(&desc[idx], DO_PAD); 892 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 893 + HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_bytes_len_dma_addr, HASH_LEN_SIZE, NS_BIT, 0); 894 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE1); 895 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT); 896 + idx++; 897 + 898 + if (is_hmac) { 899 + /* Store the hash digest result in the context */ 900 + HW_DESC_INIT(&desc[idx]); 901 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 902 + HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, digestsize, NS_BIT, 0); 903 + ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]); 904 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT); 905 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0); 906 + idx++; 907 + 908 + /* Loading hash OPAD xor key state */ 909 + HW_DESC_INIT(&desc[idx]); 910 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 911 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr, ctx->inter_digestsize, NS_BIT); 912 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 913 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0); 914 + idx++; 915 + 916 + /* Load the hash current length */ 917 + HW_DESC_INIT(&desc[idx]); 918 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 919 + HW_DESC_SET_DIN_SRAM(&desc[idx], ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE); 920 + HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED); 921 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 922 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0); 923 + idx++; 924 + 925 + /* Memory Barrier: wait for IPAD/OPAD axi write to complete */ 926 + HW_DESC_INIT(&desc[idx]); 927 + HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0); 928 + HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1); 929 + idx++; 930 + 931 + /* Perform HASH update on last digest */ 932 + HW_DESC_INIT(&desc[idx]); 933 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, digestsize, NS_BIT); 934 + HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH); 935 + idx++; 
936 + } 937 + 938 + /* Get final MAC result */ 939 + HW_DESC_INIT(&desc[idx]); 940 + HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, async_req? 1:0); 941 + if (async_req) { 942 + HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]); 943 + } 944 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT); 945 + HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED); 946 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0); 947 + ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]); 948 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 949 + idx++; 950 + 951 + if (async_req) { 952 + rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1); 953 + if (unlikely(rc != -EINPROGRESS)) { 954 + SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc); 955 + ssi_buffer_mgr_unmap_hash_request(dev, state, src, true); 956 + ssi_hash_unmap_result(dev, state, digestsize, result); 957 + } 958 + } else { 959 + rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0); 960 + if (rc != 0) { 961 + SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc); 962 + ssi_buffer_mgr_unmap_hash_request(dev, state, src, true); 963 + ssi_hash_unmap_result(dev, state, digestsize, result); 964 + } else { 965 + ssi_buffer_mgr_unmap_hash_request(dev, state, src, false); 966 + ssi_hash_unmap_result(dev, state, digestsize, result); 967 + ssi_hash_unmap_request(dev, state, ctx); 968 + } 969 + } 970 + return rc; 971 + } 972 + 973 + static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx) 974 + { 975 + struct device *dev = &ctx->drvdata->plat_dev->dev; 976 + state->xcbc_count = 0; 977 + 978 + ssi_hash_map_request(dev, state, ctx); 979 + 980 + return 0; 981 + } 982 + 983 + #ifdef EXPORT_FIXED 984 + static int ssi_hash_export(struct ssi_hash_ctx *ctx, void *out) 985 + { 986 + memcpy(out, ctx, sizeof(struct ssi_hash_ctx)); 987 + return 0; 988 + } 989 + 990 + static int ssi_hash_import(struct ssi_hash_ctx *ctx, const void *in) 991 + { 992 + memcpy(ctx, in, sizeof(struct ssi_hash_ctx)); 993 + return 0; 994 + } 995 + #endif 996 + 997 + static int ssi_hash_setkey(void *hash, 998 + const u8 *key, 999 + unsigned int keylen, 1000 + bool synchronize) 1001 + { 1002 + unsigned int hmacPadConst[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST }; 1003 + struct ssi_crypto_req ssi_req = {}; 1004 + struct ssi_hash_ctx *ctx = NULL; 1005 + int blocksize = 0; 1006 + int digestsize = 0; 1007 + int i, idx = 0, rc = 0; 1008 + HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN]; 1009 + ssi_sram_addr_t larval_addr; 1010 + 1011 + SSI_LOG_DEBUG("ssi_hash_setkey: start keylen: %d", keylen); 1012 + 1013 + if (synchronize) { 1014 + ctx = crypto_shash_ctx(((struct crypto_shash *)hash)); 1015 + blocksize = crypto_tfm_alg_blocksize(&((struct crypto_shash *)hash)->base); 1016 + digestsize = crypto_shash_digestsize(((struct crypto_shash *)hash)); 1017 + } else { 1018 + ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash)); 1019 + blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base); 1020 + digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash)); 1021 + } 1022 + 1023 + larval_addr = ssi_ahash_get_larval_digest_sram_addr( 1024 + ctx->drvdata, ctx->hash_mode); 1025 + 1026 + /* The keylen value distinguishes HASH in case keylen is ZERO bytes, 1027 + any NON-ZERO value utilizes HMAC flow */ 1028 + ctx->key_params.keylen = keylen; 1029 + ctx->key_params.key_dma_addr = 0; 1030 + ctx->is_hmac = true; 1031 + 1032 + if (keylen != 0) { 1033 + ctx->key_params.key_dma_addr = dma_map_single( 1034 + &ctx->drvdata->plat_dev->dev, 1035 + 
(void *)key, 1036 + keylen, DMA_TO_DEVICE); 1037 + if (unlikely(dma_mapping_error(&ctx->drvdata->plat_dev->dev, 1038 + ctx->key_params.key_dma_addr))) { 1039 + SSI_LOG_ERR("Mapping key va=0x%p len=%u for" 1040 + " DMA failed\n", key, keylen); 1041 + return -ENOMEM; 1042 + } 1043 + SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr, keylen); 1044 + SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=0x%llX " 1045 + "keylen=%u\n", 1046 + (unsigned long long)ctx->key_params.key_dma_addr, 1047 + ctx->key_params.keylen); 1048 + 1049 + if (keylen > blocksize) { 1050 + /* Load hash initial state */ 1051 + HW_DESC_INIT(&desc[idx]); 1052 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 1053 + HW_DESC_SET_DIN_SRAM(&desc[idx], larval_addr, 1054 + ctx->inter_digestsize); 1055 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 1056 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0); 1057 + idx++; 1058 + 1059 + /* Load the hash current length*/ 1060 + HW_DESC_INIT(&desc[idx]); 1061 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 1062 + HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE); 1063 + HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_ENABLED); 1064 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 1065 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0); 1066 + idx++; 1067 + 1068 + HW_DESC_INIT(&desc[idx]); 1069 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 1070 + ctx->key_params.key_dma_addr, 1071 + keylen, NS_BIT); 1072 + HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH); 1073 + idx++; 1074 + 1075 + /* Get hashed key */ 1076 + HW_DESC_INIT(&desc[idx]); 1077 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 1078 + HW_DESC_SET_DOUT_DLLI(&desc[idx], ctx->opad_tmp_keys_dma_addr, 1079 + digestsize, NS_BIT, 0); 1080 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT); 1081 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0); 1082 + HW_DESC_SET_CIPHER_CONFIG1(&desc[idx], HASH_PADDING_DISABLED); 1083 + ssi_set_hash_endianity(ctx->hash_mode,&desc[idx]); 1084 + idx++; 1085 + 1086 + HW_DESC_INIT(&desc[idx]); 1087 + HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - digestsize)); 1088 + HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS); 1089 + HW_DESC_SET_DOUT_DLLI(&desc[idx], 1090 + (ctx->opad_tmp_keys_dma_addr + digestsize), 1091 + (blocksize - digestsize), 1092 + NS_BIT, 0); 1093 + idx++; 1094 + } else { 1095 + HW_DESC_INIT(&desc[idx]); 1096 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 1097 + ctx->key_params.key_dma_addr, 1098 + keylen, NS_BIT); 1099 + HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS); 1100 + HW_DESC_SET_DOUT_DLLI(&desc[idx], 1101 + (ctx->opad_tmp_keys_dma_addr), 1102 + keylen, NS_BIT, 0); 1103 + idx++; 1104 + 1105 + if ((blocksize - keylen) != 0) { 1106 + HW_DESC_INIT(&desc[idx]); 1107 + HW_DESC_SET_DIN_CONST(&desc[idx], 0, (blocksize - keylen)); 1108 + HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS); 1109 + HW_DESC_SET_DOUT_DLLI(&desc[idx], 1110 + (ctx->opad_tmp_keys_dma_addr + keylen), 1111 + (blocksize - keylen), 1112 + NS_BIT, 0); 1113 + idx++; 1114 + } 1115 + } 1116 + } else { 1117 + HW_DESC_INIT(&desc[idx]); 1118 + HW_DESC_SET_DIN_CONST(&desc[idx], 0, blocksize); 1119 + HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS); 1120 + HW_DESC_SET_DOUT_DLLI(&desc[idx], 1121 + (ctx->opad_tmp_keys_dma_addr), 1122 + blocksize, 1123 + NS_BIT, 0); 1124 + idx++; 1125 + } 1126 + 1127 + rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0); 1128 + if (unlikely(rc != 0)) { 1129 + SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc); 1130 + goto out; 1131 + } 1132 + 1133 + /* calc derived HMAC key */ 
1134 + for (idx = 0, i = 0; i < 2; i++) { 1135 + /* Load hash initial state */ 1136 + HW_DESC_INIT(&desc[idx]); 1137 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 1138 + HW_DESC_SET_DIN_SRAM(&desc[idx], larval_addr, 1139 + ctx->inter_digestsize); 1140 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 1141 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0); 1142 + idx++; 1143 + 1144 + /* Load the hash current length*/ 1145 + HW_DESC_INIT(&desc[idx]); 1146 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 1147 + HW_DESC_SET_DIN_CONST(&desc[idx], 0, HASH_LEN_SIZE); 1148 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 1149 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0); 1150 + idx++; 1151 + 1152 + /* Prepare ipad key */ 1153 + HW_DESC_INIT(&desc[idx]); 1154 + HW_DESC_SET_XOR_VAL(&desc[idx], hmacPadConst[i]); 1155 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 1156 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_HASH); 1157 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1); 1158 + idx++; 1159 + 1160 + /* Perform HASH update */ 1161 + HW_DESC_INIT(&desc[idx]); 1162 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 1163 + ctx->opad_tmp_keys_dma_addr, 1164 + blocksize, NS_BIT); 1165 + HW_DESC_SET_CIPHER_MODE(&desc[idx],ctx->hw_mode); 1166 + HW_DESC_SET_XOR_ACTIVE(&desc[idx]); 1167 + HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_HASH); 1168 + idx++; 1169 + 1170 + /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest of the first HASH "update" state) */ 1171 + HW_DESC_INIT(&desc[idx]); 1172 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 1173 + if (i > 0) /* Not first iteration */ 1174 + HW_DESC_SET_DOUT_DLLI(&desc[idx], 1175 + ctx->opad_tmp_keys_dma_addr, 1176 + ctx->inter_digestsize, 1177 + NS_BIT, 0); 1178 + else /* First iteration */ 1179 + HW_DESC_SET_DOUT_DLLI(&desc[idx], 1180 + ctx->digest_buff_dma_addr, 1181 + ctx->inter_digestsize, 1182 + NS_BIT, 0); 1183 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_HASH_to_DOUT); 1184 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0); 1185 + idx++; 1186 + } 1187 + 1188 + rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0); 1189 + 1190 + out: 1191 + if (rc != 0) { 1192 + if (synchronize) { 1193 + crypto_shash_set_flags((struct crypto_shash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN); 1194 + } else { 1195 + crypto_ahash_set_flags((struct crypto_ahash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN); 1196 + } 1197 + } 1198 + 1199 + if (ctx->key_params.key_dma_addr) { 1200 + SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr); 1201 + dma_unmap_single(&ctx->drvdata->plat_dev->dev, 1202 + ctx->key_params.key_dma_addr, 1203 + ctx->key_params.keylen, DMA_TO_DEVICE); 1204 + SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=0x%llX keylen=%u\n", 1205 + (unsigned long long)ctx->key_params.key_dma_addr, 1206 + ctx->key_params.keylen); 1207 + } 1208 + return rc; 1209 + } 1210 + 1211 + 1212 + static int ssi_xcbc_setkey(struct crypto_ahash *ahash, 1213 + const u8 *key, unsigned int keylen) 1214 + { 1215 + struct ssi_crypto_req ssi_req = {}; 1216 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1217 + int idx = 0, rc = 0; 1218 + HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN]; 1219 + 1220 + SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen); 1221 + 1222 + switch (keylen) { 1223 + case AES_KEYSIZE_128: 1224 + case AES_KEYSIZE_192: 1225 + case AES_KEYSIZE_256: 1226 + break; 1227 + default: 1228 + return -EINVAL; 1229 + } 1230 + 1231 + ctx->key_params.keylen = keylen; 1232 + 1233 + ctx->key_params.key_dma_addr = dma_map_single( 1234 + 
&ctx->drvdata->plat_dev->dev, 1235 + (void *)key, 1236 + keylen, DMA_TO_DEVICE); 1237 + if (unlikely(dma_mapping_error(&ctx->drvdata->plat_dev->dev, 1238 + ctx->key_params.key_dma_addr))) { 1239 + SSI_LOG_ERR("Mapping key va=0x%p len=%u for" 1240 + " DMA failed\n", key, keylen); 1241 + return -ENOMEM; 1242 + } 1243 + SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr, keylen); 1244 + SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=0x%llX " 1245 + "keylen=%u\n", 1246 + (unsigned long long)ctx->key_params.key_dma_addr, 1247 + ctx->key_params.keylen); 1248 + 1249 + ctx->is_hmac = true; 1250 + /* 1. Load the AES key */ 1251 + HW_DESC_INIT(&desc[idx]); 1252 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr, keylen, NS_BIT); 1253 + HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB); 1254 + HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); 1255 + HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keylen); 1256 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES); 1257 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0); 1258 + idx++; 1259 + 1260 + HW_DESC_INIT(&desc[idx]); 1261 + HW_DESC_SET_DIN_CONST(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE); 1262 + HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT); 1263 + HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr + 1264 + XCBC_MAC_K1_OFFSET), 1265 + CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0); 1266 + idx++; 1267 + 1268 + HW_DESC_INIT(&desc[idx]); 1269 + HW_DESC_SET_DIN_CONST(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE); 1270 + HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT); 1271 + HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr + 1272 + XCBC_MAC_K2_OFFSET), 1273 + CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0); 1274 + idx++; 1275 + 1276 + HW_DESC_INIT(&desc[idx]); 1277 + HW_DESC_SET_DIN_CONST(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE); 1278 + HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT); 1279 + HW_DESC_SET_DOUT_DLLI(&desc[idx], (ctx->opad_tmp_keys_dma_addr + 1280 + XCBC_MAC_K3_OFFSET), 1281 + CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0); 1282 + idx++; 1283 + 1284 + rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0); 1285 + 1286 + if (rc != 0) 1287 + crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN); 1288 + 1289 + SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->key_params.key_dma_addr); 1290 + dma_unmap_single(&ctx->drvdata->plat_dev->dev, 1291 + ctx->key_params.key_dma_addr, 1292 + ctx->key_params.keylen, DMA_TO_DEVICE); 1293 + SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=0x%llX keylen=%u\n", 1294 + (unsigned long long)ctx->key_params.key_dma_addr, 1295 + ctx->key_params.keylen); 1296 + 1297 + return rc; 1298 + } 1299 + #if SSI_CC_HAS_CMAC 1300 + static int ssi_cmac_setkey(struct crypto_ahash *ahash, 1301 + const u8 *key, unsigned int keylen) 1302 + { 1303 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1304 + DECL_CYCLE_COUNT_RESOURCES; 1305 + SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen); 1306 + 1307 + ctx->is_hmac = true; 1308 + 1309 + switch (keylen) { 1310 + case AES_KEYSIZE_128: 1311 + case AES_KEYSIZE_192: 1312 + case AES_KEYSIZE_256: 1313 + break; 1314 + default: 1315 + return -EINVAL; 1316 + } 1317 + 1318 + ctx->key_params.keylen = keylen; 1319 + 1320 + /* STAT_PHASE_1: Copy key to ctx */ 1321 + START_CYCLE_COUNT(); 1322 + 1323 + SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr); 1324 + dma_sync_single_for_cpu(&ctx->drvdata->plat_dev->dev, 1325 + ctx->opad_tmp_keys_dma_addr, 1326 + keylen, DMA_TO_DEVICE); 1327 + 1328 + memcpy(ctx->opad_tmp_keys_buff, key, keylen); 
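/*
 * A 192-bit key is zero-padded out to the 256-bit key slot below; the
 * descriptors elsewhere (see the keylen == 24 special case in
 * ssi_mac_final() and ssi_hash_create_cmac_setup()) widen only the DMA
 * transfer length while still programming the real 24-byte key size.
 */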
1329 + if (keylen == 24) 1330 + memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24); 1331 + 1332 + dma_sync_single_for_device(&ctx->drvdata->plat_dev->dev, 1333 + ctx->opad_tmp_keys_dma_addr, 1334 + keylen, DMA_TO_DEVICE); 1335 + SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr, keylen); 1336 + 1337 + ctx->key_params.keylen = keylen; 1338 + 1339 + END_CYCLE_COUNT(STAT_OP_TYPE_SETKEY, STAT_PHASE_1); 1340 + 1341 + return 0; 1342 + } 1343 + #endif 1344 + 1345 + static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx) 1346 + { 1347 + struct device *dev = &ctx->drvdata->plat_dev->dev; 1348 + 1349 + if (ctx->digest_buff_dma_addr != 0) { 1350 + SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr); 1351 + dma_unmap_single(dev, ctx->digest_buff_dma_addr, 1352 + sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL); 1353 + SSI_LOG_DEBUG("Unmapped digest-buffer: " 1354 + "digest_buff_dma_addr=0x%llX\n", 1355 + (unsigned long long)ctx->digest_buff_dma_addr); 1356 + ctx->digest_buff_dma_addr = 0; 1357 + } 1358 + if (ctx->opad_tmp_keys_dma_addr != 0) { 1359 + SSI_RESTORE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr); 1360 + dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr, 1361 + sizeof(ctx->opad_tmp_keys_buff), 1362 + DMA_BIDIRECTIONAL); 1363 + SSI_LOG_DEBUG("Unmapped opad-digest: " 1364 + "opad_tmp_keys_dma_addr=0x%llX\n", 1365 + (unsigned long long)ctx->opad_tmp_keys_dma_addr); 1366 + ctx->opad_tmp_keys_dma_addr = 0; 1367 + } 1368 + 1369 + ctx->key_params.keylen = 0; 1370 + 1371 + } 1372 + 1373 + 1374 + static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx) 1375 + { 1376 + struct device *dev = &ctx->drvdata->plat_dev->dev; 1377 + 1378 + ctx->key_params.keylen = 0; 1379 + 1380 + ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL); 1381 + if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) { 1382 + SSI_LOG_ERR("Mapping digest len %zu B at va=%pK for DMA failed\n", 1383 + sizeof(ctx->digest_buff), ctx->digest_buff); 1384 + goto fail; 1385 + } 1386 + SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->digest_buff_dma_addr, 1387 + sizeof(ctx->digest_buff)); 1388 + SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=0x%llX\n", 1389 + sizeof(ctx->digest_buff), ctx->digest_buff, 1390 + (unsigned long long)ctx->digest_buff_dma_addr); 1391 + 1392 + ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL); 1393 + if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) { 1394 + SSI_LOG_ERR("Mapping opad digest %zu B at va=%pK for DMA failed\n", 1395 + sizeof(ctx->opad_tmp_keys_buff), 1396 + ctx->opad_tmp_keys_buff); 1397 + goto fail; 1398 + } 1399 + SSI_UPDATE_DMA_ADDR_TO_48BIT(ctx->opad_tmp_keys_dma_addr, 1400 + sizeof(ctx->opad_tmp_keys_buff)); 1401 + SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=0x%llX\n", 1402 + sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff, 1403 + (unsigned long long)ctx->opad_tmp_keys_dma_addr); 1404 + 1405 + ctx->is_hmac = false; 1406 + return 0; 1407 + 1408 + fail: 1409 + ssi_hash_free_ctx(ctx); 1410 + return -ENOMEM; 1411 + } 1412 + 1413 + static int ssi_shash_cra_init(struct crypto_tfm *tfm) 1414 + { 1415 + struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1416 + struct shash_alg * shash_alg = 1417 + container_of(tfm->__crt_alg, struct shash_alg, base); 1418 + struct ssi_hash_alg *ssi_alg = 1419 + container_of(shash_alg, struct ssi_hash_alg, shash_alg); 1420 + 1421 + ctx->hash_mode = ssi_alg->hash_mode; 1422 + 
ctx->hw_mode = ssi_alg->hw_mode; 1423 + ctx->inter_digestsize = ssi_alg->inter_digestsize; 1424 + ctx->drvdata = ssi_alg->drvdata; 1425 + 1426 + return ssi_hash_alloc_ctx(ctx); 1427 + } 1428 + 1429 + static int ssi_ahash_cra_init(struct crypto_tfm *tfm) 1430 + { 1431 + struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1432 + struct hash_alg_common * hash_alg_common = 1433 + container_of(tfm->__crt_alg, struct hash_alg_common, base); 1434 + struct ahash_alg *ahash_alg = 1435 + container_of(hash_alg_common, struct ahash_alg, halg); 1436 + struct ssi_hash_alg *ssi_alg = 1437 + container_of(ahash_alg, struct ssi_hash_alg, ahash_alg); 1438 + 1439 + 1440 + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 1441 + sizeof(struct ahash_req_ctx)); 1442 + 1443 + ctx->hash_mode = ssi_alg->hash_mode; 1444 + ctx->hw_mode = ssi_alg->hw_mode; 1445 + ctx->inter_digestsize = ssi_alg->inter_digestsize; 1446 + ctx->drvdata = ssi_alg->drvdata; 1447 + 1448 + return ssi_hash_alloc_ctx(ctx); 1449 + } 1450 + 1451 + static void ssi_hash_cra_exit(struct crypto_tfm *tfm) 1452 + { 1453 + struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1454 + 1455 + SSI_LOG_DEBUG("ssi_hash_cra_exit"); 1456 + ssi_hash_free_ctx(ctx); 1457 + } 1458 + 1459 + static int ssi_mac_update(struct ahash_request *req) 1460 + { 1461 + struct ahash_req_ctx *state = ahash_request_ctx(req); 1462 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1463 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm); 1464 + struct device *dev = &ctx->drvdata->plat_dev->dev; 1465 + unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base); 1466 + struct ssi_crypto_req ssi_req = {}; 1467 + HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN]; 1468 + int rc; 1469 + uint32_t idx = 0; 1470 + 1471 + if (req->nbytes == 0) { 1472 + /* no real updates required */ 1473 + return 0; 1474 + } 1475 + 1476 + state->xcbc_count++; 1477 + 1478 + if (unlikely(rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size))) { 1479 + if (rc == 1) { 1480 + SSI_LOG_DEBUG(" data size not require HW update %x\n", 1481 + req->nbytes); 1482 + /* No hardware updates are required */ 1483 + return 0; 1484 + } 1485 + SSI_LOG_ERR("map_ahash_request_update() failed\n"); 1486 + return -ENOMEM; 1487 + } 1488 + 1489 + if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) { 1490 + ssi_hash_create_xcbc_setup(req, desc, &idx); 1491 + } else { 1492 + ssi_hash_create_cmac_setup(req, desc, &idx); 1493 + } 1494 + 1495 + ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx); 1496 + 1497 + /* store the hash digest result in context */ 1498 + HW_DESC_INIT(&desc[idx]); 1499 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 1500 + HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 1); 1501 + HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]); 1502 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT); 1503 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0); 1504 + idx++; 1505 + 1506 + /* Setup DX request structure */ 1507 + ssi_req.user_cb = (void *)ssi_hash_update_complete; 1508 + ssi_req.user_arg = (void *)req; 1509 + #ifdef ENABLE_CYCLE_COUNT 1510 + ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */ 1511 + #endif 1512 + 1513 + rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1); 1514 + if (unlikely(rc != -EINPROGRESS)) { 1515 + SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc); 1516 + ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true); 1517 + } 1518 + return rc; 1519 + } 1520 + 1521 + static int ssi_mac_final(struct 
ahash_request *req) 1522 + { 1523 + struct ahash_req_ctx *state = ahash_request_ctx(req); 1524 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1525 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm); 1526 + struct device *dev = &ctx->drvdata->plat_dev->dev; 1527 + struct ssi_crypto_req ssi_req = {}; 1528 + HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN]; 1529 + int idx = 0; 1530 + int rc = 0; 1531 + uint32_t keySize, keyLen; 1532 + uint32_t digestsize = crypto_ahash_digestsize(tfm); 1533 + 1534 + uint32_t rem_cnt = state->buff_index ? state->buff1_cnt : 1535 + state->buff0_cnt; 1536 + 1537 + 1538 + if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) { 1539 + keySize = CC_AES_128_BIT_KEY_SIZE; 1540 + keyLen = CC_AES_128_BIT_KEY_SIZE; 1541 + } else { 1542 + keySize = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE : ctx->key_params.keylen; 1543 + keyLen = ctx->key_params.keylen; 1544 + } 1545 + 1546 + SSI_LOG_DEBUG("===== final xcbc reminder (%d) ====\n", rem_cnt); 1547 + 1548 + if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 0) != 0)) { 1549 + SSI_LOG_ERR("map_ahash_request_final() failed\n"); 1550 + return -ENOMEM; 1551 + } 1552 + 1553 + if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) { 1554 + SSI_LOG_ERR("map_ahash_digest() failed\n"); 1555 + return -ENOMEM; 1556 + } 1557 + 1558 + /* Setup DX request structure */ 1559 + ssi_req.user_cb = (void *)ssi_hash_complete; 1560 + ssi_req.user_arg = (void *)req; 1561 + #ifdef ENABLE_CYCLE_COUNT 1562 + ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */ 1563 + #endif 1564 + 1565 + if (state->xcbc_count && (rem_cnt == 0)) { 1566 + /* Load key for ECB decryption */ 1567 + HW_DESC_INIT(&desc[idx]); 1568 + HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_ECB); 1569 + HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT); 1570 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, 1571 + (ctx->opad_tmp_keys_dma_addr + 1572 + XCBC_MAC_K1_OFFSET), 1573 + keySize, NS_BIT); 1574 + HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keyLen); 1575 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES); 1576 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0); 1577 + idx++; 1578 + 1579 + 1580 + /* Initiate decryption of block state to previous block_state-XOR-M[n] */ 1581 + HW_DESC_INIT(&desc[idx]); 1582 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT); 1583 + HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT,0); 1584 + HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT); 1585 + idx++; 1586 + 1587 + /* Memory Barrier: wait for axi write to complete */ 1588 + HW_DESC_INIT(&desc[idx]); 1589 + HW_DESC_SET_DIN_NO_DMA(&desc[idx], 0, 0xfffff0); 1590 + HW_DESC_SET_DOUT_NO_DMA(&desc[idx], 0, 0, 1); 1591 + idx++; 1592 + } 1593 + 1594 + if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) { 1595 + ssi_hash_create_xcbc_setup(req, desc, &idx); 1596 + } else { 1597 + ssi_hash_create_cmac_setup(req, desc, &idx); 1598 + } 1599 + 1600 + if (state->xcbc_count == 0) { 1601 + HW_DESC_INIT(&desc[idx]); 1602 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 1603 + HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keyLen); 1604 + HW_DESC_SET_CMAC_SIZE0_MODE(&desc[idx]); 1605 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES); 1606 + idx++; 1607 + } else if (rem_cnt > 0) { 1608 + ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx); 1609 + } else { 1610 + HW_DESC_INIT(&desc[idx]); 1611 + HW_DESC_SET_DIN_CONST(&desc[idx], 0x00, CC_AES_BLOCK_SIZE); 1612 + 
HW_DESC_SET_FLOW_MODE(&desc[idx], DIN_AES_DOUT); 1613 + idx++; 1614 + } 1615 + 1616 + /* Get final MAC result */ 1617 + HW_DESC_INIT(&desc[idx]); 1618 + HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); /*TODO*/ 1619 + HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]); 1620 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT); 1621 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0); 1622 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 1623 + idx++; 1624 + 1625 + rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1); 1626 + if (unlikely(rc != -EINPROGRESS)) { 1627 + SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc); 1628 + ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true); 1629 + ssi_hash_unmap_result(dev, state, digestsize, req->result); 1630 + } 1631 + return rc; 1632 + } 1633 + 1634 + static int ssi_mac_finup(struct ahash_request *req) 1635 + { 1636 + struct ahash_req_ctx *state = ahash_request_ctx(req); 1637 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1638 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm); 1639 + struct device *dev = &ctx->drvdata->plat_dev->dev; 1640 + struct ssi_crypto_req ssi_req = {}; 1641 + HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN]; 1642 + int idx = 0; 1643 + int rc = 0; 1644 + uint32_t key_len = 0; 1645 + uint32_t digestsize = crypto_ahash_digestsize(tfm); 1646 + 1647 + SSI_LOG_DEBUG("===== finup xcbc(%d) ====\n", req->nbytes); 1648 + 1649 + if (state->xcbc_count > 0 && req->nbytes == 0) { 1650 + SSI_LOG_DEBUG("No data to update. Call to fdx_mac_final \n"); 1651 + return ssi_mac_final(req); 1652 + } 1653 + 1654 + if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) { 1655 + SSI_LOG_ERR("map_ahash_request_final() failed\n"); 1656 + return -ENOMEM; 1657 + } 1658 + if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) { 1659 + SSI_LOG_ERR("map_ahash_digest() failed\n"); 1660 + return -ENOMEM; 1661 + } 1662 + 1663 + /* Setup DX request structure */ 1664 + ssi_req.user_cb = (void *)ssi_hash_complete; 1665 + ssi_req.user_arg = (void *)req; 1666 + #ifdef ENABLE_CYCLE_COUNT 1667 + ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */ 1668 + #endif 1669 + 1670 + if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) { 1671 + key_len = CC_AES_128_BIT_KEY_SIZE; 1672 + ssi_hash_create_xcbc_setup(req, desc, &idx); 1673 + } else { 1674 + key_len = ctx->key_params.keylen; 1675 + ssi_hash_create_cmac_setup(req, desc, &idx); 1676 + } 1677 + 1678 + if (req->nbytes == 0) { 1679 + HW_DESC_INIT(&desc[idx]); 1680 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 1681 + HW_DESC_SET_KEY_SIZE_AES(&desc[idx], key_len); 1682 + HW_DESC_SET_CMAC_SIZE0_MODE(&desc[idx]); 1683 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES); 1684 + idx++; 1685 + } else { 1686 + ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx); 1687 + } 1688 + 1689 + /* Get final MAC result */ 1690 + HW_DESC_INIT(&desc[idx]); 1691 + HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); /*TODO*/ 1692 + HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]); 1693 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT); 1694 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0); 1695 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 1696 + idx++; 1697 + 1698 + rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1); 1699 + if (unlikely(rc != -EINPROGRESS)) { 1700 + SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc); 1701 + ssi_buffer_mgr_unmap_hash_request(dev, state, 
req->src, true); 1702 + ssi_hash_unmap_result(dev, state, digestsize, req->result); 1703 + } 1704 + return rc; 1705 + } 1706 + 1707 + static int ssi_mac_digest(struct ahash_request *req) 1708 + { 1709 + struct ahash_req_ctx *state = ahash_request_ctx(req); 1710 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1711 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm); 1712 + struct device *dev = &ctx->drvdata->plat_dev->dev; 1713 + uint32_t digestsize = crypto_ahash_digestsize(tfm); 1714 + struct ssi_crypto_req ssi_req = {}; 1715 + HwDesc_s desc[SSI_MAX_AHASH_SEQ_LEN]; 1716 + uint32_t keyLen; 1717 + int idx = 0; 1718 + int rc; 1719 + 1720 + SSI_LOG_DEBUG("===== -digest mac (%d) ====\n", req->nbytes); 1721 + 1722 + if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) { 1723 + SSI_LOG_ERR("map_ahash_source() failed\n"); 1724 + return -ENOMEM; 1725 + } 1726 + if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) { 1727 + SSI_LOG_ERR("map_ahash_digest() failed\n"); 1728 + return -ENOMEM; 1729 + } 1730 + 1731 + if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) { 1732 + SSI_LOG_ERR("map_ahash_request_final() failed\n"); 1733 + return -ENOMEM; 1734 + } 1735 + 1736 + /* Setup DX request structure */ 1737 + ssi_req.user_cb = (void *)ssi_hash_digest_complete; 1738 + ssi_req.user_arg = (void *)req; 1739 + #ifdef ENABLE_CYCLE_COUNT 1740 + ssi_req.op_type = STAT_OP_TYPE_ENCODE; /* Use "Encode" stats */ 1741 + #endif 1742 + 1743 + 1744 + if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) { 1745 + keyLen = CC_AES_128_BIT_KEY_SIZE; 1746 + ssi_hash_create_xcbc_setup(req, desc, &idx); 1747 + } else { 1748 + keyLen = ctx->key_params.keylen; 1749 + ssi_hash_create_cmac_setup(req, desc, &idx); 1750 + } 1751 + 1752 + if (req->nbytes == 0) { 1753 + HW_DESC_INIT(&desc[idx]); 1754 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 1755 + HW_DESC_SET_KEY_SIZE_AES(&desc[idx], keyLen); 1756 + HW_DESC_SET_CMAC_SIZE0_MODE(&desc[idx]); 1757 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES); 1758 + idx++; 1759 + } else { 1760 + ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx); 1761 + } 1762 + 1763 + /* Get final MAC result */ 1764 + HW_DESC_INIT(&desc[idx]); 1765 + HW_DESC_SET_DOUT_DLLI(&desc[idx], state->digest_result_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT,1); 1766 + HW_DESC_SET_QUEUE_LAST_IND(&desc[idx]); 1767 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_AES_to_DOUT); 1768 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_WRITE_STATE0); 1769 + HW_DESC_SET_CIPHER_CONFIG0(&desc[idx],DESC_DIRECTION_ENCRYPT_ENCRYPT); 1770 + HW_DESC_SET_CIPHER_MODE(&desc[idx], ctx->hw_mode); 1771 + idx++; 1772 + 1773 + rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1); 1774 + if (unlikely(rc != -EINPROGRESS)) { 1775 + SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc); 1776 + ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true); 1777 + ssi_hash_unmap_result(dev, state, digestsize, req->result); 1778 + ssi_hash_unmap_request(dev, state, ctx); 1779 + } 1780 + return rc; 1781 + } 1782 + 1783 + //shash wrap functions 1784 + #ifdef SYNC_ALGS 1785 + static int ssi_shash_digest(struct shash_desc *desc, 1786 + const u8 *data, unsigned int len, u8 *out) 1787 + { 1788 + struct ahash_req_ctx *state = shash_desc_ctx(desc); 1789 + struct crypto_shash *tfm = desc->tfm; 1790 + struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm); 1791 + uint32_t digestsize = crypto_shash_digestsize(tfm); 1792 + struct scatterlist src; 1793 + 1794 + if (len == 0) { 1795 + return 
ssi_hash_digest(state, ctx, digestsize, NULL, 0, out, NULL); 1796 + } 1797 + 1798 + /* sg_init_one may crash when len is 0 (depends on kernel configuration) */ 1799 + sg_init_one(&src, (const void *)data, len); 1800 + 1801 + return ssi_hash_digest(state, ctx, digestsize, &src, len, out, NULL); 1802 + } 1803 + 1804 + static int ssi_shash_update(struct shash_desc *desc, 1805 + const u8 *data, unsigned int len) 1806 + { 1807 + struct ahash_req_ctx *state = shash_desc_ctx(desc); 1808 + struct crypto_shash *tfm = desc->tfm; 1809 + struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm); 1810 + uint32_t blocksize = crypto_tfm_alg_blocksize(&tfm->base); 1811 + struct scatterlist src; 1812 + 1813 + sg_init_one(&src, (const void *)data, len); 1814 + 1815 + return ssi_hash_update(state, ctx, blocksize, &src, len, NULL); 1816 + } 1817 + 1818 + static int ssi_shash_finup(struct shash_desc *desc, 1819 + const u8 *data, unsigned int len, u8 *out) 1820 + { 1821 + struct ahash_req_ctx *state = shash_desc_ctx(desc); 1822 + struct crypto_shash *tfm = desc->tfm; 1823 + struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm); 1824 + uint32_t digestsize = crypto_shash_digestsize(tfm); 1825 + struct scatterlist src; 1826 + 1827 + sg_init_one(&src, (const void *)data, len); 1828 + 1829 + return ssi_hash_finup(state, ctx, digestsize, &src, len, out, NULL); 1830 + } 1831 + 1832 + static int ssi_shash_final(struct shash_desc *desc, u8 *out) 1833 + { 1834 + struct ahash_req_ctx *state = shash_desc_ctx(desc); 1835 + struct crypto_shash *tfm = desc->tfm; 1836 + struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm); 1837 + uint32_t digestsize = crypto_shash_digestsize(tfm); 1838 + 1839 + return ssi_hash_final(state, ctx, digestsize, NULL, 0, out, NULL); 1840 + } 1841 + 1842 + static int ssi_shash_init(struct shash_desc *desc) 1843 + { 1844 + struct ahash_req_ctx *state = shash_desc_ctx(desc); 1845 + struct crypto_shash *tfm = desc->tfm; 1846 + struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm); 1847 + 1848 + return ssi_hash_init(state, ctx); 1849 + } 1850 + 1851 + #ifdef EXPORT_FIXED 1852 + static int ssi_shash_export(struct shash_desc *desc, void *out) 1853 + { 1854 + struct crypto_shash *tfm = desc->tfm; 1855 + struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm); 1856 + 1857 + return ssi_hash_export(ctx, out); 1858 + } 1859 + 1860 + static int ssi_shash_import(struct shash_desc *desc, const void *in) 1861 + { 1862 + struct crypto_shash *tfm = desc->tfm; 1863 + struct ssi_hash_ctx *ctx = crypto_shash_ctx(tfm); 1864 + 1865 + return ssi_hash_import(ctx, in); 1866 + } 1867 + #endif 1868 + 1869 + static int ssi_shash_setkey(struct crypto_shash *tfm, 1870 + const u8 *key, unsigned int keylen) 1871 + { 1872 + return ssi_hash_setkey((void *) tfm, key, keylen, true); 1873 + } 1874 + 1875 + #endif /* SYNC_ALGS */ 1876 + 1877 + //ahash wrap functions 1878 + static int ssi_ahash_digest(struct ahash_request *req) 1879 + { 1880 + struct ahash_req_ctx *state = ahash_request_ctx(req); 1881 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1882 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm); 1883 + uint32_t digestsize = crypto_ahash_digestsize(tfm); 1884 + 1885 + return ssi_hash_digest(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req); 1886 + } 1887 + 1888 + static int ssi_ahash_update(struct ahash_request *req) 1889 + { 1890 + struct ahash_req_ctx *state = ahash_request_ctx(req); 1891 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1892 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm); 1893 + unsigned int block_size 
= crypto_tfm_alg_blocksize(&tfm->base); 1894 + 1895 + return ssi_hash_update(state, ctx, block_size, req->src, req->nbytes, (void *)req); 1896 + } 1897 + 1898 + static int ssi_ahash_finup(struct ahash_request *req) 1899 + { 1900 + struct ahash_req_ctx *state = ahash_request_ctx(req); 1901 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1902 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm); 1903 + uint32_t digestsize = crypto_ahash_digestsize(tfm); 1904 + 1905 + return ssi_hash_finup(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req); 1906 + } 1907 + 1908 + static int ssi_ahash_final(struct ahash_request *req) 1909 + { 1910 + struct ahash_req_ctx *state = ahash_request_ctx(req); 1911 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1912 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm); 1913 + uint32_t digestsize = crypto_ahash_digestsize(tfm); 1914 + 1915 + return ssi_hash_final(state, ctx, digestsize, req->src, req->nbytes, req->result, (void *)req); 1916 + } 1917 + 1918 + static int ssi_ahash_init(struct ahash_request *req) 1919 + { 1920 + struct ahash_req_ctx *state = ahash_request_ctx(req); 1921 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 1922 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm); 1923 + 1924 + SSI_LOG_DEBUG("===== init (%d) ====\n", req->nbytes); 1925 + 1926 + return ssi_hash_init(state, ctx); 1927 + } 1928 + 1929 + #ifdef EXPORT_FIXED 1930 + static int ssi_ahash_export(struct ahash_request *req, void *out) 1931 + { 1932 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1933 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1934 + 1935 + return ssi_hash_export(ctx, out); 1936 + } 1937 + 1938 + static int ssi_ahash_import(struct ahash_request *req, const void *in) 1939 + { 1940 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 1941 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash); 1942 + 1943 + return ssi_hash_import(ctx, in); 1944 + } 1945 + #endif 1946 + 1947 + static int ssi_ahash_setkey(struct crypto_ahash *ahash, 1948 + const u8 *key, unsigned int keylen) 1949 + { 1950 + return ssi_hash_setkey((void *) ahash, key, keylen, false); 1951 + } 1952 + 1953 + struct ssi_hash_template { 1954 + char name[CRYPTO_MAX_ALG_NAME]; 1955 + char driver_name[CRYPTO_MAX_ALG_NAME]; 1956 + char hmac_name[CRYPTO_MAX_ALG_NAME]; 1957 + char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; 1958 + unsigned int blocksize; 1959 + bool synchronize; 1960 + union { 1961 + struct ahash_alg template_ahash; 1962 + struct shash_alg template_shash; 1963 + }; 1964 + int hash_mode; 1965 + int hw_mode; 1966 + int inter_digestsize; 1967 + struct ssi_drvdata *drvdata; 1968 + }; 1969 + 1970 + /* hash descriptors */ 1971 + static struct ssi_hash_template driver_hash[] = { 1972 + //Asynchronize hash template 1973 + { 1974 + .name = "sha1", 1975 + .driver_name = "sha1-dx", 1976 + .hmac_name = "hmac(sha1)", 1977 + .hmac_driver_name = "hmac-sha1-dx", 1978 + .blocksize = SHA1_BLOCK_SIZE, 1979 + .synchronize = false, 1980 + .template_ahash = { 1981 + .init = ssi_ahash_init, 1982 + .update = ssi_ahash_update, 1983 + .final = ssi_ahash_final, 1984 + .finup = ssi_ahash_finup, 1985 + .digest = ssi_ahash_digest, 1986 + #ifdef EXPORT_FIXED 1987 + .export = ssi_ahash_export, 1988 + .import = ssi_ahash_import, 1989 + #endif 1990 + .setkey = ssi_ahash_setkey, 1991 + .halg = { 1992 + .digestsize = SHA1_DIGEST_SIZE, 1993 + .statesize = sizeof(struct sha1_state), 1994 + }, 1995 + }, 1996 + .hash_mode = DRV_HASH_SHA1, 1997 + .hw_mode = DRV_HASH_HW_SHA1, 1998 + 
.inter_digestsize = SHA1_DIGEST_SIZE, 1999 + }, 2000 + { 2001 + .name = "sha256", 2002 + .driver_name = "sha256-dx", 2003 + .hmac_name = "hmac(sha256)", 2004 + .hmac_driver_name = "hmac-sha256-dx", 2005 + .blocksize = SHA256_BLOCK_SIZE, 2006 + .synchronize = false, 2007 + .template_ahash = { 2008 + .init = ssi_ahash_init, 2009 + .update = ssi_ahash_update, 2010 + .final = ssi_ahash_final, 2011 + .finup = ssi_ahash_finup, 2012 + .digest = ssi_ahash_digest, 2013 + #ifdef EXPORT_FIXED 2014 + .export = ssi_ahash_export, 2015 + .import = ssi_ahash_import, 2016 + #endif 2017 + .setkey = ssi_ahash_setkey, 2018 + .halg = { 2019 + .digestsize = SHA256_DIGEST_SIZE, 2020 + .statesize = sizeof(struct sha256_state), 2021 + }, 2022 + }, 2023 + .hash_mode = DRV_HASH_SHA256, 2024 + .hw_mode = DRV_HASH_HW_SHA256, 2025 + .inter_digestsize = SHA256_DIGEST_SIZE, 2026 + }, 2027 + { 2028 + .name = "sha224", 2029 + .driver_name = "sha224-dx", 2030 + .hmac_name = "hmac(sha224)", 2031 + .hmac_driver_name = "hmac-sha224-dx", 2032 + .blocksize = SHA224_BLOCK_SIZE, 2033 + .synchronize = false, 2034 + .template_ahash = { 2035 + .init = ssi_ahash_init, 2036 + .update = ssi_ahash_update, 2037 + .final = ssi_ahash_final, 2038 + .finup = ssi_ahash_finup, 2039 + .digest = ssi_ahash_digest, 2040 + #ifdef EXPORT_FIXED 2041 + .export = ssi_ahash_export, 2042 + .import = ssi_ahash_import, 2043 + #endif 2044 + .setkey = ssi_ahash_setkey, 2045 + .halg = { 2046 + .digestsize = SHA224_DIGEST_SIZE, 2047 + .statesize = sizeof(struct sha256_state), 2048 + }, 2049 + }, 2050 + .hash_mode = DRV_HASH_SHA224, 2051 + .hw_mode = DRV_HASH_HW_SHA256, 2052 + .inter_digestsize = SHA256_DIGEST_SIZE, 2053 + }, 2054 + #if (DX_DEV_SHA_MAX > 256) 2055 + { 2056 + .name = "sha384", 2057 + .driver_name = "sha384-dx", 2058 + .hmac_name = "hmac(sha384)", 2059 + .hmac_driver_name = "hmac-sha384-dx", 2060 + .blocksize = SHA384_BLOCK_SIZE, 2061 + .synchronize = false, 2062 + .template_ahash = { 2063 + .init = ssi_ahash_init, 2064 + .update = ssi_ahash_update, 2065 + .final = ssi_ahash_final, 2066 + .finup = ssi_ahash_finup, 2067 + .digest = ssi_ahash_digest, 2068 + #ifdef EXPORT_FIXED 2069 + .export = ssi_ahash_export, 2070 + .import = ssi_ahash_import, 2071 + #endif 2072 + .setkey = ssi_ahash_setkey, 2073 + .halg = { 2074 + .digestsize = SHA384_DIGEST_SIZE, 2075 + .statesize = sizeof(struct sha512_state), 2076 + }, 2077 + }, 2078 + .hash_mode = DRV_HASH_SHA384, 2079 + .hw_mode = DRV_HASH_HW_SHA512, 2080 + .inter_digestsize = SHA512_DIGEST_SIZE, 2081 + }, 2082 + { 2083 + .name = "sha512", 2084 + .driver_name = "sha512-dx", 2085 + .hmac_name = "hmac(sha512)", 2086 + .hmac_driver_name = "hmac-sha512-dx", 2087 + .blocksize = SHA512_BLOCK_SIZE, 2088 + .synchronize = false, 2089 + .template_ahash = { 2090 + .init = ssi_ahash_init, 2091 + .update = ssi_ahash_update, 2092 + .final = ssi_ahash_final, 2093 + .finup = ssi_ahash_finup, 2094 + .digest = ssi_ahash_digest, 2095 + #ifdef EXPORT_FIXED 2096 + .export = ssi_ahash_export, 2097 + .import = ssi_ahash_import, 2098 + #endif 2099 + .setkey = ssi_ahash_setkey, 2100 + .halg = { 2101 + .digestsize = SHA512_DIGEST_SIZE, 2102 + .statesize = sizeof(struct sha512_state), 2103 + }, 2104 + }, 2105 + .hash_mode = DRV_HASH_SHA512, 2106 + .hw_mode = DRV_HASH_HW_SHA512, 2107 + .inter_digestsize = SHA512_DIGEST_SIZE, 2108 + }, 2109 + #endif 2110 + { 2111 + .name = "md5", 2112 + .driver_name = "md5-dx", 2113 + .hmac_name = "hmac(md5)", 2114 + .hmac_driver_name = "hmac-md5-dx", 2115 + .blocksize = MD5_HMAC_BLOCK_SIZE, 2116 + 
.synchronize = false, 2117 + .template_ahash = { 2118 + .init = ssi_ahash_init, 2119 + .update = ssi_ahash_update, 2120 + .final = ssi_ahash_final, 2121 + .finup = ssi_ahash_finup, 2122 + .digest = ssi_ahash_digest, 2123 + #ifdef EXPORT_FIXED 2124 + .export = ssi_ahash_export, 2125 + .import = ssi_ahash_import, 2126 + #endif 2127 + .setkey = ssi_ahash_setkey, 2128 + .halg = { 2129 + .digestsize = MD5_DIGEST_SIZE, 2130 + .statesize = sizeof(struct md5_state), 2131 + }, 2132 + }, 2133 + .hash_mode = DRV_HASH_MD5, 2134 + .hw_mode = DRV_HASH_HW_MD5, 2135 + .inter_digestsize = MD5_DIGEST_SIZE, 2136 + }, 2137 + { 2138 + .name = "xcbc(aes)", 2139 + .driver_name = "xcbc-aes-dx", 2140 + .blocksize = AES_BLOCK_SIZE, 2141 + .synchronize = false, 2142 + .template_ahash = { 2143 + .init = ssi_ahash_init, 2144 + .update = ssi_mac_update, 2145 + .final = ssi_mac_final, 2146 + .finup = ssi_mac_finup, 2147 + .digest = ssi_mac_digest, 2148 + .setkey = ssi_xcbc_setkey, 2149 + #ifdef EXPORT_FIXED 2150 + .export = ssi_ahash_export, 2151 + .import = ssi_ahash_import, 2152 + #endif 2153 + .halg = { 2154 + .digestsize = AES_BLOCK_SIZE, 2155 + .statesize = sizeof(struct aeshash_state), 2156 + }, 2157 + }, 2158 + .hash_mode = DRV_HASH_NULL, 2159 + .hw_mode = DRV_CIPHER_XCBC_MAC, 2160 + .inter_digestsize = AES_BLOCK_SIZE, 2161 + }, 2162 + #if SSI_CC_HAS_CMAC 2163 + { 2164 + .name = "cmac(aes)", 2165 + .driver_name = "cmac-aes-dx", 2166 + .blocksize = AES_BLOCK_SIZE, 2167 + .synchronize = false, 2168 + .template_ahash = { 2169 + .init = ssi_ahash_init, 2170 + .update = ssi_mac_update, 2171 + .final = ssi_mac_final, 2172 + .finup = ssi_mac_finup, 2173 + .digest = ssi_mac_digest, 2174 + .setkey = ssi_cmac_setkey, 2175 + #ifdef EXPORT_FIXED 2176 + .export = ssi_ahash_export, 2177 + .import = ssi_ahash_import, 2178 + #endif 2179 + .halg = { 2180 + .digestsize = AES_BLOCK_SIZE, 2181 + .statesize = sizeof(struct aeshash_state), 2182 + }, 2183 + }, 2184 + .hash_mode = DRV_HASH_NULL, 2185 + .hw_mode = DRV_CIPHER_CMAC, 2186 + .inter_digestsize = AES_BLOCK_SIZE, 2187 + }, 2188 + #endif 2189 + 2190 + }; 2191 + 2192 + static struct ssi_hash_alg * 2193 + ssi_hash_create_alg(struct ssi_hash_template *template, bool keyed) 2194 + { 2195 + struct ssi_hash_alg *t_crypto_alg; 2196 + struct crypto_alg *alg; 2197 + 2198 + t_crypto_alg = kzalloc(sizeof(struct ssi_hash_alg), GFP_KERNEL); 2199 + if (!t_crypto_alg) { 2200 + SSI_LOG_ERR("failed to allocate t_alg\n"); 2201 + return ERR_PTR(-ENOMEM); 2202 + } 2203 + 2204 + t_crypto_alg->synchronize = template->synchronize; 2205 + if (template->synchronize) { 2206 + struct shash_alg *halg; 2207 + t_crypto_alg->shash_alg = template->template_shash; 2208 + halg = &t_crypto_alg->shash_alg; 2209 + alg = &halg->base; 2210 + if (!keyed) halg->setkey = NULL; 2211 + } else { 2212 + struct ahash_alg *halg; 2213 + t_crypto_alg->ahash_alg = template->template_ahash; 2214 + halg = &t_crypto_alg->ahash_alg; 2215 + alg = &halg->halg.base; 2216 + if (!keyed) halg->setkey = NULL; 2217 + } 2218 + 2219 + if (keyed) { 2220 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 2221 + template->hmac_name); 2222 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 2223 + template->hmac_driver_name); 2224 + } else { 2225 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", 2226 + template->name); 2227 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", 2228 + template->driver_name); 2229 + } 2230 + alg->cra_module = THIS_MODULE; 2231 + alg->cra_ctxsize = sizeof(struct ssi_hash_ctx); 2232 + 
alg->cra_priority = SSI_CRA_PRIO; 2233 + alg->cra_blocksize = template->blocksize; 2234 + alg->cra_alignmask = 0; 2235 + alg->cra_exit = ssi_hash_cra_exit; 2236 + 2237 + if (template->synchronize) { 2238 + alg->cra_init = ssi_shash_cra_init; 2239 + alg->cra_flags = CRYPTO_ALG_TYPE_SHASH | 2240 + CRYPTO_ALG_KERN_DRIVER_ONLY; 2241 + alg->cra_type = &crypto_shash_type; 2242 + } else { 2243 + alg->cra_init = ssi_ahash_cra_init; 2244 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH | 2245 + CRYPTO_ALG_KERN_DRIVER_ONLY; 2246 + alg->cra_type = &crypto_ahash_type; 2247 + } 2248 + 2249 + t_crypto_alg->hash_mode = template->hash_mode; 2250 + t_crypto_alg->hw_mode = template->hw_mode; 2251 + t_crypto_alg->inter_digestsize = template->inter_digestsize; 2252 + 2253 + return t_crypto_alg; 2254 + } 2255 + 2256 + int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata) 2257 + { 2258 + struct ssi_hash_handle *hash_handle = drvdata->hash_handle; 2259 + ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr; 2260 + unsigned int larval_seq_len = 0; 2261 + HwDesc_s larval_seq[CC_DIGEST_SIZE_MAX/sizeof(uint32_t)]; 2262 + int rc = 0; 2263 + #if (DX_DEV_SHA_MAX > 256) 2264 + int i; 2265 + #endif 2266 + 2267 + /* Copy-to-sram digest-len */ 2268 + ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs, 2269 + ARRAY_SIZE(digest_len_init), larval_seq, &larval_seq_len); 2270 + rc = send_request_init(drvdata, larval_seq, larval_seq_len); 2271 + if (unlikely(rc != 0)) 2272 + goto init_digest_const_err; 2273 + 2274 + sram_buff_ofs += sizeof(digest_len_init); 2275 + larval_seq_len = 0; 2276 + 2277 + #if (DX_DEV_SHA_MAX > 256) 2278 + /* Copy-to-sram digest-len for sha384/512 */ 2279 + ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs, 2280 + ARRAY_SIZE(digest_len_sha512_init), larval_seq, &larval_seq_len); 2281 + rc = send_request_init(drvdata, larval_seq, larval_seq_len); 2282 + if (unlikely(rc != 0)) 2283 + goto init_digest_const_err; 2284 + 2285 + sram_buff_ofs += sizeof(digest_len_sha512_init); 2286 + larval_seq_len = 0; 2287 + #endif 2288 + 2289 + /* The initial digests offset */ 2290 + hash_handle->larval_digest_sram_addr = sram_buff_ofs; 2291 + 2292 + /* Copy-to-sram initial SHA* digests */ 2293 + ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs, 2294 + ARRAY_SIZE(md5_init), larval_seq, &larval_seq_len); 2295 + rc = send_request_init(drvdata, larval_seq, larval_seq_len); 2296 + if (unlikely(rc != 0)) 2297 + goto init_digest_const_err; 2298 + sram_buff_ofs += sizeof(md5_init); 2299 + larval_seq_len = 0; 2300 + 2301 + ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs, 2302 + ARRAY_SIZE(sha1_init), larval_seq, &larval_seq_len); 2303 + rc = send_request_init(drvdata, larval_seq, larval_seq_len); 2304 + if (unlikely(rc != 0)) 2305 + goto init_digest_const_err; 2306 + sram_buff_ofs += sizeof(sha1_init); 2307 + larval_seq_len = 0; 2308 + 2309 + ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs, 2310 + ARRAY_SIZE(sha224_init), larval_seq, &larval_seq_len); 2311 + rc = send_request_init(drvdata, larval_seq, larval_seq_len); 2312 + if (unlikely(rc != 0)) 2313 + goto init_digest_const_err; 2314 + sram_buff_ofs += sizeof(sha224_init); 2315 + larval_seq_len = 0; 2316 + 2317 + ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs, 2318 + ARRAY_SIZE(sha256_init), larval_seq, &larval_seq_len); 2319 + rc = send_request_init(drvdata, larval_seq, larval_seq_len); 2320 + if (unlikely(rc != 0)) 2321 + goto init_digest_const_err; 2322 + sram_buff_ofs += 
sizeof(sha256_init); 2323 + larval_seq_len = 0; 2324 + 2325 + #if (DX_DEV_SHA_MAX > 256) 2326 + /* We are forced to swap each double-word larval before copying to sram */ 2327 + for (i = 0; i < ARRAY_SIZE(sha384_init); i++) { 2328 + const uint32_t const0 = ((uint32_t *)((uint64_t *)&sha384_init[i]))[1]; 2329 + const uint32_t const1 = ((uint32_t *)((uint64_t *)&sha384_init[i]))[0]; 2330 + 2331 + ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1, 2332 + larval_seq, &larval_seq_len); 2333 + sram_buff_ofs += sizeof(uint32_t); 2334 + ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1, 2335 + larval_seq, &larval_seq_len); 2336 + sram_buff_ofs += sizeof(uint32_t); 2337 + } 2338 + rc = send_request_init(drvdata, larval_seq, larval_seq_len); 2339 + if (unlikely(rc != 0)) { 2340 + SSI_LOG_ERR("send_request() failed (rc = %d)\n", rc); 2341 + goto init_digest_const_err; 2342 + } 2343 + larval_seq_len = 0; 2344 + 2345 + for (i = 0; i < ARRAY_SIZE(sha512_init); i++) { 2346 + const uint32_t const0 = ((uint32_t *)((uint64_t *)&sha512_init[i]))[1]; 2347 + const uint32_t const1 = ((uint32_t *)((uint64_t *)&sha512_init[i]))[0]; 2348 + 2349 + ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1, 2350 + larval_seq, &larval_seq_len); 2351 + sram_buff_ofs += sizeof(uint32_t); 2352 + ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1, 2353 + larval_seq, &larval_seq_len); 2354 + sram_buff_ofs += sizeof(uint32_t); 2355 + } 2356 + rc = send_request_init(drvdata, larval_seq, larval_seq_len); 2357 + if (unlikely(rc != 0)) { 2358 + SSI_LOG_ERR("send_request() failed (rc = %d)\n", rc); 2359 + goto init_digest_const_err; 2360 + } 2361 + #endif 2362 + 2363 + init_digest_const_err: 2364 + return rc; 2365 + } 2366 + 2367 + int ssi_hash_alloc(struct ssi_drvdata *drvdata) 2368 + { 2369 + struct ssi_hash_handle *hash_handle; 2370 + ssi_sram_addr_t sram_buff; 2371 + uint32_t sram_size_to_alloc; 2372 + int rc = 0; 2373 + int alg; 2374 + 2375 + hash_handle = kzalloc(sizeof(struct ssi_hash_handle), GFP_KERNEL); 2376 + if (hash_handle == NULL) { 2377 + SSI_LOG_ERR("kzalloc failed to allocate %zu B\n", 2378 + sizeof(struct ssi_hash_handle)); 2379 + rc = -ENOMEM; 2380 + goto fail; 2381 + } 2382 + 2383 + drvdata->hash_handle = hash_handle; 2384 + 2385 + sram_size_to_alloc = sizeof(digest_len_init) + 2386 + #if (DX_DEV_SHA_MAX > 256) 2387 + sizeof(digest_len_sha512_init) + 2388 + sizeof(sha384_init) + 2389 + sizeof(sha512_init) + 2390 + #endif 2391 + sizeof(md5_init) + 2392 + sizeof(sha1_init) + 2393 + sizeof(sha224_init) + 2394 + sizeof(sha256_init); 2395 + 2396 + sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc); 2397 + if (sram_buff == NULL_SRAM_ADDR) { 2398 + SSI_LOG_ERR("SRAM pool exhausted\n"); 2399 + rc = -ENOMEM; 2400 + goto fail; 2401 + } 2402 + 2403 + /* The initial digest-len offset */ 2404 + hash_handle->digest_len_sram_addr = sram_buff; 2405 + 2406 + /*must be set before the alg registration as it is being used there*/ 2407 + rc = ssi_hash_init_sram_digest_consts(drvdata); 2408 + if (unlikely(rc != 0)) { 2409 + SSI_LOG_ERR("Init digest CONST failed (rc=%d)\n", rc); 2410 + goto fail; 2411 + } 2412 + 2413 + INIT_LIST_HEAD(&hash_handle->hash_list); 2414 + 2415 + /* ahash registration */ 2416 + for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) { 2417 + struct ssi_hash_alg *t_alg; 2418 + 2419 + /* register hmac version */ 2420 + 2421 + if ((((struct ssi_hash_template)driver_hash[alg]).hw_mode != DRV_CIPHER_XCBC_MAC) && 2422 + (((struct ssi_hash_template)driver_hash[alg]).hw_mode != DRV_CIPHER_CMAC)) { 
2423 + t_alg = ssi_hash_create_alg(&driver_hash[alg], true);
2424 + if (IS_ERR(t_alg)) {
2425 + rc = PTR_ERR(t_alg);
2426 + SSI_LOG_ERR("%s alg allocation failed\n",
2427 + driver_hash[alg].driver_name);
2428 + goto fail;
2429 + }
2430 + t_alg->drvdata = drvdata;
2431 +
2432 + if (t_alg->synchronize) {
2433 + rc = crypto_register_shash(&t_alg->shash_alg);
2434 + if (unlikely(rc != 0)) {
2435 + SSI_LOG_ERR("%s alg registration failed\n",
2436 + t_alg->shash_alg.base.cra_driver_name);
2437 + kfree(t_alg);
2438 + goto fail;
2439 + } else
2440 + list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2441 + } else {
2442 + rc = crypto_register_ahash(&t_alg->ahash_alg);
2443 + if (unlikely(rc != 0)) {
2444 + SSI_LOG_ERR("%s alg registration failed\n",
2445 + t_alg->ahash_alg.halg.base.cra_driver_name);
2446 + kfree(t_alg);
2447 + goto fail;
2448 + } else
2449 + list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2450 + }
2451 + }
2452 +
2453 + /* register hash version */
2454 + t_alg = ssi_hash_create_alg(&driver_hash[alg], false);
2455 + if (IS_ERR(t_alg)) {
2456 + rc = PTR_ERR(t_alg);
2457 + SSI_LOG_ERR("%s alg allocation failed\n",
2458 + driver_hash[alg].driver_name);
2459 + goto fail;
2460 + }
2461 + t_alg->drvdata = drvdata;
2462 +
2463 + if (t_alg->synchronize) {
2464 + rc = crypto_register_shash(&t_alg->shash_alg);
2465 + if (unlikely(rc != 0)) {
2466 + SSI_LOG_ERR("%s alg registration failed\n",
2467 + t_alg->shash_alg.base.cra_driver_name);
2468 + kfree(t_alg);
2469 + goto fail;
2470 + } else
2471 + list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2472 +
2473 + } else {
2474 + rc = crypto_register_ahash(&t_alg->ahash_alg);
2475 + if (unlikely(rc != 0)) {
2476 + SSI_LOG_ERR("%s alg registration failed\n",
2477 + t_alg->ahash_alg.halg.base.cra_driver_name);
2478 + kfree(t_alg);
2479 + goto fail;
2480 + } else
2481 + list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2482 + }
2483 + }
2484 +
2485 + return 0;
2486 +
2487 + fail:
2488 +
2489 + if (drvdata->hash_handle != NULL) {
2490 + kfree(drvdata->hash_handle);
2491 + drvdata->hash_handle = NULL;
2492 + }
2493 + return rc;
2494 + }
2495 +
2496 + int ssi_hash_free(struct ssi_drvdata *drvdata)
2497 + {
2498 + struct ssi_hash_alg *t_hash_alg, *hash_n;
2499 + struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
2500 +
2501 + if (hash_handle != NULL) {
2502 +
2503 + list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, entry) {
2504 + if (t_hash_alg->synchronize) {
2505 + crypto_unregister_shash(&t_hash_alg->shash_alg);
2506 + } else {
2507 + crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2508 + }
2509 + list_del(&t_hash_alg->entry);
2510 + kfree(t_hash_alg);
2511 + }
2512 +
2513 + kfree(hash_handle);
2514 + drvdata->hash_handle = NULL;
2515 + }
2516 + return 0;
2517 + }
2518 +
2519 + static void ssi_hash_create_xcbc_setup(struct ahash_request *areq,
2520 + HwDesc_s desc[],
2521 + unsigned int *seq_size) {
2522 + unsigned int idx = *seq_size;
2523 + struct ahash_req_ctx *state = ahash_request_ctx(areq);
2524 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2525 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2526 +
2527 + /* Setup XCBC MAC K1 */
2528 + HW_DESC_INIT(&desc[idx]);
2529 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr
2530 + + XCBC_MAC_K1_OFFSET),
2531 + CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2532 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
2533 + HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
2534 + HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2535 + HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2536 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
2537 + idx++;
2538 +
2539 + /* Setup XCBC MAC K2 */
2540 + HW_DESC_INIT(&desc[idx]);
2541 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr
2542 + + XCBC_MAC_K2_OFFSET),
2543 + CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2544 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE1);
2545 + HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
2546 + HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2547 + HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2548 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
2549 + idx++;
2550 +
2551 + /* Setup XCBC MAC K3 */
2552 + HW_DESC_INIT(&desc[idx]);
2553 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr
2554 + + XCBC_MAC_K3_OFFSET),
2555 + CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2556 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE2);
2557 + HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
2558 + HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2559 + HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2560 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
2561 + idx++;
2562 +
2563 + /* Loading MAC state */
2564 + HW_DESC_INIT(&desc[idx]);
2565 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT);
2566 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
2567 + HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_XCBC_MAC);
2568 + HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2569 + HW_DESC_SET_KEY_SIZE_AES(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2570 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
2571 + idx++;
2572 + *seq_size = idx;
2573 + }
2574 +
2575 + static void ssi_hash_create_cmac_setup(struct ahash_request *areq,
2576 + HwDesc_s desc[],
2577 + unsigned int *seq_size)
2578 + {
2579 + unsigned int idx = *seq_size;
2580 + struct ahash_req_ctx *state = ahash_request_ctx(areq);
2581 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2582 + struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2583 +
2584 + /* Setup CMAC Key */
2585 + HW_DESC_INIT(&desc[idx]);
2586 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2587 + ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE : ctx->key_params.keylen), NS_BIT);
2588 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_KEY0);
2589 + HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
2590 + HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2591 + HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->key_params.keylen);
2592 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
2593 + idx++;
2594 +
2595 + /* Load MAC state */
2596 + HW_DESC_INIT(&desc[idx]);
2597 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT);
2598 + HW_DESC_SET_SETUP_MODE(&desc[idx], SETUP_LOAD_STATE0);
2599 + HW_DESC_SET_CIPHER_MODE(&desc[idx], DRV_CIPHER_CMAC);
2600 + HW_DESC_SET_CIPHER_CONFIG0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2601 + HW_DESC_SET_KEY_SIZE_AES(&desc[idx], ctx->key_params.keylen);
2602 + HW_DESC_SET_FLOW_MODE(&desc[idx], S_DIN_to_AES);
2603 + idx++;
2604 + *seq_size = idx;
2605 + }
2606 +
2607 + static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
2608 + struct ssi_hash_ctx *ctx,
2609 + unsigned int flow_mode,
2610 + HwDesc_s desc[],
2611 + bool is_not_last_data,
2612 + unsigned int *seq_size)
2613 + {
2614 + unsigned int idx = *seq_size;
2615 +
2616 + if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
2617 + HW_DESC_INIT(&desc[idx]);
2618 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
2619 + sg_dma_address(areq_ctx->curr_sg),
2620 + areq_ctx->curr_sg->length, NS_BIT);
2621 + HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
2622 + idx++;
2623 + } else {
2624 + if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
2625 + SSI_LOG_DEBUG(" NULL mode\n");
2626 + /* nothing to build */
2627 + return;
2628 + }
2629 + /* bypass */
2630 + HW_DESC_INIT(&desc[idx]);
2631 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_DLLI,
2632 + areq_ctx->mlli_params.mlli_dma_addr,
2633 + areq_ctx->mlli_params.mlli_len,
2634 + NS_BIT);
2635 + HW_DESC_SET_DOUT_SRAM(&desc[idx],
2636 + ctx->drvdata->mlli_sram_addr,
2637 + areq_ctx->mlli_params.mlli_len);
2638 + HW_DESC_SET_FLOW_MODE(&desc[idx], BYPASS);
2639 + idx++;
2640 + /* process */
2641 + HW_DESC_INIT(&desc[idx]);
2642 + HW_DESC_SET_DIN_TYPE(&desc[idx], DMA_MLLI,
2643 + ctx->drvdata->mlli_sram_addr,
2644 + areq_ctx->mlli_nents,
2645 + NS_BIT);
2646 + HW_DESC_SET_FLOW_MODE(&desc[idx], flow_mode);
2647 + idx++;
2648 + }
2649 + if (is_not_last_data) {
2650 + HW_DESC_SET_DIN_NOT_LAST_INDICATION(&desc[idx-1]);
2651 + }
2652 + /* return updated desc sequence size */
2653 + *seq_size = idx;
2654 + }
2655 +
2656 + /*!
2657 + * Gets the address of the initial digest in SRAM
2658 + * according to the given hash mode
2659 + *
2660 + * \param drvdata
2661 + * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256 (plus SHA384/SHA512 when DX_DEV_SHA_MAX > 256)
2662 + *
2663 + * \return uint32_t The address of the initial digest in SRAM
2664 + */
2665 + ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, uint32_t mode)
2666 + {
2667 + struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
2668 + struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
2669 +
2670 + switch (mode) {
2671 + case DRV_HASH_NULL:
2672 + break; /* Ignore */
2673 + case DRV_HASH_MD5:
2674 + return (hash_handle->larval_digest_sram_addr);
2675 + case DRV_HASH_SHA1:
2676 + return (hash_handle->larval_digest_sram_addr +
2677 + sizeof(md5_init));
2678 + case DRV_HASH_SHA224:
2679 + return (hash_handle->larval_digest_sram_addr +
2680 + sizeof(md5_init) +
2681 + sizeof(sha1_init));
2682 + case DRV_HASH_SHA256:
2683 + return (hash_handle->larval_digest_sram_addr +
2684 + sizeof(md5_init) +
2685 + sizeof(sha1_init) +
2686 + sizeof(sha224_init));
2687 + #if (DX_DEV_SHA_MAX > 256)
2688 + case DRV_HASH_SHA384:
2689 + return (hash_handle->larval_digest_sram_addr +
2690 + sizeof(md5_init) +
2691 + sizeof(sha1_init) +
2692 + sizeof(sha224_init) +
2693 + sizeof(sha256_init));
2694 + case DRV_HASH_SHA512:
2695 + return (hash_handle->larval_digest_sram_addr +
2696 + sizeof(md5_init) +
2697 + sizeof(sha1_init) +
2698 + sizeof(sha224_init) +
2699 + sizeof(sha256_init) +
2700 + sizeof(sha384_init));
2701 + #endif
2702 + default:
2703 + SSI_LOG_ERR("Invalid hash mode (%d)\n", mode);
2704 + }
2705 +
2706 + /* Invalid mode: return a valid, though wrong, address to avoid a kernel crash */
2707 + return hash_handle->larval_digest_sram_addr;
2708 + }
2709 +
2710 + ssi_sram_addr_t
2711 + ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, uint32_t mode)
2712 + {
2713 + struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
2714 + struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
2715 + ssi_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
2716 +
2717 + switch (mode) {
2718 + case DRV_HASH_SHA1:
2719 + case DRV_HASH_SHA224:
2720 + case DRV_HASH_SHA256:
2721 + case DRV_HASH_MD5:
2722 + return digest_len_addr;
2723 + #if (DX_DEV_SHA_MAX > 256)
2724 + case DRV_HASH_SHA384:
2725 + case DRV_HASH_SHA512:
2726 + return digest_len_addr + sizeof(digest_len_init);
2727 + #endif
2728 + default:
2729 + return digest_len_addr; /* default to a valid address to avoid a kernel crash */
2730 + }
2731 + }
2732 +
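For orientation (an editorial sketch, not part of the commit): the registration loop above publishes each transform through crypto_register_ahash() or crypto_register_shash(), after which any kernel caller can reach it by name via the generic ahash API. Below is a minimal caller-side sketch of a one-shot SHA-256 digest using the completion idiom of this kernel generation (crypto_wait_req() did not exist yet); demo_sha256_digest(), demo_wait, and demo_complete() are hypothetical names, and the buffer is assumed to be linearly mapped so it can be DMA-ed:

#include <linux/err.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <crypto/hash.h>

struct demo_wait {
	struct completion done;
	int err;
};

/* Completion callback for the async case; a hardware backend such as
 * ccree invokes this from its completion path. */
static void demo_complete(struct crypto_async_request *req, int err)
{
	struct demo_wait *w = req->data;

	if (err == -EINPROGRESS)
		return; /* request merely moved from backlog to queue */
	w->err = err;
	complete(&w->done);
}

/* One-shot SHA-256 over a linearly mapped buffer. If this driver is
 * bound and wins on cra_priority, "sha256" resolves to the
 * CryptoCell-backed ahash registered above. */
static int demo_sha256_digest(const u8 *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct demo_wait w;
	int rc;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	init_completion(&w.done);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   demo_complete, &w);
	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, out, len);

	rc = crypto_ahash_digest(req);
	if (rc == -EINPROGRESS || rc == -EBUSY) {
		wait_for_completion(&w.done);
		rc = w.err;
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return rc;
}

Whether the request completes synchronously or returns -EINPROGRESS/-EBUSY depends on the backend; hardware drivers like this one typically complete asynchronously, which is exactly what the async descriptor machinery in the hunk above serves.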
+101
drivers/staging/ccree/ssi_hash.h
···
1 + /*
2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates.
3 + *
4 + * This program is free software; you can redistribute it and/or modify
5 + * it under the terms of the GNU General Public License version 2 as
6 + * published by the Free Software Foundation.
7 + *
8 + * This program is distributed in the hope that it will be useful,
9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 + * GNU General Public License for more details.
12 + *
13 + * You should have received a copy of the GNU General Public License
14 + * along with this program; if not, see <http://www.gnu.org/licenses/>.
15 + */
16 +
17 + /* \file ssi_hash.h
18 + ARM CryptoCell Hash Crypto API
19 + */
20 +
21 + #ifndef __SSI_HASH_H__
22 + #define __SSI_HASH_H__
23 +
24 + #include "ssi_buffer_mgr.h"
25 +
26 + #define HMAC_IPAD_CONST 0x36363636
27 + #define HMAC_OPAD_CONST 0x5C5C5C5C
28 + #if (DX_DEV_SHA_MAX > 256)
29 + #define HASH_LEN_SIZE 16
30 + #define SSI_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
31 + #define SSI_MAX_HASH_BLCK_SIZE SHA512_BLOCK_SIZE
32 + #else
33 + #define HASH_LEN_SIZE 8
34 + #define SSI_MAX_HASH_DIGEST_SIZE SHA256_DIGEST_SIZE
35 + #define SSI_MAX_HASH_BLCK_SIZE SHA256_BLOCK_SIZE
36 + #endif
37 +
38 + #define XCBC_MAC_K1_OFFSET 0
39 + #define XCBC_MAC_K2_OFFSET 16
40 + #define XCBC_MAC_K3_OFFSET 32
41 +
42 + // This struct was taken from drivers/crypto/nx/nx-aes-xcbc.c and is used for the xcbc/cmac statesize
43 + struct aeshash_state {
44 + u8 state[AES_BLOCK_SIZE];
45 + unsigned int count;
46 + u8 buffer[AES_BLOCK_SIZE];
47 + };
48 +
49 + /* ahash state */
50 + struct ahash_req_ctx {
51 + uint8_t* buff0;
52 + uint8_t* buff1;
53 + uint8_t* digest_result_buff;
54 + struct async_gen_req_ctx gen_ctx;
55 + enum ssi_req_dma_buf_type data_dma_buf_type;
56 + uint8_t *digest_buff;
57 + uint8_t *opad_digest_buff;
58 + uint8_t *digest_bytes_len;
59 + dma_addr_t opad_digest_dma_addr;
60 + dma_addr_t digest_buff_dma_addr;
61 + dma_addr_t digest_bytes_len_dma_addr;
62 + dma_addr_t digest_result_dma_addr;
63 + uint32_t buff0_cnt;
64 + uint32_t buff1_cnt;
65 + uint32_t buff_index;
66 + uint32_t xcbc_count; /* count xcbc update operations */
67 + struct scatterlist buff_sg[2];
68 + struct scatterlist *curr_sg;
69 + uint32_t in_nents;
70 + uint32_t mlli_nents;
71 + struct mlli_params mlli_params;
72 + };
73 +
74 + int ssi_hash_alloc(struct ssi_drvdata *drvdata);
75 + int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata);
76 + int ssi_hash_free(struct ssi_drvdata *drvdata);
77 +
78 + /*!
79 + * Gets the initial digest length
80 + *
81 + * \param drvdata
82 + * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
83 + *
84 + * \return uint32_t The address of the initial digest length in SRAM
85 + */
86 + ssi_sram_addr_t
87 + ssi_ahash_get_initial_digest_len_sram_addr(void *drvdata, uint32_t mode);
88 +
89 + /*!
90 + * Gets the address of the initial digest in SRAM
91 + * according to the given hash mode
92 + *
93 + * \param drvdata
94 + * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
95 + *
96 + * \return uint32_t The address of the initial digest in SRAM
97 + */
98 + ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, uint32_t mode);
99 +
100 + #endif /*__SSI_HASH_H__*/
101 +
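A side note on the constants at the top of this header: HMAC_IPAD_CONST and HMAC_OPAD_CONST are the RFC 2104 pad bytes 0x36 and 0x5c replicated across a 32-bit word, which lets the key be XORed a word at a time (since every byte of the constant is identical, the result is endian-neutral). As a plain-C illustration of the key preparation the driver offloads to the CryptoCell engine, here is a sketch; hmac_pad_key() is a hypothetical helper, it assumes keylen <= block_size, and the RFC 2104 rule that longer keys are first hashed down to the digest size is omitted:

#include <stdint.h>
#include <string.h>

#define HMAC_IPAD_CONST 0x36363636U
#define HMAC_OPAD_CONST 0x5C5C5C5CU

/* Derive the RFC 2104 padded keys:
 *   ipad_key = K0 ^ 0x36..36,  opad_key = K0 ^ 0x5c..5c,
 * where K0 is the key zero-padded to the hash block size. */
static void hmac_pad_key(const uint8_t *key, size_t keylen,
			 size_t block_size, /* e.g. 64 for SHA-256 */
			 uint32_t *ipad_key, uint32_t *opad_key)
{
	size_t i, words = block_size / sizeof(uint32_t);

	memset(ipad_key, 0, block_size);
	memcpy(ipad_key, key, keylen);        /* K0 = zero-padded key */
	memcpy(opad_key, ipad_key, block_size);

	for (i = 0; i < words; i++) {
		ipad_key[i] ^= HMAC_IPAD_CONST;  /* K0 ^ ipad */
		opad_key[i] ^= HMAC_OPAD_CONST;  /* K0 ^ opad */
	}
}

The ipad key seeds the inner hash H((K0 ^ ipad) || m); the opad key seeds the outer hash over that inner digest, giving HMAC(K, m) = H((K0 ^ opad) || H((K0 ^ ipad) || m)).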
+4
drivers/staging/ccree/ssi_pm.c
···
26 26 #include "ssi_request_mgr.h"
27 27 #include "ssi_sram_mgr.h"
28 28 #include "ssi_sysfs.h"
29 + #include "ssi_hash.h"
29 30 #include "ssi_pm.h"
30 31 #include "ssi_pm_ext.h"
31 32
···
80 79 return rc;
81 80 }
82 81
82 + /* must be called after the queue is resumed, as this uses the HW queue */
83 + ssi_hash_init_sram_digest_consts(drvdata);
84 +
83 85 return 0;
84 86 }
85 87
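The ordering constraint in this last hunk is worth spelling out: the larval digests and digest-length constants live in CryptoCell SRAM, which does not survive power-down, and re-loading them is itself done by pushing descriptors through the HW request queue. Hence the hash re-initialization must follow the queue resume. A condensed sketch of the resulting flow, with names assumed from the surrounding driver rather than quoted from this patch (the resume-queue helper in particular is an assumption):

/* Sketch only; assumes the driver's local headers are included. */
static int demo_runtime_resume(struct device *dev)
{
	struct ssi_drvdata *drvdata =
		(struct ssi_drvdata *)dev_get_drvdata(dev);
	int rc;

	/* 1. Clocks/power for the CryptoCell block come back (elided). */

	/* 2. The HW request queue must be operational first... */
	rc = ssi_request_mgr_runtime_resume_queue(drvdata);
	if (rc != 0)
		return rc;

	/* 3. ...because re-seeding the SRAM digest constants submits
	 *    descriptors through that very queue. */
	ssi_hash_init_sram_digest_consts(drvdata);

	return 0;
}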