Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: ccree: introduce CryptoCell HW driver

Introduce basic low level Arm TrustZone CryptoCell HW support.
This first patch doesn't actually register any Crypto API
transformations, these will follow up in the next patch.

This first revision supports the CC 712 REE component.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Gilad Ben-Yossef and committed by
Greg Kroah-Hartman
abefd674 f55a6d45

+5220
+2
drivers/staging/Kconfig
··· 104 104 105 105 source "drivers/staging/vc04_services/Kconfig" 106 106 107 + source "drivers/staging/ccree/Kconfig" 108 + 107 109 endif # STAGING
+1
drivers/staging/Makefile
··· 41 41 obj-$(CONFIG_KS7010) += ks7010/ 42 42 obj-$(CONFIG_GREYBUS) += greybus/ 43 43 obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/ 44 + obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/ 44 45
+19
drivers/staging/ccree/Kconfig
··· 1 + config CRYPTO_DEV_CCREE 2 + tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators" 3 + depends on CRYPTO_HW && OF && HAS_DMA 4 + default n 5 + help 6 + Say 'Y' to enable a driver for the Arm TrustZone CryptoCell 7 + C7xx. Currently only the CryptoCell 712 REE is supported. 8 + Choose this if you wish to use hardware acceleration of 9 + cryptographic operations on the system REE. 10 + If unsure say Y. 11 + 12 + config CCREE_DISABLE_COHERENT_DMA_OPS 13 + bool "Disable Coherent DMA operations for the CCREE driver" 14 + depends on CRYPTO_DEV_CCREE 15 + default n 16 + help 17 + Say 'Y' to disable the use of coherent DMA operations by the 18 + CCREE driver for debugging purposes. 19 + If unsure say N.
+2
drivers/staging/ccree/Makefile
··· 1 + obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o 2 + ccree-y := ssi_driver.o ssi_sysfs.o ssi_buffer_mgr.o ssi_request_mgr.o ssi_sram_mgr.o ssi_pm.o ssi_pm_ext.o
+62
drivers/staging/ccree/cc_bitops.h
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + /*! 18 + * \file cc_bitops.h 19 + * Bit fields operations macros. 20 + */ 21 + #ifndef _CC_BITOPS_H_ 22 + #define _CC_BITOPS_H_ 23 + 24 + #define BITMASK(mask_size) (((mask_size) < 32) ? \ 25 + ((1UL << (mask_size)) - 1) : 0xFFFFFFFFUL) 26 + #define BITMASK_AT(mask_size, mask_offset) (BITMASK(mask_size) << (mask_offset)) 27 + 28 + #define BITFIELD_GET(word, bit_offset, bit_size) \ 29 + (((word) >> (bit_offset)) & BITMASK(bit_size)) 30 + #define BITFIELD_SET(word, bit_offset, bit_size, new_val) do { \ 31 + word = ((word) & ~BITMASK_AT(bit_size, bit_offset)) | \ 32 + (((new_val) & BITMASK(bit_size)) << (bit_offset)); \ 33 + } while (0) 34 + 35 + /* Is val aligned to "align" ("align" must be power of 2) */ 36 + #ifndef IS_ALIGNED 37 + #define IS_ALIGNED(val, align) \ 38 + (((uintptr_t)(val) & ((align) - 1)) == 0) 39 + #endif 40 + 41 + #define SWAP_ENDIAN(word) \ 42 + (((word) >> 24) | (((word) & 0x00FF0000) >> 8) | \ 43 + (((word) & 0x0000FF00) << 8) | (((word) & 0x000000FF) << 24)) 44 + 45 + #ifdef BIG__ENDIAN 46 + #define SWAP_TO_LE(word) SWAP_ENDIAN(word) 47 + #define SWAP_TO_BE(word) word 48 + #else 49 + #define SWAP_TO_LE(word) word 50 + #define SWAP_TO_BE(word) SWAP_ENDIAN(word) 51 + #endif 52 + 53 + 54 + 55 + /* Is val a multiple of "mult" ("mult" must be power 
of 2) */ 56 + #define IS_MULT(val, mult) \ 57 + (((val) & ((mult) - 1)) == 0) 58 + 59 + #define IS_NULL_ADDR(adr) \ 60 + (!(adr)) 61 + 62 + #endif /*_CC_BITOPS_H_*/
+235
drivers/staging/ccree/cc_crypto_ctx.h
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + 18 + #ifndef _CC_CRYPTO_CTX_H_ 19 + #define _CC_CRYPTO_CTX_H_ 20 + 21 + #ifdef __KERNEL__ 22 + #include <linux/types.h> 23 + #define INT32_MAX 0x7FFFFFFFL 24 + #else 25 + #include <stdint.h> 26 + #endif 27 + 28 + 29 + #ifndef max 30 + #define max(a, b) ((a) > (b) ? (a) : (b)) 31 + #define min(a, b) ((a) < (b) ? 
(a) : (b)) 32 + #endif 33 + 34 + /* context size */ 35 + #ifndef CC_CTX_SIZE_LOG2 36 + #if (CC_SUPPORT_SHA > 256) 37 + #define CC_CTX_SIZE_LOG2 8 38 + #else 39 + #define CC_CTX_SIZE_LOG2 7 40 + #endif 41 + #endif 42 + #define CC_CTX_SIZE (1<<CC_CTX_SIZE_LOG2) 43 + #define CC_DRV_CTX_SIZE_WORDS (CC_CTX_SIZE >> 2) 44 + 45 + #define CC_DRV_DES_IV_SIZE 8 46 + #define CC_DRV_DES_BLOCK_SIZE 8 47 + 48 + #define CC_DRV_DES_ONE_KEY_SIZE 8 49 + #define CC_DRV_DES_DOUBLE_KEY_SIZE 16 50 + #define CC_DRV_DES_TRIPLE_KEY_SIZE 24 51 + #define CC_DRV_DES_KEY_SIZE_MAX CC_DRV_DES_TRIPLE_KEY_SIZE 52 + 53 + #define CC_AES_IV_SIZE 16 54 + #define CC_AES_IV_SIZE_WORDS (CC_AES_IV_SIZE >> 2) 55 + 56 + #define CC_AES_BLOCK_SIZE 16 57 + #define CC_AES_BLOCK_SIZE_WORDS 4 58 + 59 + #define CC_AES_128_BIT_KEY_SIZE 16 60 + #define CC_AES_128_BIT_KEY_SIZE_WORDS (CC_AES_128_BIT_KEY_SIZE >> 2) 61 + #define CC_AES_192_BIT_KEY_SIZE 24 62 + #define CC_AES_192_BIT_KEY_SIZE_WORDS (CC_AES_192_BIT_KEY_SIZE >> 2) 63 + #define CC_AES_256_BIT_KEY_SIZE 32 64 + #define CC_AES_256_BIT_KEY_SIZE_WORDS (CC_AES_256_BIT_KEY_SIZE >> 2) 65 + #define CC_AES_KEY_SIZE_MAX CC_AES_256_BIT_KEY_SIZE 66 + #define CC_AES_KEY_SIZE_WORDS_MAX (CC_AES_KEY_SIZE_MAX >> 2) 67 + 68 + #define CC_MD5_DIGEST_SIZE 16 69 + #define CC_SHA1_DIGEST_SIZE 20 70 + #define CC_SHA224_DIGEST_SIZE 28 71 + #define CC_SHA256_DIGEST_SIZE 32 72 + #define CC_SHA256_DIGEST_SIZE_IN_WORDS 8 73 + #define CC_SHA384_DIGEST_SIZE 48 74 + #define CC_SHA512_DIGEST_SIZE 64 75 + 76 + #define CC_SHA1_BLOCK_SIZE 64 77 + #define CC_SHA1_BLOCK_SIZE_IN_WORDS 16 78 + #define CC_MD5_BLOCK_SIZE 64 79 + #define CC_MD5_BLOCK_SIZE_IN_WORDS 16 80 + #define CC_SHA224_BLOCK_SIZE 64 81 + #define CC_SHA256_BLOCK_SIZE 64 82 + #define CC_SHA256_BLOCK_SIZE_IN_WORDS 16 83 + #define CC_SHA1_224_256_BLOCK_SIZE 64 84 + #define CC_SHA384_BLOCK_SIZE 128 85 + #define CC_SHA512_BLOCK_SIZE 128 86 + 87 + #if (CC_SUPPORT_SHA > 256) 88 + #define CC_DIGEST_SIZE_MAX CC_SHA512_DIGEST_SIZE 89 + 
#define CC_HASH_BLOCK_SIZE_MAX CC_SHA512_BLOCK_SIZE /*1024b*/ 90 + #else /* Only up to SHA256 */ 91 + #define CC_DIGEST_SIZE_MAX CC_SHA256_DIGEST_SIZE 92 + #define CC_HASH_BLOCK_SIZE_MAX CC_SHA256_BLOCK_SIZE /*512b*/ 93 + #endif 94 + 95 + #define CC_HMAC_BLOCK_SIZE_MAX CC_HASH_BLOCK_SIZE_MAX 96 + 97 + #define CC_MULTI2_SYSTEM_KEY_SIZE 32 98 + #define CC_MULTI2_DATA_KEY_SIZE 8 99 + #define CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE (CC_MULTI2_SYSTEM_KEY_SIZE + CC_MULTI2_DATA_KEY_SIZE) 100 + #define CC_MULTI2_BLOCK_SIZE 8 101 + #define CC_MULTI2_IV_SIZE 8 102 + #define CC_MULTI2_MIN_NUM_ROUNDS 8 103 + #define CC_MULTI2_MAX_NUM_ROUNDS 128 104 + 105 + 106 + #define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX 107 + 108 + 109 + enum drv_engine_type { 110 + DRV_ENGINE_NULL = 0, 111 + DRV_ENGINE_AES = 1, 112 + DRV_ENGINE_DES = 2, 113 + DRV_ENGINE_HASH = 3, 114 + DRV_ENGINE_RC4 = 4, 115 + DRV_ENGINE_DOUT = 5, 116 + DRV_ENGINE_RESERVE32B = INT32_MAX, 117 + }; 118 + 119 + enum drv_crypto_alg { 120 + DRV_CRYPTO_ALG_NULL = -1, 121 + DRV_CRYPTO_ALG_AES = 0, 122 + DRV_CRYPTO_ALG_DES = 1, 123 + DRV_CRYPTO_ALG_HASH = 2, 124 + DRV_CRYPTO_ALG_C2 = 3, 125 + DRV_CRYPTO_ALG_HMAC = 4, 126 + DRV_CRYPTO_ALG_AEAD = 5, 127 + DRV_CRYPTO_ALG_BYPASS = 6, 128 + DRV_CRYPTO_ALG_NUM = 7, 129 + DRV_CRYPTO_ALG_RESERVE32B = INT32_MAX 130 + }; 131 + 132 + enum drv_crypto_direction { 133 + DRV_CRYPTO_DIRECTION_NULL = -1, 134 + DRV_CRYPTO_DIRECTION_ENCRYPT = 0, 135 + DRV_CRYPTO_DIRECTION_DECRYPT = 1, 136 + DRV_CRYPTO_DIRECTION_DECRYPT_ENCRYPT = 3, 137 + DRV_CRYPTO_DIRECTION_RESERVE32B = INT32_MAX 138 + }; 139 + 140 + enum drv_cipher_mode { 141 + DRV_CIPHER_NULL_MODE = -1, 142 + DRV_CIPHER_ECB = 0, 143 + DRV_CIPHER_CBC = 1, 144 + DRV_CIPHER_CTR = 2, 145 + DRV_CIPHER_CBC_MAC = 3, 146 + DRV_CIPHER_XTS = 4, 147 + DRV_CIPHER_XCBC_MAC = 5, 148 + DRV_CIPHER_OFB = 6, 149 + DRV_CIPHER_CMAC = 7, 150 + DRV_CIPHER_CCM = 8, 151 + DRV_CIPHER_CBC_CTS = 11, 152 + DRV_CIPHER_GCTR = 12, 153 + DRV_CIPHER_ESSIV = 13, 154 + 
DRV_CIPHER_BITLOCKER = 14, 155 + DRV_CIPHER_RESERVE32B = INT32_MAX 156 + }; 157 + 158 + enum drv_hash_mode { 159 + DRV_HASH_NULL = -1, 160 + DRV_HASH_SHA1 = 0, 161 + DRV_HASH_SHA256 = 1, 162 + DRV_HASH_SHA224 = 2, 163 + DRV_HASH_SHA512 = 3, 164 + DRV_HASH_SHA384 = 4, 165 + DRV_HASH_MD5 = 5, 166 + DRV_HASH_CBC_MAC = 6, 167 + DRV_HASH_XCBC_MAC = 7, 168 + DRV_HASH_CMAC = 8, 169 + DRV_HASH_MODE_NUM = 9, 170 + DRV_HASH_RESERVE32B = INT32_MAX 171 + }; 172 + 173 + enum drv_hash_hw_mode { 174 + DRV_HASH_HW_MD5 = 0, 175 + DRV_HASH_HW_SHA1 = 1, 176 + DRV_HASH_HW_SHA256 = 2, 177 + DRV_HASH_HW_SHA224 = 10, 178 + DRV_HASH_HW_SHA512 = 4, 179 + DRV_HASH_HW_SHA384 = 12, 180 + DRV_HASH_HW_GHASH = 6, 181 + DRV_HASH_HW_RESERVE32B = INT32_MAX 182 + }; 183 + 184 + enum drv_multi2_mode { 185 + DRV_MULTI2_NULL = -1, 186 + DRV_MULTI2_ECB = 0, 187 + DRV_MULTI2_CBC = 1, 188 + DRV_MULTI2_OFB = 2, 189 + DRV_MULTI2_RESERVE32B = INT32_MAX 190 + }; 191 + 192 + 193 + /* drv_crypto_key_type[1:0] is mapped to cipher_do[1:0] */ 194 + /* drv_crypto_key_type[2] is mapped to cipher_config2 */ 195 + enum drv_crypto_key_type { 196 + DRV_NULL_KEY = -1, 197 + DRV_USER_KEY = 0, /* 0x000 */ 198 + DRV_ROOT_KEY = 1, /* 0x001 */ 199 + DRV_PROVISIONING_KEY = 2, /* 0x010 */ 200 + DRV_SESSION_KEY = 3, /* 0x011 */ 201 + DRV_APPLET_KEY = 4, /* NA */ 202 + DRV_PLATFORM_KEY = 5, /* 0x101 */ 203 + DRV_CUSTOMER_KEY = 6, /* 0x110 */ 204 + DRV_END_OF_KEYS = INT32_MAX, 205 + }; 206 + 207 + enum drv_crypto_padding_type { 208 + DRV_PADDING_NONE = 0, 209 + DRV_PADDING_PKCS7 = 1, 210 + DRV_PADDING_RESERVE32B = INT32_MAX 211 + }; 212 + 213 + /*******************************************************************/ 214 + /***************** DESCRIPTOR BASED CONTEXTS ***********************/ 215 + /*******************************************************************/ 216 + 217 + /* Generic context ("super-class") */ 218 + struct drv_ctx_generic { 219 + enum drv_crypto_alg alg; 220 + } __attribute__((__may_alias__)); 221 + 222 + 223 + 
/*******************************************************************/ 224 + /***************** MESSAGE BASED CONTEXTS **************************/ 225 + /*******************************************************************/ 226 + 227 + 228 + /* Get the address of a @member within a given @ctx address 229 + @ctx: The context address 230 + @type: Type of context structure 231 + @member: Associated context field */ 232 + #define GET_CTX_FIELD_ADDR(ctx, type, member) (ctx + offsetof(type, member)) 233 + 234 + #endif /* _CC_CRYPTO_CTX_H_ */ 235 +
+30
drivers/staging/ccree/cc_hal.h
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + /* pseudo cc_hal.h for cc7x_perf_test_driver (to be able to include code from CC drivers) */ 18 + 19 + #ifndef __CC_HAL_H__ 20 + #define __CC_HAL_H__ 21 + 22 + #include <linux/io.h> 23 + 24 + #define READ_REGISTER(_addr) ioread32((_addr)) 25 + #define WRITE_REGISTER(_addr, _data) iowrite32((_data), (_addr)) 26 + 27 + #define CC_HAL_WRITE_REGISTER(offset, val) WRITE_REGISTER(cc_base + offset, val) 28 + #define CC_HAL_READ_REGISTER(offset) READ_REGISTER(cc_base + offset) 29 + 30 + #endif
+603
drivers/staging/ccree/cc_hw_queue_defs.h
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + #ifndef __CC_HW_QUEUE_DEFS_H__ 18 + #define __CC_HW_QUEUE_DEFS_H__ 19 + 20 + #include "cc_pal_log.h" 21 + #include "cc_regs.h" 22 + #include "dx_crys_kernel.h" 23 + 24 + #ifdef __KERNEL__ 25 + #include <linux/types.h> 26 + #define UINT32_MAX 0xFFFFFFFFL 27 + #define INT32_MAX 0x7FFFFFFFL 28 + #define UINT16_MAX 0xFFFFL 29 + #else 30 + #include <stdint.h> 31 + #endif 32 + 33 + /****************************************************************************** 34 + * DEFINITIONS 35 + ******************************************************************************/ 36 + 37 + 38 + /* Dma AXI Secure bit */ 39 + #define AXI_SECURE 0 40 + #define AXI_NOT_SECURE 1 41 + 42 + #define HW_DESC_SIZE_WORDS 6 43 + #define HW_QUEUE_SLOTS_MAX 15 /* Max. 
available slots in HW queue */ 44 + 45 + #define _HW_DESC_MONITOR_KICK 0x7FFFC00 46 + 47 + /****************************************************************************** 48 + * TYPE DEFINITIONS 49 + ******************************************************************************/ 50 + 51 + typedef struct HwDesc { 52 + uint32_t word[HW_DESC_SIZE_WORDS]; 53 + } HwDesc_s; 54 + 55 + typedef enum DescDirection { 56 + DESC_DIRECTION_ILLEGAL = -1, 57 + DESC_DIRECTION_ENCRYPT_ENCRYPT = 0, 58 + DESC_DIRECTION_DECRYPT_DECRYPT = 1, 59 + DESC_DIRECTION_DECRYPT_ENCRYPT = 3, 60 + DESC_DIRECTION_END = INT32_MAX, 61 + }DescDirection_t; 62 + 63 + typedef enum DmaMode { 64 + DMA_MODE_NULL = -1, 65 + NO_DMA = 0, 66 + DMA_SRAM = 1, 67 + DMA_DLLI = 2, 68 + DMA_MLLI = 3, 69 + DmaMode_OPTIONTS, 70 + DmaMode_END = INT32_MAX, 71 + }DmaMode_t; 72 + 73 + typedef enum FlowMode { 74 + FLOW_MODE_NULL = -1, 75 + /* data flows */ 76 + BYPASS = 0, 77 + DIN_AES_DOUT = 1, 78 + AES_to_HASH = 2, 79 + AES_and_HASH = 3, 80 + DIN_DES_DOUT = 4, 81 + DES_to_HASH = 5, 82 + DES_and_HASH = 6, 83 + DIN_HASH = 7, 84 + DIN_HASH_and_BYPASS = 8, 85 + AESMAC_and_BYPASS = 9, 86 + AES_to_HASH_and_DOUT = 10, 87 + DIN_RC4_DOUT = 11, 88 + DES_to_HASH_and_DOUT = 12, 89 + AES_to_AES_to_HASH_and_DOUT = 13, 90 + AES_to_AES_to_HASH = 14, 91 + AES_to_HASH_and_AES = 15, 92 + DIN_MULTI2_DOUT = 16, 93 + DIN_AES_AESMAC = 17, 94 + HASH_to_DOUT = 18, 95 + /* setup flows */ 96 + S_DIN_to_AES = 32, 97 + S_DIN_to_AES2 = 33, 98 + S_DIN_to_DES = 34, 99 + S_DIN_to_RC4 = 35, 100 + S_DIN_to_MULTI2 = 36, 101 + S_DIN_to_HASH = 37, 102 + S_AES_to_DOUT = 38, 103 + S_AES2_to_DOUT = 39, 104 + S_RC4_to_DOUT = 41, 105 + S_DES_to_DOUT = 42, 106 + S_HASH_to_DOUT = 43, 107 + SET_FLOW_ID = 44, 108 + FlowMode_OPTIONTS, 109 + FlowMode_END = INT32_MAX, 110 + }FlowMode_t; 111 + 112 + typedef enum TunnelOp { 113 + TUNNEL_OP_INVALID = -1, 114 + TUNNEL_OFF = 0, 115 + TUNNEL_ON = 1, 116 + TunnelOp_OPTIONS, 117 + TunnelOp_END = INT32_MAX, 118 + } TunnelOp_t; 
119 + 120 + typedef enum SetupOp { 121 + SETUP_LOAD_NOP = 0, 122 + SETUP_LOAD_STATE0 = 1, 123 + SETUP_LOAD_STATE1 = 2, 124 + SETUP_LOAD_STATE2 = 3, 125 + SETUP_LOAD_KEY0 = 4, 126 + SETUP_LOAD_XEX_KEY = 5, 127 + SETUP_WRITE_STATE0 = 8, 128 + SETUP_WRITE_STATE1 = 9, 129 + SETUP_WRITE_STATE2 = 10, 130 + SETUP_WRITE_STATE3 = 11, 131 + setupOp_OPTIONTS, 132 + setupOp_END = INT32_MAX, 133 + }SetupOp_t; 134 + 135 + enum AesMacSelector { 136 + AES_SK = 1, 137 + AES_CMAC_INIT = 2, 138 + AES_CMAC_SIZE0 = 3, 139 + AesMacEnd = INT32_MAX, 140 + }; 141 + 142 + #define HW_KEY_MASK_CIPHER_DO 0x3 143 + #define HW_KEY_SHIFT_CIPHER_CFG2 2 144 + 145 + 146 + /* HwCryptoKey[1:0] is mapped to cipher_do[1:0] */ 147 + /* HwCryptoKey[2:3] is mapped to cipher_config2[1:0] */ 148 + typedef enum HwCryptoKey { 149 + USER_KEY = 0, /* 0x0000 */ 150 + ROOT_KEY = 1, /* 0x0001 */ 151 + PROVISIONING_KEY = 2, /* 0x0010 */ /* ==KCP */ 152 + SESSION_KEY = 3, /* 0x0011 */ 153 + RESERVED_KEY = 4, /* NA */ 154 + PLATFORM_KEY = 5, /* 0x0101 */ 155 + CUSTOMER_KEY = 6, /* 0x0110 */ 156 + KFDE0_KEY = 7, /* 0x0111 */ 157 + KFDE1_KEY = 9, /* 0x1001 */ 158 + KFDE2_KEY = 10, /* 0x1010 */ 159 + KFDE3_KEY = 11, /* 0x1011 */ 160 + END_OF_KEYS = INT32_MAX, 161 + }HwCryptoKey_t; 162 + 163 + typedef enum HwAesKeySize { 164 + AES_128_KEY = 0, 165 + AES_192_KEY = 1, 166 + AES_256_KEY = 2, 167 + END_OF_AES_KEYS = INT32_MAX, 168 + }HwAesKeySize_t; 169 + 170 + typedef enum HwDesKeySize { 171 + DES_ONE_KEY = 0, 172 + DES_TWO_KEYS = 1, 173 + DES_THREE_KEYS = 2, 174 + END_OF_DES_KEYS = INT32_MAX, 175 + }HwDesKeySize_t; 176 + 177 + /*****************************/ 178 + /* Descriptor packing macros */ 179 + /*****************************/ 180 + 181 + #define GET_HW_Q_DESC_WORD_IDX(descWordIdx) (CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD ## descWordIdx) ) 182 + 183 + #define HW_DESC_INIT(pDesc) do { \ 184 + (pDesc)->word[0] = 0; \ 185 + (pDesc)->word[1] = 0; \ 186 + (pDesc)->word[2] = 0; \ 187 + (pDesc)->word[3] = 0; \ 188 + 
(pDesc)->word[4] = 0; \ 189 + (pDesc)->word[5] = 0; \ 190 + } while (0) 191 + 192 + /* HW descriptor debug functions */ 193 + int createDetailedDump(HwDesc_s *pDesc); 194 + void descriptor_log(HwDesc_s *desc); 195 + 196 + #if defined(HW_DESCRIPTOR_LOG) || defined(HW_DESC_DUMP_HOST_BUF) 197 + #define LOG_HW_DESC(pDesc) descriptor_log(pDesc) 198 + #else 199 + #define LOG_HW_DESC(pDesc) 200 + #endif 201 + 202 + #if (CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_TRACE) || defined(OEMFW_LOG) 203 + 204 + #ifdef UART_PRINTF 205 + #define CREATE_DETAILED_DUMP(pDesc) createDetailedDump(pDesc) 206 + #else 207 + #define CREATE_DETAILED_DUMP(pDesc) 208 + #endif 209 + 210 + #define HW_DESC_DUMP(pDesc) do { \ 211 + CC_PAL_LOG_TRACE("\n---------------------------------------------------\n"); \ 212 + CREATE_DETAILED_DUMP(pDesc); \ 213 + CC_PAL_LOG_TRACE("0x%08X, ", (unsigned int)(pDesc)->word[0]); \ 214 + CC_PAL_LOG_TRACE("0x%08X, ", (unsigned int)(pDesc)->word[1]); \ 215 + CC_PAL_LOG_TRACE("0x%08X, ", (unsigned int)(pDesc)->word[2]); \ 216 + CC_PAL_LOG_TRACE("0x%08X, ", (unsigned int)(pDesc)->word[3]); \ 217 + CC_PAL_LOG_TRACE("0x%08X, ", (unsigned int)(pDesc)->word[4]); \ 218 + CC_PAL_LOG_TRACE("0x%08X\n", (unsigned int)(pDesc)->word[5]); \ 219 + CC_PAL_LOG_TRACE("---------------------------------------------------\n\n"); \ 220 + } while (0) 221 + 222 + #else 223 + #define HW_DESC_DUMP(pDesc) do {} while (0) 224 + #endif 225 + 226 + 227 + /*! 228 + * This macro indicates the end of current HW descriptors flow and release the HW engines. 229 + * 230 + * \param pDesc pointer HW descriptor struct 231 + */ 232 + #define HW_DESC_SET_QUEUE_LAST_IND(pDesc) \ 233 + do { \ 234 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, QUEUE_LAST_IND, (pDesc)->word[3], 1); \ 235 + } while (0) 236 + 237 + /*! 
238 + * This macro signs the end of HW descriptors flow by asking for completion ack, and release the HW engines 239 + * 240 + * \param pDesc pointer HW descriptor struct 241 + */ 242 + #define HW_DESC_SET_ACK_LAST(pDesc) \ 243 + do { \ 244 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, QUEUE_LAST_IND, (pDesc)->word[3], 1); \ 245 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, ACK_NEEDED, (pDesc)->word[4], 1); \ 246 + } while (0) 247 + 248 + 249 + #define MSB64(_addr) (sizeof(_addr) == 4 ? 0 : ((_addr) >> 32)&UINT16_MAX) 250 + 251 + /*! 252 + * This macro sets the DIN field of a HW descriptors 253 + * 254 + * \param pDesc pointer HW descriptor struct 255 + * \param dmaMode The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT 256 + * \param dinAdr DIN address 257 + * \param dinSize Data size in bytes 258 + * \param axiNs AXI secure bit 259 + */ 260 + #define HW_DESC_SET_DIN_TYPE(pDesc, dmaMode, dinAdr, dinSize, axiNs) \ 261 + do { \ 262 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (dinAdr)&UINT32_MAX ); \ 263 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD5, DIN_ADDR_HIGH, (pDesc)->word[5], MSB64(dinAdr) ); \ 264 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_DMA_MODE, (pDesc)->word[1], (dmaMode)); \ 265 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize)); \ 266 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, NS_BIT, (pDesc)->word[1], (axiNs)); \ 267 + } while (0) 268 + 269 + 270 + /*! 271 + * This macro sets the DIN field of a HW descriptors to NO DMA mode. 
Used for NOP descriptor, register patches and 272 + * other special modes 273 + * 274 + * \param pDesc pointer HW descriptor struct 275 + * \param dinAdr DIN address 276 + * \param dinSize Data size in bytes 277 + */ 278 + #define HW_DESC_SET_DIN_NO_DMA(pDesc, dinAdr, dinSize) \ 279 + do { \ 280 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (uint32_t)(dinAdr)); \ 281 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize)); \ 282 + } while (0) 283 + 284 + /*! 285 + * This macro sets the DIN field of a HW descriptors to SRAM mode. 286 + * Note: No need to check SRAM alignment since host requests do not use SRAM and 287 + * adaptor will enforce alignment check. 288 + * 289 + * \param pDesc pointer HW descriptor struct 290 + * \param dinAdr DIN address 291 + * \param dinSize Data size in bytes 292 + */ 293 + #define HW_DESC_SET_DIN_SRAM(pDesc, dinAdr, dinSize) \ 294 + do { \ 295 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (uint32_t)(dinAdr)); \ 296 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_DMA_MODE, (pDesc)->word[1], DMA_SRAM); \ 297 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize)); \ 298 + } while (0) 299 + 300 + /*! This macro sets the DIN field of a HW descriptors to CONST mode 301 + * 302 + * \param pDesc pointer HW descriptor struct 303 + * \param val DIN const value 304 + * \param dinSize Data size in bytes 305 + */ 306 + #define HW_DESC_SET_DIN_CONST(pDesc, val, dinSize) \ 307 + do { \ 308 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0, VALUE, (pDesc)->word[0], (uint32_t)(val)); \ 309 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_CONST_VALUE, (pDesc)->word[1], 1); \ 310 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_DMA_MODE, (pDesc)->word[1], DMA_SRAM); \ 311 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, DIN_SIZE, (pDesc)->word[1], (dinSize)); \ 312 + } while (0) 313 + 314 + /*! 
315 + * This macro sets the DIN not last input data indicator 316 + * 317 + * \param pDesc pointer HW descriptor struct 318 + */ 319 + #define HW_DESC_SET_DIN_NOT_LAST_INDICATION(pDesc) \ 320 + do { \ 321 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD1, NOT_LAST, (pDesc)->word[1], 1); \ 322 + } while (0) 323 + 324 + /*! 325 + * This macro sets the DOUT field of a HW descriptors 326 + * 327 + * \param pDesc pointer HW descriptor struct 328 + * \param dmaMode The DMA mode: NO_DMA, SRAM, DLLI, MLLI, CONSTANT 329 + * \param doutAdr DOUT address 330 + * \param doutSize Data size in bytes 331 + * \param axiNs AXI secure bit 332 + */ 333 + #define HW_DESC_SET_DOUT_TYPE(pDesc, dmaMode, doutAdr, doutSize, axiNs) \ 334 + do { \ 335 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (doutAdr)&UINT32_MAX ); \ 336 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD5, DOUT_ADDR_HIGH, (pDesc)->word[5], MSB64(doutAdr) ); \ 337 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_DMA_MODE, (pDesc)->word[3], (dmaMode)); \ 338 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize)); \ 339 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, NS_BIT, (pDesc)->word[3], (axiNs)); \ 340 + } while (0) 341 + 342 + /*! 
343 + * This macro sets the DOUT field of a HW descriptors to DLLI type 344 + * The LAST INDICATION is provided by the user 345 + * 346 + * \param pDesc pointer HW descriptor struct 347 + * \param doutAdr DOUT address 348 + * \param doutSize Data size in bytes 349 + * \param lastInd The last indication bit 350 + * \param axiNs AXI secure bit 351 + */ 352 + #define HW_DESC_SET_DOUT_DLLI(pDesc, doutAdr, doutSize, axiNs ,lastInd) \ 353 + do { \ 354 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (doutAdr)&UINT32_MAX ); \ 355 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD5, DOUT_ADDR_HIGH, (pDesc)->word[5], MSB64(doutAdr) ); \ 356 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_DMA_MODE, (pDesc)->word[3], DMA_DLLI); \ 357 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize)); \ 358 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_LAST_IND, (pDesc)->word[3], lastInd); \ 359 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, NS_BIT, (pDesc)->word[3], (axiNs)); \ 360 + } while (0) 361 + 362 + /*! 
363 + * This macro sets the DOUT field of a HW descriptors to DLLI type 364 + * The LAST INDICATION is provided by the user 365 + * 366 + * \param pDesc pointer HW descriptor struct 367 + * \param doutAdr DOUT address 368 + * \param doutSize Data size in bytes 369 + * \param lastInd The last indication bit 370 + * \param axiNs AXI secure bit 371 + */ 372 + #define HW_DESC_SET_DOUT_MLLI(pDesc, doutAdr, doutSize, axiNs ,lastInd) \ 373 + do { \ 374 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (doutAdr)&UINT32_MAX ); \ 375 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD5, DOUT_ADDR_HIGH, (pDesc)->word[5], MSB64(doutAdr) ); \ 376 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_DMA_MODE, (pDesc)->word[3], DMA_MLLI); \ 377 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize)); \ 378 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_LAST_IND, (pDesc)->word[3], lastInd); \ 379 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, NS_BIT, (pDesc)->word[3], (axiNs)); \ 380 + } while (0) 381 + 382 + /*! 383 + * This macro sets the DOUT field of a HW descriptors to NO DMA mode. Used for NOP descriptor, register patches and 384 + * other special modes 385 + * 386 + * \param pDesc pointer HW descriptor struct 387 + * \param doutAdr DOUT address 388 + * \param doutSize Data size in bytes 389 + * \param registerWriteEnable Enables a write operation to a register 390 + */ 391 + #define HW_DESC_SET_DOUT_NO_DMA(pDesc, doutAdr, doutSize, registerWriteEnable) \ 392 + do { \ 393 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(doutAdr)); \ 394 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize)); \ 395 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_LAST_IND, (pDesc)->word[3], (registerWriteEnable)); \ 396 + } while (0) 397 + 398 + /*! 399 + * This macro sets the word for the XOR operation. 
400 + * 401 + * \param pDesc pointer HW descriptor struct 402 + * \param xorVal xor data value 403 + */ 404 + #define HW_DESC_SET_XOR_VAL(pDesc, xorVal) \ 405 + do { \ 406 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(xorVal)); \ 407 + } while (0) 408 + 409 + /*! 410 + * This macro sets the XOR indicator bit in the descriptor 411 + * 412 + * \param pDesc pointer HW descriptor struct 413 + */ 414 + #define HW_DESC_SET_XOR_ACTIVE(pDesc) \ 415 + do { \ 416 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, HASH_XOR_BIT, (pDesc)->word[3], 1); \ 417 + } while (0) 418 + 419 + /*! 420 + * This macro selects the AES engine instead of HASH engine when setting up combined mode with AES XCBC MAC 421 + * 422 + * \param pDesc pointer HW descriptor struct 423 + */ 424 + #define HW_DESC_SET_AES_NOT_HASH_MODE(pDesc) \ 425 + do { \ 426 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, AES_SEL_N_HASH, (pDesc)->word[4], 1); \ 427 + } while (0) 428 + 429 + /*! 430 + * This macro sets the DOUT field of a HW descriptors to SRAM mode 431 + * Note: No need to check SRAM alignment since host requests do not use SRAM and 432 + * adaptor will enforce alignment check. 433 + * 434 + * \param pDesc pointer HW descriptor struct 435 + * \param doutAdr DOUT address 436 + * \param doutSize Data size in bytes 437 + */ 438 + #define HW_DESC_SET_DOUT_SRAM(pDesc, doutAdr, doutSize) \ 439 + do { \ 440 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(doutAdr)); \ 441 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_DMA_MODE, (pDesc)->word[3], DMA_SRAM); \ 442 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD3, DOUT_SIZE, (pDesc)->word[3], (doutSize)); \ 443 + } while (0) 444 + 445 + 446 + /*! 
447 + * This macro sets the data unit size for XEX mode in data_out_addr[15:0] 448 + * 449 + * \param pDesc pointer HW descriptor struct 450 + * \param dataUnitSize data unit size for XEX mode 451 + */ 452 + #define HW_DESC_SET_XEX_DATA_UNIT_SIZE(pDesc, dataUnitSize) \ 453 + do { \ 454 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(dataUnitSize)); \ 455 + } while (0) 456 + 457 + /*! 458 + * This macro sets the number of rounds for Multi2 in data_out_addr[15:0] 459 + * 460 + * \param pDesc pointer HW descriptor struct 461 + * \param numRounds number of rounds for Multi2 462 + */ 463 + #define HW_DESC_SET_MULTI2_NUM_ROUNDS(pDesc, numRounds) \ 464 + do { \ 465 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD2, VALUE, (pDesc)->word[2], (uint32_t)(numRounds)); \ 466 + } while (0) 467 + 468 + /*! 469 + * This macro sets the flow mode. 470 + * 471 + * \param pDesc pointer HW descriptor struct 472 + * \param flowMode Any one of the modes defined in [CC7x-DESC] 473 + */ 474 + 475 + #define HW_DESC_SET_FLOW_MODE(pDesc, flowMode) \ 476 + do { \ 477 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, DATA_FLOW_MODE, (pDesc)->word[4], (flowMode)); \ 478 + } while (0) 479 + 480 + /*! 481 + * This macro sets the cipher mode. 482 + * 483 + * \param pDesc pointer HW descriptor struct 484 + * \param cipherMode Any one of the modes defined in [CC7x-DESC] 485 + */ 486 + #define HW_DESC_SET_CIPHER_MODE(pDesc, cipherMode) \ 487 + do { \ 488 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_MODE, (pDesc)->word[4], (cipherMode)); \ 489 + } while (0) 490 + 491 + /*! 492 + * This macro sets the cipher configuration fields. 
493 + * 494 + * \param pDesc pointer HW descriptor struct 495 + * \param cipherConfig Any one of the modes defined in [CC7x-DESC] 496 + */ 497 + #define HW_DESC_SET_CIPHER_CONFIG0(pDesc, cipherConfig) \ 498 + do { \ 499 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_CONF0, (pDesc)->word[4], (cipherConfig)); \ 500 + } while (0) 501 + 502 + /*! 503 + * This macro sets the cipher configuration fields. 504 + * 505 + * \param pDesc pointer HW descriptor struct 506 + * \param cipherConfig Any one of the modes defined in [CC7x-DESC] 507 + */ 508 + #define HW_DESC_SET_CIPHER_CONFIG1(pDesc, cipherConfig) \ 509 + do { \ 510 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_CONF1, (pDesc)->word[4], (cipherConfig)); \ 511 + } while (0) 512 + 513 + /*! 514 + * This macro sets HW key configuration fields. 515 + * 516 + * \param pDesc pointer HW descriptor struct 517 + * \param hwKey The hw key number as in enun HwCryptoKey 518 + */ 519 + #define HW_DESC_SET_HW_CRYPTO_KEY(pDesc, hwKey) \ 520 + do { \ 521 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_DO, (pDesc)->word[4], (hwKey)&HW_KEY_MASK_CIPHER_DO); \ 522 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_CONF2, (pDesc)->word[4], (hwKey>>HW_KEY_SHIFT_CIPHER_CFG2)); \ 523 + } while (0) 524 + 525 + /*! 526 + * This macro changes the bytes order of all setup-finalize descriptosets. 527 + * 528 + * \param pDesc pointer HW descriptor struct 529 + * \param swapConfig Any one of the modes defined in [CC7x-DESC] 530 + */ 531 + #define HW_DESC_SET_BYTES_SWAP(pDesc, swapConfig) \ 532 + do { \ 533 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, BYTES_SWAP, (pDesc)->word[4], (swapConfig)); \ 534 + } while (0) 535 + 536 + /*! 537 + * This macro sets the CMAC_SIZE0 mode. 
538 + * 539 + * \param pDesc pointer HW descriptor struct 540 + */ 541 + #define HW_DESC_SET_CMAC_SIZE0_MODE(pDesc) \ 542 + do { \ 543 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CMAC_SIZE0, (pDesc)->word[4], 0x1); \ 544 + } while (0) 545 + 546 + /*! 547 + * This macro sets the key size for AES engine. 548 + * 549 + * \param pDesc pointer HW descriptor struct 550 + * \param keySize key size in bytes (NOT size code) 551 + */ 552 + #define HW_DESC_SET_KEY_SIZE_AES(pDesc, keySize) \ 553 + do { \ 554 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, KEY_SIZE, (pDesc)->word[4], ((keySize) >> 3) - 2); \ 555 + } while (0) 556 + 557 + /*! 558 + * This macro sets the key size for DES engine. 559 + * 560 + * \param pDesc pointer HW descriptor struct 561 + * \param keySize key size in bytes (NOT size code) 562 + */ 563 + #define HW_DESC_SET_KEY_SIZE_DES(pDesc, keySize) \ 564 + do { \ 565 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, KEY_SIZE, (pDesc)->word[4], ((keySize) >> 3) - 1); \ 566 + } while (0) 567 + 568 + /*! 569 + * This macro sets the descriptor's setup mode 570 + * 571 + * \param pDesc pointer HW descriptor struct 572 + * \param setupMode Any one of the setup modes defined in [CC7x-DESC] 573 + */ 574 + #define HW_DESC_SET_SETUP_MODE(pDesc, setupMode) \ 575 + do { \ 576 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, SETUP_OPERATION, (pDesc)->word[4], (setupMode)); \ 577 + } while (0) 578 + 579 + /*! 580 + * This macro sets the descriptor's cipher do 581 + * 582 + * \param pDesc pointer HW descriptor struct 583 + * \param cipherDo Any one of the cipher do defined in [CC7x-DESC] 584 + */ 585 + #define HW_DESC_SET_CIPHER_DO(pDesc, cipherDo) \ 586 + do { \ 587 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_QUEUE_WORD4, CIPHER_DO, (pDesc)->word[4], (cipherDo)&HW_KEY_MASK_CIPHER_DO); \ 588 + } while (0) 589 + 590 + /*! 591 + * This macro sets the DIN field of a HW descriptors to star/stop monitor descriptor. 
592 + * Used for performance measurements and debug purposes. 593 + * 594 + * \param pDesc pointer HW descriptor struct 595 + */ 596 + #define HW_DESC_SET_DIN_MONITOR_CNTR(pDesc) \ 597 + do { \ 598 + CC_REG_FLD_SET(CRY_KERNEL, DSCRPTR_MEASURE_CNTR, VALUE, (pDesc)->word[1], _HW_DESC_MONITOR_KICK); \ 599 + } while (0) 600 + 601 + 602 + 603 + #endif /*__CC_HW_QUEUE_DEFS_H__*/
+57
drivers/staging/ccree/cc_lli_defs.h
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + 18 + #ifndef _CC_LLI_DEFS_H_ 19 + #define _CC_LLI_DEFS_H_ 20 + #ifdef __KERNEL__ 21 + #include <linux/types.h> 22 + #else 23 + #include <stdint.h> 24 + #endif 25 + #include "cc_bitops.h" 26 + 27 + /* Max DLLI size */ 28 + #define DLLI_SIZE_BIT_SIZE 0x18 // DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE 29 + 30 + #define CC_MAX_MLLI_ENTRY_SIZE 0x10000 31 + 32 + #define MSB64(_addr) (sizeof(_addr) == 4 ? 
0 : ((_addr) >> 32)&UINT16_MAX) 33 + 34 + #define LLI_SET_ADDR(lli_p, addr) \ 35 + BITFIELD_SET(((uint32_t *)(lli_p))[LLI_WORD0_OFFSET], LLI_LADDR_BIT_OFFSET, LLI_LADDR_BIT_SIZE, (addr & UINT32_MAX)); \ 36 + BITFIELD_SET(((uint32_t *)(lli_p))[LLI_WORD1_OFFSET], LLI_HADDR_BIT_OFFSET, LLI_HADDR_BIT_SIZE, MSB64(addr)); 37 + 38 + #define LLI_SET_SIZE(lli_p, size) \ 39 + BITFIELD_SET(((uint32_t *)(lli_p))[LLI_WORD1_OFFSET], LLI_SIZE_BIT_OFFSET, LLI_SIZE_BIT_SIZE, size) 40 + 41 + /* Size of entry */ 42 + #define LLI_ENTRY_WORD_SIZE 2 43 + #define LLI_ENTRY_BYTE_SIZE (LLI_ENTRY_WORD_SIZE * sizeof(uint32_t)) 44 + 45 + /* Word0[31:0] = ADDR[31:0] */ 46 + #define LLI_WORD0_OFFSET 0 47 + #define LLI_LADDR_BIT_OFFSET 0 48 + #define LLI_LADDR_BIT_SIZE 32 49 + /* Word1[31:16] = ADDR[47:32]; Word1[15:0] = SIZE */ 50 + #define LLI_WORD1_OFFSET 1 51 + #define LLI_SIZE_BIT_OFFSET 0 52 + #define LLI_SIZE_BIT_SIZE 16 53 + #define LLI_HADDR_BIT_OFFSET 16 54 + #define LLI_HADDR_BIT_SIZE 16 55 + 56 + 57 + #endif /*_CC_LLI_DEFS_H_*/
+188
drivers/staging/ccree/cc_pal_log.h
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + #ifndef _CC_PAL_LOG_H_ 18 + #define _CC_PAL_LOG_H_ 19 + 20 + #include "cc_pal_types.h" 21 + #include "cc_pal_log_plat.h" 22 + 23 + /*! 24 + @file 25 + @brief This file contains the PAL layer log definitions, by default the log is disabled. 26 + @defgroup cc_pal_log CryptoCell PAL logging APIs and definitions 27 + @{ 28 + @ingroup cc_pal 29 + */ 30 + 31 + /* PAL log levels (to be used in CC_PAL_logLevel) */ 32 + /*! PAL log level - disabled. */ 33 + #define CC_PAL_LOG_LEVEL_NULL (-1) /*!< \internal Disable logging */ 34 + /*! PAL log level - error. */ 35 + #define CC_PAL_LOG_LEVEL_ERR 0 36 + /*! PAL log level - warning. */ 37 + #define CC_PAL_LOG_LEVEL_WARN 1 38 + /*! PAL log level - info. */ 39 + #define CC_PAL_LOG_LEVEL_INFO 2 40 + /*! PAL log level - debug. */ 41 + #define CC_PAL_LOG_LEVEL_DEBUG 3 42 + /*! PAL log level - trace. */ 43 + #define CC_PAL_LOG_LEVEL_TRACE 4 44 + /*! PAL log level - data. */ 45 + #define CC_PAL_LOG_LEVEL_DATA 5 46 + 47 + #ifndef CC_PAL_LOG_CUR_COMPONENT 48 + /* Setting default component mask in case caller did not define */ 49 + /* (a mask that is always on for every log mask value but full masking) */ 50 + /*! 
Default log debugged component.*/ 51 + #define CC_PAL_LOG_CUR_COMPONENT 0xFFFFFFFF 52 + #endif 53 + #ifndef CC_PAL_LOG_CUR_COMPONENT_NAME 54 + /*! Default log debugged component.*/ 55 + #define CC_PAL_LOG_CUR_COMPONENT_NAME "CC" 56 + #endif 57 + 58 + /* Select compile time log level (default if not explicitly specified by caller) */ 59 + #ifndef CC_PAL_MAX_LOG_LEVEL /* Can be overriden by external definition of this constant */ 60 + #ifdef DEBUG 61 + /*! Default debug log level (when debug is set to on).*/ 62 + #define CC_PAL_MAX_LOG_LEVEL CC_PAL_LOG_LEVEL_ERR /*CC_PAL_LOG_LEVEL_DEBUG*/ 63 + #else /* Disable logging */ 64 + /*! Default debug log level (when debug is set to on).*/ 65 + #define CC_PAL_MAX_LOG_LEVEL CC_PAL_LOG_LEVEL_NULL 66 + #endif 67 + #endif /*CC_PAL_MAX_LOG_LEVEL*/ 68 + /*! Evaluate CC_PAL_MAX_LOG_LEVEL in case provided by caller */ 69 + #define __CC_PAL_LOG_LEVEL_EVAL(level) level 70 + /*! Maximal log level defintion.*/ 71 + #define _CC_PAL_MAX_LOG_LEVEL __CC_PAL_LOG_LEVEL_EVAL(CC_PAL_MAX_LOG_LEVEL) 72 + 73 + 74 + #ifdef ARM_DSM 75 + /*! Log init function. */ 76 + #define CC_PalLogInit() do {} while (0) 77 + /*! Log set level function - sets the level of logging in case of debug. */ 78 + #define CC_PalLogLevelSet(setLevel) do {} while (0) 79 + /*! Log set mask function - sets the component masking in case of debug. */ 80 + #define CC_PalLogMaskSet(setMask) do {} while (0) 81 + #else 82 + #if _CC_PAL_MAX_LOG_LEVEL > CC_PAL_LOG_LEVEL_NULL 83 + /*! Log init function. */ 84 + void CC_PalLogInit(void); 85 + /*! Log set level function - sets the level of logging in case of debug. */ 86 + void CC_PalLogLevelSet(int setLevel); 87 + /*! Log set mask function - sets the component masking in case of debug. */ 88 + void CC_PalLogMaskSet(uint32_t setMask); 89 + /*! Global variable for log level */ 90 + extern int CC_PAL_logLevel; 91 + /*! Global variable for log mask */ 92 + extern uint32_t CC_PAL_logMask; 93 + #else /* No log */ 94 + /*! Log init function. 
*/ 95 + static inline void CC_PalLogInit(void) {} 96 + /*! Log set level function - sets the level of logging in case of debug. */ 97 + static inline void CC_PalLogLevelSet(int setLevel) {CC_UNUSED_PARAM(setLevel);} 98 + /*! Log set mask function - sets the component masking in case of debug. */ 99 + static inline void CC_PalLogMaskSet(uint32_t setMask) {CC_UNUSED_PARAM(setMask);} 100 + #endif 101 + #endif 102 + 103 + /*! Filter logging based on logMask and dispatch to platform specific logging mechanism. */ 104 + #define _CC_PAL_LOG(level, format, ...) \ 105 + if (CC_PAL_logMask & CC_PAL_LOG_CUR_COMPONENT) \ 106 + __CC_PAL_LOG_PLAT(CC_PAL_LOG_LEVEL_ ## level, "%s:%s: " format, CC_PAL_LOG_CUR_COMPONENT_NAME, __func__, ##__VA_ARGS__) 107 + 108 + #if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_ERR) 109 + /*! Log messages according to log level.*/ 110 + #define CC_PAL_LOG_ERR(format, ... ) \ 111 + _CC_PAL_LOG(ERR, format, ##__VA_ARGS__) 112 + #else 113 + /*! Log messages according to log level.*/ 114 + #define CC_PAL_LOG_ERR( ... ) do {} while (0) 115 + #endif 116 + 117 + #if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_WARN) 118 + /*! Log messages according to log level.*/ 119 + #define CC_PAL_LOG_WARN(format, ... ) \ 120 + if (CC_PAL_logLevel >= CC_PAL_LOG_LEVEL_WARN) \ 121 + _CC_PAL_LOG(WARN, format, ##__VA_ARGS__) 122 + #else 123 + /*! Log messages according to log level.*/ 124 + #define CC_PAL_LOG_WARN( ... ) do {} while (0) 125 + #endif 126 + 127 + #if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_INFO) 128 + /*! Log messages according to log level.*/ 129 + #define CC_PAL_LOG_INFO(format, ... ) \ 130 + if (CC_PAL_logLevel >= CC_PAL_LOG_LEVEL_INFO) \ 131 + _CC_PAL_LOG(INFO, format, ##__VA_ARGS__) 132 + #else 133 + /*! Log messages according to log level.*/ 134 + #define CC_PAL_LOG_INFO( ... ) do {} while (0) 135 + #endif 136 + 137 + #if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_DEBUG) 138 + /*! 
Log messages according to log level.*/ 139 + #define CC_PAL_LOG_DEBUG(format, ... ) \ 140 + if (CC_PAL_logLevel >= CC_PAL_LOG_LEVEL_DEBUG) \ 141 + _CC_PAL_LOG(DEBUG, format, ##__VA_ARGS__) 142 + 143 + /*! Log message buffer.*/ 144 + #define CC_PAL_LOG_DUMP_BUF(msg, buf, size) \ 145 + do { \ 146 + int i; \ 147 + uint8_t *pData = (uint8_t*)buf; \ 148 + \ 149 + PRINTF("%s (%d):\n", msg, size); \ 150 + for (i = 0; i < size; i++) { \ 151 + PRINTF("0x%02X ", pData[i]); \ 152 + if ((i & 0xF) == 0xF) { \ 153 + PRINTF("\n"); \ 154 + } \ 155 + } \ 156 + PRINTF("\n"); \ 157 + } while (0) 158 + #else 159 + /*! Log debug messages.*/ 160 + #define CC_PAL_LOG_DEBUG( ... ) do {} while (0) 161 + /*! Log debug buffer.*/ 162 + #define CC_PAL_LOG_DUMP_BUF(msg, buf, size) do {} while (0) 163 + #endif 164 + 165 + #if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_TRACE) 166 + /*! Log debug trace.*/ 167 + #define CC_PAL_LOG_TRACE(format, ... ) \ 168 + if (CC_PAL_logLevel >= CC_PAL_LOG_LEVEL_TRACE) \ 169 + _CC_PAL_LOG(TRACE, format, ##__VA_ARGS__) 170 + #else 171 + /*! Log debug trace.*/ 172 + #define CC_PAL_LOG_TRACE(...) do {} while (0) 173 + #endif 174 + 175 + #if (_CC_PAL_MAX_LOG_LEVEL >= CC_PAL_LOG_LEVEL_TRACE) 176 + /*! Log debug data.*/ 177 + #define CC_PAL_LOG_DATA(format, ...) \ 178 + if (CC_PAL_logLevel >= CC_PAL_LOG_LEVEL_TRACE) \ 179 + _CC_PAL_LOG(DATA, format, ##__VA_ARGS__) 180 + #else 181 + /*! Log debug data.*/ 182 + #define CC_PAL_LOG_DATA( ...) do {} while (0) 183 + #endif 184 + /** 185 + @} 186 + */ 187 + 188 + #endif /*_CC_PAL_LOG_H_*/
+33
drivers/staging/ccree/cc_pal_log_plat.h
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + /* Dummy pal_log_plat for test driver in kernel */ 18 + 19 + #ifndef _SSI_PAL_LOG_PLAT_H_ 20 + #define _SSI_PAL_LOG_PLAT_H_ 21 + 22 + #if defined(DEBUG) 23 + 24 + #define __CC_PAL_LOG_PLAT(level, format, ...) printk(level "cc7x_test::" format , ##__VA_ARGS__) 25 + 26 + #else /* Disable all prints */ 27 + 28 + #define __CC_PAL_LOG_PLAT(...) do {} while (0) 29 + 30 + #endif 31 + 32 + #endif /*_SASI_PAL_LOG_PLAT_H_*/ 33 +
+97
drivers/staging/ccree/cc_pal_types.h
/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef CC_PAL_TYPES_H
#define CC_PAL_TYPES_H

/*!
@file
@brief This file contains platform-dependent definitions and types.
@defgroup cc_pal_types CryptoCell PAL platform dependant types
@{
@ingroup cc_pal

*/

#include "cc_pal_types_plat.h"

/*! Boolean definition.*/
typedef enum {
	/*! Boolean false definition.*/
	CC_FALSE = 0,
	/*! Boolean true definition.*/
	CC_TRUE = 1
} CCBool;

/*! Success definition. */
#define CC_SUCCESS              0UL
/*! Failure definition. */
#define CC_FAIL                 1UL

/*! Defintion of 1KB in bytes. */
#define CC_1K_SIZE_IN_BYTES	1024
/*! Defintion of number of bits in a byte. */
#define CC_BITS_IN_BYTE		8
/*! Defintion of number of bits in a 32bits word. */
#define CC_BITS_IN_32BIT_WORD	32
/*! Defintion of number of bytes in a 32bits word. */
#define CC_32BIT_WORD_SIZE	(sizeof(uint32_t))

/*! Success (OK) defintion. */
#define CC_OK   0

/*! Macro that handles unused parameters in the code (to avoid compilation warnings).  */
#define CC_UNUSED_PARAM(prm)  ((void)prm)

/*! Maximal uint32 value.*/
#define CC_MAX_UINT32_VAL 	(0xFFFFFFFF)


/* Minimum and Maximum macros */
/* NOTE(review): both branches of CC_MIN/CC_MAX evaluate their arguments
 * more than once — do not pass expressions with side effects
 * (e.g. CC_MIN(i++, j)). */
#ifdef  min
/*! Definition for minimum. */
#define CC_MIN(a,b) min( a , b )
#else
/*! Definition for minimum. */
#define CC_MIN( a , b ) ( ( (a) < (b) ) ? (a) : (b) )
#endif

#ifdef max
/*! Definition for maximum. */
#define CC_MAX(a,b) max( a , b )
#else
/*! Definition for maximum. */
#define CC_MAX( a , b ) ( ( (a) > (b) ) ? (a) : (b) )
#endif

/* NOTE(review): the CALC_* macros below also evaluate their argument
 * multiple times; pass only side-effect-free expressions. */
/*! Macro that calculates number of full bytes from bits (i.e. 7 bits are 1 byte). */
#define CALC_FULL_BYTES(numBits) 		((numBits)/CC_BITS_IN_BYTE + (((numBits) & (CC_BITS_IN_BYTE-1)) > 0))
/*! Macro that calculates number of full 32bits words from bits (i.e. 31 bits are 1 word). */
#define CALC_FULL_32BIT_WORDS(numBits) 		((numBits)/CC_BITS_IN_32BIT_WORD +  (((numBits) & (CC_BITS_IN_32BIT_WORD-1)) > 0))
/*! Macro that calculates number of full 32bits words from bytes (i.e. 3 bytes are 1 word). */
#define CALC_32BIT_WORDS_FROM_BYTES(sizeBytes)  ((sizeBytes)/CC_32BIT_WORD_SIZE + (((sizeBytes) & (CC_32BIT_WORD_SIZE-1)) > 0))
/*! Macro that round up bits to 32bits words. */
#define ROUNDUP_BITS_TO_32BIT_WORD(numBits) 	(CALC_FULL_32BIT_WORDS(numBits) * CC_BITS_IN_32BIT_WORD)
/*! Macro that round up bits to bytes. */
#define ROUNDUP_BITS_TO_BYTES(numBits) 		(CALC_FULL_BYTES(numBits) * CC_BITS_IN_BYTE)
/*! Macro that round up bytes to 32bits words. */
#define ROUNDUP_BYTES_TO_32BIT_WORD(sizeBytes) 	(CALC_32BIT_WORDS_FROM_BYTES(sizeBytes) * CC_32BIT_WORD_SIZE)


/**
@}
 */
#endif /* CC_PAL_TYPES_H */
+29
drivers/staging/ccree/cc_pal_types_plat.h
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + 18 + #ifndef SSI_PAL_TYPES_PLAT_H 19 + #define SSI_PAL_TYPES_PLAT_H 20 + /* Linux kernel types */ 21 + 22 + #include <linux/types.h> 23 + 24 + #ifndef NULL /* Missing in Linux kernel */ 25 + #define NULL (0x0L) 26 + #endif 27 + 28 + 29 + #endif /*SSI_PAL_TYPES_PLAT_H*/
+106
drivers/staging/ccree/cc_regs.h
/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */


/*!
 * @file
 * @brief This file contains macro definitions for accessing ARM TrustZone CryptoCell register space.
 */

#ifndef _CC_REGS_H_
#define _CC_REGS_H_

#include "cc_bitops.h"

/* Register Offset macro: unit base + register offset, built by token
 * pasting against the DX_* constants from the generated register maps. */
#define CC_REG_OFFSET(unit_name, reg_name)		\
	(DX_BASE_ ## unit_name + DX_ ## reg_name ## _REG_OFFSET)

#define CC_REG_BIT_SHIFT(reg_name, field_name)		\
	(DX_ ## reg_name ## _ ## field_name ## _BIT_SHIFT)

/* Register Offset macros (from registers base address in host) */
#include "dx_reg_base_host.h"

/* Read-Modify-Write a field of a register.
 * NOTE(review): relies on READ_REGISTER/WRITE_REGISTER/CC_REG_ADDR being
 * defined by the including translation unit — not provided here. */
#define MODIFY_REGISTER_FLD(unitName, regName, fldName, fldVal)		\
do {									\
	uint32_t regVal;						\
	regVal = READ_REGISTER(CC_REG_ADDR(unitName, regName));		\
	CC_REG_FLD_SET(unitName, regName, fldName, regVal, fldVal);	\
	WRITE_REGISTER(CC_REG_ADDR(unitName, regName), regVal);		\
} while (0)

/* Registers address macros for ENV registers (development FPGA only) */
#ifdef DX_BASE_ENV_REGS

/* This offset should be added to mapping address of DX_BASE_ENV_REGS */
#define CC_ENV_REG_OFFSET(reg_name) (DX_ENV_ ## reg_name ## _REG_OFFSET)

#endif /*DX_BASE_ENV_REGS*/

/*! Bit fields get (DX_-prefixed register maps) */
#define CC_REG_FLD_GET(unit_name, reg_name, fld_name, reg_val)	      \
	(DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20 ?      \
	reg_val /*!< \internal Optimization for 32b fields */ :	      \
	BITFIELD_GET(reg_val, DX_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT, \
		     DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE))

/*! Bit fields access (CC_-prefixed register maps) */
#define CC_REG_FLD_GET2(unit_name, reg_name, fld_name, reg_val)	      \
	(CC_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20 ?      \
	reg_val /*!< \internal Optimization for 32b fields */ :	      \
	BITFIELD_GET(reg_val, CC_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT, \
		     CC_ ## reg_name ## _ ## fld_name ## _BIT_SIZE))

/* TODO (from original author note): unify HW include prefixes to start
 * with CC_ rather than DX_. */


/*! Bit fields set (DX_-prefixed register maps) */
#define CC_REG_FLD_SET(                                               \
	unit_name, reg_name, fld_name, reg_shadow_var, new_fld_val)   \
do {                                                                  \
	if (DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20)    \
		reg_shadow_var = new_fld_val; /*!< \internal Optimization for 32b fields */\
	else                                                          \
		BITFIELD_SET(reg_shadow_var,                          \
			DX_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT, \
			DX_ ## reg_name ## _ ## fld_name ## _BIT_SIZE,  \
			new_fld_val);                                 \
} while (0)

/*! Bit fields set (CC_-prefixed register maps) */
#define CC_REG_FLD_SET2(                                              \
	unit_name, reg_name, fld_name, reg_shadow_var, new_fld_val)   \
do {                                                                  \
	if (CC_ ## reg_name ## _ ## fld_name ## _BIT_SIZE == 0x20)    \
		reg_shadow_var = new_fld_val; /*!< \internal Optimization for 32b fields */\
	else                                                          \
		BITFIELD_SET(reg_shadow_var,                          \
			CC_ ## reg_name ## _ ## fld_name ## _BIT_SHIFT, \
			CC_ ## reg_name ## _ ## fld_name ## _BIT_SIZE,  \
			new_fld_val);                                 \
} while (0)

/* Usage example:
   uint32_t reg_shadow = READ_REGISTER(CC_REG_ADDR(CRY_KERNEL,AES_CONTROL));
   CC_REG_FLD_SET(CRY_KERNEL,AES_CONTROL,NK_KEY0,reg_shadow, 3);
   CC_REG_FLD_SET(CRY_KERNEL,AES_CONTROL,NK_KEY1,reg_shadow, 1);
   WRITE_REGISTER(CC_REG_ADDR(CRY_KERNEL,AES_CONTROL), reg_shadow);
 */

#endif /*_CC_REGS_H_*/
+180
drivers/staging/ccree/dx_crys_kernel.h
/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Auto-generated register/field map for the CryptoCell CRYS kernel block.
 * Values mirror the hardware spec; do not hand-edit individual constants.
 */

#ifndef __DX_CRYS_KERNEL_H__
#define __DX_CRYS_KERNEL_H__

// --------------------------------------
// BLOCK: DSCRPTR (descriptor queue)
// --------------------------------------
#define DX_DSCRPTR_COMPLETION_COUNTER_REG_OFFSET 	0xE00UL
#define DX_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SHIFT 	0x0UL
#define DX_DSCRPTR_COMPLETION_COUNTER_COMPLETION_COUNTER_BIT_SIZE 	0x6UL
#define DX_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SHIFT 	0x6UL
#define DX_DSCRPTR_COMPLETION_COUNTER_OVERFLOW_COUNTER_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_SW_RESET_REG_OFFSET 	0xE40UL
#define DX_DSCRPTR_SW_RESET_VALUE_BIT_SHIFT 	0x0UL
#define DX_DSCRPTR_SW_RESET_VALUE_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_SRAM_SIZE_REG_OFFSET 	0xE60UL
#define DX_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SHIFT 	0x0UL
#define DX_DSCRPTR_QUEUE_SRAM_SIZE_NUM_OF_DSCRPTR_BIT_SIZE 	0xAUL
#define DX_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SHIFT 	0xAUL
#define DX_DSCRPTR_QUEUE_SRAM_SIZE_DSCRPTR_SRAM_SIZE_BIT_SIZE 	0xCUL
#define DX_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SHIFT 	0x16UL
#define DX_DSCRPTR_QUEUE_SRAM_SIZE_SRAM_SIZE_BIT_SIZE 	0x3UL
#define DX_DSCRPTR_SINGLE_ADDR_EN_REG_OFFSET 	0xE64UL
#define DX_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SHIFT 	0x0UL
#define DX_DSCRPTR_SINGLE_ADDR_EN_VALUE_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_MEASURE_CNTR_REG_OFFSET 	0xE68UL
#define DX_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SHIFT 	0x0UL
#define DX_DSCRPTR_MEASURE_CNTR_VALUE_BIT_SIZE 	0x20UL
/* Descriptor queue words 0-5: the six 32-bit words pushed per HW descriptor. */
#define DX_DSCRPTR_QUEUE_WORD0_REG_OFFSET 	0xE80UL
#define DX_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SHIFT 	0x0UL
#define DX_DSCRPTR_QUEUE_WORD0_VALUE_BIT_SIZE 	0x20UL
#define DX_DSCRPTR_QUEUE_WORD1_REG_OFFSET 	0xE84UL
#define DX_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SHIFT 	0x0UL
#define DX_DSCRPTR_QUEUE_WORD1_DIN_DMA_MODE_BIT_SIZE 	0x2UL
#define DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SHIFT 	0x2UL
#define DX_DSCRPTR_QUEUE_WORD1_DIN_SIZE_BIT_SIZE 	0x18UL
#define DX_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SHIFT 	0x1AUL
#define DX_DSCRPTR_QUEUE_WORD1_NS_BIT_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SHIFT 	0x1BUL
#define DX_DSCRPTR_QUEUE_WORD1_DIN_CONST_VALUE_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SHIFT 	0x1CUL
#define DX_DSCRPTR_QUEUE_WORD1_NOT_LAST_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SHIFT 	0x1DUL
#define DX_DSCRPTR_QUEUE_WORD1_LOCK_QUEUE_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SHIFT 	0x1EUL
#define DX_DSCRPTR_QUEUE_WORD1_NOT_USED_BIT_SIZE 	0x2UL
#define DX_DSCRPTR_QUEUE_WORD2_REG_OFFSET 	0xE88UL
#define DX_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SHIFT 	0x0UL
#define DX_DSCRPTR_QUEUE_WORD2_VALUE_BIT_SIZE 	0x20UL
#define DX_DSCRPTR_QUEUE_WORD3_REG_OFFSET 	0xE8CUL
#define DX_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SHIFT 	0x0UL
#define DX_DSCRPTR_QUEUE_WORD3_DOUT_DMA_MODE_BIT_SIZE 	0x2UL
#define DX_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SHIFT 	0x2UL
#define DX_DSCRPTR_QUEUE_WORD3_DOUT_SIZE_BIT_SIZE 	0x18UL
#define DX_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SHIFT 	0x1AUL
#define DX_DSCRPTR_QUEUE_WORD3_NS_BIT_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SHIFT 	0x1BUL
#define DX_DSCRPTR_QUEUE_WORD3_DOUT_LAST_IND_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SHIFT 	0x1DUL
#define DX_DSCRPTR_QUEUE_WORD3_HASH_XOR_BIT_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SHIFT 	0x1EUL
#define DX_DSCRPTR_QUEUE_WORD3_NOT_USED_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SHIFT 	0x1FUL
#define DX_DSCRPTR_QUEUE_WORD3_QUEUE_LAST_IND_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD4_REG_OFFSET 	0xE90UL
#define DX_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SHIFT 	0x0UL
#define DX_DSCRPTR_QUEUE_WORD4_DATA_FLOW_MODE_BIT_SIZE 	0x6UL
#define DX_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SHIFT 	0x6UL
#define DX_DSCRPTR_QUEUE_WORD4_AES_SEL_N_HASH_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SHIFT 	0x7UL
#define DX_DSCRPTR_QUEUE_WORD4_AES_XOR_CRYPTO_KEY_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SHIFT 	0x8UL
#define DX_DSCRPTR_QUEUE_WORD4_ACK_NEEDED_BIT_SIZE 	0x2UL
#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SHIFT 	0xAUL
#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_MODE_BIT_SIZE 	0x4UL
#define DX_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SHIFT 	0xEUL
#define DX_DSCRPTR_QUEUE_WORD4_CMAC_SIZE0_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SHIFT 	0xFUL
#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_DO_BIT_SIZE 	0x2UL
#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SHIFT 	0x11UL
#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF0_BIT_SIZE 	0x2UL
#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SHIFT 	0x13UL
#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF1_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SHIFT 	0x14UL
#define DX_DSCRPTR_QUEUE_WORD4_CIPHER_CONF2_BIT_SIZE 	0x2UL
#define DX_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SHIFT 	0x16UL
#define DX_DSCRPTR_QUEUE_WORD4_KEY_SIZE_BIT_SIZE 	0x2UL
#define DX_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SHIFT 	0x18UL
#define DX_DSCRPTR_QUEUE_WORD4_SETUP_OPERATION_BIT_SIZE 	0x4UL
#define DX_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SHIFT 	0x1CUL
#define DX_DSCRPTR_QUEUE_WORD4_DIN_SRAM_ENDIANNESS_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SHIFT 	0x1DUL
#define DX_DSCRPTR_QUEUE_WORD4_DOUT_SRAM_ENDIANNESS_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SHIFT 	0x1EUL
#define DX_DSCRPTR_QUEUE_WORD4_WORD_SWAP_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SHIFT 	0x1FUL
#define DX_DSCRPTR_QUEUE_WORD4_BYTES_SWAP_BIT_SIZE 	0x1UL
#define DX_DSCRPTR_QUEUE_WORD5_REG_OFFSET 	0xE94UL
#define DX_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SHIFT 	0x0UL
#define DX_DSCRPTR_QUEUE_WORD5_DIN_ADDR_HIGH_BIT_SIZE 	0x10UL
#define DX_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SHIFT 	0x10UL
#define DX_DSCRPTR_QUEUE_WORD5_DOUT_ADDR_HIGH_BIT_SIZE 	0x10UL
#define DX_DSCRPTR_QUEUE_WATERMARK_REG_OFFSET 	0xE98UL
#define DX_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SHIFT 	0x0UL
#define DX_DSCRPTR_QUEUE_WATERMARK_VALUE_BIT_SIZE 	0xAUL
#define DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET 	0xE9CUL
#define DX_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SHIFT 	0x0UL
#define DX_DSCRPTR_QUEUE_CONTENT_VALUE_BIT_SIZE 	0xAUL
// --------------------------------------
// BLOCK: AXI_P (AXI master monitor/config)
// --------------------------------------
#define DX_AXIM_MON_INFLIGHT_REG_OFFSET 	0xB00UL
#define DX_AXIM_MON_INFLIGHT_VALUE_BIT_SHIFT 	0x0UL
#define DX_AXIM_MON_INFLIGHT_VALUE_BIT_SIZE 	0x8UL
#define DX_AXIM_MON_INFLIGHTLAST_REG_OFFSET 	0xB40UL
#define DX_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SHIFT 	0x0UL
#define DX_AXIM_MON_INFLIGHTLAST_VALUE_BIT_SIZE 	0x8UL
#define DX_AXIM_MON_COMP_REG_OFFSET 	0xB80UL
#define DX_AXIM_MON_COMP_VALUE_BIT_SHIFT 	0x0UL
#define DX_AXIM_MON_COMP_VALUE_BIT_SIZE 	0x10UL
#define DX_AXIM_MON_ERR_REG_OFFSET 	0xBC4UL
#define DX_AXIM_MON_ERR_BRESP_BIT_SHIFT 	0x0UL
#define DX_AXIM_MON_ERR_BRESP_BIT_SIZE 	0x2UL
#define DX_AXIM_MON_ERR_BID_BIT_SHIFT 	0x2UL
#define DX_AXIM_MON_ERR_BID_BIT_SIZE 	0x4UL
#define DX_AXIM_MON_ERR_RRESP_BIT_SHIFT 	0x10UL
#define DX_AXIM_MON_ERR_RRESP_BIT_SIZE 	0x2UL
#define DX_AXIM_MON_ERR_RID_BIT_SHIFT 	0x12UL
#define DX_AXIM_MON_ERR_RID_BIT_SIZE 	0x4UL
#define DX_AXIM_CFG_REG_OFFSET 	0xBE8UL
#define DX_AXIM_CFG_BRESPMASK_BIT_SHIFT 	0x4UL
#define DX_AXIM_CFG_BRESPMASK_BIT_SIZE 	0x1UL
#define DX_AXIM_CFG_RRESPMASK_BIT_SHIFT 	0x5UL
#define DX_AXIM_CFG_RRESPMASK_BIT_SIZE 	0x1UL
#define DX_AXIM_CFG_INFLTMASK_BIT_SHIFT 	0x6UL
#define DX_AXIM_CFG_INFLTMASK_BIT_SIZE 	0x1UL
#define DX_AXIM_CFG_COMPMASK_BIT_SHIFT 	0x7UL
#define DX_AXIM_CFG_COMPMASK_BIT_SIZE 	0x1UL
#define DX_AXIM_ACE_CONST_REG_OFFSET 	0xBECUL
#define DX_AXIM_ACE_CONST_ARDOMAIN_BIT_SHIFT 	0x0UL
#define DX_AXIM_ACE_CONST_ARDOMAIN_BIT_SIZE 	0x2UL
#define DX_AXIM_ACE_CONST_AWDOMAIN_BIT_SHIFT 	0x2UL
#define DX_AXIM_ACE_CONST_AWDOMAIN_BIT_SIZE 	0x2UL
#define DX_AXIM_ACE_CONST_ARBAR_BIT_SHIFT 	0x4UL
#define DX_AXIM_ACE_CONST_ARBAR_BIT_SIZE 	0x2UL
#define DX_AXIM_ACE_CONST_AWBAR_BIT_SHIFT 	0x6UL
#define DX_AXIM_ACE_CONST_AWBAR_BIT_SIZE 	0x2UL
#define DX_AXIM_ACE_CONST_ARSNOOP_BIT_SHIFT 	0x8UL
#define DX_AXIM_ACE_CONST_ARSNOOP_BIT_SIZE 	0x4UL
#define DX_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SHIFT 	0xCUL
#define DX_AXIM_ACE_CONST_AWSNOOP_NOT_ALIGNED_BIT_SIZE 	0x3UL
#define DX_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SHIFT 	0xFUL
#define DX_AXIM_ACE_CONST_AWSNOOP_ALIGNED_BIT_SIZE 	0x3UL
#define DX_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SHIFT 	0x12UL
#define DX_AXIM_ACE_CONST_AWADDR_NOT_MASKED_BIT_SIZE 	0x7UL
#define DX_AXIM_ACE_CONST_AWLEN_VAL_BIT_SHIFT 	0x19UL
#define DX_AXIM_ACE_CONST_AWLEN_VAL_BIT_SIZE 	0x4UL
#define DX_AXIM_CACHE_PARAMS_REG_OFFSET 	0xBF0UL
#define DX_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SHIFT 	0x0UL
#define DX_AXIM_CACHE_PARAMS_AWCACHE_LAST_BIT_SIZE 	0x4UL
#define DX_AXIM_CACHE_PARAMS_AWCACHE_BIT_SHIFT 	0x4UL
#define DX_AXIM_CACHE_PARAMS_AWCACHE_BIT_SIZE 	0x4UL
#define DX_AXIM_CACHE_PARAMS_ARCACHE_BIT_SHIFT 	0x8UL
#define DX_AXIM_CACHE_PARAMS_ARCACHE_BIT_SIZE 	0x4UL
#endif // __DX_CRYS_KERNEL_H__
+224
drivers/staging/ccree/dx_env.h
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + #ifndef __DX_ENV_H__ 18 + #define __DX_ENV_H__ 19 + 20 + // -------------------------------------- 21 + // BLOCK: FPGA_ENV_REGS 22 + // -------------------------------------- 23 + #define DX_ENV_PKA_DEBUG_MODE_REG_OFFSET 0x024UL 24 + #define DX_ENV_PKA_DEBUG_MODE_VALUE_BIT_SHIFT 0x0UL 25 + #define DX_ENV_PKA_DEBUG_MODE_VALUE_BIT_SIZE 0x1UL 26 + #define DX_ENV_SCAN_MODE_REG_OFFSET 0x030UL 27 + #define DX_ENV_SCAN_MODE_VALUE_BIT_SHIFT 0x0UL 28 + #define DX_ENV_SCAN_MODE_VALUE_BIT_SIZE 0x1UL 29 + #define DX_ENV_CC_ALLOW_SCAN_REG_OFFSET 0x034UL 30 + #define DX_ENV_CC_ALLOW_SCAN_VALUE_BIT_SHIFT 0x0UL 31 + #define DX_ENV_CC_ALLOW_SCAN_VALUE_BIT_SIZE 0x1UL 32 + #define DX_ENV_CC_HOST_INT_REG_OFFSET 0x0A0UL 33 + #define DX_ENV_CC_HOST_INT_VALUE_BIT_SHIFT 0x0UL 34 + #define DX_ENV_CC_HOST_INT_VALUE_BIT_SIZE 0x1UL 35 + #define DX_ENV_CC_PUB_HOST_INT_REG_OFFSET 0x0A4UL 36 + #define DX_ENV_CC_PUB_HOST_INT_VALUE_BIT_SHIFT 0x0UL 37 + #define DX_ENV_CC_PUB_HOST_INT_VALUE_BIT_SIZE 0x1UL 38 + #define DX_ENV_CC_RST_N_REG_OFFSET 0x0A8UL 39 + #define DX_ENV_CC_RST_N_VALUE_BIT_SHIFT 0x0UL 40 + #define DX_ENV_CC_RST_N_VALUE_BIT_SIZE 0x1UL 41 + #define DX_ENV_RST_OVERRIDE_REG_OFFSET 0x0ACUL 42 + #define DX_ENV_RST_OVERRIDE_VALUE_BIT_SHIFT 0x0UL 43 + #define 
DX_ENV_RST_OVERRIDE_VALUE_BIT_SIZE 0x1UL 44 + #define DX_ENV_CC_POR_N_ADDR_REG_OFFSET 0x0E0UL 45 + #define DX_ENV_CC_POR_N_ADDR_VALUE_BIT_SHIFT 0x0UL 46 + #define DX_ENV_CC_POR_N_ADDR_VALUE_BIT_SIZE 0x1UL 47 + #define DX_ENV_CC_COLD_RST_REG_OFFSET 0x0FCUL 48 + #define DX_ENV_CC_COLD_RST_VALUE_BIT_SHIFT 0x0UL 49 + #define DX_ENV_CC_COLD_RST_VALUE_BIT_SIZE 0x1UL 50 + #define DX_ENV_DUMMY_ADDR_REG_OFFSET 0x108UL 51 + #define DX_ENV_DUMMY_ADDR_VALUE_BIT_SHIFT 0x0UL 52 + #define DX_ENV_DUMMY_ADDR_VALUE_BIT_SIZE 0x20UL 53 + #define DX_ENV_COUNTER_CLR_REG_OFFSET 0x118UL 54 + #define DX_ENV_COUNTER_CLR_VALUE_BIT_SHIFT 0x0UL 55 + #define DX_ENV_COUNTER_CLR_VALUE_BIT_SIZE 0x1UL 56 + #define DX_ENV_COUNTER_RD_REG_OFFSET 0x11CUL 57 + #define DX_ENV_COUNTER_RD_VALUE_BIT_SHIFT 0x0UL 58 + #define DX_ENV_COUNTER_RD_VALUE_BIT_SIZE 0x20UL 59 + #define DX_ENV_RNG_DEBUG_ENABLE_REG_OFFSET 0x430UL 60 + #define DX_ENV_RNG_DEBUG_ENABLE_VALUE_BIT_SHIFT 0x0UL 61 + #define DX_ENV_RNG_DEBUG_ENABLE_VALUE_BIT_SIZE 0x1UL 62 + #define DX_ENV_CC_LCS_REG_OFFSET 0x43CUL 63 + #define DX_ENV_CC_LCS_VALUE_BIT_SHIFT 0x0UL 64 + #define DX_ENV_CC_LCS_VALUE_BIT_SIZE 0x8UL 65 + #define DX_ENV_CC_IS_CM_DM_SECURE_RMA_REG_OFFSET 0x440UL 66 + #define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_CM_BIT_SHIFT 0x0UL 67 + #define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_CM_BIT_SIZE 0x1UL 68 + #define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_DM_BIT_SHIFT 0x1UL 69 + #define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_DM_BIT_SIZE 0x1UL 70 + #define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_SECURE_BIT_SHIFT 0x2UL 71 + #define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_SECURE_BIT_SIZE 0x1UL 72 + #define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_RMA_BIT_SHIFT 0x3UL 73 + #define DX_ENV_CC_IS_CM_DM_SECURE_RMA_IS_RMA_BIT_SIZE 0x1UL 74 + #define DX_ENV_DCU_EN_REG_OFFSET 0x444UL 75 + #define DX_ENV_DCU_EN_VALUE_BIT_SHIFT 0x0UL 76 + #define DX_ENV_DCU_EN_VALUE_BIT_SIZE 0x20UL 77 + #define DX_ENV_CC_LCS_IS_VALID_REG_OFFSET 0x448UL 78 + #define DX_ENV_CC_LCS_IS_VALID_VALUE_BIT_SHIFT 0x0UL 79 + 
#define DX_ENV_CC_LCS_IS_VALID_VALUE_BIT_SIZE 0x1UL 80 + #define DX_ENV_POWER_DOWN_REG_OFFSET 0x478UL 81 + #define DX_ENV_POWER_DOWN_VALUE_BIT_SHIFT 0x0UL 82 + #define DX_ENV_POWER_DOWN_VALUE_BIT_SIZE 0x20UL 83 + #define DX_ENV_DCU_H_EN_REG_OFFSET 0x484UL 84 + #define DX_ENV_DCU_H_EN_VALUE_BIT_SHIFT 0x0UL 85 + #define DX_ENV_DCU_H_EN_VALUE_BIT_SIZE 0x20UL 86 + #define DX_ENV_VERSION_REG_OFFSET 0x488UL 87 + #define DX_ENV_VERSION_VALUE_BIT_SHIFT 0x0UL 88 + #define DX_ENV_VERSION_VALUE_BIT_SIZE 0x20UL 89 + #define DX_ENV_ROSC_WRITE_REG_OFFSET 0x48CUL 90 + #define DX_ENV_ROSC_WRITE_VALUE_BIT_SHIFT 0x0UL 91 + #define DX_ENV_ROSC_WRITE_VALUE_BIT_SIZE 0x1UL 92 + #define DX_ENV_ROSC_ADDR_REG_OFFSET 0x490UL 93 + #define DX_ENV_ROSC_ADDR_VALUE_BIT_SHIFT 0x0UL 94 + #define DX_ENV_ROSC_ADDR_VALUE_BIT_SIZE 0x8UL 95 + #define DX_ENV_RESET_SESSION_KEY_REG_OFFSET 0x494UL 96 + #define DX_ENV_RESET_SESSION_KEY_VALUE_BIT_SHIFT 0x0UL 97 + #define DX_ENV_RESET_SESSION_KEY_VALUE_BIT_SIZE 0x1UL 98 + #define DX_ENV_SESSION_KEY_0_REG_OFFSET 0x4A0UL 99 + #define DX_ENV_SESSION_KEY_0_VALUE_BIT_SHIFT 0x0UL 100 + #define DX_ENV_SESSION_KEY_0_VALUE_BIT_SIZE 0x20UL 101 + #define DX_ENV_SESSION_KEY_1_REG_OFFSET 0x4A4UL 102 + #define DX_ENV_SESSION_KEY_1_VALUE_BIT_SHIFT 0x0UL 103 + #define DX_ENV_SESSION_KEY_1_VALUE_BIT_SIZE 0x20UL 104 + #define DX_ENV_SESSION_KEY_2_REG_OFFSET 0x4A8UL 105 + #define DX_ENV_SESSION_KEY_2_VALUE_BIT_SHIFT 0x0UL 106 + #define DX_ENV_SESSION_KEY_2_VALUE_BIT_SIZE 0x20UL 107 + #define DX_ENV_SESSION_KEY_3_REG_OFFSET 0x4ACUL 108 + #define DX_ENV_SESSION_KEY_3_VALUE_BIT_SHIFT 0x0UL 109 + #define DX_ENV_SESSION_KEY_3_VALUE_BIT_SIZE 0x20UL 110 + #define DX_ENV_SESSION_KEY_VALID_REG_OFFSET 0x4B0UL 111 + #define DX_ENV_SESSION_KEY_VALID_VALUE_BIT_SHIFT 0x0UL 112 + #define DX_ENV_SESSION_KEY_VALID_VALUE_BIT_SIZE 0x1UL 113 + #define DX_ENV_SPIDEN_REG_OFFSET 0x4D0UL 114 + #define DX_ENV_SPIDEN_VALUE_BIT_SHIFT 0x0UL 115 + #define DX_ENV_SPIDEN_VALUE_BIT_SIZE 0x1UL 116 + #define 
DX_ENV_AXIM_USER_PARAMS_REG_OFFSET 0x600UL 117 + #define DX_ENV_AXIM_USER_PARAMS_ARUSER_BIT_SHIFT 0x0UL 118 + #define DX_ENV_AXIM_USER_PARAMS_ARUSER_BIT_SIZE 0x5UL 119 + #define DX_ENV_AXIM_USER_PARAMS_AWUSER_BIT_SHIFT 0x5UL 120 + #define DX_ENV_AXIM_USER_PARAMS_AWUSER_BIT_SIZE 0x5UL 121 + #define DX_ENV_SECURITY_MODE_OVERRIDE_REG_OFFSET 0x604UL 122 + #define DX_ENV_SECURITY_MODE_OVERRIDE_AWPROT_NS_BIT_BIT_SHIFT 0x0UL 123 + #define DX_ENV_SECURITY_MODE_OVERRIDE_AWPROT_NS_BIT_BIT_SIZE 0x1UL 124 + #define DX_ENV_SECURITY_MODE_OVERRIDE_AWPROT_NS_OVERRIDE_BIT_SHIFT 0x1UL 125 + #define DX_ENV_SECURITY_MODE_OVERRIDE_AWPROT_NS_OVERRIDE_BIT_SIZE 0x1UL 126 + #define DX_ENV_SECURITY_MODE_OVERRIDE_ARPROT_NS_BIT_BIT_SHIFT 0x2UL 127 + #define DX_ENV_SECURITY_MODE_OVERRIDE_ARPROT_NS_BIT_BIT_SIZE 0x1UL 128 + #define DX_ENV_SECURITY_MODE_OVERRIDE_ARPROT_NS_OVERRIDE_BIT_SHIFT 0x3UL 129 + #define DX_ENV_SECURITY_MODE_OVERRIDE_ARPROT_NS_OVERRIDE_BIT_SIZE 0x1UL 130 + #define DX_ENV_AO_CC_KPLT_0_REG_OFFSET 0x620UL 131 + #define DX_ENV_AO_CC_KPLT_0_VALUE_BIT_SHIFT 0x0UL 132 + #define DX_ENV_AO_CC_KPLT_0_VALUE_BIT_SIZE 0x20UL 133 + #define DX_ENV_AO_CC_KPLT_1_REG_OFFSET 0x624UL 134 + #define DX_ENV_AO_CC_KPLT_1_VALUE_BIT_SHIFT 0x0UL 135 + #define DX_ENV_AO_CC_KPLT_1_VALUE_BIT_SIZE 0x20UL 136 + #define DX_ENV_AO_CC_KPLT_2_REG_OFFSET 0x628UL 137 + #define DX_ENV_AO_CC_KPLT_2_VALUE_BIT_SHIFT 0x0UL 138 + #define DX_ENV_AO_CC_KPLT_2_VALUE_BIT_SIZE 0x20UL 139 + #define DX_ENV_AO_CC_KPLT_3_REG_OFFSET 0x62CUL 140 + #define DX_ENV_AO_CC_KPLT_3_VALUE_BIT_SHIFT 0x0UL 141 + #define DX_ENV_AO_CC_KPLT_3_VALUE_BIT_SIZE 0x20UL 142 + #define DX_ENV_AO_CC_KCST_0_REG_OFFSET 0x630UL 143 + #define DX_ENV_AO_CC_KCST_0_VALUE_BIT_SHIFT 0x0UL 144 + #define DX_ENV_AO_CC_KCST_0_VALUE_BIT_SIZE 0x20UL 145 + #define DX_ENV_AO_CC_KCST_1_REG_OFFSET 0x634UL 146 + #define DX_ENV_AO_CC_KCST_1_VALUE_BIT_SHIFT 0x0UL 147 + #define DX_ENV_AO_CC_KCST_1_VALUE_BIT_SIZE 0x20UL 148 + #define DX_ENV_AO_CC_KCST_2_REG_OFFSET 0x638UL 
149 + #define DX_ENV_AO_CC_KCST_2_VALUE_BIT_SHIFT 0x0UL 150 + #define DX_ENV_AO_CC_KCST_2_VALUE_BIT_SIZE 0x20UL 151 + #define DX_ENV_AO_CC_KCST_3_REG_OFFSET 0x63CUL 152 + #define DX_ENV_AO_CC_KCST_3_VALUE_BIT_SHIFT 0x0UL 153 + #define DX_ENV_AO_CC_KCST_3_VALUE_BIT_SIZE 0x20UL 154 + #define DX_ENV_APB_FIPS_ADDR_REG_OFFSET 0x650UL 155 + #define DX_ENV_APB_FIPS_ADDR_VALUE_BIT_SHIFT 0x0UL 156 + #define DX_ENV_APB_FIPS_ADDR_VALUE_BIT_SIZE 0xCUL 157 + #define DX_ENV_APB_FIPS_VAL_REG_OFFSET 0x654UL 158 + #define DX_ENV_APB_FIPS_VAL_VALUE_BIT_SHIFT 0x0UL 159 + #define DX_ENV_APB_FIPS_VAL_VALUE_BIT_SIZE 0x20UL 160 + #define DX_ENV_APB_FIPS_MASK_REG_OFFSET 0x658UL 161 + #define DX_ENV_APB_FIPS_MASK_VALUE_BIT_SHIFT 0x0UL 162 + #define DX_ENV_APB_FIPS_MASK_VALUE_BIT_SIZE 0x20UL 163 + #define DX_ENV_APB_FIPS_CNT_REG_OFFSET 0x65CUL 164 + #define DX_ENV_APB_FIPS_CNT_VALUE_BIT_SHIFT 0x0UL 165 + #define DX_ENV_APB_FIPS_CNT_VALUE_BIT_SIZE 0x20UL 166 + #define DX_ENV_APB_FIPS_NEW_ADDR_REG_OFFSET 0x660UL 167 + #define DX_ENV_APB_FIPS_NEW_ADDR_VALUE_BIT_SHIFT 0x0UL 168 + #define DX_ENV_APB_FIPS_NEW_ADDR_VALUE_BIT_SIZE 0xCUL 169 + #define DX_ENV_APB_FIPS_NEW_VAL_REG_OFFSET 0x664UL 170 + #define DX_ENV_APB_FIPS_NEW_VAL_VALUE_BIT_SHIFT 0x0UL 171 + #define DX_ENV_APB_FIPS_NEW_VAL_VALUE_BIT_SIZE 0x20UL 172 + #define DX_ENV_APBP_FIPS_ADDR_REG_OFFSET 0x670UL 173 + #define DX_ENV_APBP_FIPS_ADDR_VALUE_BIT_SHIFT 0x0UL 174 + #define DX_ENV_APBP_FIPS_ADDR_VALUE_BIT_SIZE 0xCUL 175 + #define DX_ENV_APBP_FIPS_VAL_REG_OFFSET 0x674UL 176 + #define DX_ENV_APBP_FIPS_VAL_VALUE_BIT_SHIFT 0x0UL 177 + #define DX_ENV_APBP_FIPS_VAL_VALUE_BIT_SIZE 0x20UL 178 + #define DX_ENV_APBP_FIPS_MASK_REG_OFFSET 0x678UL 179 + #define DX_ENV_APBP_FIPS_MASK_VALUE_BIT_SHIFT 0x0UL 180 + #define DX_ENV_APBP_FIPS_MASK_VALUE_BIT_SIZE 0x20UL 181 + #define DX_ENV_APBP_FIPS_CNT_REG_OFFSET 0x67CUL 182 + #define DX_ENV_APBP_FIPS_CNT_VALUE_BIT_SHIFT 0x0UL 183 + #define DX_ENV_APBP_FIPS_CNT_VALUE_BIT_SIZE 0x20UL 184 + #define 
DX_ENV_APBP_FIPS_NEW_ADDR_REG_OFFSET 0x680UL 185 + #define DX_ENV_APBP_FIPS_NEW_ADDR_VALUE_BIT_SHIFT 0x0UL 186 + #define DX_ENV_APBP_FIPS_NEW_ADDR_VALUE_BIT_SIZE 0xCUL 187 + #define DX_ENV_APBP_FIPS_NEW_VAL_REG_OFFSET 0x684UL 188 + #define DX_ENV_APBP_FIPS_NEW_VAL_VALUE_BIT_SHIFT 0x0UL 189 + #define DX_ENV_APBP_FIPS_NEW_VAL_VALUE_BIT_SIZE 0x20UL 190 + #define DX_ENV_CC_POWERDOWN_EN_REG_OFFSET 0x690UL 191 + #define DX_ENV_CC_POWERDOWN_EN_VALUE_BIT_SHIFT 0x0UL 192 + #define DX_ENV_CC_POWERDOWN_EN_VALUE_BIT_SIZE 0x1UL 193 + #define DX_ENV_CC_POWERDOWN_RST_EN_REG_OFFSET 0x694UL 194 + #define DX_ENV_CC_POWERDOWN_RST_EN_VALUE_BIT_SHIFT 0x0UL 195 + #define DX_ENV_CC_POWERDOWN_RST_EN_VALUE_BIT_SIZE 0x1UL 196 + #define DX_ENV_POWERDOWN_RST_CNTR_REG_OFFSET 0x698UL 197 + #define DX_ENV_POWERDOWN_RST_CNTR_VALUE_BIT_SHIFT 0x0UL 198 + #define DX_ENV_POWERDOWN_RST_CNTR_VALUE_BIT_SIZE 0x20UL 199 + #define DX_ENV_POWERDOWN_EN_DEBUG_REG_OFFSET 0x69CUL 200 + #define DX_ENV_POWERDOWN_EN_DEBUG_VALUE_BIT_SHIFT 0x0UL 201 + #define DX_ENV_POWERDOWN_EN_DEBUG_VALUE_BIT_SIZE 0x1UL 202 + // -------------------------------------- 203 + // BLOCK: ENV_CC_MEMORIES 204 + // -------------------------------------- 205 + #define DX_ENV_FUSE_READY_REG_OFFSET 0x000UL 206 + #define DX_ENV_FUSE_READY_VALUE_BIT_SHIFT 0x0UL 207 + #define DX_ENV_FUSE_READY_VALUE_BIT_SIZE 0x1UL 208 + #define DX_ENV_PERF_RAM_MASTER_REG_OFFSET 0x0ECUL 209 + #define DX_ENV_PERF_RAM_MASTER_VALUE_BIT_SHIFT 0x0UL 210 + #define DX_ENV_PERF_RAM_MASTER_VALUE_BIT_SIZE 0x1UL 211 + #define DX_ENV_PERF_RAM_ADDR_HIGH4_REG_OFFSET 0x0F0UL 212 + #define DX_ENV_PERF_RAM_ADDR_HIGH4_VALUE_BIT_SHIFT 0x0UL 213 + #define DX_ENV_PERF_RAM_ADDR_HIGH4_VALUE_BIT_SIZE 0x2UL 214 + #define DX_ENV_FUSES_RAM_REG_OFFSET 0x3ECUL 215 + #define DX_ENV_FUSES_RAM_VALUE_BIT_SHIFT 0x0UL 216 + #define DX_ENV_FUSES_RAM_VALUE_BIT_SIZE 0x20UL 217 + // -------------------------------------- 218 + // BLOCK: ENV_PERF_RAM_BASE 219 + // 
-------------------------------------- 220 + #define DX_ENV_PERF_RAM_BASE_REG_OFFSET 0x000UL 221 + #define DX_ENV_PERF_RAM_BASE_VALUE_BIT_SHIFT 0x0UL 222 + #define DX_ENV_PERF_RAM_BASE_VALUE_BIT_SIZE 0x20UL 223 + 224 + #endif /*__DX_ENV_H__*/
+155
drivers/staging/ccree/dx_host.h
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + #ifndef __DX_HOST_H__ 18 + #define __DX_HOST_H__ 19 + 20 + // -------------------------------------- 21 + // BLOCK: HOST_P 22 + // -------------------------------------- 23 + #define DX_HOST_IRR_REG_OFFSET 0xA00UL 24 + #define DX_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL 25 + #define DX_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL 26 + #define DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL 27 + #define DX_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL 28 + #define DX_HOST_IRR_GPR0_BIT_SHIFT 0xBUL 29 + #define DX_HOST_IRR_GPR0_BIT_SIZE 0x1UL 30 + #define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL 31 + #define DX_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL 32 + #define DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL 33 + #define DX_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL 34 + #define DX_HOST_IMR_REG_OFFSET 0xA04UL 35 + #define DX_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL 36 + #define DX_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL 37 + #define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL 38 + #define DX_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL 39 + #define DX_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL 40 + #define DX_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL 41 + #define DX_HOST_IMR_GPR0_BIT_SHIFT 0xBUL 42 + #define DX_HOST_IMR_GPR0_BIT_SIZE 0x1UL 43 + #define 
DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL 44 + #define DX_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL 45 + #define DX_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL 46 + #define DX_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL 47 + #define DX_HOST_ICR_REG_OFFSET 0xA08UL 48 + #define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL 49 + #define DX_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL 50 + #define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SHIFT 0x8UL 51 + #define DX_HOST_ICR_AXI_ERR_CLEAR_BIT_SIZE 0x1UL 52 + #define DX_HOST_ICR_GPR_INT_CLEAR_BIT_SHIFT 0xBUL 53 + #define DX_HOST_ICR_GPR_INT_CLEAR_BIT_SIZE 0x1UL 54 + #define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SHIFT 0x13UL 55 + #define DX_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL 56 + #define DX_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL 57 + #define DX_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL 58 + #define DX_HOST_SIGNATURE_REG_OFFSET 0xA24UL 59 + #define DX_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL 60 + #define DX_HOST_SIGNATURE_VALUE_BIT_SIZE 0x20UL 61 + #define DX_HOST_BOOT_REG_OFFSET 0xA28UL 62 + #define DX_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SHIFT 0x0UL 63 + #define DX_HOST_BOOT_SYNTHESIS_CONFIG_BIT_SIZE 0x1UL 64 + #define DX_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SHIFT 0x1UL 65 + #define DX_HOST_BOOT_LARGE_RKEK_LOCAL_BIT_SIZE 0x1UL 66 + #define DX_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SHIFT 0x2UL 67 + #define DX_HOST_BOOT_HASH_IN_FUSES_LOCAL_BIT_SIZE 0x1UL 68 + #define DX_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SHIFT 0x3UL 69 + #define DX_HOST_BOOT_EXT_MEM_SECURED_LOCAL_BIT_SIZE 0x1UL 70 + #define DX_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SHIFT 0x5UL 71 + #define DX_HOST_BOOT_RKEK_ECC_EXISTS_LOCAL_N_BIT_SIZE 0x1UL 72 + #define DX_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SHIFT 0x6UL 73 + #define DX_HOST_BOOT_SRAM_SIZE_LOCAL_BIT_SIZE 0x3UL 74 + #define DX_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SHIFT 0x9UL 75 + #define DX_HOST_BOOT_DSCRPTR_EXISTS_LOCAL_BIT_SIZE 0x1UL 76 + #define DX_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SHIFT 0xAUL 
77 + #define DX_HOST_BOOT_PAU_EXISTS_LOCAL_BIT_SIZE 0x1UL 78 + #define DX_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SHIFT 0xBUL 79 + #define DX_HOST_BOOT_RNG_EXISTS_LOCAL_BIT_SIZE 0x1UL 80 + #define DX_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SHIFT 0xCUL 81 + #define DX_HOST_BOOT_PKA_EXISTS_LOCAL_BIT_SIZE 0x1UL 82 + #define DX_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SHIFT 0xDUL 83 + #define DX_HOST_BOOT_RC4_EXISTS_LOCAL_BIT_SIZE 0x1UL 84 + #define DX_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SHIFT 0xEUL 85 + #define DX_HOST_BOOT_SHA_512_PRSNT_LOCAL_BIT_SIZE 0x1UL 86 + #define DX_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SHIFT 0xFUL 87 + #define DX_HOST_BOOT_SHA_256_PRSNT_LOCAL_BIT_SIZE 0x1UL 88 + #define DX_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SHIFT 0x10UL 89 + #define DX_HOST_BOOT_MD5_PRSNT_LOCAL_BIT_SIZE 0x1UL 90 + #define DX_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SHIFT 0x11UL 91 + #define DX_HOST_BOOT_HASH_EXISTS_LOCAL_BIT_SIZE 0x1UL 92 + #define DX_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SHIFT 0x12UL 93 + #define DX_HOST_BOOT_C2_EXISTS_LOCAL_BIT_SIZE 0x1UL 94 + #define DX_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SHIFT 0x13UL 95 + #define DX_HOST_BOOT_DES_EXISTS_LOCAL_BIT_SIZE 0x1UL 96 + #define DX_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SHIFT 0x14UL 97 + #define DX_HOST_BOOT_AES_XCBC_MAC_EXISTS_LOCAL_BIT_SIZE 0x1UL 98 + #define DX_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SHIFT 0x15UL 99 + #define DX_HOST_BOOT_AES_CMAC_EXISTS_LOCAL_BIT_SIZE 0x1UL 100 + #define DX_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SHIFT 0x16UL 101 + #define DX_HOST_BOOT_AES_CCM_EXISTS_LOCAL_BIT_SIZE 0x1UL 102 + #define DX_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SHIFT 0x17UL 103 + #define DX_HOST_BOOT_AES_XEX_HW_T_CALC_LOCAL_BIT_SIZE 0x1UL 104 + #define DX_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SHIFT 0x18UL 105 + #define DX_HOST_BOOT_AES_XEX_EXISTS_LOCAL_BIT_SIZE 0x1UL 106 + #define DX_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SHIFT 0x19UL 107 + #define DX_HOST_BOOT_CTR_EXISTS_LOCAL_BIT_SIZE 0x1UL 108 + #define DX_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SHIFT 0x1AUL 109 + #define 
DX_HOST_BOOT_AES_DIN_BYTE_RESOLUTION_LOCAL_BIT_SIZE 0x1UL 110 + #define DX_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SHIFT 0x1BUL 111 + #define DX_HOST_BOOT_TUNNELING_ENB_LOCAL_BIT_SIZE 0x1UL 112 + #define DX_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SHIFT 0x1CUL 113 + #define DX_HOST_BOOT_SUPPORT_256_192_KEY_LOCAL_BIT_SIZE 0x1UL 114 + #define DX_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SHIFT 0x1DUL 115 + #define DX_HOST_BOOT_ONLY_ENCRYPT_LOCAL_BIT_SIZE 0x1UL 116 + #define DX_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SHIFT 0x1EUL 117 + #define DX_HOST_BOOT_AES_EXISTS_LOCAL_BIT_SIZE 0x1UL 118 + #define DX_HOST_VERSION_REG_OFFSET 0xA40UL 119 + #define DX_HOST_VERSION_VALUE_BIT_SHIFT 0x0UL 120 + #define DX_HOST_VERSION_VALUE_BIT_SIZE 0x20UL 121 + #define DX_HOST_KFDE0_VALID_REG_OFFSET 0xA60UL 122 + #define DX_HOST_KFDE0_VALID_VALUE_BIT_SHIFT 0x0UL 123 + #define DX_HOST_KFDE0_VALID_VALUE_BIT_SIZE 0x1UL 124 + #define DX_HOST_KFDE1_VALID_REG_OFFSET 0xA64UL 125 + #define DX_HOST_KFDE1_VALID_VALUE_BIT_SHIFT 0x0UL 126 + #define DX_HOST_KFDE1_VALID_VALUE_BIT_SIZE 0x1UL 127 + #define DX_HOST_KFDE2_VALID_REG_OFFSET 0xA68UL 128 + #define DX_HOST_KFDE2_VALID_VALUE_BIT_SHIFT 0x0UL 129 + #define DX_HOST_KFDE2_VALID_VALUE_BIT_SIZE 0x1UL 130 + #define DX_HOST_KFDE3_VALID_REG_OFFSET 0xA6CUL 131 + #define DX_HOST_KFDE3_VALID_VALUE_BIT_SHIFT 0x0UL 132 + #define DX_HOST_KFDE3_VALID_VALUE_BIT_SIZE 0x1UL 133 + #define DX_HOST_GPR0_REG_OFFSET 0xA70UL 134 + #define DX_HOST_GPR0_VALUE_BIT_SHIFT 0x0UL 135 + #define DX_HOST_GPR0_VALUE_BIT_SIZE 0x20UL 136 + #define DX_GPR_HOST_REG_OFFSET 0xA74UL 137 + #define DX_GPR_HOST_VALUE_BIT_SHIFT 0x0UL 138 + #define DX_GPR_HOST_VALUE_BIT_SIZE 0x20UL 139 + #define DX_HOST_POWER_DOWN_EN_REG_OFFSET 0xA78UL 140 + #define DX_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL 141 + #define DX_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL 142 + // -------------------------------------- 143 + // BLOCK: HOST_SRAM 144 + // -------------------------------------- 145 + #define DX_SRAM_DATA_REG_OFFSET 0xF00UL 
146 + #define DX_SRAM_DATA_VALUE_BIT_SHIFT 0x0UL 147 + #define DX_SRAM_DATA_VALUE_BIT_SIZE 0x20UL 148 + #define DX_SRAM_ADDR_REG_OFFSET 0xF04UL 149 + #define DX_SRAM_ADDR_VALUE_BIT_SHIFT 0x0UL 150 + #define DX_SRAM_ADDR_VALUE_BIT_SIZE 0xFUL 151 + #define DX_SRAM_DATA_READY_REG_OFFSET 0xF08UL 152 + #define DX_SRAM_DATA_READY_VALUE_BIT_SHIFT 0x0UL 153 + #define DX_SRAM_DATA_READY_VALUE_BIT_SIZE 0x1UL 154 + 155 + #endif //__DX_HOST_H__
+34
drivers/staging/ccree/dx_reg_base_host.h
/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/* Host-side register base addresses for the CryptoCell development
 * platform (Xilinx Zynq7000 ZC706 FPGA board).
 */

#ifndef __DX_REG_BASE_HOST_H__
#define __DX_REG_BASE_HOST_H__

/* Platform identification: Xilinx Zynq7000 ZC706 */
#define DX_PLAT_ZYNQ7000	1
#define DX_PLAT_ZYNQ7000_ZC706	1

/* Physical base of the CryptoCell register file */
#define DX_BASE_CC		0x80000000

/* FPGA environment register blocks (test harness side) */
#define DX_BASE_ENV_REGS	0x40008000
#define DX_BASE_ENV_CC_MEMORIES	0x40008000
#define DX_BASE_ENV_PERF_RAM	0x40009000

/* Register-block offsets relative to DX_BASE_CC */
#define DX_BASE_HOST_RGF	0x0UL
#define DX_BASE_CRY_KERNEL	0x0UL

/* Boot ROM base address */
#define DX_BASE_ROM		0x40000000

#endif /* __DX_REG_BASE_HOST_H__ */
+26
drivers/staging/ccree/dx_reg_common.h
/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/* Constants shared between the host driver and the CryptoCell device:
 * the device signature and HW version the driver probes for, and the
 * maximum SHA digest width (bits) supported by the hardware.
 */

#ifndef __DX_REG_COMMON_H__
#define __DX_REG_COMMON_H__

/* Expected value of the device signature register */
#define DX_DEV_SIGNATURE	0xDCC71200UL

/* Expected HW version register value for the supported CC revision */
#define CC_HW_VERSION		0xef840015UL

/* Widest SHA variant the device implements, in bits */
#define DX_DEV_SHA_MAX		512

#endif /* __DX_REG_COMMON_H__ */
+43
drivers/staging/ccree/hw_queue_defs_plat.h
/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __HW_QUEUE_DEFS_PLAT_H__
#define __HW_QUEUE_DEFS_PLAT_H__

/*****************************/
/* Descriptor packing macros */
/*****************************/

/* Number of free slots currently advertised by the descriptor queue.
 * NOTE(review): HW_QUEUE_SLOTS_MAX is applied as a mask over the
 * DSCRPTR_QUEUE_CONTENT register read; relies on the CC_HAL_* accessors
 * declared in the HAL headers.
 */
#define HW_QUEUE_FREE_SLOTS_GET() \
	(CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_CONTENT)) & \
	 HW_QUEUE_SLOTS_MAX)

/* Busy-poll the queue until it can accept seq_len more descriptors.
 * The loop body is intentionally empty: the register read in the
 * condition is the poll.
 */
#define HW_QUEUE_POLL_QUEUE_UNTIL_FREE_SLOTS(seq_len) \
	do { \
	} while (HW_QUEUE_FREE_SLOTS_GET() < (seq_len))

/* Push one 6-word HW descriptor into the queue registers.  word[5] is
 * written last, separated from words 0-4 by a write barrier, so the
 * device never observes it before the rest of the descriptor
 * (presumably the word[5] write arms the descriptor -- confirm against
 * the HW spec).
 */
#define HW_DESC_PUSH_TO_QUEUE(desc_p) do { \
	LOG_HW_DESC(desc_p); \
	HW_DESC_DUMP(desc_p); \
	CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(0), (desc_p)->word[0]); \
	CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(1), (desc_p)->word[1]); \
	CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(2), (desc_p)->word[2]); \
	CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(3), (desc_p)->word[3]); \
	CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(4), (desc_p)->word[4]); \
	wmb(); \
	CC_HAL_WRITE_REGISTER(GET_HW_Q_DESC_WORD_IDX(5), (desc_p)->word[5]); \
} while (0)

#endif /* __HW_QUEUE_DEFS_PLAT_H__ */
+537
drivers/staging/ccree/ssi_buffer_mgr.c
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + #include <linux/crypto.h> 18 + #include <linux/version.h> 19 + #include <crypto/algapi.h> 20 + #include <crypto/authenc.h> 21 + #include <crypto/scatterwalk.h> 22 + #include <linux/dmapool.h> 23 + #include <linux/dma-mapping.h> 24 + #include <linux/crypto.h> 25 + #include <linux/module.h> 26 + #include <linux/platform_device.h> 27 + 28 + #include "ssi_buffer_mgr.h" 29 + #include "cc_lli_defs.h" 30 + 31 + #define LLI_MAX_NUM_OF_DATA_ENTRIES 128 32 + #define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4 33 + #define MLLI_TABLE_MIN_ALIGNMENT 4 /*Force the MLLI table to be align to uint32 */ 34 + #define MAX_NUM_OF_BUFFERS_IN_MLLI 4 35 + #define MAX_NUM_OF_TOTAL_MLLI_ENTRIES (2*LLI_MAX_NUM_OF_DATA_ENTRIES + \ 36 + LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES ) 37 + 38 + #ifdef CC_DEBUG 39 + #define DUMP_SGL(sg) \ 40 + while (sg) { \ 41 + SSI_LOG_DEBUG("page=%lu offset=%u length=%u (dma_len=%u) " \ 42 + "dma_addr=%08x\n", (sg)->page_link, (sg)->offset, \ 43 + (sg)->length, sg_dma_len(sg), (sg)->dma_address); \ 44 + (sg) = sg_next(sg); \ 45 + } 46 + #define DUMP_MLLI_TABLE(mlli_p, nents) \ 47 + do { \ 48 + SSI_LOG_DEBUG("mlli=%pK nents=%u\n", (mlli_p), (nents)); \ 49 + while((nents)--) { \ 50 + SSI_LOG_DEBUG("addr=0x%08X size=0x%08X\n", \ 51 + (mlli_p)[LLI_WORD0_OFFSET], \ 52 + 
(mlli_p)[LLI_WORD1_OFFSET]); \ 53 + (mlli_p) += LLI_ENTRY_WORD_SIZE; \ 54 + } \ 55 + } while (0) 56 + #define GET_DMA_BUFFER_TYPE(buff_type) ( \ 57 + ((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \ 58 + ((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \ 59 + ((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID") 60 + #else 61 + #define DX_BUFFER_MGR_DUMP_SGL(sg) 62 + #define DX_BUFFER_MGR_DUMP_MLLI_TABLE(mlli_p, nents) 63 + #define GET_DMA_BUFFER_TYPE(buff_type) 64 + #endif 65 + 66 + 67 + enum dma_buffer_type { 68 + DMA_NULL_TYPE = -1, 69 + DMA_SGL_TYPE = 1, 70 + DMA_BUFF_TYPE = 2, 71 + }; 72 + 73 + struct buff_mgr_handle { 74 + struct dma_pool *mlli_buffs_pool; 75 + }; 76 + 77 + union buffer_array_entry { 78 + struct scatterlist *sgl; 79 + dma_addr_t buffer_dma; 80 + }; 81 + 82 + struct buffer_array { 83 + unsigned int num_of_buffers; 84 + union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI]; 85 + unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI]; 86 + int nents[MAX_NUM_OF_BUFFERS_IN_MLLI]; 87 + int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI]; 88 + enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI]; 89 + bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI]; 90 + uint32_t * mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI]; 91 + }; 92 + 93 + #ifdef CC_DMA_48BIT_SIM 94 + dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, uint32_t data_len) 95 + { 96 + dma_addr_t tmp_dma_addr; 97 + #ifdef CC_DMA_48BIT_SIM_FULL 98 + /* With this code all addresses will be switched to 48 bits. 
*/ 99 + /* The if condition protects from double expention */ 100 + if((((orig_addr >> 16) & 0xFFFF) != 0xFFFF) && 101 + (data_len <= CC_MAX_MLLI_ENTRY_SIZE)) { 102 + #else 103 + if((!(((orig_addr >> 16) & 0xFF) % 2)) && 104 + (data_len <= CC_MAX_MLLI_ENTRY_SIZE)) { 105 + #endif 106 + tmp_dma_addr = ((orig_addr<<16) | 0xFFFF0000 | 107 + (orig_addr & UINT16_MAX)); 108 + SSI_LOG_DEBUG("MAP DMA: orig address=0x%llX " 109 + "dma_address=0x%llX\n", 110 + orig_addr, tmp_dma_addr); 111 + return tmp_dma_addr; 112 + } 113 + return orig_addr; 114 + } 115 + 116 + dma_addr_t ssi_buff_mgr_restore_dma_addr(dma_addr_t orig_addr) 117 + { 118 + dma_addr_t tmp_dma_addr; 119 + #ifdef CC_DMA_48BIT_SIM_FULL 120 + /* With this code all addresses will be restored from 48 bits. */ 121 + /* The if condition protects from double restoring */ 122 + if((orig_addr >> 32) & 0xFFFF ) { 123 + #else 124 + if(((orig_addr >> 32) & 0xFFFF) && 125 + !(((orig_addr >> 32) & 0xFF) % 2) ) { 126 + #endif 127 + /*return high 16 bits*/ 128 + tmp_dma_addr = ((orig_addr >> 16)); 129 + /*clean the 0xFFFF in the lower bits (set in the add expansion)*/ 130 + tmp_dma_addr &= 0xFFFF0000; 131 + /* Set the original 16 bits */ 132 + tmp_dma_addr |= (orig_addr & UINT16_MAX); 133 + SSI_LOG_DEBUG("Release DMA: orig address=0x%llX " 134 + "dma_address=0x%llX\n", 135 + orig_addr, tmp_dma_addr); 136 + return tmp_dma_addr; 137 + } 138 + return orig_addr; 139 + } 140 + #endif 141 + /** 142 + * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries. 143 + * 144 + * @sg_list: SG list 145 + * @nbytes: [IN] Total SGL data bytes. 
146 + * @lbytes: [OUT] Returns the amount of bytes at the last entry 147 + */ 148 + static unsigned int ssi_buffer_mgr_get_sgl_nents( 149 + struct scatterlist *sg_list, unsigned int nbytes, uint32_t *lbytes, bool *is_chained) 150 + { 151 + unsigned int nents = 0; 152 + while (nbytes != 0) { 153 + if (sg_is_chain(sg_list)) { 154 + SSI_LOG_ERR("Unexpected chanined entry " 155 + "in sg (entry =0x%X) \n", nents); 156 + BUG(); 157 + } 158 + if (sg_list->length != 0) { 159 + nents++; 160 + /* get the number of bytes in the last entry */ 161 + *lbytes = nbytes; 162 + nbytes -= ( sg_list->length > nbytes ) ? nbytes : sg_list->length; 163 + sg_list = sg_next(sg_list); 164 + } else { 165 + sg_list = (struct scatterlist *)sg_page(sg_list); 166 + if (is_chained != NULL) { 167 + *is_chained = true; 168 + } 169 + } 170 + } 171 + SSI_LOG_DEBUG("nents %d last bytes %d\n",nents, *lbytes); 172 + return nents; 173 + } 174 + 175 + /** 176 + * ssi_buffer_mgr_zero_sgl() - Zero scatter scatter list data. 177 + * 178 + * @sgl: 179 + */ 180 + void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len) 181 + { 182 + struct scatterlist *current_sg = sgl; 183 + int sg_index = 0; 184 + 185 + while (sg_index <= data_len) { 186 + if (current_sg == NULL) { 187 + /* reached the end of the sgl --> just return back */ 188 + return; 189 + } 190 + memset(sg_virt(current_sg), 0, current_sg->length); 191 + sg_index += current_sg->length; 192 + current_sg = sg_next(current_sg); 193 + } 194 + } 195 + 196 + /** 197 + * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatter list data, 198 + * from to_skip to end, to dest and vice versa 199 + * 200 + * @dest: 201 + * @sg: 202 + * @to_skip: 203 + * @end: 204 + * @direct: 205 + */ 206 + void ssi_buffer_mgr_copy_scatterlist_portion( 207 + u8 *dest, struct scatterlist *sg, 208 + uint32_t to_skip, uint32_t end, 209 + enum ssi_sg_cpy_direct direct) 210 + { 211 + uint32_t nents, lbytes; 212 + 213 + nents = ssi_buffer_mgr_get_sgl_nents(sg, end, 
&lbytes, NULL); 214 + sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip), 0, (direct == SSI_SG_TO_BUF)); 215 + } 216 + 217 + static inline int ssi_buffer_mgr_render_buff_to_mlli( 218 + dma_addr_t buff_dma, uint32_t buff_size, uint32_t *curr_nents, 219 + uint32_t **mlli_entry_pp) 220 + { 221 + uint32_t *mlli_entry_p = *mlli_entry_pp; 222 + uint32_t new_nents;; 223 + 224 + /* Verify there is no memory overflow*/ 225 + new_nents = (*curr_nents + buff_size/CC_MAX_MLLI_ENTRY_SIZE + 1); 226 + if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES ) { 227 + return -ENOMEM; 228 + } 229 + 230 + /*handle buffer longer than 64 kbytes */ 231 + while (buff_size > CC_MAX_MLLI_ENTRY_SIZE ) { 232 + SSI_UPDATE_DMA_ADDR_TO_48BIT(buff_dma, CC_MAX_MLLI_ENTRY_SIZE); 233 + LLI_SET_ADDR(mlli_entry_p,buff_dma); 234 + LLI_SET_SIZE(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE); 235 + SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",*curr_nents, 236 + mlli_entry_p[LLI_WORD0_OFFSET], 237 + mlli_entry_p[LLI_WORD1_OFFSET]); 238 + SSI_RESTORE_DMA_ADDR_TO_48BIT(buff_dma); 239 + buff_dma += CC_MAX_MLLI_ENTRY_SIZE; 240 + buff_size -= CC_MAX_MLLI_ENTRY_SIZE; 241 + mlli_entry_p = mlli_entry_p + 2; 242 + (*curr_nents)++; 243 + } 244 + /*Last entry */ 245 + SSI_UPDATE_DMA_ADDR_TO_48BIT(buff_dma, buff_size); 246 + LLI_SET_ADDR(mlli_entry_p,buff_dma); 247 + LLI_SET_SIZE(mlli_entry_p, buff_size); 248 + SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",*curr_nents, 249 + mlli_entry_p[LLI_WORD0_OFFSET], 250 + mlli_entry_p[LLI_WORD1_OFFSET]); 251 + mlli_entry_p = mlli_entry_p + 2; 252 + *mlli_entry_pp = mlli_entry_p; 253 + (*curr_nents)++; 254 + return 0; 255 + } 256 + 257 + 258 + static inline int ssi_buffer_mgr_render_scatterlist_to_mlli( 259 + struct scatterlist *sgl, uint32_t sgl_data_len, uint32_t sglOffset, uint32_t *curr_nents, 260 + uint32_t **mlli_entry_pp) 261 + { 262 + struct scatterlist *curr_sgl = sgl; 263 + uint32_t *mlli_entry_p = *mlli_entry_pp; 264 + int32_t rc = 0; 265 + 266 + for ( ; 
(curr_sgl != NULL) && (sgl_data_len != 0); 267 + curr_sgl = sg_next(curr_sgl)) { 268 + uint32_t entry_data_len = 269 + (sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ? 270 + sg_dma_len(curr_sgl) - sglOffset : sgl_data_len ; 271 + sgl_data_len -= entry_data_len; 272 + rc = ssi_buffer_mgr_render_buff_to_mlli( 273 + sg_dma_address(curr_sgl) + sglOffset, entry_data_len, curr_nents, 274 + &mlli_entry_p); 275 + if(rc != 0) { 276 + return rc; 277 + } 278 + sglOffset=0; 279 + } 280 + *mlli_entry_pp = mlli_entry_p; 281 + return 0; 282 + } 283 + 284 + static int ssi_buffer_mgr_generate_mlli ( 285 + struct device *dev, 286 + struct buffer_array *sg_data, 287 + struct mlli_params *mlli_params) __maybe_unused; 288 + 289 + static int ssi_buffer_mgr_generate_mlli( 290 + struct device *dev, 291 + struct buffer_array *sg_data, 292 + struct mlli_params *mlli_params) 293 + { 294 + uint32_t *mlli_p; 295 + uint32_t total_nents = 0,prev_total_nents = 0; 296 + int rc = 0, i; 297 + 298 + SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers); 299 + 300 + /* Allocate memory from the pointed pool */ 301 + mlli_params->mlli_virt_addr = dma_pool_alloc( 302 + mlli_params->curr_pool, GFP_KERNEL, 303 + &(mlli_params->mlli_dma_addr)); 304 + if (unlikely(mlli_params->mlli_virt_addr == NULL)) { 305 + SSI_LOG_ERR("dma_pool_alloc() failed\n"); 306 + rc =-ENOMEM; 307 + goto build_mlli_exit; 308 + } 309 + SSI_UPDATE_DMA_ADDR_TO_48BIT(mlli_params->mlli_dma_addr, 310 + (MAX_NUM_OF_TOTAL_MLLI_ENTRIES* 311 + LLI_ENTRY_BYTE_SIZE)); 312 + /* Point to start of MLLI */ 313 + mlli_p = (uint32_t *)mlli_params->mlli_virt_addr; 314 + /* go over all SG's and link it to one MLLI table */ 315 + for (i = 0; i < sg_data->num_of_buffers; i++) { 316 + if (sg_data->type[i] == DMA_SGL_TYPE) 317 + rc = ssi_buffer_mgr_render_scatterlist_to_mlli( 318 + sg_data->entry[i].sgl, 319 + sg_data->total_data_len[i], sg_data->offset[i], &total_nents, 320 + &mlli_p); 321 + else /*DMA_BUFF_TYPE*/ 322 + rc = 
ssi_buffer_mgr_render_buff_to_mlli( 323 + sg_data->entry[i].buffer_dma, 324 + sg_data->total_data_len[i], &total_nents, 325 + &mlli_p); 326 + if(rc != 0) { 327 + return rc; 328 + } 329 + 330 + /* set last bit in the current table */ 331 + if (sg_data->mlli_nents[i] != NULL) { 332 + /*Calculate the current MLLI table length for the 333 + length field in the descriptor*/ 334 + *(sg_data->mlli_nents[i]) += 335 + (total_nents - prev_total_nents); 336 + prev_total_nents = total_nents; 337 + } 338 + } 339 + 340 + /* Set MLLI size for the bypass operation */ 341 + mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE); 342 + 343 + SSI_LOG_DEBUG("MLLI params: " 344 + "virt_addr=%pK dma_addr=0x%llX mlli_len=0x%X\n", 345 + mlli_params->mlli_virt_addr, 346 + (unsigned long long)mlli_params->mlli_dma_addr, 347 + mlli_params->mlli_len); 348 + 349 + build_mlli_exit: 350 + return rc; 351 + } 352 + 353 + static inline void ssi_buffer_mgr_add_buffer_entry( 354 + struct buffer_array *sgl_data, 355 + dma_addr_t buffer_dma, unsigned int buffer_len, 356 + bool is_last_entry, uint32_t *mlli_nents) 357 + { 358 + unsigned int index = sgl_data->num_of_buffers; 359 + 360 + SSI_LOG_DEBUG("index=%u single_buff=0x%llX " 361 + "buffer_len=0x%08X is_last=%d\n", 362 + index, (unsigned long long)buffer_dma, buffer_len, is_last_entry); 363 + sgl_data->nents[index] = 1; 364 + sgl_data->entry[index].buffer_dma = buffer_dma; 365 + sgl_data->offset[index] = 0; 366 + sgl_data->total_data_len[index] = buffer_len; 367 + sgl_data->type[index] = DMA_BUFF_TYPE; 368 + sgl_data->is_last[index] = is_last_entry; 369 + sgl_data->mlli_nents[index] = mlli_nents; 370 + if (sgl_data->mlli_nents[index] != NULL) 371 + *sgl_data->mlli_nents[index] = 0; 372 + sgl_data->num_of_buffers++; 373 + } 374 + 375 + static inline void ssi_buffer_mgr_add_scatterlist_entry( 376 + struct buffer_array *sgl_data, 377 + unsigned int nents, 378 + struct scatterlist *sgl, 379 + unsigned int data_len, 380 + unsigned int data_offset, 
381 + bool is_last_table, 382 + uint32_t *mlli_nents) 383 + { 384 + unsigned int index = sgl_data->num_of_buffers; 385 + 386 + SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n", 387 + index, nents, sgl, data_len, is_last_table); 388 + sgl_data->nents[index] = nents; 389 + sgl_data->entry[index].sgl = sgl; 390 + sgl_data->offset[index] = data_offset; 391 + sgl_data->total_data_len[index] = data_len; 392 + sgl_data->type[index] = DMA_SGL_TYPE; 393 + sgl_data->is_last[index] = is_last_table; 394 + sgl_data->mlli_nents[index] = mlli_nents; 395 + if (sgl_data->mlli_nents[index] != NULL) 396 + *sgl_data->mlli_nents[index] = 0; 397 + sgl_data->num_of_buffers++; 398 + } 399 + 400 + static int 401 + ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, uint32_t nents, 402 + enum dma_data_direction direction) 403 + { 404 + uint32_t i , j; 405 + struct scatterlist *l_sg = sg; 406 + for (i = 0; i < nents; i++) { 407 + if (l_sg == NULL) { 408 + break; 409 + } 410 + if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)){ 411 + SSI_LOG_ERR("dma_map_page() sg buffer failed\n"); 412 + goto err; 413 + } 414 + l_sg = sg_next(l_sg); 415 + } 416 + return nents; 417 + 418 + err: 419 + /* Restore mapped parts */ 420 + for (j = 0; j < i; j++) { 421 + if (sg == NULL) { 422 + break; 423 + } 424 + dma_unmap_sg(dev,sg,1,direction); 425 + sg = sg_next(sg); 426 + } 427 + return 0; 428 + } 429 + 430 + static int ssi_buffer_mgr_map_scatterlist (struct device *dev, 431 + struct scatterlist *sg, unsigned int nbytes, int direction, 432 + uint32_t *nents, uint32_t max_sg_nents, uint32_t *lbytes, 433 + uint32_t *mapped_nents) __maybe_unused; 434 + 435 + static int ssi_buffer_mgr_map_scatterlist( 436 + struct device *dev, struct scatterlist *sg, 437 + unsigned int nbytes, int direction, 438 + uint32_t *nents, uint32_t max_sg_nents, 439 + uint32_t *lbytes, uint32_t *mapped_nents) 440 + { 441 + bool is_chained = false; 442 + 443 + if (sg_is_last(sg)) { 444 + /* One 
entry only case -set to DLLI */ 445 + if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) { 446 + SSI_LOG_ERR("dma_map_sg() single buffer failed\n"); 447 + return -ENOMEM; 448 + } 449 + SSI_LOG_DEBUG("Mapped sg: dma_address=0x%llX " 450 + "page_link=0x%08lX addr=%pK offset=%u " 451 + "length=%u\n", 452 + (unsigned long long)sg_dma_address(sg), 453 + sg->page_link, 454 + sg_virt(sg), 455 + sg->offset, sg->length); 456 + *lbytes = nbytes; 457 + *nents = 1; 458 + *mapped_nents = 1; 459 + SSI_UPDATE_DMA_ADDR_TO_48BIT(sg_dma_address(sg), sg_dma_len(sg)); 460 + } else { /*sg_is_last*/ 461 + *nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes, 462 + &is_chained); 463 + if (*nents > max_sg_nents) { 464 + *nents = 0; 465 + SSI_LOG_ERR("Too many fragments. current %d max %d\n", 466 + *nents, max_sg_nents); 467 + return -ENOMEM; 468 + } 469 + if (!is_chained) { 470 + /* In case of mmu the number of mapped nents might 471 + be changed from the original sgl nents */ 472 + *mapped_nents = dma_map_sg(dev, sg, *nents, direction); 473 + if (unlikely(*mapped_nents == 0)){ 474 + *nents = 0; 475 + SSI_LOG_ERR("dma_map_sg() sg buffer failed\n"); 476 + return -ENOMEM; 477 + } 478 + } else { 479 + /*In this case the driver maps entry by entry so it 480 + must have the same nents before and after map */ 481 + *mapped_nents = ssi_buffer_mgr_dma_map_sg(dev, 482 + sg, 483 + *nents, 484 + direction); 485 + if (unlikely(*mapped_nents != *nents)){ 486 + *nents = *mapped_nents; 487 + SSI_LOG_ERR("dma_map_sg() sg buffer failed\n"); 488 + return -ENOMEM; 489 + } 490 + } 491 + } 492 + 493 + return 0; 494 + } 495 + 496 + int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata) 497 + { 498 + struct buff_mgr_handle *buff_mgr_handle; 499 + struct device *dev = &drvdata->plat_dev->dev; 500 + 501 + buff_mgr_handle = (struct buff_mgr_handle *) 502 + kmalloc(sizeof(struct buff_mgr_handle), GFP_KERNEL); 503 + if (buff_mgr_handle == NULL) 504 + return -ENOMEM; 505 + 506 + drvdata->buff_mgr_handle = 
buff_mgr_handle; 507 + 508 + buff_mgr_handle->mlli_buffs_pool = dma_pool_create( 509 + "dx_single_mlli_tables", dev, 510 + MAX_NUM_OF_TOTAL_MLLI_ENTRIES * 511 + LLI_ENTRY_BYTE_SIZE, 512 + MLLI_TABLE_MIN_ALIGNMENT, 0); 513 + 514 + if (unlikely(buff_mgr_handle->mlli_buffs_pool == NULL)) 515 + goto error; 516 + 517 + return 0; 518 + 519 + error: 520 + ssi_buffer_mgr_fini(drvdata); 521 + return -ENOMEM; 522 + } 523 + 524 + int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata) 525 + { 526 + struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle; 527 + 528 + if (buff_mgr_handle != NULL) { 529 + if (buff_mgr_handle->mlli_buffs_pool != NULL) 530 + dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool); 531 + kfree(drvdata->buff_mgr_handle); 532 + drvdata->buff_mgr_handle = NULL; 533 + 534 + } 535 + return 0; 536 + } 537 +
+79
drivers/staging/ccree/ssi_buffer_mgr.h
/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/* \file ssi_buffer_mgr.h
   Buffer Manager: DMA buffer / MLLI table bookkeeping for the CC driver
 */

#ifndef __SSI_BUFFER_MGR_H__
#define __SSI_BUFFER_MGR_H__

#include <crypto/algapi.h>

#include "ssi_config.h"
#include "ssi_driver.h"


/* How a request's data is described to the HW DMA engine */
enum ssi_req_dma_buf_type {
	SSI_DMA_BUF_NULL = 0,	/* no data buffer */
	SSI_DMA_BUF_DLLI,	/* single contiguous buffer (direct LLI) */
	SSI_DMA_BUF_MLLI	/* multi-entry linked-list (MLLI) table */
};

/* Copy direction for ssi_buffer_mgr_copy_scatterlist_portion() */
enum ssi_sg_cpy_direct {
	SSI_SG_TO_BUF = 0,	/* copy out of the SG list into a flat buffer */
	SSI_SG_FROM_BUF = 1	/* copy from a flat buffer into the SG list */
};

/* Descriptor of an MLLI table resident in CC internal SRAM */
struct ssi_mlli {
	ssi_sram_addr_t sram_addr;
	unsigned int nents; //sg nents
	unsigned int mlli_nents; //mlli nents might be different than the above
};

/* One MLLI table allocated out of the buffer manager's dma_pool */
struct mlli_params {
	struct dma_pool *curr_pool;	/* pool the table was allocated from */
	uint8_t *mlli_virt_addr;	/* CPU-side view of the table */
	dma_addr_t mlli_dma_addr;	/* device-side address of the table */
	uint32_t mlli_len;		/* table length in bytes */
};

int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata);

int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata);

void ssi_buffer_mgr_copy_scatterlist_portion(u8 *dest, struct scatterlist *sg, uint32_t to_skip, uint32_t end, enum ssi_sg_cpy_direct direct);

void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, uint32_t data_len);


#ifdef CC_DMA_48BIT_SIM
/* 48-bit DMA address simulation hooks (debug builds only) */
dma_addr_t ssi_buff_mgr_update_dma_addr(dma_addr_t orig_addr, uint32_t data_len);
dma_addr_t ssi_buff_mgr_restore_dma_addr(dma_addr_t orig_addr);

#define SSI_UPDATE_DMA_ADDR_TO_48BIT(addr,size) addr = \
			ssi_buff_mgr_update_dma_addr(addr,size)
#define SSI_RESTORE_DMA_ADDR_TO_48BIT(addr) addr = \
			ssi_buff_mgr_restore_dma_addr(addr)
#else

/* No-ops when 48-bit DMA simulation is disabled (self-assignment) */
#define SSI_UPDATE_DMA_ADDR_TO_48BIT(addr,size) addr = addr
#define SSI_RESTORE_DMA_ADDR_TO_48BIT(addr) addr = addr

#endif

#endif /*__SSI_BUFFER_MGR_H__*/
+61
drivers/staging/ccree/ssi_config.h
/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/* \file ssi_config.h
   Definitions for ARM CryptoCell Linux Crypto Driver
 */

#ifndef __SSI_CONFIG_H__
#define __SSI_CONFIG_H__

#include <linux/version.h>

/* NOTE(review): defined unconditionally here, which makes the
 * CCREE_DISABLE_COHERENT_DMA_OPS Kconfig option a no-op and makes the
 * CONFIG_ARM64 redefinition below redundant (identical redefinition,
 * legal per C). Confirm whether this should instead be guarded by
 * CONFIG_CCREE_DISABLE_COHERENT_DMA_OPS. */
#define DISABLE_COHERENT_DMA_OPS
//#define FLUSH_CACHE_ALL
//#define COMPLETION_DELAY
//#define DX_DUMP_DESCS
// #define DX_DUMP_BYTES
// #define CC_DEBUG
#define ENABLE_CC_SYSFS /* Enable sysfs interface for debugging REE driver */
//#define ENABLE_CC_CYCLE_COUNT
//#define DX_IRQ_DELAY 100000
#define DMA_BIT_MASK_LEN 48 /* was 32 bit, but for juno's sake it was enlarged to 48 bit */

/* Cycle counting requires the sysfs interface to report through */
#if defined ENABLE_CC_CYCLE_COUNT && defined ENABLE_CC_SYSFS
	#define CC_CYCLE_COUNT
#endif


#if defined (CONFIG_ARM64)	// TODO currently only this mode was test on Juno (which is ARM64), need to enable coherent also.
#define DISABLE_COHERENT_DMA_OPS
#endif

/* Define the CryptoCell DMA cache coherency signals configuration */
#if defined (DISABLE_COHERENT_DMA_OPS)
	/* Software Controlled Cache Coherency (SCCC) */
	#define SSI_CACHE_PARAMS (0x000)
	/* CC attached to NONE-ACP such as HPP/ACE/AMBA4.
	 * The customer is responsible to enable/disable this feature
	 * according to his platform type. */
	#define DX_HAS_ACP 0
#else
	#define SSI_CACHE_PARAMS (0xEEE)
	/* CC attached to ACP */
	#define DX_HAS_ACP 1
#endif

#endif /*__SSI_CONFIG_H__*/
+499
drivers/staging/ccree/ssi_driver.c
/*
 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/random.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/sysctl.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/platform_device.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/pm.h>

/* cache.h required for L1_CACHE_ALIGN() and cache_line_size() */
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/of.h>

#include "ssi_config.h"
#include "ssi_driver.h"
#include "ssi_request_mgr.h"
#include "ssi_buffer_mgr.h"
#include "ssi_sysfs.h"
#include "ssi_sram_mgr.h"
#include "ssi_pm.h"


#ifdef DX_DUMP_BYTES
/*
 * dump_byte_array() - hex-dump @size bytes of @the_array to the debug log,
 * a line at a time, prefixed with @name and the total length.
 * Compiled in only when DX_DUMP_BYTES is defined.
 */
void dump_byte_array(const char *name, const uint8_t *the_array, unsigned long size)
{
	int i , line_offset = 0, ret = 0;
	const uint8_t *cur_byte;
	char line_buf[80];

	if (the_array == NULL) {
		SSI_LOG_ERR("cannot dump_byte_array - NULL pointer\n");
		return;
	}

	ret = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ",
		name, size);
	if (ret < 0) {
		SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n",ret);
		return;
	}
	line_offset = ret;
	for (i = 0 , cur_byte = the_array;
	     (i < size) && (line_offset < sizeof(line_buf)); i++, cur_byte++) {
		ret = snprintf(line_buf + line_offset,
			sizeof(line_buf) - line_offset,
			"0x%02X ", *cur_byte);
		if (ret < 0) {
			SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n",ret);
			return;
		}
		line_offset += ret;
		if (line_offset > 75) { /* Cut before line end */
			SSI_LOG_DEBUG("%s\n", line_buf);
			line_offset = 0;
		}
	}

	if (line_offset > 0) /* Dump remaining line */
		SSI_LOG_DEBUG("%s\n", line_buf);
}
#endif

/*
 * cc_isr() - CryptoCell interrupt handler (shared IRQ line).
 * Reads and clears IRR; on AXIM completion it masks further completion
 * interrupts (unmasked later by the deferred completion handler) and
 * kicks request-manager completion processing. AXI errors are logged
 * only; unknown cause bits are reported but otherwise ignored.
 */
static irqreturn_t cc_isr(int irq, void *dev_id)
{
	struct ssi_drvdata *drvdata = (struct ssi_drvdata *)dev_id;
	void __iomem *cc_base = drvdata->cc_base;
	uint32_t irr;
	uint32_t imr;
	DECL_CYCLE_COUNT_RESOURCES;

	/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
	START_CYCLE_COUNT();

	/* read the interrupt status */
	irr = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
	SSI_LOG_DEBUG("Got IRR=0x%08X\n", irr);
	if (unlikely(irr == 0)) { /* Probably shared interrupt line */
		SSI_LOG_ERR("Got interrupt with empty IRR\n");
		return IRQ_NONE;
	}
	imr = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR));

	/* clear interrupt - must be before processing events */
	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), irr);

	drvdata->irq = irr;
	/* Completion interrupt - most probable */
	if (likely((irr & SSI_COMP_IRQ_MASK) != 0)) {
		/* Mask AXI completion interrupt - will be unmasked in Deferred service handler */
		CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), imr | SSI_COMP_IRQ_MASK);
		irr &= ~SSI_COMP_IRQ_MASK;
		complete_request(drvdata);
	}

	/* AXI error interrupt */
	if (unlikely((irr & SSI_AXI_ERR_IRQ_MASK) != 0)) {
		uint32_t axi_err;

		/* Read the AXI error ID */
		axi_err = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
		SSI_LOG_DEBUG("AXI completion error: axim_mon_err=0x%08X\n", axi_err);

		irr &= ~SSI_AXI_ERR_IRQ_MASK;
	}

	if (unlikely(irr != 0)) {
		SSI_LOG_DEBUG("IRR includes unknown cause bits (0x%08X)\n", irr);
		/* Just warning */
	}

	END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_0);
	START_CYCLE_COUNT_AT(drvdata->isr_exit_cycles);

	return IRQ_HANDLED;
}

/*
 * init_cc_regs() - HW register bring-up, used at probe and resume:
 * unmask AXI interrupt sources, clear pending host interrupts, unmask
 * the causes this driver handles, program the optional IRQ-delay timer,
 * and write the DMA cache-coherency parameters (SSI_CACHE_PARAMS).
 * @is_probe only controls the informational logging. Always returns 0.
 */
int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
{
	unsigned int val;
	void __iomem *cc_base = drvdata->cc_base;

	/* Unmask all AXI interrupt sources AXI_CFG1 register */
	val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG));
	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG), val & ~SSI_AXI_IRQ_MASK);
	SSI_LOG_DEBUG("AXIM_CFG=0x%08X\n", CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG)));

	/* Clear all pending interrupts */
	val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
	SSI_LOG_DEBUG("IRR=0x%08X\n", val);
	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), val);

	/* Unmask relevant interrupt cause */
	val = (~(SSI_COMP_IRQ_MASK | SSI_AXI_ERR_IRQ_MASK | SSI_GPR0_IRQ_MASK));
	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), val);

#ifdef DX_HOST_IRQ_TIMER_INIT_VAL_REG_OFFSET
#ifdef DX_IRQ_DELAY
	/* Set CC IRQ delay */
	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL),
		DX_IRQ_DELAY);
#endif
	if (CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)) > 0) {
		SSI_LOG_DEBUG("irq_delay=%d CC cycles\n",
			CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)));
	}
#endif

	val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));
	if (is_probe == true) {
		SSI_LOG_INFO("Cache params previous: 0x%08X\n", val);
	}
	CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS), SSI_CACHE_PARAMS);
	val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));
	if (is_probe == true) {
		SSI_LOG_INFO("Cache params current: 0x%08X (expected: 0x%08X)\n", val, SSI_CACHE_PARAMS);
	}

	return 0;
}

/*
 * init_cc_resources() - probe-time bring-up: map the register space, hook
 * the IRQ, set DMA masks, verify the HW signature, then initialize the
 * driver-internal managers (regs, sysfs, SRAM, request, buffer, power).
 * On failure everything acquired so far is torn down.
 *
 * NOTE(review): the error path calls the *_fini() routines even for
 * managers that were never initialized - assumed NULL-handle safe
 * (ssi_buffer_mgr_fini() visibly is); confirm for the others.
 * NOTE(review): if ioremap() succeeded but request_irq() was never
 * reached/failed, iounmap() is skipped (it only runs under
 * irq_registered) - looks like a small leak on that path; confirm.
 */
static int init_cc_resources(struct platform_device *plat_dev)
{
	struct resource *req_mem_cc_regs = NULL;
	void __iomem *cc_base = NULL;
	bool irq_registered = false;
	struct ssi_drvdata *new_drvdata = kzalloc(sizeof(struct ssi_drvdata), GFP_KERNEL);
	uint32_t signature_val;
	int rc = 0;

	if (unlikely(new_drvdata == NULL)) {
		SSI_LOG_ERR("Failed to allocate drvdata");
		rc = -ENOMEM;
		goto init_cc_res_err;
	}

	new_drvdata->inflight_counter = 0;

	dev_set_drvdata(&plat_dev->dev, new_drvdata);
	/* Get device resources */
	/* First CC registers space */
	new_drvdata->res_mem = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	if (unlikely(new_drvdata->res_mem == NULL)) {
		SSI_LOG_ERR("Failed getting IO memory resource\n");
		rc = -ENODEV;
		goto init_cc_res_err;
	}
	SSI_LOG_DEBUG("Got MEM resource (%s): start=0x%llX end=0x%llX\n",
		new_drvdata->res_mem->name,
		(unsigned long long)new_drvdata->res_mem->start,
		(unsigned long long)new_drvdata->res_mem->end);
	/* Map registers space */
	req_mem_cc_regs = request_mem_region(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem), "arm_cc7x_regs");
	if (unlikely(req_mem_cc_regs == NULL)) {
		SSI_LOG_ERR("Couldn't allocate registers memory region at "
			     "0x%08X\n", (unsigned int)new_drvdata->res_mem->start);
		rc = -EBUSY;
		goto init_cc_res_err;
	}
	cc_base = ioremap(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem));
	if (unlikely(cc_base == NULL)) {
		SSI_LOG_ERR("ioremap[CC](0x%08X,0x%08X) failed\n",
			(unsigned int)new_drvdata->res_mem->start, (unsigned int)resource_size(new_drvdata->res_mem));
		rc = -ENOMEM;
		goto init_cc_res_err;
	}
	SSI_LOG_DEBUG("CC registers mapped from %pa to 0x%p\n", &new_drvdata->res_mem->start, cc_base);
	new_drvdata->cc_base = cc_base;


	/* Then IRQ */
	new_drvdata->res_irq = platform_get_resource(plat_dev, IORESOURCE_IRQ, 0);
	if (unlikely(new_drvdata->res_irq == NULL)) {
		SSI_LOG_ERR("Failed getting IRQ resource\n");
		rc = -ENODEV;
		goto init_cc_res_err;
	}
	rc = request_irq(new_drvdata->res_irq->start, cc_isr,
			 IRQF_SHARED, "arm_cc7x", new_drvdata);
	if (unlikely(rc != 0)) {
		SSI_LOG_ERR("Could not register to interrupt %llu\n",
			(unsigned long long)new_drvdata->res_irq->start);
		goto init_cc_res_err;
	}
	init_completion(&new_drvdata->icache_setup_completion);

	irq_registered = true;
	SSI_LOG_DEBUG("Registered to IRQ (%s) %llu\n",
		new_drvdata->res_irq->name,
		(unsigned long long)new_drvdata->res_irq->start);

	new_drvdata->plat_dev = plat_dev;

	/* Provide default DMA masks if the platform did not set any */
	if(new_drvdata->plat_dev->dev.dma_mask == NULL)
	{
		new_drvdata->plat_dev->dev.dma_mask = & new_drvdata->plat_dev->dev.coherent_dma_mask;
	}
	if (!new_drvdata->plat_dev->dev.coherent_dma_mask)
	{
		new_drvdata->plat_dev->dev.coherent_dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
	}

	/* Verify correct mapping */
	signature_val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE));
	if (signature_val != DX_DEV_SIGNATURE) {
		SSI_LOG_ERR("Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
			signature_val, (uint32_t)DX_DEV_SIGNATURE);
		rc = -EINVAL;
		goto init_cc_res_err;
	}
	SSI_LOG_DEBUG("CC SIGNATURE=0x%08X\n", signature_val);

	/* Display HW versions */
	SSI_LOG(KERN_INFO, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n", SSI_DEV_NAME_STR,
		CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_VERSION)), DRV_MODULE_VERSION);

	rc = init_cc_regs(new_drvdata, true);
	if (unlikely(rc != 0)) {
		SSI_LOG_ERR("init_cc_regs failed\n");
		goto init_cc_res_err;
	}

#ifdef ENABLE_CC_SYSFS
	rc = ssi_sysfs_init(&(plat_dev->dev.kobj), new_drvdata);
	if (unlikely(rc != 0)) {
		SSI_LOG_ERR("init_stat_db failed\n");
		goto init_cc_res_err;
	}
#endif

	rc = ssi_sram_mgr_init(new_drvdata);
	if (unlikely(rc != 0)) {
		SSI_LOG_ERR("ssi_sram_mgr_init failed\n");
		goto init_cc_res_err;
	}

	/* Reserve the SRAM region used for MLLI tables up front */
	new_drvdata->mlli_sram_addr =
		ssi_sram_mgr_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
	if (unlikely(new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR)) {
		SSI_LOG_ERR("Failed to alloc MLLI Sram buffer\n");
		rc = -ENOMEM;
		goto init_cc_res_err;
	}

	rc = request_mgr_init(new_drvdata);
	if (unlikely(rc != 0)) {
		SSI_LOG_ERR("request_mgr_init failed\n");
		goto init_cc_res_err;
	}

	rc = ssi_buffer_mgr_init(new_drvdata);
	if (unlikely(rc != 0)) {
		SSI_LOG_ERR("buffer_mgr_init failed\n");
		goto init_cc_res_err;
	}

	rc = ssi_power_mgr_init(new_drvdata);
	if (unlikely(rc != 0)) {
		SSI_LOG_ERR("ssi_power_mgr_init failed\n");
		goto init_cc_res_err;
	}

	return 0;

init_cc_res_err:
	SSI_LOG_ERR("Freeing CC HW resources!\n");

	if (new_drvdata != NULL) {
		ssi_power_mgr_fini(new_drvdata);
		ssi_buffer_mgr_fini(new_drvdata);
		request_mgr_fini(new_drvdata);
		ssi_sram_mgr_fini(new_drvdata);
#ifdef ENABLE_CC_SYSFS
		ssi_sysfs_fini();
#endif

		if (req_mem_cc_regs != NULL) {
			if (irq_registered) {
				free_irq(new_drvdata->res_irq->start, new_drvdata);
				new_drvdata->res_irq = NULL;
				iounmap(cc_base);
				new_drvdata->cc_base = NULL;
			}
			release_mem_region(new_drvdata->res_mem->start,
				resource_size(new_drvdata->res_mem));
			new_drvdata->res_mem = NULL;
		}
		kfree(new_drvdata);
		dev_set_drvdata(&plat_dev->dev, NULL);
	}

	return rc;
}

/* Quiesce the HW: mask every host interrupt source. */
void fini_cc_regs(struct ssi_drvdata *drvdata)
{
	/* Mask all interrupts */
	WRITE_REGISTER(drvdata->cc_base +
		       CC_REG_OFFSET(HOST_RGF, HOST_IMR), 0xFFFFFFFF);

}

/*
 * cleanup_cc_resources() - remove-time teardown, releasing everything
 * init_cc_resources() acquired, in reverse order of initialization.
 */
static void cleanup_cc_resources(struct platform_device *plat_dev)
{
	struct ssi_drvdata *drvdata =
		(struct ssi_drvdata *)dev_get_drvdata(&plat_dev->dev);

	ssi_power_mgr_fini(drvdata);
	ssi_buffer_mgr_fini(drvdata);
	request_mgr_fini(drvdata);
	ssi_sram_mgr_fini(drvdata);
#ifdef ENABLE_CC_SYSFS
	ssi_sysfs_fini();
#endif

	/* Mask all interrupts */
	WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_IMR),
		0xFFFFFFFF);
	free_irq(drvdata->res_irq->start, drvdata);
	drvdata->res_irq = NULL;

	fini_cc_regs(drvdata);

	if (drvdata->cc_base != NULL) {
		iounmap(drvdata->cc_base);
		release_mem_region(drvdata->res_mem->start,
			resource_size(drvdata->res_mem));
		drvdata->cc_base = NULL;
		drvdata->res_mem = NULL;
	}

	kfree(drvdata);
	dev_set_drvdata(&plat_dev->dev, NULL);
}

/*
 * cc7x_probe() - platform driver probe. On ARM32 debug builds it first
 * logs cache-line and CPU identification read via CP15.
 */
static int cc7x_probe(struct platform_device *plat_dev)
{
	int rc;
#if defined(CONFIG_ARM) && defined(CC_DEBUG)
	uint32_t ctr, cacheline_size;

	asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
	cacheline_size =  4 << ((ctr >> 16) & 0xf);
	SSI_LOG_DEBUG("CP15(L1_CACHE_BYTES) = %u , Kconfig(L1_CACHE_BYTES) = %u\n",
		cacheline_size, L1_CACHE_BYTES);

	asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (ctr));
	SSI_LOG_DEBUG("Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X,"
		     " Part 0x%03X, Rev r%dp%d\n",
		(ctr>>24), (ctr>>16)&0xF, (ctr>>4)&0xFFF, (ctr>>20)&0xF, ctr&0xF);
#endif

	/* Map registers space */
	rc = init_cc_resources(plat_dev);
	if (rc != 0)
		return rc;

	SSI_LOG(KERN_INFO, "ARM cc7x_ree device initialized\n");

	return 0;
}

/* Platform driver remove: release all resources (and dump stats if on). */
static int cc7x_remove(struct platform_device *plat_dev)
{
	SSI_LOG_DEBUG("Releasing cc7x resources...\n");

	cleanup_cc_resources(plat_dev);

	SSI_LOG(KERN_INFO, "ARM cc7x_ree device terminated\n");
#ifdef ENABLE_CYCLE_COUNT
	display_all_stat_db();
#endif

	return 0;
}
/* Runtime-PM hooks (only when the kernel is built with runtime PM/sleep) */
#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
static struct dev_pm_ops arm_cc7x_driver_pm = {
	SET_RUNTIME_PM_OPS(ssi_power_mgr_runtime_suspend, ssi_power_mgr_runtime_resume, NULL)
};
#endif

#if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP)
#define	DX_DRIVER_RUNTIME_PM	(&arm_cc7x_driver_pm)
#else
#define	DX_DRIVER_RUNTIME_PM	NULL
#endif


#ifdef CONFIG_OF
static const struct of_device_id arm_cc7x_dev_of_match[] = {
	{.compatible = "arm,cryptocell-712-ree"},
	{}
};
MODULE_DEVICE_TABLE(of, arm_cc7x_dev_of_match);
#endif

static struct platform_driver cc7x_driver = {
	.driver = {
		   .name = "cc7xree",
		   .owner = THIS_MODULE,
#ifdef CONFIG_OF
		   .of_match_table = arm_cc7x_dev_of_match,
#endif
		   .pm = DX_DRIVER_RUNTIME_PM,
	},
	.probe = cc7x_probe,
	.remove = cc7x_remove,
};
module_platform_driver(cc7x_driver);

/* Module description */
MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_AUTHOR("ARM");
MODULE_LICENSE("GPL v2");
+183
drivers/staging/ccree/ssi_driver.h
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + /* \file ssi_driver.h 18 + ARM CryptoCell Linux Crypto Driver 19 + */ 20 + 21 + #ifndef __SSI_DRIVER_H__ 22 + #define __SSI_DRIVER_H__ 23 + 24 + #include "ssi_config.h" 25 + #ifdef COMP_IN_WQ 26 + #include <linux/workqueue.h> 27 + #else 28 + #include <linux/interrupt.h> 29 + #endif 30 + #include <linux/dma-mapping.h> 31 + #include <crypto/algapi.h> 32 + #include <crypto/aes.h> 33 + #include <crypto/sha.h> 34 + #include <crypto/authenc.h> 35 + #include <linux/version.h> 36 + 37 + #ifndef INT32_MAX /* Missing in Linux kernel */ 38 + #define INT32_MAX 0x7FFFFFFFL 39 + #endif 40 + 41 + /* Registers definitions from shared/hw/ree_include */ 42 + #include "dx_reg_base_host.h" 43 + #include "dx_host.h" 44 + #define DX_CC_HOST_VIRT /* must be defined before including dx_cc_regs.h */ 45 + #include "cc_hw_queue_defs.h" 46 + #include "cc_regs.h" 47 + #include "dx_reg_common.h" 48 + #include "cc_hal.h" 49 + #include "ssi_sram_mgr.h" 50 + #define CC_SUPPORT_SHA DX_DEV_SHA_MAX 51 + #include "cc_crypto_ctx.h" 52 + #include "ssi_sysfs.h" 53 + 54 + #define DRV_MODULE_VERSION "3.0" 55 + 56 + #define SSI_DEV_NAME_STR "cc715ree" 57 + #define SSI_CC_HAS_AES_CCM 1 58 + #define SSI_CC_HAS_AES_GCM 1 59 + #define SSI_CC_HAS_AES_XTS 1 60 + #define SSI_CC_HAS_AES_ESSIV 1 61 + #define 
SSI_CC_HAS_AES_BITLOCKER 1 62 + #define SSI_CC_HAS_AES_CTS 1 63 + #define SSI_CC_HAS_MULTI2 0 64 + #define SSI_CC_HAS_CMAC 1 65 + 66 + #define SSI_AXI_IRQ_MASK ((1 << DX_AXIM_CFG_BRESPMASK_BIT_SHIFT) | (1 << DX_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \ 67 + (1 << DX_AXIM_CFG_INFLTMASK_BIT_SHIFT) | (1 << DX_AXIM_CFG_COMPMASK_BIT_SHIFT)) 68 + 69 + #define SSI_AXI_ERR_IRQ_MASK (1 << DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT) 70 + 71 + #define SSI_COMP_IRQ_MASK (1 << DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT) 72 + 73 + /* TEE FIPS status interrupt */ 74 + #define SSI_GPR0_IRQ_MASK (1 << DX_HOST_IRR_GPR0_BIT_SHIFT) 75 + 76 + #define SSI_CRA_PRIO 3000 77 + 78 + #define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */ 79 + 80 + #define MAX_REQUEST_QUEUE_SIZE 4096 81 + #define MAX_MLLI_BUFF_SIZE 2080 82 + #define MAX_ICV_NENTS_SUPPORTED 2 83 + 84 + /* Definitions for HW descriptors DIN/DOUT fields */ 85 + #define NS_BIT 1 86 + #define AXI_ID 0 87 + /* AXI_ID is not actually the AXI ID of the transaction but the value of AXI_ID 88 + field in the HW descriptor. The DMA engine +8 that value. */ 89 + 90 + /* Logging macros */ 91 + #define SSI_LOG(level, format, ...) \ 92 + printk(level "cc715ree::%s: " format , __func__, ##__VA_ARGS__) 93 + #define SSI_LOG_ERR(format, ...) SSI_LOG(KERN_ERR, format, ##__VA_ARGS__) 94 + #define SSI_LOG_WARNING(format, ...) SSI_LOG(KERN_WARNING, format, ##__VA_ARGS__) 95 + #define SSI_LOG_NOTICE(format, ...) SSI_LOG(KERN_NOTICE, format, ##__VA_ARGS__) 96 + #define SSI_LOG_INFO(format, ...) SSI_LOG(KERN_INFO, format, ##__VA_ARGS__) 97 + #ifdef CC_DEBUG 98 + #define SSI_LOG_DEBUG(format, ...) SSI_LOG(KERN_DEBUG, format, ##__VA_ARGS__) 99 + #else /* Debug log messages are removed at compile time for non-DEBUG config. */ 100 + #define SSI_LOG_DEBUG(format, ...) do {} while (0) 101 + #endif 102 + 103 + #define MIN(a, b) (((a) < (b)) ? (a) : (b)) 104 + #define MAX(a, b) (((a) > (b)) ? 
(a) : (b)) 105 + 106 + struct ssi_crypto_req { 107 + void (*user_cb)(struct device *dev, void *req, void __iomem *cc_base); 108 + void *user_arg; 109 + struct completion seq_compl; /* request completion */ 110 + #ifdef ENABLE_CYCLE_COUNT 111 + enum stat_op op_type; 112 + cycles_t submit_cycle; 113 + bool is_monitored_p; 114 + #endif 115 + }; 116 + 117 + /** 118 + * struct ssi_drvdata - driver private data context 119 + * @cc_base: virt address of the CC registers 120 + * @irq: device IRQ number 121 + * @irq_mask: Interrupt mask shadow (1 for masked interrupts) 122 + * @fw_ver: SeP loaded firmware version 123 + */ 124 + struct ssi_drvdata { 125 + struct resource *res_mem; 126 + struct resource *res_irq; 127 + void __iomem *cc_base; 128 + #ifdef DX_BASE_ENV_REGS 129 + void __iomem *env_base; /* ARM CryptoCell development FPGAs only */ 130 + #endif 131 + unsigned int irq; 132 + uint32_t irq_mask; 133 + uint32_t fw_ver; 134 + /* Calibration time of start/stop 135 + * monitor descriptors */ 136 + uint32_t monitor_null_cycles; 137 + struct platform_device *plat_dev; 138 + ssi_sram_addr_t mlli_sram_addr; 139 + struct completion icache_setup_completion; 140 + void *buff_mgr_handle; 141 + void *request_mgr_handle; 142 + void *sram_mgr_handle; 143 + 144 + #ifdef ENABLE_CYCLE_COUNT 145 + cycles_t isr_exit_cycles; /* Save for isr-to-tasklet latency */ 146 + #endif 147 + uint32_t inflight_counter; 148 + 149 + }; 150 + 151 + struct async_gen_req_ctx { 152 + dma_addr_t iv_dma_addr; 153 + enum drv_crypto_direction op_type; 154 + }; 155 + 156 + #ifdef DX_DUMP_BYTES 157 + void dump_byte_array(const char *name, const uint8_t *the_array, unsigned long size); 158 + #else 159 + #define dump_byte_array(name, array, size) do { \ 160 + } while (0); 161 + #endif 162 + 163 + #ifdef ENABLE_CYCLE_COUNT 164 + #define DECL_CYCLE_COUNT_RESOURCES cycles_t _last_cycles_read 165 + #define START_CYCLE_COUNT() do { _last_cycles_read = get_cycles(); } while (0) 166 + #define 
END_CYCLE_COUNT(_stat_op_type, _stat_phase) update_host_stat(_stat_op_type, _stat_phase, get_cycles() - _last_cycles_read) 167 + #define GET_START_CYCLE_COUNT() _last_cycles_read 168 + #define START_CYCLE_COUNT_AT(_var) do { _var = get_cycles(); } while(0) 169 + #define END_CYCLE_COUNT_AT(_var, _stat_op_type, _stat_phase) update_host_stat(_stat_op_type, _stat_phase, get_cycles() - _var) 170 + #else 171 + #define DECL_CYCLE_COUNT_RESOURCES 172 + #define START_CYCLE_COUNT() do { } while (0) 173 + #define END_CYCLE_COUNT(_stat_op_type, _stat_phase) do { } while (0) 174 + #define GET_START_CYCLE_COUNT() 0 175 + #define START_CYCLE_COUNT_AT(_var) do { } while (0) 176 + #define END_CYCLE_COUNT_AT(_var, _stat_op_type, _stat_phase) do { } while (0) 177 + #endif /*ENABLE_CYCLE_COUNT*/ 178 + 179 + int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe); 180 + void fini_cc_regs(struct ssi_drvdata *drvdata); 181 + 182 + #endif /*__SSI_DRIVER_H__*/ 183 +
+144
drivers/staging/ccree/ssi_pm.c
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + 18 + #include "ssi_config.h" 19 + #include <linux/kernel.h> 20 + #include <linux/platform_device.h> 21 + #include <linux/interrupt.h> 22 + #include <crypto/ctr.h> 23 + #include <linux/pm_runtime.h> 24 + #include "ssi_driver.h" 25 + #include "ssi_buffer_mgr.h" 26 + #include "ssi_request_mgr.h" 27 + #include "ssi_sram_mgr.h" 28 + #include "ssi_sysfs.h" 29 + #include "ssi_pm.h" 30 + #include "ssi_pm_ext.h" 31 + 32 + 33 + #if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP) 34 + 35 + #define POWER_DOWN_ENABLE 0x01 36 + #define POWER_DOWN_DISABLE 0x00 37 + 38 + 39 + int ssi_power_mgr_runtime_suspend(struct device *dev) 40 + { 41 + struct ssi_drvdata *drvdata = 42 + (struct ssi_drvdata *)dev_get_drvdata(dev); 43 + int rc; 44 + 45 + SSI_LOG_DEBUG("ssi_power_mgr_runtime_suspend: set HOST_POWER_DOWN_EN\n"); 46 + WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE); 47 + rc = ssi_request_mgr_runtime_suspend_queue(drvdata); 48 + if (rc != 0) { 49 + SSI_LOG_ERR("ssi_request_mgr_runtime_suspend_queue (%x)\n", rc); 50 + return rc; 51 + } 52 + fini_cc_regs(drvdata); 53 + 54 + /* Specific HW suspend code */ 55 + ssi_pm_ext_hw_suspend(dev); 56 + return 0; 57 + } 58 + 59 + int ssi_power_mgr_runtime_resume(struct device *dev) 60 + { 
61 + int rc; 62 + struct ssi_drvdata *drvdata = 63 + (struct ssi_drvdata *)dev_get_drvdata(dev); 64 + 65 + SSI_LOG_DEBUG("ssi_power_mgr_runtime_resume , unset HOST_POWER_DOWN_EN\n"); 66 + WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE); 67 + /* Specific HW resume code */ 68 + ssi_pm_ext_hw_resume(dev); 69 + 70 + rc = init_cc_regs(drvdata, false); 71 + if (rc !=0) { 72 + SSI_LOG_ERR("init_cc_regs (%x)\n",rc); 73 + return rc; 74 + } 75 + 76 + rc = ssi_request_mgr_runtime_resume_queue(drvdata); 77 + if (rc !=0) { 78 + SSI_LOG_ERR("ssi_request_mgr_runtime_resume_queue (%x)\n",rc); 79 + return rc; 80 + } 81 + 82 + return 0; 83 + } 84 + 85 + int ssi_power_mgr_runtime_get(struct device *dev) 86 + { 87 + int rc = 0; 88 + 89 + if (ssi_request_mgr_is_queue_runtime_suspend( 90 + (struct ssi_drvdata *)dev_get_drvdata(dev))) { 91 + rc = pm_runtime_get_sync(dev); 92 + } else { 93 + pm_runtime_get_noresume(dev); 94 + } 95 + return rc; 96 + } 97 + 98 + int ssi_power_mgr_runtime_put_suspend(struct device *dev) 99 + { 100 + int rc = 0; 101 + 102 + if (!ssi_request_mgr_is_queue_runtime_suspend( 103 + (struct ssi_drvdata *)dev_get_drvdata(dev))) { 104 + pm_runtime_mark_last_busy(dev); 105 + rc = pm_runtime_put_autosuspend(dev); 106 + } 107 + else { 108 + /* Something wrong happens*/ 109 + BUG(); 110 + } 111 + return rc; 112 + 113 + } 114 + 115 + #endif 116 + 117 + 118 + 119 + int ssi_power_mgr_init(struct ssi_drvdata *drvdata) 120 + { 121 + int rc = 0; 122 + #if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP) 123 + struct platform_device *plat_dev = drvdata->plat_dev; 124 + /* must be before the enabling to avoid resdundent suspending */ 125 + pm_runtime_set_autosuspend_delay(&plat_dev->dev,SSI_SUSPEND_TIMEOUT); 126 + pm_runtime_use_autosuspend(&plat_dev->dev); 127 + /* activate the PM module */ 128 + rc = pm_runtime_set_active(&plat_dev->dev); 129 + if (rc != 0) 130 + return rc; 131 + /* enable the PM module*/ 132 + 
pm_runtime_enable(&plat_dev->dev); 133 + #endif 134 + return rc; 135 + } 136 + 137 + void ssi_power_mgr_fini(struct ssi_drvdata *drvdata) 138 + { 139 + #if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP) 140 + struct platform_device *plat_dev = drvdata->plat_dev; 141 + 142 + pm_runtime_disable(&plat_dev->dev); 143 + #endif 144 + }
+46
drivers/staging/ccree/ssi_pm.h
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + /* \file ssi_pm.h 18 + */ 19 + 20 + #ifndef __SSI_POWER_MGR_H__ 21 + #define __SSI_POWER_MGR_H__ 22 + 23 + 24 + #include "ssi_config.h" 25 + #include "ssi_driver.h" 26 + 27 + 28 + #define SSI_SUSPEND_TIMEOUT 3000 29 + 30 + 31 + int ssi_power_mgr_init(struct ssi_drvdata *drvdata); 32 + 33 + void ssi_power_mgr_fini(struct ssi_drvdata *drvdata); 34 + 35 + #if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP) 36 + int ssi_power_mgr_runtime_suspend(struct device *dev); 37 + 38 + int ssi_power_mgr_runtime_resume(struct device *dev); 39 + 40 + int ssi_power_mgr_runtime_get(struct device *dev); 41 + 42 + int ssi_power_mgr_runtime_put_suspend(struct device *dev); 43 + #endif 44 + 45 + #endif /*__POWER_MGR_H__*/ 46 +
+60
drivers/staging/ccree/ssi_pm_ext.c
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + 18 + #include "ssi_config.h" 19 + #include <linux/kernel.h> 20 + #include <linux/platform_device.h> 21 + #include <linux/interrupt.h> 22 + #include <crypto/ctr.h> 23 + #include <linux/pm_runtime.h> 24 + #include "ssi_driver.h" 25 + #include "ssi_sram_mgr.h" 26 + #include "ssi_pm_ext.h" 27 + 28 + /* 29 + This function should suspend the HW (if possiable), It should be implemented by 30 + the driver user. 31 + The reference code clears the internal SRAM to imitate lose of state. 32 + */ 33 + void ssi_pm_ext_hw_suspend(struct device *dev) 34 + { 35 + struct ssi_drvdata *drvdata = 36 + (struct ssi_drvdata *)dev_get_drvdata(dev); 37 + unsigned int val; 38 + void __iomem *cc_base = drvdata->cc_base; 39 + unsigned int sram_addr = 0; 40 + 41 + CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, SRAM_ADDR), sram_addr); 42 + 43 + for (;sram_addr < SSI_CC_SRAM_SIZE ; sram_addr+=4) { 44 + CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, SRAM_DATA), 0x0); 45 + 46 + do { 47 + val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, SRAM_DATA_READY)); 48 + } while (!(val &0x1)); 49 + } 50 + } 51 + 52 + /* 53 + This function should resume the HW (if possiable).It should be implemented by 54 + the driver user. 
55 + */ 56 + void ssi_pm_ext_hw_resume(struct device *dev) 57 + { 58 + return; 59 + } 60 +
+33
drivers/staging/ccree/ssi_pm_ext.h
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + /* \file ssi_pm_ext.h 18 + */ 19 + 20 + #ifndef __PM_EXT_H__ 21 + #define __PM_EXT_H__ 22 + 23 + 24 + #include "ssi_config.h" 25 + #include "ssi_driver.h" 26 + 27 + void ssi_pm_ext_hw_suspend(struct device *dev); 28 + 29 + void ssi_pm_ext_hw_resume(struct device *dev); 30 + 31 + 32 + #endif /*__POWER_MGR_H__*/ 33 +
+680
drivers/staging/ccree/ssi_request_mgr.c
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + #include "ssi_config.h" 18 + #include <linux/kernel.h> 19 + #include <linux/platform_device.h> 20 + #include <linux/interrupt.h> 21 + #include <linux/delay.h> 22 + #include <crypto/ctr.h> 23 + #ifdef FLUSH_CACHE_ALL 24 + #include <asm/cacheflush.h> 25 + #endif 26 + #include <linux/pm_runtime.h> 27 + #include "ssi_driver.h" 28 + #include "ssi_buffer_mgr.h" 29 + #include "ssi_request_mgr.h" 30 + #include "ssi_sysfs.h" 31 + #include "ssi_pm.h" 32 + 33 + #define SSI_MAX_POLL_ITER 10 34 + 35 + #define AXIM_MON_BASE_OFFSET CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_COMP) 36 + 37 + #ifdef CC_CYCLE_COUNT 38 + 39 + #define MONITOR_CNTR_BIT 0 40 + 41 + /** 42 + * Monitor descriptor. 43 + * Used to measure CC performance. 44 + */ 45 + #define INIT_CC_MONITOR_DESC(desc_p) \ 46 + do { \ 47 + HW_DESC_INIT(desc_p); \ 48 + HW_DESC_SET_DIN_MONITOR_CNTR(desc_p); \ 49 + } while (0) 50 + 51 + /** 52 + * Try adding monitor descriptor BEFORE enqueuing sequence. 
53 + */ 54 + #define CC_CYCLE_DESC_HEAD(cc_base_addr, desc_p, lock_p, is_monitored_p) \ 55 + do { \ 56 + if (!test_and_set_bit(MONITOR_CNTR_BIT, (lock_p))) { \ 57 + enqueue_seq((cc_base_addr), (desc_p), 1); \ 58 + *(is_monitored_p) = true; \ 59 + } else { \ 60 + *(is_monitored_p) = false; \ 61 + } \ 62 + } while (0) 63 + 64 + /** 65 + * If CC_CYCLE_DESC_HEAD was successfully added: 66 + * 1. Add memory barrier descriptor to ensure last AXI transaction. 67 + * 2. Add monitor descriptor to sequence tail AFTER enqueuing sequence. 68 + */ 69 + #define CC_CYCLE_DESC_TAIL(cc_base_addr, desc_p, is_monitored) \ 70 + do { \ 71 + if ((is_monitored) == true) { \ 72 + HwDesc_s barrier_desc; \ 73 + HW_DESC_INIT(&barrier_desc); \ 74 + HW_DESC_SET_DIN_NO_DMA(&barrier_desc, 0, 0xfffff0); \ 75 + HW_DESC_SET_DOUT_NO_DMA(&barrier_desc, 0, 0, 1); \ 76 + enqueue_seq((cc_base_addr), &barrier_desc, 1); \ 77 + enqueue_seq((cc_base_addr), (desc_p), 1); \ 78 + } \ 79 + } while (0) 80 + 81 + /** 82 + * Try reading CC monitor counter value upon sequence complete. 83 + * Can only succeed if the lock_p is taken by the owner of the given request. 
84 + */ 85 + #define END_CC_MONITOR_COUNT(cc_base_addr, stat_op_type, stat_phase, monitor_null_cycles, lock_p, is_monitored) \ 86 + do { \ 87 + uint32_t elapsed_cycles; \ 88 + if ((is_monitored) == true) { \ 89 + elapsed_cycles = READ_REGISTER((cc_base_addr) + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_MEASURE_CNTR)); \ 90 + clear_bit(MONITOR_CNTR_BIT, (lock_p)); \ 91 + if (elapsed_cycles > 0) \ 92 + update_cc_stat(stat_op_type, stat_phase, (elapsed_cycles - monitor_null_cycles)); \ 93 + } \ 94 + } while (0) 95 + 96 + #else /*CC_CYCLE_COUNT*/ 97 + 98 + #define INIT_CC_MONITOR_DESC(desc_p) do { } while (0) 99 + #define CC_CYCLE_DESC_HEAD(cc_base_addr, desc_p, lock_p, is_monitored_p) do { } while (0) 100 + #define CC_CYCLE_DESC_TAIL(cc_base_addr, desc_p, is_monitored) do { } while (0) 101 + #define END_CC_MONITOR_COUNT(cc_base_addr, stat_op_type, stat_phase, monitor_null_cycles, lock_p, is_monitored) do { } while (0) 102 + #endif /*CC_CYCLE_COUNT*/ 103 + 104 + 105 + struct ssi_request_mgr_handle { 106 + /* Request manager resources */ 107 + unsigned int hw_queue_size; /* HW capability */ 108 + unsigned int min_free_hw_slots; 109 + unsigned int max_used_sw_slots; 110 + struct ssi_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE]; 111 + uint32_t req_queue_head; 112 + uint32_t req_queue_tail; 113 + uint32_t axi_completed; 114 + uint32_t q_free_slots; 115 + spinlock_t hw_lock; 116 + HwDesc_s compl_desc; 117 + uint8_t *dummy_comp_buff; 118 + dma_addr_t dummy_comp_buff_dma; 119 + HwDesc_s monitor_desc; 120 + volatile unsigned long monitor_lock; 121 + #ifdef COMP_IN_WQ 122 + struct workqueue_struct *workq; 123 + struct delayed_work compwork; 124 + #else 125 + struct tasklet_struct comptask; 126 + #endif 127 + #if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP) 128 + bool is_runtime_suspended; 129 + #endif 130 + }; 131 + 132 + static void comp_handler(unsigned long devarg); 133 + #ifdef COMP_IN_WQ 134 + static void comp_work_handler(struct work_struct *work); 135 + #endif 136 + 
137 + void request_mgr_fini(struct ssi_drvdata *drvdata) 138 + { 139 + struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; 140 + 141 + if (req_mgr_h == NULL) 142 + return; /* Not allocated */ 143 + 144 + if (req_mgr_h->dummy_comp_buff_dma != 0) { 145 + SSI_RESTORE_DMA_ADDR_TO_48BIT(req_mgr_h->dummy_comp_buff_dma); 146 + dma_free_coherent(&drvdata->plat_dev->dev, 147 + sizeof(uint32_t), req_mgr_h->dummy_comp_buff, 148 + req_mgr_h->dummy_comp_buff_dma); 149 + } 150 + 151 + SSI_LOG_DEBUG("max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size - 152 + req_mgr_h->min_free_hw_slots) ); 153 + SSI_LOG_DEBUG("max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots); 154 + 155 + #ifdef COMP_IN_WQ 156 + flush_workqueue(req_mgr_h->workq); 157 + destroy_workqueue(req_mgr_h->workq); 158 + #else 159 + /* Kill tasklet */ 160 + tasklet_kill(&req_mgr_h->comptask); 161 + #endif 162 + memset(req_mgr_h, 0, sizeof(struct ssi_request_mgr_handle)); 163 + kfree(req_mgr_h); 164 + drvdata->request_mgr_handle = NULL; 165 + } 166 + 167 + int request_mgr_init(struct ssi_drvdata *drvdata) 168 + { 169 + #ifdef CC_CYCLE_COUNT 170 + HwDesc_s monitor_desc[2]; 171 + struct ssi_crypto_req monitor_req = {0}; 172 + #endif 173 + struct ssi_request_mgr_handle *req_mgr_h; 174 + int rc = 0; 175 + 176 + req_mgr_h = kzalloc(sizeof(struct ssi_request_mgr_handle),GFP_KERNEL); 177 + if (req_mgr_h == NULL) { 178 + rc = -ENOMEM; 179 + goto req_mgr_init_err; 180 + } 181 + 182 + drvdata->request_mgr_handle = req_mgr_h; 183 + 184 + spin_lock_init(&req_mgr_h->hw_lock); 185 + #ifdef COMP_IN_WQ 186 + SSI_LOG_DEBUG("Initializing completion workqueue\n"); 187 + req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq"); 188 + if (unlikely(req_mgr_h->workq == NULL)) { 189 + SSI_LOG_ERR("Failed creating work queue\n"); 190 + rc = -ENOMEM; 191 + goto req_mgr_init_err; 192 + } 193 + INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler); 194 + #else 195 + SSI_LOG_DEBUG("Initializing completion 
tasklet\n"); 196 + tasklet_init(&req_mgr_h->comptask, comp_handler, (unsigned long)drvdata); 197 + #endif 198 + req_mgr_h->hw_queue_size = READ_REGISTER(drvdata->cc_base + 199 + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_SRAM_SIZE)); 200 + SSI_LOG_DEBUG("hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size); 201 + if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) { 202 + SSI_LOG_ERR("Invalid HW queue size = %u (Min. required is %u)\n", 203 + req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE); 204 + rc = -ENOMEM; 205 + goto req_mgr_init_err; 206 + } 207 + req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size; 208 + req_mgr_h->max_used_sw_slots = 0; 209 + 210 + 211 + /* Allocate DMA word for "dummy" completion descriptor use */ 212 + req_mgr_h->dummy_comp_buff = dma_alloc_coherent(&drvdata->plat_dev->dev, 213 + sizeof(uint32_t), &req_mgr_h->dummy_comp_buff_dma, GFP_KERNEL); 214 + if (!req_mgr_h->dummy_comp_buff) { 215 + SSI_LOG_ERR("Not enough memory to allocate DMA (%zu) dropped " 216 + "buffer\n", sizeof(uint32_t)); 217 + rc = -ENOMEM; 218 + goto req_mgr_init_err; 219 + } 220 + SSI_UPDATE_DMA_ADDR_TO_48BIT(req_mgr_h->dummy_comp_buff_dma, 221 + sizeof(uint32_t)); 222 + 223 + /* Init. 
"dummy" completion descriptor */ 224 + HW_DESC_INIT(&req_mgr_h->compl_desc); 225 + HW_DESC_SET_DIN_CONST(&req_mgr_h->compl_desc, 0, sizeof(uint32_t)); 226 + HW_DESC_SET_DOUT_DLLI(&req_mgr_h->compl_desc, 227 + req_mgr_h->dummy_comp_buff_dma, 228 + sizeof(uint32_t), NS_BIT, 1); 229 + HW_DESC_SET_FLOW_MODE(&req_mgr_h->compl_desc, BYPASS); 230 + HW_DESC_SET_QUEUE_LAST_IND(&req_mgr_h->compl_desc); 231 + 232 + #ifdef CC_CYCLE_COUNT 233 + /* For CC-HW cycle performance trace */ 234 + INIT_CC_MONITOR_DESC(&req_mgr_h->monitor_desc); 235 + set_bit(MONITOR_CNTR_BIT, &req_mgr_h->monitor_lock); 236 + monitor_desc[0] = req_mgr_h->monitor_desc; 237 + monitor_desc[1] = req_mgr_h->monitor_desc; 238 + 239 + rc = send_request(drvdata, &monitor_req, monitor_desc, 2, 0); 240 + if (unlikely(rc != 0)) 241 + goto req_mgr_init_err; 242 + 243 + drvdata->monitor_null_cycles = READ_REGISTER(drvdata->cc_base + 244 + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_MEASURE_CNTR)); 245 + SSI_LOG_ERR("Calibration time=0x%08x\n", drvdata->monitor_null_cycles); 246 + 247 + clear_bit(MONITOR_CNTR_BIT, &req_mgr_h->monitor_lock); 248 + #endif 249 + 250 + return 0; 251 + 252 + req_mgr_init_err: 253 + request_mgr_fini(drvdata); 254 + return rc; 255 + } 256 + 257 + static inline void enqueue_seq( 258 + void __iomem *cc_base, 259 + HwDesc_s seq[], unsigned int seq_len) 260 + { 261 + int i; 262 + 263 + for (i = 0; i < seq_len; i++) { 264 + writel_relaxed(seq[i].word[0], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0))); 265 + writel_relaxed(seq[i].word[1], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0))); 266 + writel_relaxed(seq[i].word[2], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0))); 267 + writel_relaxed(seq[i].word[3], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0))); 268 + writel_relaxed(seq[i].word[4], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, 
DSCRPTR_QUEUE_WORD0))); 269 + wmb(); 270 + writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base+CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0))); 271 + #ifdef DX_DUMP_DESCS 272 + SSI_LOG_DEBUG("desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", i, 273 + seq[i].word[0], seq[i].word[1], seq[i].word[2], seq[i].word[3], seq[i].word[4], seq[i].word[5]); 274 + #endif 275 + } 276 + } 277 + 278 + /*! 279 + * Completion will take place if and only if user requested completion 280 + * by setting "is_dout = 0" in send_request(). 281 + * 282 + * \param dev 283 + * \param dx_compl_h The completion event to signal 284 + */ 285 + static void request_mgr_complete(struct device *dev, void *dx_compl_h, void __iomem *cc_base) 286 + { 287 + struct completion *this_compl = dx_compl_h; 288 + complete(this_compl); 289 + } 290 + 291 + 292 + static inline int request_mgr_queues_status_check( 293 + struct ssi_request_mgr_handle *req_mgr_h, 294 + void __iomem *cc_base, 295 + unsigned int total_seq_len) 296 + { 297 + unsigned long poll_queue; 298 + 299 + /* SW queue is checked only once as it will not 300 + be chaned during the poll becasue the spinlock_bh 301 + is held by the thread */ 302 + if (unlikely(((req_mgr_h->req_queue_head + 1) & 303 + (MAX_REQUEST_QUEUE_SIZE - 1)) == 304 + req_mgr_h->req_queue_tail)) { 305 + SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n", 306 + req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE); 307 + return -EBUSY; 308 + } 309 + 310 + if ((likely(req_mgr_h->q_free_slots >= total_seq_len)) ) { 311 + return 0; 312 + } 313 + /* Wait for space in HW queue. Poll constant num of iterations. 
*/ 314 + for (poll_queue =0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue ++) { 315 + req_mgr_h->q_free_slots = 316 + CC_HAL_READ_REGISTER( 317 + CC_REG_OFFSET(CRY_KERNEL, 318 + DSCRPTR_QUEUE_CONTENT)); 319 + if (unlikely(req_mgr_h->q_free_slots < 320 + req_mgr_h->min_free_hw_slots)) { 321 + req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots; 322 + } 323 + 324 + if (likely (req_mgr_h->q_free_slots >= total_seq_len)) { 325 + /* If there is enough place return */ 326 + return 0; 327 + } 328 + 329 + SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n", 330 + req_mgr_h->q_free_slots, total_seq_len); 331 + } 332 + /* No room in the HW queue try again later */ 333 + SSI_LOG_DEBUG("HW FIFO full, timeout. req_queue_head=%d " 334 + "sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n", 335 + req_mgr_h->req_queue_head, 336 + MAX_REQUEST_QUEUE_SIZE, 337 + req_mgr_h->q_free_slots, 338 + total_seq_len); 339 + return -EAGAIN; 340 + } 341 + 342 + /*! 343 + * Enqueue caller request to crypto hardware. 344 + * 345 + * \param drvdata 346 + * \param ssi_req The request to enqueue 347 + * \param desc The crypto sequence 348 + * \param len The crypto sequence length 349 + * \param is_dout If "true": completion is handled by the caller 350 + * If "false": this function adds a dummy descriptor completion 351 + * and waits upon completion signal. 352 + * 353 + * \return int Returns -EINPROGRESS if "is_dout=true"; "0" if "is_dout=false" 354 + */ 355 + int send_request( 356 + struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req, 357 + HwDesc_s *desc, unsigned int len, bool is_dout) 358 + { 359 + void __iomem *cc_base = drvdata->cc_base; 360 + struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; 361 + unsigned int used_sw_slots; 362 + unsigned int total_seq_len = len; /*initial sequence length*/ 363 + int rc; 364 + unsigned int max_required_seq_len = total_seq_len + ((is_dout == 0) ? 
1 : 0); 365 + DECL_CYCLE_COUNT_RESOURCES; 366 + 367 + #if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP) 368 + rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev); 369 + if (rc != 0) { 370 + SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n",rc); 371 + spin_unlock_bh(&req_mgr_h->hw_lock); 372 + return rc; 373 + } 374 + #endif 375 + 376 + do { 377 + spin_lock_bh(&req_mgr_h->hw_lock); 378 + 379 + /* Check if there is enough place in the SW/HW queues 380 + in case iv gen add the max size and in case of no dout add 1 381 + for the internal completion descriptor */ 382 + rc = request_mgr_queues_status_check(req_mgr_h, 383 + cc_base, 384 + max_required_seq_len); 385 + if (likely(rc == 0 )) 386 + /* There is enough place in the queue */ 387 + break; 388 + /* something wrong release the spinlock*/ 389 + spin_unlock_bh(&req_mgr_h->hw_lock); 390 + 391 + if (rc != -EAGAIN) { 392 + /* Any error other than HW queue full 393 + (SW queue is full) */ 394 + #if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP) 395 + ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev); 396 + #endif 397 + return rc; 398 + } 399 + 400 + /* HW queue is full - short sleep */ 401 + msleep(1); 402 + } while (1); 403 + 404 + /* Additional completion descriptor is needed incase caller did not 405 + enabled any DLLI/MLLI DOUT bit in the given sequence */ 406 + if (!is_dout) { 407 + init_completion(&ssi_req->seq_compl); 408 + ssi_req->user_cb = request_mgr_complete; 409 + ssi_req->user_arg = &(ssi_req->seq_compl); 410 + total_seq_len++; 411 + } 412 + 413 + used_sw_slots = ((req_mgr_h->req_queue_head - req_mgr_h->req_queue_tail) & (MAX_REQUEST_QUEUE_SIZE-1)); 414 + if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots)) { 415 + req_mgr_h->max_used_sw_slots = used_sw_slots; 416 + } 417 + 418 + CC_CYCLE_DESC_HEAD(cc_base, &req_mgr_h->monitor_desc, 419 + &req_mgr_h->monitor_lock, &ssi_req->is_monitored_p); 420 + 421 + /* Enqueue request - must be locked with HW lock*/ 422 + 
req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *ssi_req; 423 + START_CYCLE_COUNT_AT(req_mgr_h->req_queue[req_mgr_h->req_queue_head].submit_cycle); 424 + req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1); 425 + /* TODO: Use circ_buf.h ? */ 426 + 427 + SSI_LOG_DEBUG("Enqueue request head=%u\n", req_mgr_h->req_queue_head); 428 + 429 + #ifdef FLUSH_CACHE_ALL 430 + flush_cache_all(); 431 + #endif 432 + 433 + /* STAT_PHASE_4: Push sequence */ 434 + START_CYCLE_COUNT(); 435 + enqueue_seq(cc_base, desc, len); 436 + enqueue_seq(cc_base, &req_mgr_h->compl_desc, (is_dout ? 0 : 1)); 437 + END_CYCLE_COUNT(ssi_req->op_type, STAT_PHASE_4); 438 + 439 + CC_CYCLE_DESC_TAIL(cc_base, &req_mgr_h->monitor_desc, ssi_req->is_monitored_p); 440 + 441 + if (unlikely(req_mgr_h->q_free_slots < total_seq_len)) { 442 + /*This means that there was a problem with the resume*/ 443 + BUG(); 444 + } 445 + /* Update the free slots in HW queue */ 446 + req_mgr_h->q_free_slots -= total_seq_len; 447 + 448 + spin_unlock_bh(&req_mgr_h->hw_lock); 449 + 450 + if (!is_dout) { 451 + /* Wait upon sequence completion. 452 + * Return "0" -Operation done successfully. */ 453 + return wait_for_completion_interruptible(&ssi_req->seq_compl); 454 + } else { 455 + /* Operation still in process */ 456 + return -EINPROGRESS; 457 + } 458 + } 459 + 460 + 461 + /*! 462 + * Enqueue caller request to crypto hardware during init process. 463 + * assume this function is not called in middle of a flow, 464 + * since we set QUEUE_LAST_IND flag in the last descriptor. 
465 + * 466 + * \param drvdata 467 + * \param desc The crypto sequence 468 + * \param len The crypto sequence length 469 + * 470 + * \return int Returns "0" upon success 471 + */ 472 + int send_request_init( 473 + struct ssi_drvdata *drvdata, HwDesc_s *desc, unsigned int len) 474 + { 475 + void __iomem *cc_base = drvdata->cc_base; 476 + struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; 477 + unsigned int total_seq_len = len; /*initial sequence length*/ 478 + int rc = 0; 479 + 480 + /* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT. */ 481 + rc = request_mgr_queues_status_check(req_mgr_h, cc_base, total_seq_len); 482 + if (unlikely(rc != 0 )) { 483 + return rc; 484 + } 485 + HW_DESC_SET_QUEUE_LAST_IND(&desc[len-1]); 486 + 487 + enqueue_seq(cc_base, desc, len); 488 + 489 + /* Update the free slots in HW queue */ 490 + req_mgr_h->q_free_slots = CC_HAL_READ_REGISTER( 491 + CC_REG_OFFSET(CRY_KERNEL, 492 + DSCRPTR_QUEUE_CONTENT)); 493 + 494 + return 0; 495 + } 496 + 497 + 498 + void complete_request(struct ssi_drvdata *drvdata) 499 + { 500 + struct ssi_request_mgr_handle *request_mgr_handle = 501 + drvdata->request_mgr_handle; 502 + #ifdef COMP_IN_WQ 503 + queue_delayed_work(request_mgr_handle->workq, &request_mgr_handle->compwork, 0); 504 + #else 505 + tasklet_schedule(&request_mgr_handle->comptask); 506 + #endif 507 + } 508 + 509 + #ifdef COMP_IN_WQ 510 + static void comp_work_handler(struct work_struct *work) 511 + { 512 + struct ssi_drvdata *drvdata = 513 + container_of(work, struct ssi_drvdata, compwork.work); 514 + 515 + comp_handler((unsigned long)drvdata); 516 + } 517 + #endif 518 + 519 + static void proc_completions(struct ssi_drvdata *drvdata) 520 + { 521 + struct ssi_crypto_req *ssi_req; 522 + struct platform_device *plat_dev = drvdata->plat_dev; 523 + struct ssi_request_mgr_handle * request_mgr_handle = 524 + drvdata->request_mgr_handle; 525 + #if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP) 526 + int rc = 
0; 527 + #endif 528 + DECL_CYCLE_COUNT_RESOURCES; 529 + 530 + while(request_mgr_handle->axi_completed) { 531 + request_mgr_handle->axi_completed--; 532 + 533 + /* Dequeue request */ 534 + if (unlikely(request_mgr_handle->req_queue_head == request_mgr_handle->req_queue_tail)) { 535 + SSI_LOG_ERR("Request queue is empty req_queue_head==req_queue_tail==%u\n", request_mgr_handle->req_queue_head); 536 + BUG(); 537 + } 538 + 539 + ssi_req = &request_mgr_handle->req_queue[request_mgr_handle->req_queue_tail]; 540 + END_CYCLE_COUNT_AT(ssi_req->submit_cycle, ssi_req->op_type, STAT_PHASE_5); /* Seq. Comp. */ 541 + END_CC_MONITOR_COUNT(drvdata->cc_base, ssi_req->op_type, STAT_PHASE_6, 542 + drvdata->monitor_null_cycles, &request_mgr_handle->monitor_lock, ssi_req->is_monitored_p); 543 + 544 + #ifdef FLUSH_CACHE_ALL 545 + flush_cache_all(); 546 + #endif 547 + 548 + #ifdef COMPLETION_DELAY 549 + /* Delay */ 550 + { 551 + uint32_t axi_err; 552 + int i; 553 + SSI_LOG_INFO("Delay\n"); 554 + for (i=0;i<1000000;i++) { 555 + axi_err = READ_REGISTER(drvdata->cc_base + CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR)); 556 + } 557 + } 558 + #endif /* COMPLETION_DELAY */ 559 + 560 + if (likely(ssi_req->user_cb != NULL)) { 561 + START_CYCLE_COUNT(); 562 + ssi_req->user_cb(&plat_dev->dev, ssi_req->user_arg, drvdata->cc_base); 563 + END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_3); 564 + } 565 + request_mgr_handle->req_queue_tail = (request_mgr_handle->req_queue_tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1); 566 + SSI_LOG_DEBUG("Dequeue request tail=%u\n", request_mgr_handle->req_queue_tail); 567 + SSI_LOG_DEBUG("Request completed. 
axi_completed=%d\n", request_mgr_handle->axi_completed); 568 + #if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP) 569 + rc = ssi_power_mgr_runtime_put_suspend(&plat_dev->dev); 570 + if (rc != 0) { 571 + SSI_LOG_ERR("Failed to set runtime suspension %d\n",rc); 572 + } 573 + #endif 574 + } 575 + } 576 + 577 + /* Deferred service handler, run as interrupt-fired tasklet */ 578 + static void comp_handler(unsigned long devarg) 579 + { 580 + struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg; 581 + void __iomem *cc_base = drvdata->cc_base; 582 + struct ssi_request_mgr_handle * request_mgr_handle = 583 + drvdata->request_mgr_handle; 584 + 585 + uint32_t irq; 586 + 587 + DECL_CYCLE_COUNT_RESOURCES; 588 + 589 + START_CYCLE_COUNT(); 590 + 591 + irq = (drvdata->irq & SSI_COMP_IRQ_MASK); 592 + 593 + if (irq & SSI_COMP_IRQ_MASK) { 594 + /* To avoid the interrupt from firing as we unmask it, we clear it now */ 595 + CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK); 596 + 597 + /* Avoid race with above clear: Test completion counter once more */ 598 + request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE, 599 + CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET)); 600 + 601 + /* ISR-to-Tasklet latency */ 602 + if (request_mgr_handle->axi_completed) { 603 + /* Only if actually reflects ISR-to-completion-handling latency, i.e., 604 + not duplicate as a result of interrupt after AXIM_MON_ERR clear, before end of loop */ 605 + END_CYCLE_COUNT_AT(drvdata->isr_exit_cycles, STAT_OP_TYPE_GENERIC, STAT_PHASE_1); 606 + } 607 + 608 + while (request_mgr_handle->axi_completed) { 609 + do { 610 + proc_completions(drvdata); 611 + /* At this point (after proc_completions()), request_mgr_handle->axi_completed is always 0. 612 + The following assignment was changed to = (previously was +=) to conform KW restrictions. 
*/ 613 + request_mgr_handle->axi_completed = CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE, 614 + CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET)); 615 + } while (request_mgr_handle->axi_completed > 0); 616 + 617 + /* To avoid the interrupt from firing as we unmask it, we clear it now */ 618 + CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK); 619 + 620 + /* Avoid race with above clear: Test completion counter once more */ 621 + request_mgr_handle->axi_completed += CC_REG_FLD_GET(CRY_KERNEL, AXIM_MON_COMP, VALUE, 622 + CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET)); 623 + }; 624 + 625 + } 626 + /* after verifing that there is nothing to do, Unmask AXI completion interrupt */ 627 + CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), 628 + CC_HAL_READ_REGISTER( 629 + CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq); 630 + END_CYCLE_COUNT(STAT_OP_TYPE_GENERIC, STAT_PHASE_2); 631 + } 632 + 633 + /* 634 + resume the queue configuration - no need to take the lock as this happens inside 635 + the spin lock protection 636 + */ 637 + #if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP) 638 + int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata) 639 + { 640 + struct ssi_request_mgr_handle * request_mgr_handle = drvdata->request_mgr_handle; 641 + 642 + spin_lock_bh(&request_mgr_handle->hw_lock); 643 + request_mgr_handle->is_runtime_suspended = false; 644 + spin_unlock_bh(&request_mgr_handle->hw_lock); 645 + 646 + return 0 ; 647 + } 648 + 649 + /* 650 + suspend the queue configuration. Since it is used for the runtime suspend 651 + only verify that the queue can be suspended. 
652 + */ 653 + int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata) 654 + { 655 + struct ssi_request_mgr_handle * request_mgr_handle = 656 + drvdata->request_mgr_handle; 657 + 658 + /* lock the send_request */ 659 + spin_lock_bh(&request_mgr_handle->hw_lock); 660 + if (request_mgr_handle->req_queue_head != 661 + request_mgr_handle->req_queue_tail) { 662 + spin_unlock_bh(&request_mgr_handle->hw_lock); 663 + return -EBUSY; 664 + } 665 + request_mgr_handle->is_runtime_suspended = true; 666 + spin_unlock_bh(&request_mgr_handle->hw_lock); 667 + 668 + return 0; 669 + } 670 + 671 + bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata) 672 + { 673 + struct ssi_request_mgr_handle * request_mgr_handle = 674 + drvdata->request_mgr_handle; 675 + 676 + return request_mgr_handle->is_runtime_suspended; 677 + } 678 + 679 + #endif 680 +
+60
drivers/staging/ccree/ssi_request_mgr.h
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + /* \file request_mgr.h 18 + Request Manager 19 + */ 20 + 21 + #ifndef __REQUEST_MGR_H__ 22 + #define __REQUEST_MGR_H__ 23 + 24 + #include "cc_hw_queue_defs.h" 25 + 26 + int request_mgr_init(struct ssi_drvdata *drvdata); 27 + 28 + /*! 29 + * Enqueue caller request to crypto hardware. 30 + * 31 + * \param drvdata 32 + * \param ssi_req The request to enqueue 33 + * \param desc The crypto sequence 34 + * \param len The crypto sequence length 35 + * \param is_dout If "true": completion is handled by the caller 36 + * If "false": this function adds a dummy descriptor completion 37 + * and waits upon completion signal. 
38 + * 39 + * \return int Returns -EINPROGRESS if "is_dout=ture"; "0" if "is_dout=false" 40 + */ 41 + int send_request( 42 + struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req, 43 + HwDesc_s *desc, unsigned int len, bool is_dout); 44 + 45 + int send_request_init( 46 + struct ssi_drvdata *drvdata, HwDesc_s *desc, unsigned int len); 47 + 48 + void complete_request(struct ssi_drvdata *drvdata); 49 + 50 + void request_mgr_fini(struct ssi_drvdata *drvdata); 51 + 52 + #if defined (CONFIG_PM_RUNTIME) || defined (CONFIG_PM_SLEEP) 53 + int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata); 54 + 55 + int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata); 56 + 57 + bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata); 58 + #endif 59 + 60 + #endif /*__REQUEST_MGR_H__*/
+138
drivers/staging/ccree/ssi_sram_mgr.c
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + #include "ssi_driver.h" 18 + #include "ssi_sram_mgr.h" 19 + 20 + 21 + /** 22 + * struct ssi_sram_mgr_ctx -Internal RAM context manager 23 + * @sram_free_offset: the offset to the non-allocated area 24 + */ 25 + struct ssi_sram_mgr_ctx { 26 + ssi_sram_addr_t sram_free_offset; 27 + }; 28 + 29 + 30 + /** 31 + * ssi_sram_mgr_fini() - Cleanup SRAM pool. 32 + * 33 + * @drvdata: Associated device driver context 34 + */ 35 + void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata) 36 + { 37 + struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle; 38 + 39 + /* Free "this" context */ 40 + if (smgr_ctx != NULL) { 41 + memset(smgr_ctx, 0, sizeof(struct ssi_sram_mgr_ctx)); 42 + kfree(smgr_ctx); 43 + } 44 + } 45 + 46 + /** 47 + * ssi_sram_mgr_init() - Initializes SRAM pool. 48 + * The pool starts right at the beginning of SRAM. 49 + * Returns zero for success, negative value otherwise. 
50 + * 51 + * @drvdata: Associated device driver context 52 + */ 53 + int ssi_sram_mgr_init(struct ssi_drvdata *drvdata) 54 + { 55 + struct ssi_sram_mgr_ctx *smgr_ctx; 56 + int rc; 57 + 58 + /* Allocate "this" context */ 59 + drvdata->sram_mgr_handle = kzalloc( 60 + sizeof(struct ssi_sram_mgr_ctx), GFP_KERNEL); 61 + if (!drvdata->sram_mgr_handle) { 62 + SSI_LOG_ERR("Not enough memory to allocate SRAM_MGR ctx (%zu)\n", 63 + sizeof(struct ssi_sram_mgr_ctx)); 64 + rc = -ENOMEM; 65 + goto out; 66 + } 67 + smgr_ctx = drvdata->sram_mgr_handle; 68 + 69 + /* Pool starts at start of SRAM */ 70 + smgr_ctx->sram_free_offset = 0; 71 + 72 + return 0; 73 + 74 + out: 75 + ssi_sram_mgr_fini(drvdata); 76 + return rc; 77 + } 78 + 79 + /*! 80 + * Allocated buffer from SRAM pool. 81 + * Note: Caller is responsible to free the LAST allocated buffer. 82 + * This function does not taking care of any fragmentation may occur 83 + * by the order of calls to alloc/free. 84 + * 85 + * \param drvdata 86 + * \param size The requested bytes to allocate 87 + */ 88 + ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size) 89 + { 90 + struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle; 91 + ssi_sram_addr_t p; 92 + 93 + if (unlikely((size & 0x3) != 0)) { 94 + SSI_LOG_ERR("Requested buffer size (%u) is not multiple of 4", 95 + size); 96 + return NULL_SRAM_ADDR; 97 + } 98 + if (unlikely(size > (SSI_CC_SRAM_SIZE - smgr_ctx->sram_free_offset))) { 99 + SSI_LOG_ERR("Not enough space to allocate %u B (at offset %llu)\n", 100 + size, smgr_ctx->sram_free_offset); 101 + return NULL_SRAM_ADDR; 102 + } 103 + 104 + p = smgr_ctx->sram_free_offset; 105 + smgr_ctx->sram_free_offset += size; 106 + SSI_LOG_DEBUG("Allocated %u B @ %u\n", size, (unsigned int)p); 107 + return p; 108 + } 109 + 110 + /** 111 + * ssi_sram_mgr_const2sram_desc() - Create const descriptors sequence to 112 + * set values in given array into SRAM. 113 + * Note: each const value can't exceed word size. 
114 + * 115 + * @src: A pointer to array of words to set as consts. 116 + * @dst: The target SRAM buffer to set into 117 + * @nelements: The number of words in "src" array 118 + * @seq: A pointer to the given IN/OUT descriptor sequence 119 + * @seq_len: A pointer to the given IN/OUT sequence length 120 + */ 121 + void ssi_sram_mgr_const2sram_desc( 122 + const uint32_t *src, ssi_sram_addr_t dst, 123 + unsigned int nelement, 124 + HwDesc_s *seq, unsigned int *seq_len) 125 + { 126 + uint32_t i; 127 + unsigned int idx = *seq_len; 128 + 129 + for (i = 0; i < nelement; i++, idx++) { 130 + HW_DESC_INIT(&seq[idx]); 131 + HW_DESC_SET_DIN_CONST(&seq[idx], src[i], sizeof(uint32_t)); 132 + HW_DESC_SET_DOUT_SRAM(&seq[idx], dst + (i * sizeof(uint32_t)), sizeof(uint32_t)); 133 + HW_DESC_SET_FLOW_MODE(&seq[idx], BYPASS); 134 + } 135 + 136 + *seq_len = idx; 137 + } 138 +
+80
drivers/staging/ccree/ssi_sram_mgr.h
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + #ifndef __SSI_SRAM_MGR_H__ 18 + #define __SSI_SRAM_MGR_H__ 19 + 20 + 21 + #ifndef SSI_CC_SRAM_SIZE 22 + #define SSI_CC_SRAM_SIZE 4096 23 + #endif 24 + 25 + struct ssi_drvdata; 26 + 27 + /** 28 + * Address (offset) within CC internal SRAM 29 + */ 30 + 31 + typedef uint64_t ssi_sram_addr_t; 32 + 33 + #define NULL_SRAM_ADDR ((ssi_sram_addr_t)-1) 34 + 35 + /*! 36 + * Initializes SRAM pool. 37 + * The first X bytes of SRAM are reserved for ROM usage, hence, pool 38 + * starts right after X bytes. 39 + * 40 + * \param drvdata 41 + * 42 + * \return int Zero for success, negative value otherwise. 43 + */ 44 + int ssi_sram_mgr_init(struct ssi_drvdata *drvdata); 45 + 46 + /*! 47 + * Uninits SRAM pool. 48 + * 49 + * \param drvdata 50 + */ 51 + void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata); 52 + 53 + /*! 54 + * Allocated buffer from SRAM pool. 55 + * Note: Caller is responsible to free the LAST allocated buffer. 56 + * This function does not taking care of any fragmentation may occur 57 + * by the order of calls to alloc/free. 
58 + * 59 + * \param drvdata 60 + * \param size The requested bytes to allocate 61 + */ 62 + ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, uint32_t size); 63 + 64 + /** 65 + * ssi_sram_mgr_const2sram_desc() - Create const descriptors sequence to 66 + * set values in given array into SRAM. 67 + * Note: each const value can't exceed word size. 68 + * 69 + * @src: A pointer to array of words to set as consts. 70 + * @dst: The target SRAM buffer to set into 71 + * @nelements: The number of words in "src" array 72 + * @seq: A pointer to the given IN/OUT descriptor sequence 73 + * @seq_len: A pointer to the given IN/OUT sequence length 74 + */ 75 + void ssi_sram_mgr_const2sram_desc( 76 + const uint32_t *src, ssi_sram_addr_t dst, 77 + unsigned int nelement, 78 + HwDesc_s *seq, unsigned int *seq_len); 79 + 80 + #endif /*__SSI_SRAM_MGR_H__*/
+440
drivers/staging/ccree/ssi_sysfs.c
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + #include <linux/kernel.h> 18 + #include "ssi_config.h" 19 + #include "ssi_driver.h" 20 + #include "cc_crypto_ctx.h" 21 + #include "ssi_sysfs.h" 22 + 23 + #ifdef ENABLE_CC_SYSFS 24 + 25 + static struct ssi_drvdata *sys_get_drvdata(void); 26 + 27 + #ifdef CC_CYCLE_COUNT 28 + 29 + #include <asm/timex.h> 30 + 31 + struct stat_item { 32 + unsigned int min; 33 + unsigned int max; 34 + cycles_t sum; 35 + unsigned int count; 36 + }; 37 + 38 + struct stat_name { 39 + const char *op_type_name; 40 + const char *stat_phase_name[MAX_STAT_PHASES]; 41 + }; 42 + 43 + static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] = 44 + { 45 + { 46 + /* STAT_OP_TYPE_NULL */ 47 + .op_type_name = "NULL", 48 + .stat_phase_name = {NULL}, 49 + }, 50 + { 51 + .op_type_name = "Encode", 52 + .stat_phase_name[STAT_PHASE_0] = "Init and sanity checks", 53 + .stat_phase_name[STAT_PHASE_1] = "Map buffers", 54 + .stat_phase_name[STAT_PHASE_2] = "Create sequence", 55 + .stat_phase_name[STAT_PHASE_3] = "Send Request", 56 + .stat_phase_name[STAT_PHASE_4] = "HW-Q push", 57 + .stat_phase_name[STAT_PHASE_5] = "Sequence completion", 58 + .stat_phase_name[STAT_PHASE_6] = "HW cycles", 59 + }, 60 + { .op_type_name = "Decode", 61 + .stat_phase_name[STAT_PHASE_0] = "Init and sanity checks", 62 + 
.stat_phase_name[STAT_PHASE_1] = "Map buffers", 63 + .stat_phase_name[STAT_PHASE_2] = "Create sequence", 64 + .stat_phase_name[STAT_PHASE_3] = "Send Request", 65 + .stat_phase_name[STAT_PHASE_4] = "HW-Q push", 66 + .stat_phase_name[STAT_PHASE_5] = "Sequence completion", 67 + .stat_phase_name[STAT_PHASE_6] = "HW cycles", 68 + }, 69 + { .op_type_name = "Setkey", 70 + .stat_phase_name[STAT_PHASE_0] = "Init and sanity checks", 71 + .stat_phase_name[STAT_PHASE_1] = "Copy key to ctx", 72 + .stat_phase_name[STAT_PHASE_2] = "Create sequence", 73 + .stat_phase_name[STAT_PHASE_3] = "Send Request", 74 + .stat_phase_name[STAT_PHASE_4] = "HW-Q push", 75 + .stat_phase_name[STAT_PHASE_5] = "Sequence completion", 76 + .stat_phase_name[STAT_PHASE_6] = "HW cycles", 77 + }, 78 + { 79 + .op_type_name = "Generic", 80 + .stat_phase_name[STAT_PHASE_0] = "Interrupt", 81 + .stat_phase_name[STAT_PHASE_1] = "ISR-to-Tasklet", 82 + .stat_phase_name[STAT_PHASE_2] = "Tasklet start-to-end", 83 + .stat_phase_name[STAT_PHASE_3] = "Tasklet:user_cb()", 84 + .stat_phase_name[STAT_PHASE_4] = "Tasklet:dx_X_complete() - w/o X_complete()", 85 + .stat_phase_name[STAT_PHASE_5] = "", 86 + .stat_phase_name[STAT_PHASE_6] = "HW cycles", 87 + } 88 + }; 89 + 90 + /* 91 + * Structure used to create a directory 92 + * and its attributes in sysfs. 
93 + */ 94 + struct sys_dir { 95 + struct kobject *sys_dir_kobj; 96 + struct attribute_group sys_dir_attr_group; 97 + struct attribute **sys_dir_attr_list; 98 + uint32_t num_of_attrs; 99 + struct ssi_drvdata *drvdata; /* Associated driver context */ 100 + }; 101 + 102 + /* top level directory structures */ 103 + struct sys_dir sys_top_dir; 104 + 105 + static DEFINE_SPINLOCK(stat_lock); 106 + 107 + /* List of DBs */ 108 + static struct stat_item stat_host_db[MAX_STAT_OP_TYPES][MAX_STAT_PHASES]; 109 + static struct stat_item stat_cc_db[MAX_STAT_OP_TYPES][MAX_STAT_PHASES]; 110 + 111 + 112 + static void init_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES]) 113 + { 114 + unsigned int i, j; 115 + 116 + /* Clear db */ 117 + for (i=0; i<MAX_STAT_OP_TYPES; i++) { 118 + for (j=0; j<MAX_STAT_PHASES; j++) { 119 + item[i][j].min = 0xFFFFFFFF; 120 + item[i][j].max = 0; 121 + item[i][j].sum = 0; 122 + item[i][j].count = 0; 123 + } 124 + } 125 + } 126 + 127 + static void update_db(struct stat_item *item, unsigned int result) 128 + { 129 + item->count++; 130 + item->sum += result; 131 + if (result < item->min) 132 + item->min = result; 133 + if (result > item->max ) 134 + item->max = result; 135 + } 136 + 137 + static void display_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES]) 138 + { 139 + unsigned int i, j; 140 + uint64_t avg; 141 + 142 + for (i=STAT_OP_TYPE_ENCODE; i<MAX_STAT_OP_TYPES; i++) { 143 + for (j=0; j<MAX_STAT_PHASES; j++) { 144 + if (item[i][j].count > 0) { 145 + avg = (uint64_t)item[i][j].sum; 146 + do_div(avg, item[i][j].count); 147 + SSI_LOG_ERR("%s, %s: min=%d avg=%d max=%d sum=%lld count=%d\n", 148 + stat_name_db[i].op_type_name, stat_name_db[i].stat_phase_name[j], 149 + item[i][j].min, (int)avg, item[i][j].max, (long long)item[i][j].sum, item[i][j].count); 150 + } 151 + } 152 + } 153 + } 154 + 155 + 156 + /************************************** 157 + * Attributes show functions section * 158 + **************************************/ 
159 + 160 + static ssize_t ssi_sys_stats_host_db_clear(struct kobject *kobj, 161 + struct kobj_attribute *attr, const char *buf, size_t count) 162 + { 163 + init_db(stat_host_db); 164 + return count; 165 + } 166 + 167 + static ssize_t ssi_sys_stats_cc_db_clear(struct kobject *kobj, 168 + struct kobj_attribute *attr, const char *buf, size_t count) 169 + { 170 + init_db(stat_cc_db); 171 + return count; 172 + } 173 + 174 + static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj, 175 + struct kobj_attribute *attr, char *buf) 176 + { 177 + int i, j ; 178 + char line[512]; 179 + uint32_t min_cyc, max_cyc; 180 + uint64_t avg; 181 + ssize_t buf_len, tmp_len=0; 182 + 183 + buf_len = scnprintf(buf,PAGE_SIZE, 184 + "phase\t\t\t\t\t\t\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n"); 185 + if ( buf_len <0 )/* scnprintf shouldn't return negative value according to its implementation*/ 186 + return buf_len; 187 + for (i=STAT_OP_TYPE_ENCODE; i<MAX_STAT_OP_TYPES; i++) { 188 + for (j=0; j<MAX_STAT_PHASES-1; j++) { 189 + if (stat_host_db[i][j].count > 0) { 190 + avg = (uint64_t)stat_host_db[i][j].sum; 191 + do_div(avg, stat_host_db[i][j].count); 192 + min_cyc = stat_host_db[i][j].min; 193 + max_cyc = stat_host_db[i][j].max; 194 + } else { 195 + avg = min_cyc = max_cyc = 0; 196 + } 197 + tmp_len = scnprintf(line,512, 198 + "%s::%s\t\t\t\t\t%6u\t%6u\t%6u\t%7u\n", 199 + stat_name_db[i].op_type_name, 200 + stat_name_db[i].stat_phase_name[j], 201 + min_cyc, (unsigned int)avg, max_cyc, 202 + stat_host_db[i][j].count); 203 + if ( tmp_len <0 )/* scnprintf shouldn't return negative value according to its implementation*/ 204 + return buf_len; 205 + if ( buf_len + tmp_len >= PAGE_SIZE) 206 + return buf_len; 207 + buf_len += tmp_len; 208 + strncat(buf, line,512); 209 + } 210 + } 211 + return buf_len; 212 + } 213 + 214 + static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj, 215 + struct kobj_attribute *attr, char *buf) 216 + { 217 + int i; 218 + char line[256]; 219 + uint32_t min_cyc, 
max_cyc; 220 + uint64_t avg; 221 + ssize_t buf_len,tmp_len=0; 222 + 223 + buf_len = scnprintf(buf,PAGE_SIZE, 224 + "phase\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n"); 225 + if ( buf_len <0 )/* scnprintf shouldn't return negative value according to its implementation*/ 226 + return buf_len; 227 + for (i=STAT_OP_TYPE_ENCODE; i<MAX_STAT_OP_TYPES; i++) { 228 + if (stat_cc_db[i][STAT_PHASE_6].count > 0) { 229 + avg = (uint64_t)stat_cc_db[i][STAT_PHASE_6].sum; 230 + do_div(avg, stat_cc_db[i][STAT_PHASE_6].count); 231 + min_cyc = stat_cc_db[i][STAT_PHASE_6].min; 232 + max_cyc = stat_cc_db[i][STAT_PHASE_6].max; 233 + } else { 234 + avg = min_cyc = max_cyc = 0; 235 + } 236 + tmp_len = scnprintf(line,256, 237 + "%s\t%6u\t%6u\t%6u\t%7u\n", 238 + stat_name_db[i].op_type_name, 239 + min_cyc, 240 + (unsigned int)avg, 241 + max_cyc, 242 + stat_cc_db[i][STAT_PHASE_6].count); 243 + 244 + if ( tmp_len < 0 )/* scnprintf shouldn't return negative value according to its implementation*/ 245 + return buf_len; 246 + 247 + if ( buf_len + tmp_len >= PAGE_SIZE) 248 + return buf_len; 249 + buf_len += tmp_len; 250 + strncat(buf, line,256); 251 + } 252 + return buf_len; 253 + } 254 + 255 + void update_host_stat(unsigned int op_type, unsigned int phase, cycles_t result) 256 + { 257 + unsigned long flags; 258 + 259 + spin_lock_irqsave(&stat_lock, flags); 260 + update_db(&(stat_host_db[op_type][phase]), (unsigned int)result); 261 + spin_unlock_irqrestore(&stat_lock, flags); 262 + } 263 + 264 + void update_cc_stat( 265 + unsigned int op_type, 266 + unsigned int phase, 267 + unsigned int elapsed_cycles) 268 + { 269 + update_db(&(stat_cc_db[op_type][phase]), elapsed_cycles); 270 + } 271 + 272 + void display_all_stat_db(void) 273 + { 274 + SSI_LOG_ERR("\n======= CYCLE COUNT STATS =======\n"); 275 + display_db(stat_host_db); 276 + SSI_LOG_ERR("\n======= CC HW CYCLE COUNT STATS =======\n"); 277 + display_db(stat_cc_db); 278 + } 279 + #endif /*CC_CYCLE_COUNT*/ 280 + 281 + 282 + 283 + static ssize_t 
ssi_sys_regdump_show(struct kobject *kobj, 284 + struct kobj_attribute *attr, char *buf) 285 + { 286 + struct ssi_drvdata *drvdata = sys_get_drvdata(); 287 + uint32_t register_value; 288 + void __iomem* cc_base = drvdata->cc_base; 289 + int offset = 0; 290 + 291 + register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE)); 292 + offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "HOST_SIGNATURE ", DX_HOST_SIGNATURE_REG_OFFSET, register_value); 293 + register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR)); 294 + offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "HOST_IRR ", DX_HOST_IRR_REG_OFFSET, register_value); 295 + register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN)); 296 + offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "HOST_POWER_DOWN_EN ", DX_HOST_POWER_DOWN_EN_REG_OFFSET, register_value); 297 + register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR)); 298 + offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "AXIM_MON_ERR ", DX_AXIM_MON_ERR_REG_OFFSET, register_value); 299 + register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_CONTENT)); 300 + offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X \n", "DSCRPTR_QUEUE_CONTENT", DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET, register_value); 301 + return offset; 302 + } 303 + 304 + static ssize_t ssi_sys_help_show(struct kobject *kobj, 305 + struct kobj_attribute *attr, char *buf) 306 + { 307 + char* help_str[]={ 308 + "cat reg_dump ", "Print several of CC register values", 309 + #if defined CC_CYCLE_COUNT 310 + "cat stats_host ", "Print host statistics", 311 + "echo <number> > stats_host", "Clear host statistics database", 312 + "cat stats_cc ", "Print CC statistics", 313 + "echo <number> > stats_cc ", "Clear CC statistics database", 314 + #endif 315 + }; 316 
+ int i=0, offset = 0; 317 + 318 + offset += scnprintf(buf + offset, PAGE_SIZE - offset, "Usage:\n"); 319 + for ( i = 0; i < (sizeof(help_str)/sizeof(help_str[0])); i+=2) { 320 + offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s\t\t%s\n", help_str[i], help_str[i+1]); 321 + } 322 + return offset; 323 + } 324 + 325 + /******************************************************** 326 + * SYSFS objects * 327 + ********************************************************/ 328 + /* 329 + * Structure used to create a directory 330 + * and its attributes in sysfs. 331 + */ 332 + struct sys_dir { 333 + struct kobject *sys_dir_kobj; 334 + struct attribute_group sys_dir_attr_group; 335 + struct attribute **sys_dir_attr_list; 336 + uint32_t num_of_attrs; 337 + struct ssi_drvdata *drvdata; /* Associated driver context */ 338 + }; 339 + 340 + /* top level directory structures */ 341 + static struct sys_dir sys_top_dir; 342 + 343 + /* TOP LEVEL ATTRIBUTES */ 344 + static struct kobj_attribute ssi_sys_top_level_attrs[] = { 345 + __ATTR(dump_regs, 0444, ssi_sys_regdump_show, NULL), 346 + __ATTR(help, 0444, ssi_sys_help_show, NULL), 347 + #if defined CC_CYCLE_COUNT 348 + __ATTR(stats_host, 0664, ssi_sys_stat_host_db_show, ssi_sys_stats_host_db_clear), 349 + __ATTR(stats_cc, 0664, ssi_sys_stat_cc_db_show, ssi_sys_stats_cc_db_clear), 350 + #endif 351 + 352 + }; 353 + 354 + static struct ssi_drvdata *sys_get_drvdata(void) 355 + { 356 + /* TODO: supporting multiple SeP devices would require avoiding 357 + * global "top_dir" and finding associated "top_dir" by traversing 358 + * up the tree to the kobject which matches one of the top_dir's */ 359 + return sys_top_dir.drvdata; 360 + } 361 + 362 + static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata, 363 + struct kobject *parent_dir_kobj, const char *dir_name, 364 + struct kobj_attribute *attrs, uint32_t num_of_attrs) 365 + { 366 + int i; 367 + 368 + memset(sys_dir, 0, sizeof(struct sys_dir)); 369 + 370 + 
sys_dir->drvdata = drvdata; 371 + 372 + /* initialize directory kobject */ 373 + sys_dir->sys_dir_kobj = 374 + kobject_create_and_add(dir_name, parent_dir_kobj); 375 + 376 + if (!(sys_dir->sys_dir_kobj)) 377 + return -ENOMEM; 378 + /* allocate memory for directory's attributes list */ 379 + sys_dir->sys_dir_attr_list = 380 + kzalloc(sizeof(struct attribute *) * (num_of_attrs + 1), 381 + GFP_KERNEL); 382 + 383 + if (!(sys_dir->sys_dir_attr_list)) { 384 + kobject_put(sys_dir->sys_dir_kobj); 385 + return -ENOMEM; 386 + } 387 + 388 + sys_dir->num_of_attrs = num_of_attrs; 389 + 390 + /* initialize attributes list */ 391 + for (i = 0; i < num_of_attrs; ++i) 392 + sys_dir->sys_dir_attr_list[i] = &(attrs[i].attr); 393 + 394 + /* last list entry should be NULL */ 395 + sys_dir->sys_dir_attr_list[num_of_attrs] = NULL; 396 + 397 + sys_dir->sys_dir_attr_group.attrs = sys_dir->sys_dir_attr_list; 398 + 399 + return sysfs_create_group(sys_dir->sys_dir_kobj, 400 + &(sys_dir->sys_dir_attr_group)); 401 + } 402 + 403 + static void sys_free_dir(struct sys_dir *sys_dir) 404 + { 405 + if (!sys_dir) 406 + return; 407 + 408 + kfree(sys_dir->sys_dir_attr_list); 409 + 410 + if (sys_dir->sys_dir_kobj != NULL) 411 + kobject_put(sys_dir->sys_dir_kobj); 412 + } 413 + 414 + int ssi_sysfs_init(struct kobject *sys_dev_obj, struct ssi_drvdata *drvdata) 415 + { 416 + int retval; 417 + 418 + #if defined CC_CYCLE_COUNT 419 + /* Init. statistics */ 420 + init_db(stat_host_db); 421 + init_db(stat_cc_db); 422 + #endif 423 + 424 + SSI_LOG_ERR("setup sysfs under %s\n", sys_dev_obj->name); 425 + 426 + /* Initialize top directory */ 427 + retval = sys_init_dir(&sys_top_dir, drvdata, sys_dev_obj, 428 + "cc_info", ssi_sys_top_level_attrs, 429 + sizeof(ssi_sys_top_level_attrs) / 430 + sizeof(struct kobj_attribute)); 431 + return retval; 432 + } 433 + 434 + void ssi_sysfs_fini(void) 435 + { 436 + sys_free_dir(&sys_top_dir); 437 + } 438 + 439 + #endif /*ENABLE_CC_SYSFS*/ 440 +
+54
drivers/staging/ccree/ssi_sysfs.h
··· 1 + /* 2 + * Copyright (C) 2012-2017 ARM Limited or its affiliates. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + 17 + /* \file ssi_sysfs.h 18 + ARM CryptoCell sysfs APIs 19 + */ 20 + 21 + #ifndef __SSI_SYSFS_H__ 22 + #define __SSI_SYSFS_H__ 23 + 24 + #include <asm/timex.h> 25 + 26 + /* forward declaration */ 27 + struct ssi_drvdata; 28 + 29 + enum stat_phase { 30 + STAT_PHASE_0 = 0, 31 + STAT_PHASE_1, 32 + STAT_PHASE_2, 33 + STAT_PHASE_3, 34 + STAT_PHASE_4, 35 + STAT_PHASE_5, 36 + STAT_PHASE_6, 37 + MAX_STAT_PHASES, 38 + }; 39 + enum stat_op { 40 + STAT_OP_TYPE_NULL = 0, 41 + STAT_OP_TYPE_ENCODE, 42 + STAT_OP_TYPE_DECODE, 43 + STAT_OP_TYPE_SETKEY, 44 + STAT_OP_TYPE_GENERIC, 45 + MAX_STAT_OP_TYPES, 46 + }; 47 + 48 + int ssi_sysfs_init(struct kobject *sys_dev_obj, struct ssi_drvdata *drvdata); 49 + void ssi_sysfs_fini(void); 50 + void update_host_stat(unsigned int op_type, unsigned int phase, cycles_t result); 51 + void update_cc_stat(unsigned int op_type, unsigned int phase, unsigned int elapsed_cycles); 52 + void display_all_stat_db(void); 53 + 54 + #endif /*__SSI_SYSFS_H__*/