Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: qat - add support for 420xx devices

Add support for 420xx devices by introducing a new device driver that
supports such devices, together with updates to the firmware loader and
to the reported device capabilities.

Compared to 4xxx devices, 420xx devices have more acceleration engines
(16 service engines and 1 admin) and support the wireless cipher
algorithms ZUC and Snow 3G.

Signed-off-by: Jie Wang <jie.wang@intel.com>
Co-developed-by: Dong Xie <dong.xie@intel.com>
Signed-off-by: Dong Xie <dong.xie@intel.com>
Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Jie Wang and committed by Herbert Xu
fcf60f4b 98a4f29f

+849 -5
+11
drivers/crypto/intel/qat/Kconfig
··· 59 59 To compile this as a module, choose M here: the module 60 60 will be called qat_4xxx. 61 61 62 + config CRYPTO_DEV_QAT_420XX 63 + tristate "Support for Intel(R) QAT_420XX" 64 + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) 65 + select CRYPTO_DEV_QAT 66 + help 67 + Support for Intel(R) QuickAssist Technology QAT_420xx 68 + for accelerating crypto and compression workloads. 69 + 70 + To compile this as a module, choose M here: the module 71 + will be called qat_420xx. 72 + 62 73 config CRYPTO_DEV_QAT_DH895xCCVF 63 74 tristate "Support for Intel(R) DH895xCC Virtual Function" 64 75 depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+1
drivers/crypto/intel/qat/Makefile
··· 4 4 obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/ 5 5 obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/ 6 6 obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/ 7 + obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx/ 7 8 obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/ 8 9 obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/ 9 10 obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/
+4
drivers/crypto/intel/qat/qat_420xx/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + ccflags-y := -I $(srctree)/$(src)/../qat_common 3 + obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o 4 + qat_420xx-objs := adf_drv.o adf_420xx_hw_data.o
+552
drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* Copyright(c) 2023 Intel Corporation */ 3 + #include <linux/iopoll.h> 4 + #include <adf_accel_devices.h> 5 + #include <adf_admin.h> 6 + #include <adf_cfg.h> 7 + #include <adf_cfg_services.h> 8 + #include <adf_clock.h> 9 + #include <adf_common_drv.h> 10 + #include <adf_fw_config.h> 11 + #include <adf_gen4_config.h> 12 + #include <adf_gen4_dc.h> 13 + #include <adf_gen4_hw_data.h> 14 + #include <adf_gen4_pfvf.h> 15 + #include <adf_gen4_pm.h> 16 + #include <adf_gen4_ras.h> 17 + #include <adf_gen4_timer.h> 18 + #include "adf_420xx_hw_data.h" 19 + #include "icp_qat_hw.h" 20 + 21 + #define ADF_AE_GROUP_0 GENMASK(3, 0) 22 + #define ADF_AE_GROUP_1 GENMASK(7, 4) 23 + #define ADF_AE_GROUP_2 GENMASK(11, 8) 24 + #define ADF_AE_GROUP_3 GENMASK(15, 12) 25 + #define ADF_AE_GROUP_4 BIT(16) 26 + 27 + static const char * const adf_420xx_fw_objs[] = { 28 + [ADF_FW_SYM_OBJ] = ADF_420XX_SYM_OBJ, 29 + [ADF_FW_ASYM_OBJ] = ADF_420XX_ASYM_OBJ, 30 + [ADF_FW_DC_OBJ] = ADF_420XX_DC_OBJ, 31 + [ADF_FW_ADMIN_OBJ] = ADF_420XX_ADMIN_OBJ, 32 + }; 33 + 34 + static const struct adf_fw_config adf_fw_cy_config[] = { 35 + {ADF_AE_GROUP_3, ADF_FW_SYM_OBJ}, 36 + {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ}, 37 + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, 38 + {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ}, 39 + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, 40 + }; 41 + 42 + static const struct adf_fw_config adf_fw_dc_config[] = { 43 + {ADF_AE_GROUP_1, ADF_FW_DC_OBJ}, 44 + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, 45 + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, 46 + }; 47 + 48 + static const struct adf_fw_config adf_fw_sym_config[] = { 49 + {ADF_AE_GROUP_3, ADF_FW_SYM_OBJ}, 50 + {ADF_AE_GROUP_2, ADF_FW_SYM_OBJ}, 51 + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, 52 + {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ}, 53 + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, 54 + }; 55 + 56 + static const struct adf_fw_config adf_fw_asym_config[] = { 57 + {ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ}, 58 + {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ}, 59 + {ADF_AE_GROUP_1, 
ADF_FW_ASYM_OBJ}, 60 + {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ}, 61 + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, 62 + }; 63 + 64 + static const struct adf_fw_config adf_fw_asym_dc_config[] = { 65 + {ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ}, 66 + {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ}, 67 + {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ}, 68 + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, 69 + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, 70 + }; 71 + 72 + static const struct adf_fw_config adf_fw_sym_dc_config[] = { 73 + {ADF_AE_GROUP_2, ADF_FW_SYM_OBJ}, 74 + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, 75 + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, 76 + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, 77 + }; 78 + 79 + static const struct adf_fw_config adf_fw_dcc_config[] = { 80 + {ADF_AE_GROUP_1, ADF_FW_DC_OBJ}, 81 + {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ}, 82 + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, 83 + }; 84 + 85 + /* Worker thread to service arbiter mappings */ 86 + static const u32 default_thrd_to_arb_map[ADF_420XX_MAX_ACCELENGINES] = { 87 + 0x00000055, 0x00000055, 0x00000055, 0x00000055, 88 + 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 89 + 0x00000055, 0x00000055, 0x00000055, 0x00000055, 90 + 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 91 + 0x0 92 + }; 93 + 94 + static const u32 thrd_to_arb_map_asym[ADF_420XX_MAX_ACCELENGINES] = { 95 + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, 96 + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, 97 + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, 98 + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, 99 + 0x0 100 + }; 101 + 102 + static const u32 thrd_to_arb_map_sym[ADF_420XX_MAX_ACCELENGINES] = { 103 + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 104 + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 105 + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 106 + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 107 + 0x0 108 + }; 109 + 110 + static const u32 thrd_to_arb_map_asym_dc[ADF_420XX_MAX_ACCELENGINES] = { 111 + 0x00000055, 0x00000055, 0x00000055, 0x00000055, 112 + 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, 
113 + 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, 114 + 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, 115 + 0x0 116 + }; 117 + 118 + static const u32 thrd_to_arb_map_sym_dc[ADF_420XX_MAX_ACCELENGINES] = { 119 + 0x00000055, 0x00000055, 0x00000055, 0x00000055, 120 + 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 121 + 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 122 + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 123 + 0x0 124 + }; 125 + 126 + static const u32 thrd_to_arb_map_dc[ADF_420XX_MAX_ACCELENGINES] = { 127 + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, 128 + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, 129 + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 130 + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 131 + 0x0 132 + }; 133 + 134 + static const u32 thrd_to_arb_map_dcc[ADF_420XX_MAX_ACCELENGINES] = { 135 + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 136 + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 137 + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 138 + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 139 + 0x0 140 + }; 141 + 142 + static struct adf_hw_device_class adf_420xx_class = { 143 + .name = ADF_420XX_DEVICE_NAME, 144 + .type = DEV_420XX, 145 + .instances = 0, 146 + }; 147 + 148 + static u32 get_ae_mask(struct adf_hw_device_data *self) 149 + { 150 + u32 me_disable = self->fuses; 151 + 152 + return ~me_disable & ADF_420XX_ACCELENGINES_MASK; 153 + } 154 + 155 + static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) 156 + { 157 + switch (adf_get_service_enabled(accel_dev)) { 158 + case SVC_CY: 159 + case SVC_CY2: 160 + return ARRAY_SIZE(adf_fw_cy_config); 161 + case SVC_DC: 162 + return ARRAY_SIZE(adf_fw_dc_config); 163 + case SVC_DCC: 164 + return ARRAY_SIZE(adf_fw_dcc_config); 165 + case SVC_SYM: 166 + return ARRAY_SIZE(adf_fw_sym_config); 167 + case SVC_ASYM: 168 + return ARRAY_SIZE(adf_fw_asym_config); 169 + case SVC_ASYM_DC: 170 + case SVC_DC_ASYM: 171 + return ARRAY_SIZE(adf_fw_asym_dc_config); 172 + case 
SVC_SYM_DC: 173 + case SVC_DC_SYM: 174 + return ARRAY_SIZE(adf_fw_sym_dc_config); 175 + default: 176 + return 0; 177 + } 178 + } 179 + 180 + static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev) 181 + { 182 + switch (adf_get_service_enabled(accel_dev)) { 183 + case SVC_CY: 184 + case SVC_CY2: 185 + return adf_fw_cy_config; 186 + case SVC_DC: 187 + return adf_fw_dc_config; 188 + case SVC_DCC: 189 + return adf_fw_dcc_config; 190 + case SVC_SYM: 191 + return adf_fw_sym_config; 192 + case SVC_ASYM: 193 + return adf_fw_asym_config; 194 + case SVC_ASYM_DC: 195 + case SVC_DC_ASYM: 196 + return adf_fw_asym_dc_config; 197 + case SVC_SYM_DC: 198 + case SVC_DC_SYM: 199 + return adf_fw_sym_dc_config; 200 + default: 201 + return NULL; 202 + } 203 + } 204 + 205 + static void update_ae_mask(struct adf_accel_dev *accel_dev) 206 + { 207 + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); 208 + const struct adf_fw_config *fw_config; 209 + u32 config_ae_mask = 0; 210 + u32 ae_mask, num_objs; 211 + int i; 212 + 213 + ae_mask = get_ae_mask(hw_data); 214 + 215 + /* Modify the AE mask based on the firmware configuration loaded */ 216 + fw_config = get_fw_config(accel_dev); 217 + num_objs = uof_get_num_objs(accel_dev); 218 + 219 + config_ae_mask |= ADF_420XX_ADMIN_AE_MASK; 220 + for (i = 0; i < num_objs; i++) 221 + config_ae_mask |= fw_config[i].ae_mask; 222 + 223 + hw_data->ae_mask = ae_mask & config_ae_mask; 224 + } 225 + 226 + static u32 get_accel_cap(struct adf_accel_dev *accel_dev) 227 + { 228 + u32 capabilities_sym, capabilities_asym, capabilities_dc; 229 + struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev; 230 + u32 capabilities_dcc; 231 + u32 fusectl1; 232 + 233 + /* As a side effect, update ae_mask based on configuration */ 234 + update_ae_mask(accel_dev); 235 + 236 + /* Read accelerator capabilities mask */ 237 + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1); 238 + 239 + capabilities_sym = 
ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | 240 + ICP_ACCEL_CAPABILITIES_CIPHER | 241 + ICP_ACCEL_CAPABILITIES_AUTHENTICATION | 242 + ICP_ACCEL_CAPABILITIES_SHA3 | 243 + ICP_ACCEL_CAPABILITIES_SHA3_EXT | 244 + ICP_ACCEL_CAPABILITIES_HKDF | 245 + ICP_ACCEL_CAPABILITIES_CHACHA_POLY | 246 + ICP_ACCEL_CAPABILITIES_AESGCM_SPC | 247 + ICP_ACCEL_CAPABILITIES_SM3 | 248 + ICP_ACCEL_CAPABILITIES_SM4 | 249 + ICP_ACCEL_CAPABILITIES_AES_V2 | 250 + ICP_ACCEL_CAPABILITIES_ZUC | 251 + ICP_ACCEL_CAPABILITIES_ZUC_256 | 252 + ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT | 253 + ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN; 254 + 255 + /* A set bit in fusectl1 means the feature is OFF in this SKU */ 256 + if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) { 257 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; 258 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF; 259 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; 260 + } 261 + 262 + if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) { 263 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY; 264 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC; 265 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2; 266 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; 267 + } 268 + 269 + if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) { 270 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; 271 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3; 272 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT; 273 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; 274 + } 275 + 276 + if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) { 277 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3; 278 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4; 279 + } 280 + 281 + if (fusectl1 & ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE) { 282 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC; 283 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256; 284 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT; 285 + } 286 + 287 + if 
(fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE) { 288 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC; 289 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256; 290 + } 291 + 292 + if (fusectl1 & ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE) 293 + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256; 294 + 295 + capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | 296 + ICP_ACCEL_CAPABILITIES_SM2 | 297 + ICP_ACCEL_CAPABILITIES_ECEDMONT; 298 + 299 + if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) { 300 + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; 301 + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2; 302 + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT; 303 + } 304 + 305 + capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION | 306 + ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION | 307 + ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | 308 + ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; 309 + 310 + if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) { 311 + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; 312 + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION; 313 + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION; 314 + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; 315 + } 316 + 317 + switch (adf_get_service_enabled(accel_dev)) { 318 + case SVC_CY: 319 + case SVC_CY2: 320 + return capabilities_sym | capabilities_asym; 321 + case SVC_DC: 322 + return capabilities_dc; 323 + case SVC_DCC: 324 + /* 325 + * Sym capabilities are available for chaining operations, 326 + * but sym crypto instances cannot be supported 327 + */ 328 + capabilities_dcc = capabilities_dc | capabilities_sym; 329 + capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; 330 + return capabilities_dcc; 331 + case SVC_SYM: 332 + return capabilities_sym; 333 + case SVC_ASYM: 334 + return capabilities_asym; 335 + case SVC_ASYM_DC: 336 + case SVC_DC_ASYM: 337 + return capabilities_asym | capabilities_dc; 338 + case SVC_SYM_DC: 339 + case SVC_DC_SYM: 340 
+ return capabilities_sym | capabilities_dc; 341 + default: 342 + return 0; 343 + } 344 + } 345 + 346 + static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) 347 + { 348 + switch (adf_get_service_enabled(accel_dev)) { 349 + case SVC_ASYM: 350 + return thrd_to_arb_map_asym; 351 + case SVC_SYM: 352 + return thrd_to_arb_map_sym; 353 + case SVC_DC: 354 + return thrd_to_arb_map_dc; 355 + case SVC_DCC: 356 + return thrd_to_arb_map_dcc; 357 + case SVC_ASYM_DC: 358 + case SVC_DC_ASYM: 359 + return thrd_to_arb_map_asym_dc; 360 + case SVC_DC_SYM: 361 + case SVC_SYM_DC: 362 + return thrd_to_arb_map_sym_dc; 363 + default: 364 + return default_thrd_to_arb_map; 365 + } 366 + } 367 + 368 + static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) 369 + { 370 + rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET; 371 + rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET; 372 + rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET; 373 + rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET; 374 + rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET; 375 + 376 + rl_data->pcie_scale_div = ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV; 377 + rl_data->pcie_scale_mul = ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL; 378 + rl_data->dcpr_correction = ADF_420XX_RL_DCPR_CORRECTION; 379 + rl_data->max_tp[ADF_SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM; 380 + rl_data->max_tp[ADF_SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM; 381 + rl_data->max_tp[ADF_SVC_DC] = ADF_420XX_RL_MAX_TP_DC; 382 + rl_data->scan_interval = ADF_420XX_RL_SCANS_PER_SEC; 383 + rl_data->scale_ref = ADF_420XX_RL_SLICE_REF; 384 + } 385 + 386 + enum adf_rp_groups { 387 + RP_GROUP_0 = 0, 388 + RP_GROUP_1, 389 + RP_GROUP_COUNT 390 + }; 391 + 392 + static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) 393 + { 394 + enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { }; 395 + const struct adf_fw_config *fw_config; 396 + u16 ring_to_svc_map; 397 + int i, j; 398 + 399 + fw_config = get_fw_config(accel_dev); 400 + if (!fw_config) 401 + 
return 0; 402 + 403 + for (i = 0; i < RP_GROUP_COUNT; i++) { 404 + switch (fw_config[i].ae_mask) { 405 + case ADF_AE_GROUP_0: 406 + j = RP_GROUP_0; 407 + break; 408 + case ADF_AE_GROUP_1: 409 + j = RP_GROUP_1; 410 + break; 411 + default: 412 + return 0; 413 + } 414 + 415 + switch (fw_config[i].obj) { 416 + case ADF_FW_SYM_OBJ: 417 + rps[j] = SYM; 418 + break; 419 + case ADF_FW_ASYM_OBJ: 420 + rps[j] = ASYM; 421 + break; 422 + case ADF_FW_DC_OBJ: 423 + rps[j] = COMP; 424 + break; 425 + default: 426 + rps[j] = 0; 427 + break; 428 + } 429 + } 430 + 431 + ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | 432 + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | 433 + rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | 434 + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; 435 + 436 + return ring_to_svc_map; 437 + } 438 + 439 + static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, 440 + const char * const fw_objs[], int num_objs) 441 + { 442 + const struct adf_fw_config *fw_config; 443 + int id; 444 + 445 + fw_config = get_fw_config(accel_dev); 446 + if (fw_config) 447 + id = fw_config[obj_num].obj; 448 + else 449 + id = -EINVAL; 450 + 451 + if (id < 0 || id > num_objs) 452 + return NULL; 453 + 454 + return fw_objs[id]; 455 + } 456 + 457 + static const char *uof_get_name_420xx(struct adf_accel_dev *accel_dev, u32 obj_num) 458 + { 459 + int num_fw_objs = ARRAY_SIZE(adf_420xx_fw_objs); 460 + 461 + return uof_get_name(accel_dev, obj_num, adf_420xx_fw_objs, num_fw_objs); 462 + } 463 + 464 + static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) 465 + { 466 + const struct adf_fw_config *fw_config; 467 + 468 + fw_config = get_fw_config(accel_dev); 469 + if (!fw_config) 470 + return 0; 471 + 472 + return fw_config[obj_num].ae_mask; 473 + } 474 + 475 + static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask) 476 + { 477 + dev_err_mask->cppagentcmdpar_mask = ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK; 478 
+ dev_err_mask->parerr_ath_cph_mask = ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK; 479 + dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK; 480 + dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK; 481 + dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK; 482 + dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK; 483 + } 484 + 485 + void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) 486 + { 487 + hw_data->dev_class = &adf_420xx_class; 488 + hw_data->instance_id = adf_420xx_class.instances++; 489 + hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS; 490 + hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF; 491 + hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK; 492 + hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS; 493 + hw_data->num_engines = ADF_420XX_MAX_ACCELENGINES; 494 + hw_data->num_logical_accel = 1; 495 + hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET; 496 + hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK; 497 + hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP; 498 + hw_data->alloc_irq = adf_isr_resource_alloc; 499 + hw_data->free_irq = adf_isr_resource_free; 500 + hw_data->enable_error_correction = adf_gen4_enable_error_correction; 501 + hw_data->get_accel_mask = adf_gen4_get_accel_mask; 502 + hw_data->get_ae_mask = get_ae_mask; 503 + hw_data->get_num_accels = adf_gen4_get_num_accels; 504 + hw_data->get_num_aes = adf_gen4_get_num_aes; 505 + hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id; 506 + hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id; 507 + hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id; 508 + hw_data->get_arb_info = adf_gen4_get_arb_info; 509 + hw_data->get_admin_info = adf_gen4_get_admin_info; 510 + hw_data->get_accel_cap = get_accel_cap; 511 + hw_data->get_sku = adf_gen4_get_sku; 512 + hw_data->init_admin_comms = adf_init_admin_comms; 513 + hw_data->exit_admin_comms = adf_exit_admin_comms; 514 + hw_data->send_admin_init = 
adf_send_admin_init; 515 + hw_data->init_arb = adf_init_arb; 516 + hw_data->exit_arb = adf_exit_arb; 517 + hw_data->get_arb_mapping = adf_get_arbiter_mapping; 518 + hw_data->enable_ints = adf_gen4_enable_ints; 519 + hw_data->init_device = adf_gen4_init_device; 520 + hw_data->reset_device = adf_reset_flr; 521 + hw_data->admin_ae_mask = ADF_420XX_ADMIN_AE_MASK; 522 + hw_data->fw_name = ADF_420XX_FW; 523 + hw_data->fw_mmp_name = ADF_420XX_MMP; 524 + hw_data->uof_get_name = uof_get_name_420xx; 525 + hw_data->uof_get_num_objs = uof_get_num_objs; 526 + hw_data->uof_get_ae_mask = uof_get_ae_mask; 527 + hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; 528 + hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; 529 + hw_data->get_ring_to_svc_map = get_ring_to_svc_map; 530 + hw_data->disable_iov = adf_disable_sriov; 531 + hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; 532 + hw_data->enable_pm = adf_gen4_enable_pm; 533 + hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; 534 + hw_data->dev_config = adf_gen4_dev_config; 535 + hw_data->start_timer = adf_gen4_timer_start; 536 + hw_data->stop_timer = adf_gen4_timer_stop; 537 + hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; 538 + hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; 539 + hw_data->clock_frequency = ADF_420XX_AE_FREQ; 540 + 541 + adf_gen4_set_err_mask(&hw_data->dev_err_mask); 542 + adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); 543 + adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); 544 + adf_gen4_init_dc_ops(&hw_data->dc_ops); 545 + adf_gen4_init_ras_ops(&hw_data->ras_ops); 546 + adf_init_rl_data(&hw_data->rl_data); 547 + } 548 + 549 + void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data) 550 + { 551 + hw_data->dev_class->instances--; 552 + }
+55
drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* Copyright(c) 2023 Intel Corporation */ 3 + #ifndef ADF_420XX_HW_DATA_H_ 4 + #define ADF_420XX_HW_DATA_H_ 5 + 6 + #include <adf_accel_devices.h> 7 + 8 + #define ADF_420XX_MAX_ACCELENGINES 17 9 + 10 + #define ADF_420XX_ACCELENGINES_MASK 0x1FFFF 11 + #define ADF_420XX_ADMIN_AE_MASK 0x10000 12 + 13 + #define ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK (0xFF) 14 + #define ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK (0xFF00FF) 15 + #define ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK (0x10001) 16 + #define ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK (0xF0007) 17 + #define ADF_420XX_PARITYERRORMASK_PKE_MASK (0xFFF) 18 + #define ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK (0x3FF03FF) 19 + 20 + /* 21 + * SSMFEATREN bit mask 22 + * BIT(4) - enables parity detection on CPP 23 + * BIT(12) - enables the logging of push/pull data errors 24 + * in pperr register 25 + * BIT(16) - BIT(27) - enable parity detection on SPPs 26 + */ 27 + #define ADF_420XX_SSMFEATREN_MASK \ 28 + (BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \ 29 + BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26) | BIT(27)) 30 + 31 + /* Firmware Binaries */ 32 + #define ADF_420XX_FW "qat_420xx.bin" 33 + #define ADF_420XX_MMP "qat_420xx_mmp.bin" 34 + #define ADF_420XX_SYM_OBJ "qat_420xx_sym.bin" 35 + #define ADF_420XX_DC_OBJ "qat_420xx_dc.bin" 36 + #define ADF_420XX_ASYM_OBJ "qat_420xx_asym.bin" 37 + #define ADF_420XX_ADMIN_OBJ "qat_420xx_admin.bin" 38 + 39 + /* RL constants */ 40 + #define ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV 100 41 + #define ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL 102 42 + #define ADF_420XX_RL_DCPR_CORRECTION 1 43 + #define ADF_420XX_RL_SCANS_PER_SEC 954 44 + #define ADF_420XX_RL_MAX_TP_ASYM 173750UL 45 + #define ADF_420XX_RL_MAX_TP_SYM 95000UL 46 + #define ADF_420XX_RL_MAX_TP_DC 40000UL 47 + #define ADF_420XX_RL_SLICE_REF 1000UL 48 + 49 + /* Clocks frequency */ 50 + #define ADF_420XX_AE_FREQ (1000 * HZ_PER_MHZ) 51 + 52 + void 
adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id); 53 + void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data); 54 + 55 + #endif
+202
drivers/crypto/intel/qat/qat_420xx/adf_drv.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* Copyright(c) 2023 Intel Corporation */ 3 + #include <linux/device.h> 4 + #include <linux/module.h> 5 + #include <linux/pci.h> 6 + 7 + #include <adf_accel_devices.h> 8 + #include <adf_gen4_hw_data.h> 9 + #include <adf_gen4_config.h> 10 + #include <adf_cfg.h> 11 + #include <adf_common_drv.h> 12 + #include <adf_dbgfs.h> 13 + 14 + #include "adf_420xx_hw_data.h" 15 + 16 + static const struct pci_device_id adf_pci_tbl[] = { 17 + { PCI_VDEVICE(INTEL, ADF_420XX_PCI_DEVICE_ID), }, 18 + { } 19 + }; 20 + MODULE_DEVICE_TABLE(pci, adf_pci_tbl); 21 + 22 + static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) 23 + { 24 + if (accel_dev->hw_device) { 25 + adf_clean_hw_data_420xx(accel_dev->hw_device); 26 + accel_dev->hw_device = NULL; 27 + } 28 + adf_dbgfs_exit(accel_dev); 29 + adf_cfg_dev_remove(accel_dev); 30 + adf_devmgr_rm_dev(accel_dev, NULL); 31 + } 32 + 33 + static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 34 + { 35 + struct adf_accel_dev *accel_dev; 36 + struct adf_accel_pci *accel_pci_dev; 37 + struct adf_hw_device_data *hw_data; 38 + unsigned int i, bar_nr; 39 + unsigned long bar_mask; 40 + struct adf_bar *bar; 41 + int ret; 42 + 43 + if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { 44 + /* 45 + * If the accelerator is connected to a node with no memory 46 + * there is no point in using the accelerator since the remote 47 + * memory transaction will be very slow. 
48 + */ 49 + dev_err(&pdev->dev, "Invalid NUMA configuration.\n"); 50 + return -EINVAL; 51 + } 52 + 53 + accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL); 54 + if (!accel_dev) 55 + return -ENOMEM; 56 + 57 + INIT_LIST_HEAD(&accel_dev->crypto_list); 58 + accel_pci_dev = &accel_dev->accel_pci_dev; 59 + accel_pci_dev->pci_dev = pdev; 60 + 61 + /* 62 + * Add accel device to accel table 63 + * This should be called before adf_cleanup_accel is called 64 + */ 65 + if (adf_devmgr_add_dev(accel_dev, NULL)) { 66 + dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); 67 + return -EFAULT; 68 + } 69 + 70 + accel_dev->owner = THIS_MODULE; 71 + /* Allocate and initialise device hardware meta-data structure */ 72 + hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL); 73 + if (!hw_data) { 74 + ret = -ENOMEM; 75 + goto out_err; 76 + } 77 + 78 + accel_dev->hw_device = hw_data; 79 + adf_init_hw_data_420xx(accel_dev->hw_device, ent->device); 80 + 81 + pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); 82 + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses); 83 + 84 + /* Get Accelerators and Accelerators Engines masks */ 85 + hw_data->accel_mask = hw_data->get_accel_mask(hw_data); 86 + hw_data->ae_mask = hw_data->get_ae_mask(hw_data); 87 + accel_pci_dev->sku = hw_data->get_sku(hw_data); 88 + /* If the device has no acceleration engines then ignore it */ 89 + if (!hw_data->accel_mask || !hw_data->ae_mask || 90 + (~hw_data->ae_mask & 0x01)) { 91 + dev_err(&pdev->dev, "No acceleration units found.\n"); 92 + ret = -EFAULT; 93 + goto out_err; 94 + } 95 + 96 + /* Create device configuration table */ 97 + ret = adf_cfg_dev_add(accel_dev); 98 + if (ret) 99 + goto out_err; 100 + 101 + /* Enable PCI device */ 102 + ret = pcim_enable_device(pdev); 103 + if (ret) { 104 + dev_err(&pdev->dev, "Can't enable PCI device.\n"); 105 + goto out_err; 106 + } 107 + 108 + /* Set DMA identifier */ 109 + ret = 
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 110 + if (ret) { 111 + dev_err(&pdev->dev, "No usable DMA configuration.\n"); 112 + goto out_err; 113 + } 114 + 115 + ret = adf_gen4_cfg_dev_init(accel_dev); 116 + if (ret) { 117 + dev_err(&pdev->dev, "Failed to initialize configuration.\n"); 118 + goto out_err; 119 + } 120 + 121 + /* Get accelerator capabilities mask */ 122 + hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); 123 + if (!hw_data->accel_capabilities_mask) { 124 + dev_err(&pdev->dev, "Failed to get capabilities mask.\n"); 125 + ret = -EINVAL; 126 + goto out_err; 127 + } 128 + 129 + /* Find and map all the device's BARS */ 130 + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK; 131 + 132 + ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev)); 133 + if (ret) { 134 + dev_err(&pdev->dev, "Failed to map pci regions.\n"); 135 + goto out_err; 136 + } 137 + 138 + i = 0; 139 + for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) { 140 + bar = &accel_pci_dev->pci_bars[i++]; 141 + bar->virt_addr = pcim_iomap_table(pdev)[bar_nr]; 142 + } 143 + 144 + pci_set_master(pdev); 145 + 146 + if (pci_save_state(pdev)) { 147 + dev_err(&pdev->dev, "Failed to save pci state.\n"); 148 + ret = -ENOMEM; 149 + goto out_err; 150 + } 151 + 152 + accel_dev->ras_errors.enabled = true; 153 + adf_dbgfs_init(accel_dev); 154 + 155 + ret = adf_dev_up(accel_dev, true); 156 + if (ret) 157 + goto out_err_dev_stop; 158 + 159 + ret = adf_sysfs_init(accel_dev); 160 + if (ret) 161 + goto out_err_dev_stop; 162 + 163 + return ret; 164 + 165 + out_err_dev_stop: 166 + adf_dev_down(accel_dev, false); 167 + out_err: 168 + adf_cleanup_accel(accel_dev); 169 + return ret; 170 + } 171 + 172 + static void adf_remove(struct pci_dev *pdev) 173 + { 174 + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); 175 + 176 + if (!accel_dev) { 177 + pr_err("QAT: Driver removal failed\n"); 178 + return; 179 + } 180 + adf_dev_down(accel_dev, 
false); 181 + adf_cleanup_accel(accel_dev); 182 + } 183 + 184 + static struct pci_driver adf_driver = { 185 + .id_table = adf_pci_tbl, 186 + .name = ADF_420XX_DEVICE_NAME, 187 + .probe = adf_probe, 188 + .remove = adf_remove, 189 + .sriov_configure = adf_sriov_configure, 190 + .err_handler = &adf_err_handler, 191 + }; 192 + 193 + module_pci_driver(adf_driver); 194 + 195 + MODULE_LICENSE("GPL"); 196 + MODULE_AUTHOR("Intel"); 197 + MODULE_FIRMWARE(ADF_420XX_FW); 198 + MODULE_FIRMWARE(ADF_420XX_MMP); 199 + MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); 200 + MODULE_VERSION(ADF_DRV_VERSION); 201 + MODULE_SOFTDEP("pre: crypto-intel_qat"); 202 + MODULE_IMPORT_NS(CRYPTO_QAT);
+3
drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
··· 19 19 #define ADF_C3XXX_DEVICE_NAME "c3xxx" 20 20 #define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf" 21 21 #define ADF_4XXX_DEVICE_NAME "4xxx" 22 + #define ADF_420XX_DEVICE_NAME "420xx" 22 23 #define ADF_4XXX_PCI_DEVICE_ID 0x4940 23 24 #define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941 24 25 #define ADF_401XX_PCI_DEVICE_ID 0x4942 25 26 #define ADF_401XXIOV_PCI_DEVICE_ID 0x4943 26 27 #define ADF_402XX_PCI_DEVICE_ID 0x4944 27 28 #define ADF_402XXIOV_PCI_DEVICE_ID 0x4945 29 + #define ADF_420XX_PCI_DEVICE_ID 0x4946 30 + #define ADF_420XXIOV_PCI_DEVICE_ID 0x4947 28 31 #define ADF_DEVICE_FUSECTL_OFFSET 0x40 29 32 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C 30 33 #define ADF_DEVICE_FUSECTL_MASK 0x80000000
+1
drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
··· 47 47 DEV_C3XXX, 48 48 DEV_C3XXXVF, 49 49 DEV_4XXX, 50 + DEV_420XX, 50 51 }; 51 52 52 53 struct adf_dev_status_info {
+2
drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
··· 202 202 ICP_ACCEL_GEN4_MASK_UCS_SLICE = BIT(4), 203 203 ICP_ACCEL_GEN4_MASK_EIA3_SLICE = BIT(5), 204 204 ICP_ACCEL_GEN4_MASK_SMX_SLICE = BIT(7), 205 + ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE = BIT(8), 206 + ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE = BIT(9), 205 207 }; 206 208 207 209 void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev);
+11 -3
drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
··· 18 18 ICP_QAT_HW_AE_9 = 9, 19 19 ICP_QAT_HW_AE_10 = 10, 20 20 ICP_QAT_HW_AE_11 = 11, 21 - ICP_QAT_HW_AE_DELIMITER = 12 21 + ICP_QAT_HW_AE_12 = 12, 22 + ICP_QAT_HW_AE_13 = 13, 23 + ICP_QAT_HW_AE_14 = 14, 24 + ICP_QAT_HW_AE_15 = 15, 25 + ICP_QAT_HW_AE_16 = 16, 26 + ICP_QAT_HW_AE_DELIMITER = 17 22 27 }; 23 28 24 29 enum icp_qat_hw_qat_id { ··· 100 95 /* Bits 10-11 are currently reserved */ 101 96 ICP_ACCEL_CAPABILITIES_HKDF = BIT(12), 102 97 ICP_ACCEL_CAPABILITIES_ECEDMONT = BIT(13), 103 - /* Bit 14 is currently reserved */ 98 + ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN = BIT(14), 104 99 ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15), 105 100 ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16), 106 101 ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17), ··· 112 107 ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23), 113 108 ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24), 114 109 ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION = BIT(25), 115 - ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26) 110 + ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26), 111 + /* Bits 27-28 are currently reserved */ 112 + ICP_ACCEL_CAPABILITIES_ZUC_256 = BIT(29), 113 + ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT = BIT(30), 116 114 }; 117 115 118 116 #define QAT_AUTH_MODE_BITPOS 4
+1 -1
drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
··· 7 7 #define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000 8 8 #define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000 9 9 #define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000 10 - #define ICP_QAT_UCLO_MAX_AE 12 10 + #define ICP_QAT_UCLO_MAX_AE 17 11 11 #define ICP_QAT_UCLO_MAX_CTX 8 12 12 #define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) 13 13 #define ICP_QAT_UCLO_MAX_USTORE 0x4000
+5 -1
drivers/crypto/intel/qat/qat_common/qat_hal.c
··· 697 697 case ADF_4XXX_PCI_DEVICE_ID: 698 698 case ADF_401XX_PCI_DEVICE_ID: 699 699 case ADF_402XX_PCI_DEVICE_ID: 700 + case ADF_420XX_PCI_DEVICE_ID: 700 701 handle->chip_info->mmp_sram_size = 0; 701 702 handle->chip_info->nn = false; 702 703 handle->chip_info->lm2lm3 = true; 703 704 handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X; 704 705 handle->chip_info->icp_rst_csr = ICP_RESET_CPP0; 705 - handle->chip_info->icp_rst_mask = 0x100015; 706 + if (handle->pci_dev->device == ADF_420XX_PCI_DEVICE_ID) 707 + handle->chip_info->icp_rst_mask = 0x100155; 708 + else 709 + handle->chip_info->icp_rst_mask = 0x100015; 706 710 handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE_CPP0; 707 711 handle->chip_info->misc_ctl_csr = MISC_CONTROL_C4XXX; 708 712 handle->chip_info->wakeup_event_val = 0x80000000;
+1
drivers/crypto/intel/qat/qat_common/qat_uclo.c
··· 733 733 case ADF_4XXX_PCI_DEVICE_ID: 734 734 case ADF_401XX_PCI_DEVICE_ID: 735 735 case ADF_402XX_PCI_DEVICE_ID: 736 + case ADF_420XX_PCI_DEVICE_ID: 736 737 return ICP_QAT_AC_4XXX_A_DEV_TYPE; 737 738 default: 738 739 pr_err("QAT: unsupported device 0x%x\n",