Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: qat - add support for new devices to FW loader

FW loader updates for new qat devices

Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Pingchao Yang and committed by Herbert Xu.
b0272276 9809ebcd

+823 -85
+6 -3
drivers/crypto/qat/qat_common/adf_accel_engine.c
··· 78 78 uof_addr = (void *)loader_data->uof_fw->data; 79 79 mmp_size = loader_data->mmp_fw->size; 80 80 mmp_addr = (void *)loader_data->mmp_fw->data; 81 - qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size); 82 - if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) { 83 - dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n"); 81 + if (qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size)) { 82 + dev_err(&GET_DEV(accel_dev), "Failed to load MMP\n"); 83 + goto out_err; 84 + } 85 + if (qat_uclo_map_obj(loader_data->fw_loader, uof_addr, uof_size)) { 86 + dev_err(&GET_DEV(accel_dev), "Failed to map FW\n"); 84 87 goto out_err; 85 88 } 86 89 if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
+6 -4
drivers/crypto/qat/qat_common/adf_common_drv.h
··· 178 178 int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle); 179 179 void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle, 180 180 unsigned char ae, unsigned int ctx_mask); 181 + int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle, 182 + unsigned int ae); 181 183 int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle, 182 184 unsigned char ae, enum icp_qat_uof_regtype lm_type, 183 185 unsigned char mode); ··· 218 216 unsigned char ae, unsigned short lm_addr, unsigned int value); 219 217 int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle); 220 218 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle); 221 - int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, 222 - void *addr_ptr, int mem_size); 223 - void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, 224 - void *addr_ptr, int mem_size); 219 + int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, void *addr_ptr, 220 + int mem_size); 221 + int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, 222 + void *addr_ptr, int mem_size); 225 223 #if defined(CONFIG_PCI_IOV) 226 224 int adf_sriov_configure(struct pci_dev *pdev, int numvfs); 227 225 void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+10
drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
··· 68 68 69 69 struct icp_qat_fw_loader_handle { 70 70 struct icp_qat_fw_loader_hal_handle *hal_handle; 71 + struct pci_dev *pci_dev; 71 72 void *obj_handle; 73 + void *sobj_handle; 74 + bool fw_auth; 72 75 void __iomem *hal_sram_addr_v; 73 76 void __iomem *hal_cap_g_ctl_csr_addr_v; 74 77 void __iomem *hal_cap_ae_xfer_csr_addr_v; 75 78 void __iomem *hal_cap_ae_local_csr_addr_v; 76 79 void __iomem *hal_ep_csr_addr_v; 80 + }; 81 + 82 + struct icp_firml_dram_desc { 83 + void __iomem *dram_base_addr; 84 + void *dram_base_addr_v; 85 + dma_addr_t dram_bus_addr; 86 + u64 dram_size; 77 87 }; 78 88 #endif
+34 -3
drivers/crypto/qat/qat_common/icp_qat_hal.h
··· 81 81 LOCAL_CSR_STATUS = 0x180, 82 82 }; 83 83 84 + enum fcu_csr { 85 + FCU_CONTROL = 0x8c0, 86 + FCU_STATUS = 0x8c4, 87 + FCU_STATUS1 = 0x8c8, 88 + FCU_DRAM_ADDR_LO = 0x8cc, 89 + FCU_DRAM_ADDR_HI = 0x8d0, 90 + FCU_RAMBASE_ADDR_HI = 0x8d4, 91 + FCU_RAMBASE_ADDR_LO = 0x8d8 92 + }; 93 + 94 + enum fcu_cmd { 95 + FCU_CTRL_CMD_NOOP = 0, 96 + FCU_CTRL_CMD_AUTH = 1, 97 + FCU_CTRL_CMD_LOAD = 2, 98 + FCU_CTRL_CMD_START = 3 99 + }; 100 + 101 + enum fcu_sts { 102 + FCU_STS_NO_STS = 0, 103 + FCU_STS_VERI_DONE = 1, 104 + FCU_STS_LOAD_DONE = 2, 105 + FCU_STS_VERI_FAIL = 3, 106 + FCU_STS_LOAD_FAIL = 4, 107 + FCU_STS_BUSY = 5 108 + }; 84 109 #define UA_ECS (0x1 << 31) 85 110 #define ACS_ABO_BITPOS 31 86 111 #define ACS_ACNO 0x7 ··· 123 98 #define LCS_STATUS (0x1) 124 99 #define MMC_SHARE_CS_BITPOS 2 125 100 #define GLOBAL_CSR 0xA00 101 + #define FCU_CTRL_AE_POS 0x8 102 + #define FCU_AUTH_STS_MASK 0x7 103 + #define FCU_STS_DONE_POS 0x9 104 + #define FCU_STS_AUTHFWLD_POS 0X8 105 + #define FCU_LOADED_AE_POS 0x16 106 + #define FW_AUTH_WAIT_PERIOD 10 107 + #define FW_AUTH_MAX_RETRY 300 126 108 127 109 #define SET_CAP_CSR(handle, csr, val) \ 128 110 ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val) ··· 138 106 #define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val) 139 107 #define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr) 140 108 #define AE_CSR(handle, ae) \ 141 - (handle->hal_cap_ae_local_csr_addr_v + \ 109 + ((char __iomem *)handle->hal_cap_ae_local_csr_addr_v + \ 142 110 ((ae & handle->hal_handle->ae_mask) << 12)) 143 111 #define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & csr)) 144 112 #define SET_AE_CSR(handle, ae, csr, val) \ 145 113 ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val) 146 114 #define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0) 147 115 #define AE_XFER(handle, ae) \ 148 - (handle->hal_cap_ae_xfer_csr_addr_v + \ 116 + ((char __iomem *)handle->hal_cap_ae_xfer_csr_addr_v + \ 
149 117 ((ae & handle->hal_handle->ae_mask) << 12)) 150 118 #define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \ 151 119 ((reg & 0xff) << 2)) ··· 153 121 ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val) 154 122 #define SRAM_WRITE(handle, addr, val) \ 155 123 ADF_CSR_WR(handle->hal_sram_addr_v, addr, val) 156 - #define SRAM_READ(handle, addr) ADF_CSR_RD(handle->hal_sram_addr_v, addr) 157 124 #endif
+158 -7
drivers/crypto/qat/qat_common/icp_qat_uclo.h
··· 47 47 #ifndef __ICP_QAT_UCLO_H__ 48 48 #define __ICP_QAT_UCLO_H__ 49 49 50 - #define ICP_QAT_AC_C_CPU_TYPE 0x00400000 50 + #define ICP_QAT_AC_895XCC_DEV_TYPE 0x00400000 51 + #define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000 52 + #define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000 51 53 #define ICP_QAT_UCLO_MAX_AE 12 52 54 #define ICP_QAT_UCLO_MAX_CTX 8 53 55 #define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) 54 56 #define ICP_QAT_UCLO_MAX_USTORE 0x4000 55 57 #define ICP_QAT_UCLO_MAX_XFER_REG 128 56 58 #define ICP_QAT_UCLO_MAX_GPR_REG 128 57 - #define ICP_QAT_UCLO_MAX_NN_REG 128 58 59 #define ICP_QAT_UCLO_MAX_LMEM_REG 1024 59 60 #define ICP_QAT_UCLO_AE_ALL_CTX 0xff 60 61 #define ICP_QAT_UOF_OBJID_LEN 8 61 62 #define ICP_QAT_UOF_FID 0xc6c2 62 63 #define ICP_QAT_UOF_MAJVER 0x4 63 64 #define ICP_QAT_UOF_MINVER 0x11 64 - #define ICP_QAT_UOF_NN_MODE_NOTCARE 0xff 65 65 #define ICP_QAT_UOF_OBJS "UOF_OBJS" 66 66 #define ICP_QAT_UOF_STRT "UOF_STRT" 67 - #define ICP_QAT_UOF_GTID "UOF_GTID" 68 67 #define ICP_QAT_UOF_IMAG "UOF_IMAG" 69 68 #define ICP_QAT_UOF_IMEM "UOF_IMEM" 70 - #define ICP_QAT_UOF_MSEG "UOF_MSEG" 71 69 #define ICP_QAT_UOF_LOCAL_SCOPE 1 72 70 #define ICP_QAT_UOF_INIT_EXPR 0 73 71 #define ICP_QAT_UOF_INIT_REG 1 74 72 #define ICP_QAT_UOF_INIT_REG_CTX 2 75 73 #define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP 3 74 + #define ICP_QAT_SUOF_OBJ_ID_LEN 8 75 + #define ICP_QAT_SUOF_FID 0x53554f46 76 + #define ICP_QAT_SUOF_MAJVER 0x0 77 + #define ICP_QAT_SUOF_MINVER 0x1 78 + #define ICP_QAT_SIMG_AE_INIT_SEQ_LEN (50 * sizeof(unsigned long long)) 79 + #define ICP_QAT_SIMG_AE_INSTS_LEN (0x4000 * sizeof(unsigned long long)) 80 + #define ICP_QAT_CSS_FWSK_MODULUS_LEN 256 81 + #define ICP_QAT_CSS_FWSK_EXPONENT_LEN 4 82 + #define ICP_QAT_CSS_FWSK_PAD_LEN 252 83 + #define ICP_QAT_CSS_FWSK_PUB_LEN (ICP_QAT_CSS_FWSK_MODULUS_LEN + \ 84 + ICP_QAT_CSS_FWSK_EXPONENT_LEN + \ 85 + ICP_QAT_CSS_FWSK_PAD_LEN) 86 + #define ICP_QAT_CSS_SIGNATURE_LEN 256 87 + #define 
ICP_QAT_CSS_AE_IMG_LEN (sizeof(struct icp_qat_simg_ae_mode) + \ 88 + ICP_QAT_SIMG_AE_INIT_SEQ_LEN + \ 89 + ICP_QAT_SIMG_AE_INSTS_LEN) 90 + #define ICP_QAT_CSS_AE_SIMG_LEN (sizeof(struct icp_qat_css_hdr) + \ 91 + ICP_QAT_CSS_FWSK_PUB_LEN + \ 92 + ICP_QAT_CSS_SIGNATURE_LEN + \ 93 + ICP_QAT_CSS_AE_IMG_LEN) 94 + #define ICP_QAT_AE_IMG_OFFSET (sizeof(struct icp_qat_css_hdr) + \ 95 + ICP_QAT_CSS_FWSK_MODULUS_LEN + \ 96 + ICP_QAT_CSS_FWSK_EXPONENT_LEN + \ 97 + ICP_QAT_CSS_SIGNATURE_LEN) 98 + #define ICP_QAT_CSS_MAX_IMAGE_LEN 0x40000 76 99 77 100 #define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf) 78 101 #define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf) ··· 133 110 ICP_LMEM0, 134 111 ICP_LMEM1, 135 112 ICP_NEIGH_REL, 113 + }; 114 + 115 + enum icp_qat_css_fwtype { 116 + CSS_AE_FIRMWARE = 0, 117 + CSS_MMP_FIRMWARE = 1 136 118 }; 137 119 138 120 struct icp_qat_uclo_page { ··· 263 235 }; 264 236 265 237 struct icp_qat_uof_objhdr { 266 - unsigned int cpu_type; 238 + unsigned int ac_dev_type; 267 239 unsigned short min_cpu_ver; 268 240 unsigned short max_cpu_ver; 269 241 short max_chunks; ··· 354 326 unsigned int img_name; 355 327 unsigned int ae_assigned; 356 328 unsigned int ctx_assigned; 357 - unsigned int cpu_type; 329 + unsigned int ac_dev_type; 358 330 unsigned int entry_address; 359 331 unsigned int fill_pattern[2]; 360 332 unsigned int reloadable_size; ··· 401 373 unsigned int *value; 402 374 unsigned int size; 403 375 struct icp_qat_uof_batch_init *next; 376 + }; 377 + 378 + struct icp_qat_suof_img_hdr { 379 + char *simg_buf; 380 + unsigned long simg_len; 381 + char *css_header; 382 + char *css_key; 383 + char *css_signature; 384 + char *css_simg; 385 + unsigned long simg_size; 386 + unsigned int ae_num; 387 + unsigned int ae_mask; 388 + unsigned int fw_type; 389 + unsigned long simg_name; 390 + unsigned long appmeta_data; 391 + }; 392 + 393 + struct icp_qat_suof_img_tbl { 394 + unsigned int num_simgs; 395 + struct icp_qat_suof_img_hdr *simg_hdr; 396 + }; 397 
+ 398 + struct icp_qat_suof_handle { 399 + unsigned int file_id; 400 + unsigned int check_sum; 401 + char min_ver; 402 + char maj_ver; 403 + char fw_type; 404 + char *suof_buf; 405 + unsigned int suof_size; 406 + char *sym_str; 407 + unsigned int sym_size; 408 + struct icp_qat_suof_img_tbl img_table; 409 + }; 410 + 411 + struct icp_qat_fw_auth_desc { 412 + unsigned int img_len; 413 + unsigned int reserved; 414 + unsigned int css_hdr_high; 415 + unsigned int css_hdr_low; 416 + unsigned int img_high; 417 + unsigned int img_low; 418 + unsigned int signature_high; 419 + unsigned int signature_low; 420 + unsigned int fwsk_pub_high; 421 + unsigned int fwsk_pub_low; 422 + unsigned int img_ae_mode_data_high; 423 + unsigned int img_ae_mode_data_low; 424 + unsigned int img_ae_init_data_high; 425 + unsigned int img_ae_init_data_low; 426 + unsigned int img_ae_insts_high; 427 + unsigned int img_ae_insts_low; 428 + }; 429 + 430 + struct icp_qat_auth_chunk { 431 + struct icp_qat_fw_auth_desc fw_auth_desc; 432 + u64 chunk_size; 433 + u64 chunk_bus_addr; 434 + }; 435 + 436 + struct icp_qat_css_hdr { 437 + unsigned int module_type; 438 + unsigned int header_len; 439 + unsigned int header_ver; 440 + unsigned int module_id; 441 + unsigned int module_vendor; 442 + unsigned int date; 443 + unsigned int size; 444 + unsigned int key_size; 445 + unsigned int module_size; 446 + unsigned int exponent_size; 447 + unsigned int fw_type; 448 + unsigned int reserved[21]; 449 + }; 450 + 451 + struct icp_qat_simg_ae_mode { 452 + unsigned int file_id; 453 + unsigned short maj_ver; 454 + unsigned short min_ver; 455 + unsigned int dev_type; 456 + unsigned short devmax_ver; 457 + unsigned short devmin_ver; 458 + unsigned int ae_mask; 459 + unsigned int ctx_enables; 460 + char fw_type; 461 + char ctx_mode; 462 + char nn_mode; 463 + char lm0_mode; 464 + char lm1_mode; 465 + char scs_mode; 466 + char lm2_mode; 467 + char lm3_mode; 468 + char tindex_mode; 469 + unsigned char reserved[7]; 470 + char 
simg_name[256]; 471 + char appmeta_data[256]; 472 + }; 473 + 474 + struct icp_qat_suof_filehdr { 475 + unsigned int file_id; 476 + unsigned int check_sum; 477 + char min_ver; 478 + char maj_ver; 479 + char fw_type; 480 + char reserved; 481 + unsigned short max_chunks; 482 + unsigned short num_chunks; 483 + }; 484 + 485 + struct icp_qat_suof_chunk_hdr { 486 + char chunk_id[ICP_QAT_SUOF_OBJ_ID_LEN]; 487 + u64 offset; 488 + u64 size; 489 + }; 490 + 491 + struct icp_qat_suof_strtable { 492 + unsigned int tab_length; 493 + unsigned int strings; 494 + }; 495 + 496 + struct icp_qat_suof_objhdr { 497 + unsigned int img_length; 498 + unsigned int reserved; 404 499 }; 405 500 #endif
+80 -42
drivers/crypto/qat/qat_common/qat_hal.c
··· 45 45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 46 46 */ 47 47 #include <linux/slab.h> 48 + #include <linux/delay.h> 48 49 49 50 #include "adf_accel_devices.h" 50 51 #include "adf_common_drv.h" 51 52 #include "icp_qat_hal.h" 52 53 #include "icp_qat_uclo.h" 53 54 54 - #define BAD_REGADDR 0xffff 55 - #define MAX_RETRY_TIMES 10000 56 - #define INIT_CTX_ARB_VALUE 0x0 55 + #define BAD_REGADDR 0xffff 56 + #define MAX_RETRY_TIMES 10000 57 + #define INIT_CTX_ARB_VALUE 0x0 57 58 #define INIT_CTX_ENABLE_VALUE 0x0 58 - #define INIT_PC_VALUE 0x0 59 + #define INIT_PC_VALUE 0x0 59 60 #define INIT_WAKEUP_EVENTS_VALUE 0x1 60 61 #define INIT_SIG_EVENTS_VALUE 0x1 61 62 #define INIT_CCENABLE_VALUE 0x2000 62 - #define RST_CSR_QAT_LSB 20 63 + #define RST_CSR_QAT_LSB 20 63 64 #define RST_CSR_AE_LSB 0 64 65 #define MC_TIMESTAMP_ENABLE (0x1 << 7) 65 66 ··· 392 391 unsigned int times = MAX_RETRY_TIMES; 393 392 394 393 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { 395 - if (!(handle->hal_handle->ae_mask & (1 << ae))) 396 - continue; 397 - 398 394 qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, 399 395 (unsigned int *)&base_cnt); 400 396 base_cnt &= 0xffff; ··· 411 413 return 0; 412 414 } 413 415 416 + int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle, 417 + unsigned int ae) 418 + { 419 + unsigned int enable = 0, active = 0; 420 + 421 + qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable); 422 + qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active); 423 + if ((enable & (0xff >> CE_ENABLE_BITPOS)) || 424 + (active & (1 << ACS_ABO_BITPOS))) 425 + return 1; 426 + else 427 + return 0; 428 + } 429 + 414 430 static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle) 415 431 { 416 432 unsigned int misc_ctl; ··· 437 425 (~MC_TIMESTAMP_ENABLE)); 438 426 439 427 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { 440 - if (!(handle->hal_handle->ae_mask & (1 << ae))) 441 - continue; 442 428 qat_hal_wr_ae_csr(handle, 
ae, TIMESTAMP_LOW, 0); 443 429 qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0); 444 430 } ··· 450 440 #define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C 451 441 static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle) 452 442 { 453 - void __iomem *csr_addr = handle->hal_ep_csr_addr_v + 454 - ESRAM_AUTO_INIT_CSR_OFFSET; 443 + void __iomem *csr_addr = 444 + (void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v + 445 + ESRAM_AUTO_INIT_CSR_OFFSET); 455 446 unsigned int csr_val, times = 30; 456 447 457 448 csr_val = ADF_CSR_RD(csr_addr, 0); ··· 504 493 505 494 /* Set undefined power-up/reset states to reasonable default values */ 506 495 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { 507 - if (!(handle->hal_handle->ae_mask & (1 << ae))) 508 - continue; 509 496 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, 510 497 INIT_CTX_ENABLE_VALUE); 511 498 qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX, ··· 607 598 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); 608 599 } 609 600 610 - static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) 601 + static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle) 611 602 { 612 603 unsigned char ae; 613 - unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; 614 - int times = MAX_RETRY_TIMES; 615 - unsigned int csr_val = 0; 616 604 unsigned short reg; 617 - unsigned int savctx = 0; 618 - int ret = 0; 619 605 620 606 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { 621 - if (!(handle->hal_handle->ae_mask & (1 << ae))) 622 - continue; 623 607 for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) { 624 608 qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS, 625 609 reg, 0); 626 610 qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS, 627 611 reg, 0); 628 612 } 613 + } 614 + } 615 + 616 + static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle) 617 + { 618 + unsigned char ae; 619 + unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX; 620 + int times = MAX_RETRY_TIMES; 621 + unsigned int 
csr_val = 0; 622 + unsigned int savctx = 0; 623 + int ret = 0; 624 + 625 + for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { 629 626 qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); 630 627 csr_val &= ~(1 << MMC_SHARE_CS_BITPOS); 631 628 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val); ··· 653 638 qat_hal_enable_ctx(handle, ae, ctx_mask); 654 639 } 655 640 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { 656 - if (!(handle->hal_handle->ae_mask & (1 << ae))) 657 - continue; 658 641 /* wait for AE to finish */ 659 642 do { 660 643 ret = qat_hal_wait_cycles(handle, ae, 20, 1); ··· 680 667 return 0; 681 668 } 682 669 683 - #define ICP_DH895XCC_AE_OFFSET 0x20000 684 - #define ICP_DH895XCC_CAP_OFFSET (ICP_DH895XCC_AE_OFFSET + 0x10000) 670 + #define ICP_QAT_AE_OFFSET 0x20000 671 + #define ICP_QAT_CAP_OFFSET (ICP_QAT_AE_OFFSET + 0x10000) 685 672 #define LOCAL_TO_XFER_REG_OFFSET 0x800 686 - #define ICP_DH895XCC_EP_OFFSET 0x3a000 673 + #define ICP_QAT_EP_OFFSET 0x3a000 687 674 int qat_hal_init(struct adf_accel_dev *accel_dev) 688 675 { 689 676 unsigned char ae; ··· 700 687 if (!handle) 701 688 return -ENOMEM; 702 689 703 - handle->hal_cap_g_ctl_csr_addr_v = misc_bar->virt_addr + 704 - ICP_DH895XCC_CAP_OFFSET; 705 - handle->hal_cap_ae_xfer_csr_addr_v = misc_bar->virt_addr + 706 - ICP_DH895XCC_AE_OFFSET; 707 - handle->hal_ep_csr_addr_v = misc_bar->virt_addr + 708 - ICP_DH895XCC_EP_OFFSET; 709 - handle->hal_cap_ae_local_csr_addr_v = 710 - handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET; 711 690 handle->hal_sram_addr_v = sram_bar->virt_addr; 691 + handle->hal_cap_g_ctl_csr_addr_v = 692 + (void __iomem *)((uintptr_t)misc_bar->virt_addr + 693 + ICP_QAT_CAP_OFFSET); 694 + handle->hal_cap_ae_xfer_csr_addr_v = 695 + (void __iomem *)((uintptr_t)misc_bar->virt_addr + 696 + ICP_QAT_AE_OFFSET); 697 + handle->hal_ep_csr_addr_v = 698 + (void __iomem *)((uintptr_t)misc_bar->virt_addr + 699 + ICP_QAT_EP_OFFSET); 700 + 
handle->hal_cap_ae_local_csr_addr_v = 701 + (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v + 702 + LOCAL_TO_XFER_REG_OFFSET); 703 + handle->pci_dev = pci_info->pci_dev; 704 + handle->fw_auth = (handle->pci_dev->device == 705 + ADF_DH895XCC_PCI_DEVICE_ID) ? false : true; 712 706 handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL); 713 707 if (!handle->hal_handle) 714 708 goto out_hal_handle; ··· 743 723 dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n"); 744 724 goto out_err; 745 725 } 746 - if (qat_hal_clear_gpr(handle)) 747 - goto out_err; 726 + qat_hal_clear_xfer(handle); 727 + if (!handle->fw_auth) { 728 + if (qat_hal_clear_gpr(handle)) 729 + goto out_err; 730 + } 731 + 748 732 /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */ 749 733 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { 750 734 unsigned int csr_val = 0; 751 735 752 - if (!(hw_data->ae_mask & (1 << ae))) 753 - continue; 754 736 qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val); 755 737 csr_val |= 0x1; 756 738 qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val); ··· 778 756 void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae, 779 757 unsigned int ctx_mask) 780 758 { 781 - qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) & 759 + int retry = 0; 760 + unsigned int fcu_sts = 0; 761 + 762 + if (handle->fw_auth) { 763 + SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_START); 764 + do { 765 + msleep(FW_AUTH_WAIT_PERIOD); 766 + fcu_sts = GET_CAP_CSR(handle, FCU_STATUS); 767 + if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1)) 768 + return; 769 + } while (retry++ < FW_AUTH_MAX_RETRY); 770 + pr_err("QAT: start error (AE 0x%x FCU_STS = 0x%x)\n", ae, 771 + fcu_sts); 772 + } else { 773 + qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) & 782 774 ICP_QAT_UCLO_AE_ALL_CTX, 0x10000); 783 - qat_hal_enable_ctx(handle, ae, ctx_mask); 775 + qat_hal_enable_ctx(handle, ae, ctx_mask); 776 + } 784 777 } 785 778 786 779 void 
qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae, 787 780 unsigned int ctx_mask) 788 781 { 789 - qat_hal_disable_ctx(handle, ae, ctx_mask); 782 + if (!handle->fw_auth) 783 + qat_hal_disable_ctx(handle, ae, ctx_mask); 790 784 } 791 785 792 786 void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+529 -26
drivers/crypto/qat/qat_common/qat_uclo.c
··· 47 47 #include <linux/slab.h> 48 48 #include <linux/ctype.h> 49 49 #include <linux/kernel.h> 50 - 50 + #include <linux/delay.h> 51 51 #include "adf_accel_devices.h" 52 52 #include "adf_common_drv.h" 53 53 #include "icp_qat_uclo.h" ··· 119 119 { 120 120 if ((!str_table->table_len) || (str_offset > str_table->table_len)) 121 121 return NULL; 122 - return (char *)(((unsigned long)(str_table->strings)) + str_offset); 122 + return (char *)(((uintptr_t)(str_table->strings)) + str_offset); 123 123 } 124 124 125 - static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr) 125 + static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr) 126 126 { 127 127 int maj = hdr->maj_ver & 0xff; 128 128 int min = hdr->min_ver & 0xff; ··· 133 133 } 134 134 if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) { 135 135 pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n", 136 + maj, min); 137 + return -EINVAL; 138 + } 139 + return 0; 140 + } 141 + 142 + static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr) 143 + { 144 + int maj = suof_hdr->maj_ver & 0xff; 145 + int min = suof_hdr->min_ver & 0xff; 146 + 147 + if (suof_hdr->file_id != ICP_QAT_SUOF_FID) { 148 + pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id); 149 + return -EINVAL; 150 + } 151 + if (suof_hdr->fw_type != 0) { 152 + pr_err("QAT: unsupported firmware type\n"); 153 + return -EINVAL; 154 + } 155 + if (suof_hdr->num_chunks <= 0x1) { 156 + pr_err("QAT: SUOF chunk amount is incorrect\n"); 157 + return -EINVAL; 158 + } 159 + if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) { 160 + pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n", 136 161 maj, min); 137 162 return -EINVAL; 138 163 } ··· 300 275 unsigned int i, flag = 0; 301 276 302 277 mem_val_attr = 303 - (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem + 278 + (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem + 304 279 sizeof(struct icp_qat_uof_initmem)); 305 280 306 281 init_header = 
*init_tab_base; ··· 450 425 if (qat_uclo_init_ae_memory(handle, initmem)) 451 426 return -EINVAL; 452 427 } 453 - initmem = (struct icp_qat_uof_initmem *)((unsigned long)( 454 - (unsigned long)initmem + 428 + initmem = (struct icp_qat_uof_initmem *)((uintptr_t)( 429 + (uintptr_t)initmem + 455 430 sizeof(struct icp_qat_uof_initmem)) + 456 431 (sizeof(struct icp_qat_uof_memvar_attr) * 457 432 initmem->val_attr_num)); ··· 479 454 int i; 480 455 struct icp_qat_uof_chunkhdr *chunk_hdr = 481 456 (struct icp_qat_uof_chunkhdr *) 482 - ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr)); 457 + ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr)); 483 458 484 459 for (i = 0; i < obj_hdr->num_chunks; i++) { 485 460 if ((cur < (void *)&chunk_hdr[i]) && ··· 621 596 page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock; 622 597 for (i = 0; i < uword_block_tab->entry_num; i++) 623 598 page->uwblock[i].micro_words = 624 - (unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset; 599 + (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset; 625 600 } 626 601 627 602 static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, ··· 722 697 memcpy(&str_table->table_len, obj_hdr->file_buff + 723 698 chunk_hdr->offset, sizeof(str_table->table_len)); 724 699 hdr_size = (char *)&str_table->strings - (char *)str_table; 725 - str_table->strings = (unsigned long)obj_hdr->file_buff + 700 + str_table->strings = (uintptr_t)obj_hdr->file_buff + 726 701 chunk_hdr->offset + hdr_size; 727 702 return str_table; 728 703 } ··· 746 721 } 747 722 } 748 723 724 + static unsigned int 725 + qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle) 726 + { 727 + switch (handle->pci_dev->device) { 728 + case ADF_DH895XCC_PCI_DEVICE_ID: 729 + return ICP_QAT_AC_895XCC_DEV_TYPE; 730 + case ADF_C62X_PCI_DEVICE_ID: 731 + return ICP_QAT_AC_C62X_DEV_TYPE; 732 + case ADF_C3XXX_PCI_DEVICE_ID: 733 + return ICP_QAT_AC_C3XXX_DEV_TYPE; 734 + default: 735 + pr_err("QAT: 
unsupported device 0x%x\n", 736 + handle->pci_dev->device); 737 + return 0; 738 + } 739 + } 740 + 749 741 static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) 750 742 { 751 743 unsigned int maj_ver, prod_type = obj_handle->prod_type; 752 744 753 - if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) { 754 - pr_err("QAT: UOF type 0x%x not match with cur platform 0x%x\n", 755 - obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type); 745 + if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) { 746 + pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n", 747 + obj_handle->encap_uof_obj.obj_hdr->ac_dev_type, 748 + prod_type); 756 749 return -EINVAL; 757 750 } 758 751 maj_ver = obj_handle->prod_rev & 0xff; ··· 975 932 obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *) 976 933 obj_handle->obj_hdr->file_buff; 977 934 obj_handle->uword_in_bytes = 6; 978 - obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE; 935 + obj_handle->prod_type = qat_uclo_get_dev_type(handle); 979 936 obj_handle->prod_rev = PID_MAJOR_REV | 980 937 (PID_MINOR_REV & handle->hal_handle->revision_id); 981 938 if (qat_uclo_check_uof_compat(obj_handle)) { ··· 1012 969 return -EFAULT; 1013 970 } 1014 971 1015 - void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, 1016 - void *addr_ptr, int mem_size) 972 + static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle, 973 + struct icp_qat_suof_filehdr *suof_ptr, 974 + int suof_size) 1017 975 { 1018 - qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4)); 976 + unsigned int check_sum = 0; 977 + unsigned int min_ver_offset = 0; 978 + struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; 979 + 980 + suof_handle->file_id = ICP_QAT_SUOF_FID; 981 + suof_handle->suof_buf = (char *)suof_ptr; 982 + suof_handle->suof_size = suof_size; 983 + min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr, 984 + min_ver); 985 + check_sum = 
qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver, 986 + min_ver_offset); 987 + if (check_sum != suof_ptr->check_sum) { 988 + pr_err("QAT: incorrect SUOF checksum\n"); 989 + return -EINVAL; 990 + } 991 + suof_handle->check_sum = suof_ptr->check_sum; 992 + suof_handle->min_ver = suof_ptr->min_ver; 993 + suof_handle->maj_ver = suof_ptr->maj_ver; 994 + suof_handle->fw_type = suof_ptr->fw_type; 995 + return 0; 1019 996 } 1020 997 1021 - int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, 1022 - void *addr_ptr, int mem_size) 998 + static void qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle, 999 + struct icp_qat_suof_img_hdr *suof_img_hdr, 1000 + struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) 1001 + { 1002 + struct icp_qat_simg_ae_mode *ae_mode; 1003 + struct icp_qat_suof_objhdr *suof_objhdr; 1004 + 1005 + suof_img_hdr->simg_buf = (suof_handle->suof_buf + 1006 + suof_chunk_hdr->offset + 1007 + sizeof(*suof_objhdr)); 1008 + suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t) 1009 + (suof_handle->suof_buf + 1010 + suof_chunk_hdr->offset))->img_length; 1011 + 1012 + suof_img_hdr->css_header = suof_img_hdr->simg_buf; 1013 + suof_img_hdr->css_key = (suof_img_hdr->css_header + 1014 + sizeof(struct icp_qat_css_hdr)); 1015 + suof_img_hdr->css_signature = suof_img_hdr->css_key + 1016 + ICP_QAT_CSS_FWSK_MODULUS_LEN + 1017 + ICP_QAT_CSS_FWSK_EXPONENT_LEN; 1018 + suof_img_hdr->css_simg = suof_img_hdr->css_signature + 1019 + ICP_QAT_CSS_SIGNATURE_LEN; 1020 + 1021 + ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg); 1022 + suof_img_hdr->ae_mask = ae_mode->ae_mask; 1023 + suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name; 1024 + suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data; 1025 + suof_img_hdr->fw_type = ae_mode->fw_type; 1026 + } 1027 + 1028 + static void 1029 + qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle, 1030 + struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) 1031 + { 
1032 + char **sym_str = (char **)&suof_handle->sym_str; 1033 + unsigned int *sym_size = &suof_handle->sym_size; 1034 + struct icp_qat_suof_strtable *str_table_obj; 1035 + 1036 + *sym_size = *(unsigned int *)(uintptr_t) 1037 + (suof_chunk_hdr->offset + suof_handle->suof_buf); 1038 + *sym_str = (char *)(uintptr_t) 1039 + (suof_handle->suof_buf + suof_chunk_hdr->offset + 1040 + sizeof(str_table_obj->tab_length)); 1041 + } 1042 + 1043 + static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle, 1044 + struct icp_qat_suof_img_hdr *img_hdr) 1045 + { 1046 + struct icp_qat_simg_ae_mode *img_ae_mode = NULL; 1047 + unsigned int prod_rev, maj_ver, prod_type; 1048 + 1049 + prod_type = qat_uclo_get_dev_type(handle); 1050 + img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg; 1051 + prod_rev = PID_MAJOR_REV | 1052 + (PID_MINOR_REV & handle->hal_handle->revision_id); 1053 + if (img_ae_mode->dev_type != prod_type) { 1054 + pr_err("QAT: incompatible product type %x\n", 1055 + img_ae_mode->dev_type); 1056 + return -EINVAL; 1057 + } 1058 + maj_ver = prod_rev & 0xff; 1059 + if ((maj_ver > img_ae_mode->devmax_ver) || 1060 + (maj_ver < img_ae_mode->devmin_ver)) { 1061 + pr_err("QAT: incompatible device majver 0x%x\n", maj_ver); 1062 + return -EINVAL; 1063 + } 1064 + return 0; 1065 + } 1066 + 1067 + static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle) 1068 + { 1069 + struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; 1070 + 1071 + kfree(sobj_handle->img_table.simg_hdr); 1072 + sobj_handle->img_table.simg_hdr = NULL; 1073 + kfree(handle->sobj_handle); 1074 + handle->sobj_handle = NULL; 1075 + } 1076 + 1077 + static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr, 1078 + unsigned int img_id, unsigned int num_simgs) 1079 + { 1080 + struct icp_qat_suof_img_hdr img_header; 1081 + 1082 + if (img_id != num_simgs - 1) { 1083 + memcpy(&img_header, &suof_img_hdr[num_simgs - 1], 1084 + sizeof(*suof_img_hdr)); 1085 + 
memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id], 1086 + sizeof(*suof_img_hdr)); 1087 + memcpy(&suof_img_hdr[img_id], &img_header, 1088 + sizeof(*suof_img_hdr)); 1089 + } 1090 + } 1091 + 1092 + static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, 1093 + struct icp_qat_suof_filehdr *suof_ptr, 1094 + int suof_size) 1095 + { 1096 + struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; 1097 + struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL; 1098 + struct icp_qat_suof_img_hdr *suof_img_hdr = NULL; 1099 + int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE; 1100 + unsigned int i = 0; 1101 + struct icp_qat_suof_img_hdr img_header; 1102 + 1103 + if (!suof_ptr || (suof_size == 0)) { 1104 + pr_err("QAT: input parameter SUOF pointer/size is NULL\n"); 1105 + return -EINVAL; 1106 + } 1107 + if (qat_uclo_check_suof_format(suof_ptr)) 1108 + return -EINVAL; 1109 + ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size); 1110 + if (ret) 1111 + return ret; 1112 + suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *) 1113 + ((uintptr_t)suof_ptr + sizeof(*suof_ptr)); 1114 + 1115 + qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr); 1116 + suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1; 1117 + 1118 + if (suof_handle->img_table.num_simgs != 0) { 1119 + suof_img_hdr = kzalloc(suof_handle->img_table.num_simgs * 1120 + sizeof(img_header), GFP_KERNEL); 1121 + if (!suof_img_hdr) 1122 + return -ENOMEM; 1123 + suof_handle->img_table.simg_hdr = suof_img_hdr; 1124 + } 1125 + 1126 + for (i = 0; i < suof_handle->img_table.num_simgs; i++) { 1127 + qat_uclo_map_simg(handle->sobj_handle, &suof_img_hdr[i], 1128 + &suof_chunk_hdr[1 + i]); 1129 + ret = qat_uclo_check_simg_compat(handle, 1130 + &suof_img_hdr[i]); 1131 + if (ret) 1132 + return ret; 1133 + if ((suof_img_hdr[i].ae_mask & 0x1) != 0) 1134 + ae0_img = i; 1135 + } 1136 + qat_uclo_tail_img(suof_img_hdr, ae0_img, 1137 + suof_handle->img_table.num_simgs); 1138 + return 0; 1139 + } 1140 + 1141 + 
#define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + low) 1142 + #define BITS_IN_DWORD 32 1143 + 1144 + static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, 1145 + struct icp_qat_fw_auth_desc *desc) 1146 + { 1147 + unsigned int fcu_sts, retry = 0; 1148 + u64 bus_addr; 1149 + 1150 + bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low) 1151 + - sizeof(struct icp_qat_auth_chunk); 1152 + SET_CAP_CSR(handle, FCU_DRAM_ADDR_HI, (bus_addr >> BITS_IN_DWORD)); 1153 + SET_CAP_CSR(handle, FCU_DRAM_ADDR_LO, bus_addr); 1154 + SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH); 1155 + 1156 + do { 1157 + msleep(FW_AUTH_WAIT_PERIOD); 1158 + fcu_sts = GET_CAP_CSR(handle, FCU_STATUS); 1159 + if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL) 1160 + goto auth_fail; 1161 + if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1)) 1162 + if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE) 1163 + return 0; 1164 + } while (retry++ < FW_AUTH_MAX_RETRY); 1165 + auth_fail: 1166 + pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n", 1167 + fcu_sts & FCU_AUTH_STS_MASK, retry); 1168 + return -EINVAL; 1169 + } 1170 + 1171 + static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle, 1172 + struct icp_firml_dram_desc *dram_desc, 1173 + unsigned int size) 1174 + { 1175 + void *vptr; 1176 + dma_addr_t ptr; 1177 + 1178 + vptr = dma_alloc_coherent(&handle->pci_dev->dev, 1179 + size, &ptr, GFP_KERNEL); 1180 + if (!vptr) 1181 + return -ENOMEM; 1182 + dram_desc->dram_base_addr_v = vptr; 1183 + dram_desc->dram_bus_addr = ptr; 1184 + dram_desc->dram_size = size; 1185 + return 0; 1186 + } 1187 + 1188 + static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle, 1189 + struct icp_firml_dram_desc *dram_desc) 1190 + { 1191 + dma_free_coherent(&handle->pci_dev->dev, 1192 + (size_t)(dram_desc->dram_size), 1193 + (dram_desc->dram_base_addr_v), 1194 + dram_desc->dram_bus_addr); 1195 + memset(dram_desc, 0, sizeof(*dram_desc)); 1196 + } 1197 + 1198 + 
/*
 * qat_uclo_ummap_auth_fw() - free the DMA buffer backing an auth descriptor.
 * @handle: firmware loader handle
 * @desc: pointer to the descriptor, which lives at the start of the buffer
 *
 * Rebuilds the dram descriptor from the chunk_bus_addr/chunk_size fields
 * that qat_uclo_map_auth_fw() stored in the icp_qat_auth_chunk at the
 * buffer base, then releases the buffer via qat_uclo_simg_free().
 */
static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
				   struct icp_qat_fw_auth_desc **desc)
{
	struct icp_firml_dram_desc dram_desc;

	dram_desc.dram_base_addr_v = *desc;
	dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
				   (*desc))->chunk_bus_addr;
	dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
			       (*desc))->chunk_size;
	qat_uclo_simg_free(handle, &dram_desc);
}

/*
 * qat_uclo_map_auth_fw() - stage a signed image into a DMA buffer for the FCU.
 * @handle: firmware loader handle
 * @image: raw signed image (CSS header, FWSK public key, signature, body)
 * @size: image size in bytes
 * @desc: out-parameter; receives the auth descriptor inside the DMA buffer
 *
 * Allocates one DMA-coherent buffer laid out as:
 *   [auth chunk hdr][CSS hdr][modulus][pad][exponent][signature][image body]
 * copies each piece of @image into place, and fills the descriptor's
 * high/low bus-address fields for every region.
 *
 * Return: 0 on success, -EINVAL on oversized input, -ENOMEM on
 * allocation failure.
 */
static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
				char *image, unsigned int size,
				struct icp_qat_fw_auth_desc **desc)
{
	struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
	struct icp_qat_fw_auth_desc *auth_desc;
	struct icp_qat_auth_chunk *auth_chunk;
	u64 virt_addr, bus_addr, virt_base;
	unsigned int length, simg_offset = sizeof(*auth_chunk);
	struct icp_firml_dram_desc img_desc;

	/* Bound the untrusted size before sizing the DMA buffer. */
	if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
		pr_err("QAT: error, input image size overflow %d\n", size);
		return -EINVAL;
	}
	/* AE firmware uses a fixed staging length; other images need the
	 * input size plus room for the public-key padding. */
	length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
		 ICP_QAT_CSS_AE_SIMG_LEN + simg_offset :
		 size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset;
	if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
		pr_err("QAT: error, allocate continuous dram fail\n");
		return -ENOMEM;
	}

	/* NOTE(review): auth_chunk and auth_desc both point at the buffer
	 * base — this relies on the descriptor being the first member of
	 * struct icp_qat_auth_chunk; confirm against the header. */
	auth_chunk = img_desc.dram_base_addr_v;
	auth_chunk->chunk_size = img_desc.dram_size;
	auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
	virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
	bus_addr = img_desc.dram_bus_addr + simg_offset;
	auth_desc = img_desc.dram_base_addr_v;
	auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->css_hdr_low = (unsigned int)bus_addr;
	virt_addr = virt_base;

	/* CSS header */
	memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
	/* pub key */
	bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
		   sizeof(*css_hdr);
	virt_addr = virt_addr + sizeof(*css_hdr);

	auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->fwsk_pub_low = (unsigned int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr)),
	       ICP_QAT_CSS_FWSK_MODULUS_LEN);
	/* padding */
	memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN),
	       0, ICP_QAT_CSS_FWSK_PAD_LEN);

	/* exponent */
	memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN +
	       ICP_QAT_CSS_FWSK_PAD_LEN),
	       (void *)(image + sizeof(*css_hdr) +
	       ICP_QAT_CSS_FWSK_MODULUS_LEN),
	       sizeof(unsigned int));

	/* signature */
	bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
			    auth_desc->fwsk_pub_low) +
		   ICP_QAT_CSS_FWSK_PUB_LEN;
	virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN;
	auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->signature_low = (unsigned
int)bus_addr;

	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr) +
	       ICP_QAT_CSS_FWSK_MODULUS_LEN +
	       ICP_QAT_CSS_FWSK_EXPONENT_LEN),
	       ICP_QAT_CSS_SIGNATURE_LEN);

	/* image body follows the signature */
	bus_addr = ADD_ADDR(auth_desc->signature_high,
			    auth_desc->signature_low) +
		   ICP_QAT_CSS_SIGNATURE_LEN;
	virt_addr += ICP_QAT_CSS_SIGNATURE_LEN;

	auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->img_low = (unsigned int)bus_addr;
	auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET;
	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + ICP_QAT_AE_IMG_OFFSET),
	       auth_desc->img_len);
	virt_addr = virt_base;
	/* AE firmware: body is AE-mode block, then init sequence, then
	 * microcode instructions; record a bus address for each part. */
	if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
	    CSS_AE_FIRMWARE) {
		auth_desc->img_ae_mode_data_high = auth_desc->img_high;
		auth_desc->img_ae_mode_data_low = auth_desc->img_low;
		bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
				    auth_desc->img_ae_mode_data_low) +
			   sizeof(struct icp_qat_simg_ae_mode);

		auth_desc->img_ae_init_data_high = (unsigned int)
						   (bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
		bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
		auth_desc->img_ae_insts_high = (unsigned int)
					       (bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
	} else {
		/* non-AE image: instructions region is the whole body */
		auth_desc->img_ae_insts_high = auth_desc->img_high;
		auth_desc->img_ae_insts_low = auth_desc->img_low;
	}
	*desc = auth_desc;
	return 0;
}

/*
 * qat_uclo_load_fw() - ask the FCU to load an authenticated image onto
 * every acceleration engine named in the image's AE mask.
 * @handle: firmware loader handle
 * @desc: descriptor of an image that already passed qat_uclo_auth_fw()
 *
 * For each AE in the mask: refuse if the AE is still active, issue the
 * FCU LOAD command for that AE, then poll FCU_STATUS (msleep between
 * reads) until LOAD_DONE with the per-AE "loaded" bit, or timeout.
 *
 * Return: 0 on success, -EINVAL on an active AE or load timeout.
 */
static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	unsigned int i;
	unsigned int fcu_sts;
	struct icp_qat_simg_ae_mode *virt_addr;
	unsigned int fcu_loaded_ae_pos = FCU_LOADED_AE_POS;

	/* The AE-mode block sits after the auth chunk, CSS header, public
	 * key and signature in the staged DMA buffer (see map_auth_fw). */
	virt_addr = (void *)((uintptr_t)desc +
		     sizeof(struct icp_qat_auth_chunk) +
		     sizeof(struct icp_qat_css_hdr) +
		     ICP_QAT_CSS_FWSK_PUB_LEN +
		     ICP_QAT_CSS_SIGNATURE_LEN);
	for (i = 0; i < handle->hal_handle->ae_max_num; i++) {
		int retry = 0;

		if (!((virt_addr->ae_mask >> i) & 0x1))
			continue;
		if (qat_hal_check_ae_active(handle, i)) {
			pr_err("QAT: AE %d is active\n", i);
			return -EINVAL;
		}
		SET_CAP_CSR(handle, FCU_CONTROL,
			    (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS)));

		do {
			msleep(FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
			if (((fcu_sts & FCU_AUTH_STS_MASK) ==
			    FCU_STS_LOAD_DONE) &&
			    ((fcu_sts >> fcu_loaded_ae_pos) & (1 << i)))
				break;
		} while (retry++ < FW_AUTH_MAX_RETRY);
		if (retry > FW_AUTH_MAX_RETRY) {
			pr_err("QAT: firmware load failed timeout %x\n", retry);
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * qat_uclo_map_suof_obj() - allocate the SUOF handle and parse the blob.
 *
 * On parse failure the handle (and any image table installed by
 * qat_uclo_map_suof) is torn down via qat_uclo_del_suof().
 *
 * Return: 0 on success, -ENOMEM or -EINVAL on failure.
 */
static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
				 void *addr_ptr, int mem_size)
{
	struct icp_qat_suof_handle *suof_handle;

	suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
	if (!suof_handle)
		return -ENOMEM;
	handle->sobj_handle = suof_handle;
	if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
		qat_uclo_del_suof(handle);
		pr_err("QAT: map SUOF failed\n");
		return -EINVAL;
	}
	return 0;
}

/*
 * qat_uclo_wr_mimage() - write the MMP image, signed or unsigned path.
 *
 * Authenticating devices stage + authenticate the image through the FCU;
 * others write it straight into SRAM.
 */
int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
		       void *addr_ptr, int mem_size)
{
	struct icp_qat_fw_auth_desc *desc = NULL;
	int status = 0;

	if (handle->fw_auth) {
		/* NOTE(review): if qat_uclo_map_auth_fw() fails, desc stays
		 * NULL yet qat_uclo_ummap_auth_fw() is still called (it
		 * dereferences *desc), and status remains 0 so the failure
		 * is not reported — confirm and fix upstream. */
		if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc))
			status = qat_uclo_auth_fw(handle, desc);
		qat_uclo_ummap_auth_fw(handle, &desc);
	} else {
		if
 (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) {
			/* C3XXX only accepts authenticated MMP images. */
			pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
			return -EINVAL;
		}
		qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
	}
	return status;
}

/*
 * qat_uclo_map_uof_obj() - parse a (non-authenticated) UOF blob.
 * Allocates the object handle, copies/maps the object buffer, validates
 * the file header and maps the object chunk.
 */
static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
				void *addr_ptr, int mem_size)
{
	struct icp_qat_uof_filehdr *filehdr;
	struct icp_qat_uclo_objhandle *objhdl;

	objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
	if (!objhdl)
		return -ENOMEM;
	/* ... lines elided in this view (objhdl setup / obj_buf copy) ... */
	if (!objhdl->obj_buf)
		goto out_objbuf_err;
	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
	if (qat_uclo_check_uof_format(filehdr))
		goto out_objhdr_err;
	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
					     ICP_QAT_UOF_OBJS);
	/* ... lines elided in this view (error-unwind labels) ... */
	return -ENOMEM;
}

/*
 * qat_uclo_map_obj() - public entry: map firmware as SUOF or UOF.
 *
 * Validates the arguments (24 bytes is presumably the minimum sensible
 * header size — confirm), then dispatches on handle->fw_auth.
 */
int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
		     void *addr_ptr, int mem_size)
{
	/* ae_mask must have a bit for every possible AE index. */
	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
		     (sizeof(handle->hal_handle->ae_mask) * 8));

	if (!handle || !addr_ptr || mem_size < 24)
		return -EINVAL;

	return (handle->fw_auth) ?
	       qat_uclo_map_suof_obj(handle, addr_ptr, mem_size) :
	       qat_uclo_map_uof_obj(handle, addr_ptr, mem_size);
}

/* Tear down both the SUOF handle (if any) and the UOF object handle. */
void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int a;

	if (handle->sobj_handle)
		qat_uclo_del_suof(handle);
	if (!obj_handle)
		return;
	/* ... lines elided in this view ... */
			    encap_page->uwblock[i].words_num - 1) {
				raddr -= encap_page->uwblock[i].start_addr;
				raddr *= obj_handle->uword_in_bytes;
				memcpy(&uwrd, (void *)(((uintptr_t)
				       encap_page->uwblock[i].micro_words) + raddr),
				       obj_handle->uword_in_bytes);
				/* mask the microword to its valid bits */
				uwrd = uwrd & 0xbffffffffffull;
	/* ... lines elided in this view ... */
	}
}

/*
 * qat_uclo_wr_suof_img() - authenticate and load every SUOF image.
 *
 * For each entry of the image table: stage it into a DMA buffer, have
 * the FCU authenticate it, load it onto its AEs, then free the buffer.
 *
 * Return: 0 on success, -EINVAL on any failure.
 */
static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int i;
	struct icp_qat_fw_auth_desc *desc = NULL;
	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
	struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;

	for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
		/* NOTE(review): if qat_uclo_map_auth_fw() fails on the
		 * first iteration, desc is still NULL at wr_err and
		 * qat_uclo_ummap_auth_fw() dereferences it — confirm. */
		if (qat_uclo_map_auth_fw(handle,
					 (char *)simg_hdr[i].simg_buf,
					 (unsigned int)
					 (simg_hdr[i].simg_len),
					 &desc))
			goto wr_err;
		if (qat_uclo_auth_fw(handle, desc))
			goto wr_err;
		if (qat_uclo_load_fw(handle, desc))
			goto wr_err;
		qat_uclo_ummap_auth_fw(handle, &desc);
	}
	return 0;
wr_err:
	qat_uclo_ummap_auth_fw(handle, &desc);
	return -EINVAL;
}

/* Write every mapped UOF microcode image to its AEs (legacy, unsigned). */
static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int i;
	/* ... lines elided in this view (per-image write loop) ... */
					  obj_handle->ae_uimage[i].img_ptr);
	}
	return 0;
}

/* Public entry: write all images, dispatching on the authentication mode. */
int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
{
	return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) :
	       qat_uclo_wr_uof_img(handle);
}