Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: octeontx2 - add support for OcteonTX2 98xx CPT block.

OcteonTX2 series of silicons have multiple variants, the
98xx variant has two crypto (CPT0 & CPT1) blocks. This patch
adds support for firmware load on new CPT block(CPT1).

Signed-off-by: Srujana Challa <schalla@marvell.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Srujana Challa and committed by Herbert Xu.
b2d17df3 66810912

+153 -58
+5 -5
drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
··· 121 121 122 122 int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, 123 123 struct pci_dev *pdev); 124 - int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, 125 - struct pci_dev *pdev, u64 reg, u64 *val); 124 + int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, 125 + u64 reg, u64 *val, int blkaddr); 126 126 int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, 127 - u64 reg, u64 val); 127 + u64 reg, u64 val, int blkaddr); 128 128 int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, 129 - u64 reg, u64 *val); 129 + u64 reg, u64 *val, int blkaddr); 130 130 int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, 131 - u64 reg, u64 val); 131 + u64 reg, u64 val, int blkaddr); 132 132 struct otx2_cptlfs_info; 133 133 int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs); 134 134 int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
+8 -6
drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
··· 43 43 } 44 44 45 45 int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, 46 - u64 reg, u64 *val) 46 + u64 reg, u64 *val, int blkaddr) 47 47 { 48 48 struct cpt_rd_wr_reg_msg *reg_msg; 49 49 ··· 62 62 reg_msg->is_write = 0; 63 63 reg_msg->reg_offset = reg; 64 64 reg_msg->ret_val = val; 65 + reg_msg->blkaddr = blkaddr; 65 66 66 67 return 0; 67 68 } 68 69 69 70 int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, 70 - u64 reg, u64 val) 71 + u64 reg, u64 val, int blkaddr) 71 72 { 72 73 struct cpt_rd_wr_reg_msg *reg_msg; 73 74 ··· 87 86 reg_msg->is_write = 1; 88 87 reg_msg->reg_offset = reg; 89 88 reg_msg->val = val; 89 + reg_msg->blkaddr = blkaddr; 90 90 91 91 return 0; 92 92 } 93 93 94 94 int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, 95 - u64 reg, u64 *val) 95 + u64 reg, u64 *val, int blkaddr) 96 96 { 97 97 int ret; 98 98 99 - ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val); 99 + ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val, blkaddr); 100 100 if (ret) 101 101 return ret; 102 102 ··· 105 103 } 106 104 107 105 int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, 108 - u64 reg, u64 val) 106 + u64 reg, u64 val, int blkaddr) 109 107 { 110 108 int ret; 111 109 112 - ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val); 110 + ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val, blkaddr); 113 111 if (ret) 114 112 return ret; 115 113
+4 -4
drivers/crypto/marvell/octeontx2/otx2_cptlf.c
··· 56 56 57 57 ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev, 58 58 CPT_AF_LFX_CTL(lf->slot), 59 - &lf_ctrl.u); 59 + &lf_ctrl.u, lfs->blkaddr); 60 60 if (ret) 61 61 return ret; 62 62 ··· 64 64 65 65 ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev, 66 66 CPT_AF_LFX_CTL(lf->slot), 67 - lf_ctrl.u); 67 + lf_ctrl.u, lfs->blkaddr); 68 68 return ret; 69 69 } 70 70 ··· 77 77 78 78 ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev, 79 79 CPT_AF_LFX_CTL(lf->slot), 80 - &lf_ctrl.u); 80 + &lf_ctrl.u, lfs->blkaddr); 81 81 if (ret) 82 82 return ret; 83 83 ··· 85 85 86 86 ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev, 87 87 CPT_AF_LFX_CTL(lf->slot), 88 - lf_ctrl.u); 88 + lf_ctrl.u, lfs->blkaddr); 89 89 return ret; 90 90 } 91 91
+1
drivers/crypto/marvell/octeontx2/otx2_cptlf.h
··· 95 95 u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */ 96 96 u8 kvf_limits; /* Kernel crypto limits */ 97 97 atomic_t state; /* LF's state. started/reset */ 98 + int blkaddr; /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */ 98 99 }; 99 100 100 101 static inline void otx2_cpt_free_instruction_queues(
+1
drivers/crypto/marvell/octeontx2/otx2_cptpf.h
··· 51 51 u8 max_vfs; /* Maximum number of VFs supported by CPT */ 52 52 u8 enabled_vfs; /* Number of enabled VFs */ 53 53 u8 kvf_limits; /* Kernel crypto limits */ 54 + bool has_cpt1; 54 55 }; 55 56 56 57 irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg);
+29 -4
drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
··· 451 451 return 0; 452 452 } 453 453 454 - static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf) 454 + static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr) 455 455 { 456 456 int timeout = 10, ret; 457 457 u64 reg = 0; 458 458 459 459 ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, 460 - CPT_AF_BLK_RST, 0x1); 460 + CPT_AF_BLK_RST, 0x1, blkaddr); 461 461 if (ret) 462 462 return ret; 463 463 464 464 do { 465 465 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, 466 - CPT_AF_BLK_RST, &reg); 466 + CPT_AF_BLK_RST, &reg, blkaddr); 467 467 if (ret) 468 468 return ret; 469 469 ··· 478 478 return ret; 479 479 } 480 480 481 + static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf) 482 + { 483 + int ret = 0; 484 + 485 + if (cptpf->has_cpt1) { 486 + ret = cptx_device_reset(cptpf, BLKADDR_CPT1); 487 + if (ret) 488 + return ret; 489 + } 490 + return cptx_device_reset(cptpf, BLKADDR_CPT0); 491 + } 492 + 493 + static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf) 494 + { 495 + u64 cfg; 496 + 497 + cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, 498 + RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1)); 499 + if (cfg & BIT_ULL(11)) 500 + cptpf->has_cpt1 = true; 501 + } 502 + 481 503 static int cptpf_device_init(struct otx2_cptpf_dev *cptpf) 482 504 { 483 505 union otx2_cptx_af_constants1 af_cnsts1 = {0}; 484 506 int ret = 0; 485 507 508 + /* check if 'implemented' bit is set for block BLKADDR_CPT1 */ 509 + cptpf_check_block_implemented(cptpf); 486 510 /* Reset the CPT PF device */ 487 511 ret = cptpf_device_reset(cptpf); 488 512 if (ret) ··· 514 490 515 491 /* Get number of SE, IE and AE engines */ 516 492 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, 517 - CPT_AF_CONSTANTS1, &af_cnsts1.u); 493 + CPT_AF_CONSTANTS1, &af_cnsts1.u, 494 + BLKADDR_CPT0); 518 495 if (ret) 519 496 return ret; 520 497
+105 -39
drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
··· 153 153 } 154 154 155 155 static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng, 156 - dma_addr_t dma_addr) 156 + dma_addr_t dma_addr, int blkaddr) 157 157 { 158 158 return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, 159 159 CPT_AF_EXEX_UCODE_BASE(eng), 160 - (u64)dma_addr); 160 + (u64)dma_addr, blkaddr); 161 161 } 162 162 163 - static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj) 163 + static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, 164 + struct otx2_cptpf_dev *cptpf, int blkaddr) 164 165 { 165 - struct otx2_cptpf_dev *cptpf = obj; 166 166 struct otx2_cpt_engs_rsvd *engs; 167 167 dma_addr_t dma_addr; 168 168 int i, bit, ret; ··· 170 170 /* Set PF number for microcode fetches */ 171 171 ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, 172 172 CPT_AF_PF_FUNC, 173 - cptpf->pf_id << RVU_PFVF_PF_SHIFT); 173 + cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr); 174 174 if (ret) 175 175 return ret; 176 176 ··· 187 187 */ 188 188 for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num) 189 189 if (!eng_grp->g->eng_ref_cnt[bit]) { 190 - ret = __write_ucode_base(cptpf, bit, dma_addr); 190 + ret = __write_ucode_base(cptpf, bit, dma_addr, 191 + blkaddr); 191 192 if (ret) 192 193 return ret; 193 194 } ··· 196 195 return 0; 197 196 } 198 197 199 - static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, 200 - void *obj) 198 + static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj) 201 199 { 202 200 struct otx2_cptpf_dev *cptpf = obj; 203 - struct otx2_cpt_bitmap bmap; 201 + int ret; 202 + 203 + if (cptpf->has_cpt1) { 204 + ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1); 205 + if (ret) 206 + return ret; 207 + } 208 + return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0); 209 + } 210 + 211 + static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, 212 + struct otx2_cptpf_dev *cptpf, 213 + struct otx2_cpt_bitmap bmap, 214 
+ int blkaddr) 215 + { 204 216 int i, timeout = 10; 205 217 int busy, ret; 206 218 u64 reg = 0; 207 219 208 - bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp); 209 - if (!bmap.size) 210 - return -EINVAL; 211 - 212 220 /* Detach the cores from group */ 213 221 for_each_set_bit(i, bmap.bits, bmap.size) { 214 222 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, 215 - CPT_AF_EXEX_CTL2(i), &reg); 223 + CPT_AF_EXEX_CTL2(i), &reg, blkaddr); 216 224 if (ret) 217 225 return ret; 218 226 ··· 231 221 232 222 ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, 233 223 cptpf->pdev, 234 - CPT_AF_EXEX_CTL2(i), reg); 224 + CPT_AF_EXEX_CTL2(i), reg, 225 + blkaddr); 235 226 if (ret) 236 227 return ret; 237 228 } ··· 248 237 for_each_set_bit(i, bmap.bits, bmap.size) { 249 238 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, 250 239 cptpf->pdev, 251 - CPT_AF_EXEX_STS(i), &reg); 240 + CPT_AF_EXEX_STS(i), &reg, 241 + blkaddr); 252 242 if (ret) 253 243 return ret; 254 244 ··· 265 253 if (!eng_grp->g->eng_ref_cnt[i]) { 266 254 ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, 267 255 cptpf->pdev, 268 - CPT_AF_EXEX_CTL(i), 0x0); 256 + CPT_AF_EXEX_CTL(i), 0x0, 257 + blkaddr); 269 258 if (ret) 270 259 return ret; 271 260 } ··· 275 262 return 0; 276 263 } 277 264 278 - static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp, 279 - void *obj) 265 + static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, 266 + void *obj) 280 267 { 281 268 struct otx2_cptpf_dev *cptpf = obj; 282 269 struct otx2_cpt_bitmap bmap; 283 - u64 reg = 0; 284 - int i, ret; 270 + int ret; 285 271 286 272 bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp); 287 273 if (!bmap.size) 288 274 return -EINVAL; 289 275 276 + if (cptpf->has_cpt1) { 277 + ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap, 278 + BLKADDR_CPT1); 279 + if (ret) 280 + return ret; 281 + } 282 + return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap, 283 + BLKADDR_CPT0); 284 + } 285 + 286 + static int 
cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp, 287 + struct otx2_cptpf_dev *cptpf, 288 + struct otx2_cpt_bitmap bmap, 289 + int blkaddr) 290 + { 291 + u64 reg = 0; 292 + int i, ret; 293 + 290 294 /* Attach the cores to the group */ 291 295 for_each_set_bit(i, bmap.bits, bmap.size) { 292 296 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, 293 - CPT_AF_EXEX_CTL2(i), &reg); 297 + CPT_AF_EXEX_CTL2(i), &reg, blkaddr); 294 298 if (ret) 295 299 return ret; 296 300 ··· 317 287 318 288 ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, 319 289 cptpf->pdev, 320 - CPT_AF_EXEX_CTL2(i), reg); 290 + CPT_AF_EXEX_CTL2(i), reg, 291 + blkaddr); 321 292 if (ret) 322 293 return ret; 323 294 } ··· 326 295 327 296 /* Enable the cores */ 328 297 for_each_set_bit(i, bmap.bits, bmap.size) { 329 - ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, 330 - cptpf->pdev, 331 - CPT_AF_EXEX_CTL(i), 0x1); 298 + ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, 299 + CPT_AF_EXEX_CTL(i), 0x1, 300 + blkaddr); 332 301 if (ret) 333 302 return ret; 334 303 } 335 - ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev); 304 + return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev); 305 + } 336 306 337 - return ret; 307 + static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp, 308 + void *obj) 309 + { 310 + struct otx2_cptpf_dev *cptpf = obj; 311 + struct otx2_cpt_bitmap bmap; 312 + int ret; 313 + 314 + bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp); 315 + if (!bmap.size) 316 + return -EINVAL; 317 + 318 + if (cptpf->has_cpt1) { 319 + ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, 320 + BLKADDR_CPT1); 321 + if (ret) 322 + return ret; 323 + } 324 + return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0); 338 325 } 339 326 340 327 static int load_fw(struct device *dev, struct fw_info_t *fw_info, ··· 1189 1140 return ret; 1190 1141 } 1191 1142 1192 - int otx2_cpt_disable_all_cores(struct 
otx2_cptpf_dev *cptpf) 1143 + static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores, 1144 + int blkaddr) 1193 1145 { 1194 - int i, ret, busy, total_cores; 1195 - int timeout = 10; 1196 - u64 reg = 0; 1197 - 1198 - total_cores = cptpf->eng_grps.avail.max_se_cnt + 1199 - cptpf->eng_grps.avail.max_ie_cnt + 1200 - cptpf->eng_grps.avail.max_ae_cnt; 1146 + int timeout = 10, ret; 1147 + int i, busy; 1148 + u64 reg; 1201 1149 1202 1150 /* Disengage the cores from groups */ 1203 1151 for (i = 0; i < total_cores; i++) { 1204 1152 ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, 1205 - CPT_AF_EXEX_CTL2(i), 0x0); 1153 + CPT_AF_EXEX_CTL2(i), 0x0, 1154 + blkaddr); 1206 1155 if (ret) 1207 1156 return ret; 1208 1157 ··· 1220 1173 for (i = 0; i < total_cores; i++) { 1221 1174 ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, 1222 1175 cptpf->pdev, 1223 - CPT_AF_EXEX_STS(i), &reg); 1176 + CPT_AF_EXEX_STS(i), &reg, 1177 + blkaddr); 1224 1178 if (ret) 1225 1179 return ret; 1226 1180 ··· 1235 1187 /* Disable the cores */ 1236 1188 for (i = 0; i < total_cores; i++) { 1237 1189 ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, 1238 - CPT_AF_EXEX_CTL(i), 0x0); 1190 + CPT_AF_EXEX_CTL(i), 0x0, 1191 + blkaddr); 1239 1192 if (ret) 1240 1193 return ret; 1241 1194 } 1242 1195 return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev); 1196 + } 1197 + 1198 + int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf) 1199 + { 1200 + int total_cores, ret; 1201 + 1202 + total_cores = cptpf->eng_grps.avail.max_se_cnt + 1203 + cptpf->eng_grps.avail.max_ie_cnt + 1204 + cptpf->eng_grps.avail.max_ae_cnt; 1205 + 1206 + if (cptpf->has_cpt1) { 1207 + ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1); 1208 + if (ret) 1209 + return ret; 1210 + } 1211 + return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0); 1243 1212 } 1244 1213 1245 1214 void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev, ··· 1419 1354 lfs->pdev = pdev; 
1420 1355 lfs->reg_base = cptpf->reg_base; 1421 1356 lfs->mbox = &cptpf->afpf_mbox; 1357 + lfs->blkaddr = BLKADDR_CPT0; 1422 1358 ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK, 1423 1359 OTX2_CPT_QUEUE_HI_PRIO, 1); 1424 1360 if (ret)