Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: hisilicon - support querying the capability register

Query the capability register status of accelerator devices
(SEC, HPRE and ZIP) through the debugfs interface, for example:
cat cap_regs. The purpose is to improve the robustness of
hardware devices and drivers and to facilitate fault locating.

Signed-off-by: Qi Tao <taoqi10@huawei.com>
Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Qi Tao and committed by
Herbert Xu
2a69297e acb0ed84

+396 -111
+7
Documentation/ABI/testing/debugfs-hisi-hpre
··· 184 184 Contact: linux-crypto@vger.kernel.org 185 185 Description: Dump the total number of time out requests. 186 186 Available for both PF and VF, and take no other effect on HPRE. 187 + 188 + What: /sys/kernel/debug/hisi_hpre/<bdf>/cap_regs 189 + Date: Oct 2024 190 + Contact: linux-crypto@vger.kernel.org 191 + Description: Dump the values of the qm and hpre capability bit registers and 192 + support the query of device specifications to facilitate fault locating. 193 + Available for both PF and VF, and take no other effect on HPRE.
+7
Documentation/ABI/testing/debugfs-hisi-sec
··· 157 157 Description: Dump the total number of completed but marked error requests 158 158 to be received. 159 159 Available for both PF and VF, and take no other effect on SEC. 160 + 161 + What: /sys/kernel/debug/hisi_sec2/<bdf>/cap_regs 162 + Date: Oct 2024 163 + Contact: linux-crypto@vger.kernel.org 164 + Description: Dump the values of the qm and sec capability bit registers and 165 + support the query of device specifications to facilitate fault locating. 166 + Available for both PF and VF, and take no other effect on SEC.
+7
Documentation/ABI/testing/debugfs-hisi-zip
··· 158 158 Description: Dump the total number of BD type error requests 159 159 to be received. 160 160 Available for both PF and VF, and take no other effect on ZIP. 161 + 162 + What: /sys/kernel/debug/hisi_zip/<bdf>/cap_regs 163 + Date: Oct 2024 164 + Contact: linux-crypto@vger.kernel.org 165 + Description: Dump the values of the qm and zip capability bit registers and 166 + support the query of device specifications to facilitate fault locating. 167 + Available for both PF and VF, and take no other effect on ZIP.
+23
drivers/crypto/hisilicon/hpre/hpre.h
··· 100 100 __le32 rsvd1[_HPRE_SQE_ALIGN_EXT]; 101 101 }; 102 102 103 + enum hpre_cap_table_type { 104 + QM_RAS_NFE_TYPE = 0x0, 105 + QM_RAS_NFE_RESET, 106 + QM_RAS_CE_TYPE, 107 + HPRE_RAS_NFE_TYPE, 108 + HPRE_RAS_NFE_RESET, 109 + HPRE_RAS_CE_TYPE, 110 + HPRE_CORE_INFO, 111 + HPRE_CORE_EN, 112 + HPRE_DRV_ALG_BITMAP, 113 + HPRE_ALG_BITMAP, 114 + HPRE_CORE1_BITMAP_CAP, 115 + HPRE_CORE2_BITMAP_CAP, 116 + HPRE_CORE3_BITMAP_CAP, 117 + HPRE_CORE4_BITMAP_CAP, 118 + HPRE_CORE5_BITMAP_CAP, 119 + HPRE_CORE6_BITMAP_CAP, 120 + HPRE_CORE7_BITMAP_CAP, 121 + HPRE_CORE8_BITMAP_CAP, 122 + HPRE_CORE9_BITMAP_CAP, 123 + HPRE_CORE10_BITMAP_CAP, 124 + }; 125 + 103 126 struct hisi_qp *hpre_create_qp(u8 type); 104 127 int hpre_algs_register(struct hisi_qm *qm); 105 128 void hpre_algs_unregister(struct hisi_qm *qm);
+91 -30
drivers/crypto/hisilicon/hpre/hpre_main.c
··· 13 13 #include <linux/uacce.h> 14 14 #include "hpre.h" 15 15 16 + #define CAP_FILE_PERMISSION 0444 16 17 #define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0) 17 18 #define HPRE_CTRL_CNT_CLR_CE 0x301000 18 19 #define HPRE_FSM_MAX_CNT 0x301008 ··· 204 203 {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E}, 205 204 {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E}, 206 205 {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1}, 207 - {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1}, 206 + {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1}, 208 207 {HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2}, 209 208 {HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA}, 210 209 {HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA}, ··· 223 222 {HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10} 224 223 }; 225 224 226 - enum hpre_pre_store_cap_idx { 227 - HPRE_CLUSTER_NUM_CAP_IDX = 0x0, 228 - HPRE_CORE_ENABLE_BITMAP_CAP_IDX, 229 - HPRE_DRV_ALG_BITMAP_CAP_IDX, 230 - HPRE_DEV_ALG_BITMAP_CAP_IDX, 231 - }; 232 - 233 - static const u32 hpre_pre_store_caps[] = { 234 - HPRE_CLUSTER_NUM_CAP, 235 - HPRE_CORE_ENABLE_BITMAP_CAP, 236 - HPRE_DRV_ALG_BITMAP_CAP, 237 - HPRE_DEV_ALG_BITMAP_CAP, 225 + static const struct hisi_qm_cap_query_info hpre_cap_query_info[] = { 226 + {QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C37, 0x7C37}, 227 + {QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC77, 0x6C77}, 228 + {QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8}, 229 + {HPRE_RAS_NFE_TYPE, "HPRE_RAS_NFE_TYPE ", 0x3130, 0x0, 0x3FFFFE, 0x1FFFC3E}, 230 + {HPRE_RAS_NFE_RESET, "HPRE_RAS_NFE_RESET ", 0x3134, 0x0, 0x3FFFFE, 0xBFFC3E}, 231 + {HPRE_RAS_CE_TYPE, "HPRE_RAS_CE_TYPE ", 0x3138, 0x0, 0x1, 0x1}, 232 + {HPRE_CORE_INFO, "HPRE_CORE_INFO ", 0x313c, 0x0, 0x420802, 0x120A0A}, 233 + {HPRE_CORE_EN, "HPRE_CORE_EN ", 0x3140, 0x0, 0xF, 0x3FF}, 234 + 
{HPRE_DRV_ALG_BITMAP, "HPRE_DRV_ALG_BITMAP ", 0x3144, 0x0, 0x03, 0x27}, 235 + {HPRE_ALG_BITMAP, "HPRE_ALG_BITMAP ", 0x3148, 0x0, 0x03, 0x7F}, 236 + {HPRE_CORE1_BITMAP_CAP, "HPRE_CORE1_BITMAP_CAP ", 0x314c, 0x0, 0x7F, 0x7F}, 237 + {HPRE_CORE2_BITMAP_CAP, "HPRE_CORE2_BITMAP_CAP ", 0x3150, 0x0, 0x7F, 0x7F}, 238 + {HPRE_CORE3_BITMAP_CAP, "HPRE_CORE3_BITMAP_CAP ", 0x3154, 0x0, 0x7F, 0x7F}, 239 + {HPRE_CORE4_BITMAP_CAP, "HPRE_CORE4_BITMAP_CAP ", 0x3158, 0x0, 0x7F, 0x7F}, 240 + {HPRE_CORE5_BITMAP_CAP, "HPRE_CORE5_BITMAP_CAP ", 0x315c, 0x0, 0x7F, 0x7F}, 241 + {HPRE_CORE6_BITMAP_CAP, "HPRE_CORE6_BITMAP_CAP ", 0x3160, 0x0, 0x7F, 0x7F}, 242 + {HPRE_CORE7_BITMAP_CAP, "HPRE_CORE7_BITMAP_CAP ", 0x3164, 0x0, 0x7F, 0x7F}, 243 + {HPRE_CORE8_BITMAP_CAP, "HPRE_CORE8_BITMAP_CAP ", 0x3168, 0x0, 0x7F, 0x7F}, 244 + {HPRE_CORE9_BITMAP_CAP, "HPRE_CORE9_BITMAP_CAP ", 0x316c, 0x0, 0x10, 0x10}, 245 + {HPRE_CORE10_BITMAP_CAP, "HPRE_CORE10_BITMAP_CAP ", 0x3170, 0x0, 0x10, 0x10}, 238 246 }; 239 247 240 248 static const struct hpre_hw_error hpre_hw_errors[] = { ··· 370 360 { 371 361 u32 cap_val; 372 362 373 - cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val; 363 + cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP].cap_val; 374 364 if (alg & cap_val) 375 365 return true; 376 366 ··· 513 503 static int hpre_set_cluster(struct hisi_qm *qm) 514 504 { 515 505 struct device *dev = &qm->pdev->dev; 516 - unsigned long offset; 517 506 u32 cluster_core_mask; 507 + unsigned long offset; 508 + u32 hpre_core_info; 518 509 u8 clusters_num; 519 510 u32 val = 0; 520 511 int ret, i; 521 512 522 - cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val; 523 - clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; 513 + cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_EN].cap_val; 514 + hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val; 515 + clusters_num = (hpre_core_info >> 
hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) & 516 + hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask; 524 517 for (i = 0; i < clusters_num; i++) { 525 518 offset = i * HPRE_CLSTR_ADDR_INTRVL; 526 519 ··· 608 595 { 609 596 unsigned long offset; 610 597 u8 clusters_num, i; 598 + u32 hpre_core_info; 611 599 u32 val; 612 600 613 601 if (qm->ver < QM_HW_V3) ··· 622 608 val |= HPRE_PEH_CFG_AUTO_GATE_EN; 623 609 writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE); 624 610 625 - clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; 611 + hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val; 612 + clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) & 613 + hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask; 626 614 for (i = 0; i < clusters_num; i++) { 627 615 offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL; 628 616 val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL); ··· 641 625 { 642 626 unsigned long offset; 643 627 u8 clusters_num, i; 628 + u32 hpre_core_info; 644 629 u32 val; 645 630 646 631 if (qm->ver < QM_HW_V3) ··· 655 638 val &= ~HPRE_PEH_CFG_AUTO_GATE_EN; 656 639 writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE); 657 640 658 - clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; 641 + hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val; 642 + clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) & 643 + hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask; 659 644 for (i = 0; i < clusters_num; i++) { 660 645 offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL; 661 646 val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL); ··· 730 711 static void hpre_cnt_regs_clear(struct hisi_qm *qm) 731 712 { 732 713 unsigned long offset; 714 + u32 hpre_core_info; 733 715 u8 clusters_num; 734 716 int i; 735 717 736 718 /* clear clusterX/cluster_ctrl */ 737 - clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; 719 + hpre_core_info = 
qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val; 720 + clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) & 721 + hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask; 738 722 for (i = 0; i < clusters_num; i++) { 739 723 offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL; 740 724 writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY); ··· 1029 1007 char buf[HPRE_DBGFS_VAL_MAX_LEN]; 1030 1008 struct debugfs_regset32 *regset; 1031 1009 struct dentry *tmp_d; 1010 + u32 hpre_core_info; 1032 1011 u8 clusters_num; 1033 1012 int i, ret; 1034 1013 1035 - clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; 1014 + hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val; 1015 + clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) & 1016 + hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask; 1036 1017 for (i = 0; i < clusters_num; i++) { 1037 1018 ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i); 1038 1019 if (ret >= HPRE_DBGFS_VAL_MAX_LEN) ··· 1078 1053 return hpre_cluster_debugfs_init(qm); 1079 1054 } 1080 1055 1056 + static int hpre_cap_regs_show(struct seq_file *s, void *unused) 1057 + { 1058 + struct hisi_qm *qm = s->private; 1059 + u32 i, size; 1060 + 1061 + size = qm->cap_tables.qm_cap_size; 1062 + for (i = 0; i < size; i++) 1063 + seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name, 1064 + qm->cap_tables.qm_cap_table[i].cap_val); 1065 + 1066 + size = qm->cap_tables.dev_cap_size; 1067 + for (i = 0; i < size; i++) 1068 + seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name, 1069 + qm->cap_tables.dev_cap_table[i].cap_val); 1070 + 1071 + return 0; 1072 + } 1073 + 1074 + DEFINE_SHOW_ATTRIBUTE(hpre_cap_regs); 1075 + 1081 1076 static void hpre_dfx_debug_init(struct hisi_qm *qm) 1082 1077 { 1083 1078 struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs; ··· 1116 1071 if (qm->fun_type == QM_HW_PF && hpre_regs) 1117 1072 
debugfs_create_file("diff_regs", 0444, parent, 1118 1073 qm, &hpre_diff_regs_fops); 1074 + 1075 + debugfs_create_file("cap_regs", CAP_FILE_PERMISSION, 1076 + qm->debug.debug_root, qm, &hpre_cap_regs_fops); 1119 1077 } 1120 1078 1121 1079 static int hpre_debugfs_init(struct hisi_qm *qm) ··· 1166 1118 { 1167 1119 struct hisi_qm_cap_record *hpre_cap; 1168 1120 struct device *dev = &qm->pdev->dev; 1121 + u32 hpre_core_info; 1122 + u8 clusters_num; 1169 1123 size_t i, size; 1170 1124 1171 - size = ARRAY_SIZE(hpre_pre_store_caps); 1125 + size = ARRAY_SIZE(hpre_cap_query_info); 1172 1126 hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL); 1173 1127 if (!hpre_cap) 1174 1128 return -ENOMEM; 1175 1129 1176 1130 for (i = 0; i < size; i++) { 1177 - hpre_cap[i].type = hpre_pre_store_caps[i]; 1178 - hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, 1179 - hpre_pre_store_caps[i], qm->cap_ver); 1131 + hpre_cap[i].type = hpre_cap_query_info[i].type; 1132 + hpre_cap[i].name = hpre_cap_query_info[i].name; 1133 + hpre_cap[i].cap_val = hisi_qm_get_cap_value(qm, hpre_cap_query_info, 1134 + i, qm->cap_ver); 1180 1135 } 1181 1136 1182 - if (hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val > HPRE_CLUSTERS_NUM_MAX) { 1137 + hpre_core_info = hpre_cap[HPRE_CORE_INFO].cap_val; 1138 + clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) & 1139 + hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask; 1140 + if (clusters_num > HPRE_CLUSTERS_NUM_MAX) { 1183 1141 dev_err(dev, "Device cluster num %u is out of range for driver supports %d!\n", 1184 - hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val, HPRE_CLUSTERS_NUM_MAX); 1142 + clusters_num, HPRE_CLUSTERS_NUM_MAX); 1185 1143 return -EINVAL; 1186 1144 } 1187 1145 1188 1146 qm->cap_tables.dev_cap_table = hpre_cap; 1147 + qm->cap_tables.dev_cap_size = size; 1189 1148 1190 1149 return 0; 1191 1150 } ··· 1239 1184 return ret; 1240 1185 } 1241 1186 1242 - alg_msk = 
qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val; 1187 + alg_msk = qm->cap_tables.dev_cap_table[HPRE_ALG_BITMAP].cap_val; 1243 1188 ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs)); 1244 1189 if (ret) { 1245 1190 pci_err(pdev, "Failed to set hpre algs!\n"); ··· 1255 1200 int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs); 1256 1201 struct qm_debug *debug = &qm->debug; 1257 1202 void __iomem *io_base; 1203 + u32 hpre_core_info; 1258 1204 u8 clusters_num; 1259 1205 int i, j, idx; 1260 1206 1261 - clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; 1207 + hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val; 1208 + clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) & 1209 + hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask; 1262 1210 debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num + 1263 1211 com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL); 1264 1212 if (!debug->last_words) ··· 1301 1243 struct qm_debug *debug = &qm->debug; 1302 1244 struct pci_dev *pdev = qm->pdev; 1303 1245 void __iomem *io_base; 1246 + u32 hpre_core_info; 1304 1247 u8 clusters_num; 1305 1248 int i, j, idx; 1306 1249 u32 val; ··· 1317 1258 hpre_com_dfx_regs[i].name, debug->last_words[i], val); 1318 1259 } 1319 1260 1320 - clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; 1261 + hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val; 1262 + clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) & 1263 + hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask; 1321 1264 for (i = 0; i < clusters_num; i++) { 1322 1265 io_base = qm->io_base + hpre_cluster_offsets[i]; 1323 1266 for (j = 0; j < cluster_dfx_regs_num; j++) {
+61 -27
drivers/crypto/hisilicon/qm.c
··· 307 307 QM_VF_IRQ_NUM_CAP, 308 308 }; 309 309 310 - enum qm_pre_store_cap_idx { 311 - QM_EQ_IRQ_TYPE_CAP_IDX = 0x0, 312 - QM_AEQ_IRQ_TYPE_CAP_IDX, 313 - QM_ABN_IRQ_TYPE_CAP_IDX, 314 - QM_PF2VF_IRQ_TYPE_CAP_IDX, 310 + enum qm_cap_table_type { 311 + QM_CAP_VF = 0x0, 312 + QM_AEQE_NUM, 313 + QM_SCQE_NUM, 314 + QM_EQ_IRQ, 315 + QM_AEQ_IRQ, 316 + QM_ABNORMAL_IRQ, 317 + QM_MB_IRQ, 318 + MAX_IRQ_NUM, 319 + EXT_BAR_INDEX, 320 + }; 321 + 322 + static const struct hisi_qm_cap_query_info qm_cap_query_info[] = { 323 + {QM_CAP_VF, "QM_CAP_VF ", 0x3100, 0x0, 0x0, 0x6F01}, 324 + {QM_AEQE_NUM, "QM_AEQE_NUM ", 0x3104, 0x800, 0x4000800, 0x4000800}, 325 + {QM_SCQE_NUM, "QM_SCQE_NUM ", 326 + 0x3108, 0x4000400, 0x4000400, 0x4000400}, 327 + {QM_EQ_IRQ, "QM_EQ_IRQ ", 0x310c, 0x10000, 0x10000, 0x10000}, 328 + {QM_AEQ_IRQ, "QM_AEQ_IRQ ", 0x3110, 0x0, 0x10001, 0x10001}, 329 + {QM_ABNORMAL_IRQ, "QM_ABNORMAL_IRQ ", 0x3114, 0x0, 0x10003, 0x10003}, 330 + {QM_MB_IRQ, "QM_MB_IRQ ", 0x3118, 0x0, 0x0, 0x10002}, 331 + {MAX_IRQ_NUM, "MAX_IRQ_NUM ", 0x311c, 0x10001, 0x40002, 0x40003}, 332 + {EXT_BAR_INDEX, "EXT_BAR_INDEX ", 0x3120, 0x0, 0x0, 0x14}, 315 333 }; 316 334 317 335 static const struct hisi_qm_cap_info qm_cap_info_comm[] = { ··· 360 342 {QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002}, 361 343 {QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4}, 362 344 {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3}, 363 - }; 364 - 365 - static const u32 qm_pre_store_caps[] = { 366 - QM_EQ_IRQ_TYPE_CAP, 367 - QM_AEQ_IRQ_TYPE_CAP, 368 - QM_ABN_IRQ_TYPE_CAP, 369 - QM_PF2VF_IRQ_TYPE_CAP, 370 345 }; 371 346 372 347 struct qm_mailbox { ··· 804 793 } 805 794 } 806 795 EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info); 796 + 797 + u32 hisi_qm_get_cap_value(struct hisi_qm *qm, 798 + const struct hisi_qm_cap_query_info *info_table, 799 + u32 index, bool is_read) 800 + { 801 + u32 val; 802 + 803 + switch (qm->ver) { 804 + case QM_HW_V1: 805 + return info_table[index].v1_val; 806 
+ case QM_HW_V2: 807 + return info_table[index].v2_val; 808 + default: 809 + if (!is_read) 810 + return info_table[index].v3_val; 811 + 812 + val = readl(qm->io_base + info_table[index].offset); 813 + return val; 814 + } 815 + } 816 + EXPORT_SYMBOL_GPL(hisi_qm_get_cap_value); 807 817 808 818 static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits, 809 819 u16 *high_bits, enum qm_basic_type type) ··· 4929 4897 if (qm->fun_type == QM_HW_VF) 4930 4898 return; 4931 4899 4932 - val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val; 4900 + val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val; 4933 4901 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) 4934 4902 return; 4935 4903 ··· 4946 4914 if (qm->fun_type == QM_HW_VF) 4947 4915 return 0; 4948 4916 4949 - val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val; 4917 + val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val; 4950 4918 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) 4951 4919 return 0; 4952 4920 ··· 4963 4931 struct pci_dev *pdev = qm->pdev; 4964 4932 u32 irq_vector, val; 4965 4933 4966 - val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val; 4934 + val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val; 4967 4935 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 4968 4936 return; 4969 4937 ··· 4977 4945 u32 irq_vector, val; 4978 4946 int ret; 4979 4947 4980 - val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val; 4948 + val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val; 4981 4949 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 4982 4950 return 0; 4983 4951 ··· 4994 4962 struct pci_dev *pdev = qm->pdev; 4995 4963 u32 irq_vector, val; 4996 4964 4997 - val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val; 4965 + val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val; 4998 4966 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 4999 4967 return; 5000 4968 ··· 5008 4976 u32 irq_vector, val; 
5009 4977 int ret; 5010 4978 5011 - val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val; 4979 + val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val; 5012 4980 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 5013 4981 return 0; 5014 4982 ··· 5026 4994 struct pci_dev *pdev = qm->pdev; 5027 4995 u32 irq_vector, val; 5028 4996 5029 - val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val; 4997 + val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val; 5030 4998 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 5031 4999 return; 5032 5000 ··· 5040 5008 u32 irq_vector, val; 5041 5009 int ret; 5042 5010 5043 - val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val; 5011 + val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val; 5044 5012 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 5045 5013 return 0; 5046 5014 ··· 5128 5096 return 0; 5129 5097 } 5130 5098 5131 - static int qm_pre_store_irq_type_caps(struct hisi_qm *qm) 5099 + static int qm_pre_store_caps(struct hisi_qm *qm) 5132 5100 { 5133 5101 struct hisi_qm_cap_record *qm_cap; 5134 5102 struct pci_dev *pdev = qm->pdev; 5135 5103 size_t i, size; 5136 5104 5137 - size = ARRAY_SIZE(qm_pre_store_caps); 5105 + size = ARRAY_SIZE(qm_cap_query_info); 5138 5106 qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL); 5139 5107 if (!qm_cap) 5140 5108 return -ENOMEM; 5141 5109 5142 5110 for (i = 0; i < size; i++) { 5143 - qm_cap[i].type = qm_pre_store_caps[i]; 5144 - qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info, 5145 - qm_pre_store_caps[i], qm->cap_ver); 5111 + qm_cap[i].type = qm_cap_query_info[i].type; 5112 + qm_cap[i].name = qm_cap_query_info[i].name; 5113 + qm_cap[i].cap_val = hisi_qm_get_cap_value(qm, qm_cap_query_info, 5114 + i, qm->cap_ver); 5146 5115 } 5147 5116 5148 5117 qm->cap_tables.qm_cap_table = qm_cap; 5118 + qm->cap_tables.qm_cap_size = size; 5149 5119 5150 5120 return 0; 5151 5121 } ··· 5184 5150 set_bit(cap_info[i].type, &qm->caps); 
5185 5151 } 5186 5152 5187 - /* Fetch and save the value of irq type related capability registers */ 5188 - return qm_pre_store_irq_type_caps(qm); 5153 + /* Fetch and save the value of qm capability registers */ 5154 + return qm_pre_store_caps(qm); 5189 5155 } 5190 5156 5191 5157 static int qm_get_pci_res(struct hisi_qm *qm)
+21 -5
drivers/crypto/hisilicon/sec2/sec.h
··· 220 220 SEC_CORE4_ALG_BITMAP_HIGH, 221 221 }; 222 222 223 - enum sec_cap_reg_record_idx { 224 - SEC_DRV_ALG_BITMAP_LOW_IDX = 0x0, 225 - SEC_DRV_ALG_BITMAP_HIGH_IDX, 226 - SEC_DEV_ALG_BITMAP_LOW_IDX, 227 - SEC_DEV_ALG_BITMAP_HIGH_IDX, 223 + enum sec_cap_table_type { 224 + QM_RAS_NFE_TYPE = 0x0, 225 + QM_RAS_NFE_RESET, 226 + QM_RAS_CE_TYPE, 227 + SEC_RAS_NFE_TYPE, 228 + SEC_RAS_NFE_RESET, 229 + SEC_RAS_CE_TYPE, 230 + SEC_CORE_INFO, 231 + SEC_CORE_EN, 232 + SEC_DRV_ALG_BITMAP_LOW_TB, 233 + SEC_DRV_ALG_BITMAP_HIGH_TB, 234 + SEC_ALG_BITMAP_LOW, 235 + SEC_ALG_BITMAP_HIGH, 236 + SEC_CORE1_BITMAP_LOW, 237 + SEC_CORE1_BITMAP_HIGH, 238 + SEC_CORE2_BITMAP_LOW, 239 + SEC_CORE2_BITMAP_HIGH, 240 + SEC_CORE3_BITMAP_LOW, 241 + SEC_CORE3_BITMAP_HIGH, 242 + SEC_CORE4_BITMAP_LOW, 243 + SEC_CORE4_BITMAP_HIGH, 228 244 }; 229 245 230 246 void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
+4 -4
drivers/crypto/hisilicon/sec2/sec_crypto.c
··· 2520 2520 u64 alg_mask; 2521 2521 int ret = 0; 2522 2522 2523 - alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX, 2524 - SEC_DRV_ALG_BITMAP_LOW_IDX); 2523 + alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB, 2524 + SEC_DRV_ALG_BITMAP_LOW_TB); 2525 2525 2526 2526 mutex_lock(&sec_algs_lock); 2527 2527 if (sec_available_devs) { ··· 2553 2553 { 2554 2554 u64 alg_mask; 2555 2555 2556 - alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX, 2557 - SEC_DRV_ALG_BITMAP_LOW_IDX); 2556 + alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_TB, 2557 + SEC_DRV_ALG_BITMAP_LOW_TB); 2558 2558 2559 2559 mutex_lock(&sec_algs_lock); 2560 2560 if (--sec_available_devs)
+59 -12
drivers/crypto/hisilicon/sec2/sec_main.c
··· 14 14 #include <linux/seq_file.h> 15 15 #include <linux/topology.h> 16 16 #include <linux/uacce.h> 17 - 18 17 #include "sec.h" 19 18 19 + #define CAP_FILE_PERMISSION 0444 20 20 #define SEC_VF_NUM 63 21 21 #define SEC_QUEUE_NUM_V1 4096 22 22 #define PCI_DEVICE_ID_HUAWEI_SEC_PF 0xa255 ··· 167 167 {SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, 168 168 }; 169 169 170 - static const u32 sec_pre_store_caps[] = { 171 - SEC_DRV_ALG_BITMAP_LOW, 172 - SEC_DRV_ALG_BITMAP_HIGH, 173 - SEC_DEV_ALG_BITMAP_LOW, 174 - SEC_DEV_ALG_BITMAP_HIGH, 170 + static const struct hisi_qm_cap_query_info sec_cap_query_info[] = { 171 + {QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C77, 0x7C77}, 172 + {QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC77, 0x6C77}, 173 + {QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8}, 174 + {SEC_RAS_NFE_TYPE, "SEC_RAS_NFE_TYPE ", 0x3130, 0x0, 0x177, 0x60177}, 175 + {SEC_RAS_NFE_RESET, "SEC_RAS_NFE_RESET ", 0x3134, 0x0, 0x177, 0x177}, 176 + {SEC_RAS_CE_TYPE, "SEC_RAS_CE_TYPE ", 0x3138, 0x0, 0x88, 0xC088}, 177 + {SEC_CORE_INFO, "SEC_CORE_INFO ", 0x313c, 0x110404, 0x110404, 0x110404}, 178 + {SEC_CORE_EN, "SEC_CORE_EN ", 0x3140, 0x17F, 0x17F, 0xF}, 179 + {SEC_DRV_ALG_BITMAP_LOW_TB, "SEC_DRV_ALG_BITMAP_LOW ", 180 + 0x3144, 0x18050CB, 0x18050CB, 0x18670CF}, 181 + {SEC_DRV_ALG_BITMAP_HIGH_TB, "SEC_DRV_ALG_BITMAP_HIGH ", 182 + 0x3148, 0x395C, 0x395C, 0x395C}, 183 + {SEC_ALG_BITMAP_LOW, "SEC_ALG_BITMAP_LOW ", 184 + 0x314c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 185 + {SEC_ALG_BITMAP_HIGH, "SEC_ALG_BITMAP_HIGH ", 0x3150, 0x3FFF, 0x3FFF, 0x3FFF}, 186 + {SEC_CORE1_BITMAP_LOW, "SEC_CORE1_BITMAP_LOW ", 187 + 0x3154, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 188 + {SEC_CORE1_BITMAP_HIGH, "SEC_CORE1_BITMAP_HIGH ", 0x3158, 0x3FFF, 0x3FFF, 0x3FFF}, 189 + {SEC_CORE2_BITMAP_LOW, "SEC_CORE2_BITMAP_LOW ", 190 + 0x315c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 191 + {SEC_CORE2_BITMAP_HIGH, "SEC_CORE2_BITMAP_HIGH ", 0x3160, 0x3FFF, 
0x3FFF, 0x3FFF}, 192 + {SEC_CORE3_BITMAP_LOW, "SEC_CORE3_BITMAP_LOW ", 193 + 0x3164, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 194 + {SEC_CORE3_BITMAP_HIGH, "SEC_CORE3_BITMAP_HIGH ", 0x3168, 0x3FFF, 0x3FFF, 0x3FFF}, 195 + {SEC_CORE4_BITMAP_LOW, "SEC_CORE4_BITMAP_LOW ", 196 + 0x316c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, 197 + {SEC_CORE4_BITMAP_HIGH, "SEC_CORE4_BITMAP_HIGH ", 0x3170, 0x3FFF, 0x3FFF, 0x3FFF}, 175 198 }; 176 199 177 200 static const struct qm_dev_alg sec_dev_algs[] = { { ··· 861 838 862 839 DEFINE_SHOW_ATTRIBUTE(sec_regs); 863 840 841 + static int sec_cap_regs_show(struct seq_file *s, void *unused) 842 + { 843 + struct hisi_qm *qm = s->private; 844 + u32 i, size; 845 + 846 + size = qm->cap_tables.qm_cap_size; 847 + for (i = 0; i < size; i++) 848 + seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name, 849 + qm->cap_tables.qm_cap_table[i].cap_val); 850 + 851 + size = qm->cap_tables.dev_cap_size; 852 + for (i = 0; i < size; i++) 853 + seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name, 854 + qm->cap_tables.dev_cap_table[i].cap_val); 855 + 856 + return 0; 857 + } 858 + 859 + DEFINE_SHOW_ATTRIBUTE(sec_cap_regs); 860 + 864 861 static int sec_core_debug_init(struct hisi_qm *qm) 865 862 { 866 863 struct dfx_diff_registers *sec_regs = qm->debug.acc_diff_regs; ··· 914 871 debugfs_create_file(sec_dfx_labels[i].name, 0644, 915 872 tmp_d, data, &sec_atomic64_ops); 916 873 } 874 + 875 + debugfs_create_file("cap_regs", CAP_FILE_PERMISSION, 876 + qm->debug.debug_root, qm, &sec_cap_regs_fops); 917 877 918 878 return 0; 919 879 } ··· 1131 1085 struct pci_dev *pdev = qm->pdev; 1132 1086 size_t i, size; 1133 1087 1134 - size = ARRAY_SIZE(sec_pre_store_caps); 1088 + size = ARRAY_SIZE(sec_cap_query_info); 1135 1089 sec_cap = devm_kzalloc(&pdev->dev, sizeof(*sec_cap) * size, GFP_KERNEL); 1136 1090 if (!sec_cap) 1137 1091 return -ENOMEM; 1138 1092 1139 1093 for (i = 0; i < size; i++) { 1140 - sec_cap[i].type = sec_pre_store_caps[i]; 1141 - 
sec_cap[i].cap_val = hisi_qm_get_hw_info(qm, sec_basic_info, 1142 - sec_pre_store_caps[i], qm->cap_ver); 1094 + sec_cap[i].type = sec_cap_query_info[i].type; 1095 + sec_cap[i].name = sec_cap_query_info[i].name; 1096 + sec_cap[i].cap_val = hisi_qm_get_cap_value(qm, sec_cap_query_info, 1097 + i, qm->cap_ver); 1143 1098 } 1144 1099 1145 1100 qm->cap_tables.dev_cap_table = sec_cap; 1101 + qm->cap_tables.dev_cap_size = size; 1146 1102 1147 1103 return 0; 1148 1104 } ··· 1194 1146 hisi_qm_uninit(qm); 1195 1147 return ret; 1196 1148 } 1197 - 1198 - alg_msk = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH_IDX, SEC_DEV_ALG_BITMAP_LOW_IDX); 1149 + alg_msk = sec_get_alg_bitmap(qm, SEC_ALG_BITMAP_HIGH, SEC_ALG_BITMAP_LOW); 1199 1150 ret = hisi_qm_set_algs(qm, alg_msk, sec_dev_algs, ARRAY_SIZE(sec_dev_algs)); 1200 1151 if (ret) { 1201 1152 pci_err(qm->pdev, "Failed to set sec algs!\n");
+18
drivers/crypto/hisilicon/zip/zip.h
··· 81 81 u32 rsvd1[4]; 82 82 }; 83 83 84 + enum zip_cap_table_type { 85 + QM_RAS_NFE_TYPE, 86 + QM_RAS_NFE_RESET, 87 + QM_RAS_CE_TYPE, 88 + ZIP_RAS_NFE_TYPE, 89 + ZIP_RAS_NFE_RESET, 90 + ZIP_RAS_CE_TYPE, 91 + ZIP_CORE_INFO, 92 + ZIP_CORE_EN, 93 + ZIP_DRV_ALG_BITMAP_TB, 94 + ZIP_ALG_BITMAP, 95 + ZIP_CORE1_BITMAP, 96 + ZIP_CORE2_BITMAP, 97 + ZIP_CORE3_BITMAP, 98 + ZIP_CORE4_BITMAP, 99 + ZIP_CORE5_BITMAP, 100 + }; 101 + 84 102 int zip_create_qps(struct hisi_qp **qps, int qp_num, int node); 85 103 int hisi_zip_register_to_crypto(struct hisi_qm *qm); 86 104 void hisi_zip_unregister_from_crypto(struct hisi_qm *qm);
+83 -33
drivers/crypto/hisilicon/zip/zip_main.c
··· 14 14 #include <linux/uacce.h> 15 15 #include "zip.h" 16 16 17 + #define CAP_FILE_PERMISSION 0444 17 18 #define PCI_DEVICE_ID_HUAWEI_ZIP_PF 0xa250 18 19 19 20 #define HZIP_QUEUE_NUM_V1 4096 ··· 251 250 {ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0} 252 251 }; 253 252 254 - enum zip_pre_store_cap_idx { 255 - ZIP_CORE_NUM_CAP_IDX = 0x0, 256 - ZIP_CLUSTER_COMP_NUM_CAP_IDX, 257 - ZIP_CLUSTER_DECOMP_NUM_CAP_IDX, 258 - ZIP_DECOMP_ENABLE_BITMAP_IDX, 259 - ZIP_COMP_ENABLE_BITMAP_IDX, 260 - ZIP_DRV_ALG_BITMAP_IDX, 261 - ZIP_DEV_ALG_BITMAP_IDX, 262 - }; 263 - 264 - static const u32 zip_pre_store_caps[] = { 265 - ZIP_CORE_NUM_CAP, 266 - ZIP_CLUSTER_COMP_NUM_CAP, 267 - ZIP_CLUSTER_DECOMP_NUM_CAP, 268 - ZIP_DECOMP_ENABLE_BITMAP, 269 - ZIP_COMP_ENABLE_BITMAP, 270 - ZIP_DRV_ALG_BITMAP, 271 - ZIP_DEV_ALG_BITMAP, 253 + static const struct hisi_qm_cap_query_info zip_cap_query_info[] = { 254 + {QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C57, 0x7C77}, 255 + {QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC57, 0x6C77}, 256 + {QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8}, 257 + {ZIP_RAS_NFE_TYPE, "ZIP_RAS_NFE_TYPE ", 0x3130, 0x0, 0x7FE, 0x1FFE}, 258 + {ZIP_RAS_NFE_RESET, "ZIP_RAS_NFE_RESET ", 0x3134, 0x0, 0x7FE, 0x7FE}, 259 + {ZIP_RAS_CE_TYPE, "ZIP_RAS_CE_TYPE ", 0x3138, 0x0, 0x1, 0x1}, 260 + {ZIP_CORE_INFO, "ZIP_CORE_INFO ", 0x313C, 0x12080206, 0x12080206, 0x12050203}, 261 + {ZIP_CORE_EN, "ZIP_CORE_EN ", 0x3140, 0xFC0003, 0xFC0003, 0x1C0003}, 262 + {ZIP_DRV_ALG_BITMAP_TB, "ZIP_DRV_ALG_BITMAP ", 0x3144, 0x0, 0x0, 0x30}, 263 + {ZIP_ALG_BITMAP, "ZIP_ALG_BITMAP ", 0x3148, 0xF, 0xF, 0x3F}, 264 + {ZIP_CORE1_BITMAP, "ZIP_CORE1_BITMAP ", 0x314C, 0x5, 0x5, 0xD5}, 265 + {ZIP_CORE2_BITMAP, "ZIP_CORE2_BITMAP ", 0x3150, 0x5, 0x5, 0xD5}, 266 + {ZIP_CORE3_BITMAP, "ZIP_CORE3_BITMAP ", 0x3154, 0xA, 0xA, 0x2A}, 267 + {ZIP_CORE4_BITMAP, "ZIP_CORE4_BITMAP ", 0x3158, 0xA, 0xA, 0x2A}, 268 + {ZIP_CORE5_BITMAP, "ZIP_CORE5_BITMAP ", 0x315C, 0xA, 0xA, 0x2A}, 272 269 }; 
273 270 274 271 static const struct debugfs_reg32 hzip_dfx_regs[] = { ··· 441 442 { 442 443 u32 cap_val; 443 444 444 - cap_val = qm->cap_tables.dev_cap_table[ZIP_DRV_ALG_BITMAP_IDX].cap_val; 445 + cap_val = qm->cap_tables.dev_cap_table[ZIP_DRV_ALG_BITMAP_TB].cap_val; 445 446 if ((alg & cap_val) == alg) 446 447 return true; 447 448 ··· 529 530 { 530 531 void __iomem *base = qm->io_base; 531 532 u32 dcomp_bm, comp_bm; 533 + u32 zip_core_en; 532 534 533 535 /* qm user domain */ 534 536 writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1); ··· 567 567 } 568 568 569 569 /* let's open all compression/decompression cores */ 570 - dcomp_bm = qm->cap_tables.dev_cap_table[ZIP_DECOMP_ENABLE_BITMAP_IDX].cap_val; 571 - comp_bm = qm->cap_tables.dev_cap_table[ZIP_COMP_ENABLE_BITMAP_IDX].cap_val; 570 + 571 + zip_core_en = qm->cap_tables.dev_cap_table[ZIP_CORE_EN].cap_val; 572 + dcomp_bm = (zip_core_en >> zip_basic_cap_info[ZIP_DECOMP_ENABLE_BITMAP].shift) & 573 + zip_basic_cap_info[ZIP_DECOMP_ENABLE_BITMAP].mask; 574 + comp_bm = (zip_core_en >> zip_basic_cap_info[ZIP_COMP_ENABLE_BITMAP].shift) & 575 + zip_basic_cap_info[ZIP_COMP_ENABLE_BITMAP].mask; 572 576 writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL); 573 577 574 578 /* enable sqc,cqc writeback */ ··· 792 788 793 789 static void __iomem *get_zip_core_addr(struct hisi_qm *qm, int core_num) 794 790 { 795 - u32 zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val; 791 + u8 zip_comp_core_num; 792 + u32 zip_core_info; 793 + 794 + zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val; 795 + zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) & 796 + zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask; 796 797 797 798 if (core_num < zip_comp_core_num) 798 799 return qm->io_base + HZIP_CORE_DFX_BASE + ··· 812 803 u32 zip_core_num, zip_comp_core_num; 813 804 struct device *dev = &qm->pdev->dev; 814 805 struct debugfs_regset32 
*regset; 806 + u32 zip_core_info; 815 807 struct dentry *tmp_d; 816 808 char buf[HZIP_BUF_SIZE]; 817 809 int i; 818 810 819 - zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val; 820 - zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val; 811 + zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val; 812 + zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) & 813 + zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask; 814 + zip_comp_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) & 815 + zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask; 821 816 822 817 for (i = 0; i < zip_core_num; i++) { 823 818 if (i < zip_comp_core_num) ··· 847 834 return 0; 848 835 } 849 836 837 + static int zip_cap_regs_show(struct seq_file *s, void *unused) 838 + { 839 + struct hisi_qm *qm = s->private; 840 + u32 i, size; 841 + 842 + size = qm->cap_tables.qm_cap_size; 843 + for (i = 0; i < size; i++) 844 + seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name, 845 + qm->cap_tables.qm_cap_table[i].cap_val); 846 + 847 + size = qm->cap_tables.dev_cap_size; 848 + for (i = 0; i < size; i++) 849 + seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name, 850 + qm->cap_tables.dev_cap_table[i].cap_val); 851 + 852 + return 0; 853 + } 854 + 855 + DEFINE_SHOW_ATTRIBUTE(zip_cap_regs); 856 + 850 857 static void hisi_zip_dfx_debug_init(struct hisi_qm *qm) 851 858 { 852 859 struct dfx_diff_registers *hzip_regs = qm->debug.acc_diff_regs; ··· 887 854 if (qm->fun_type == QM_HW_PF && hzip_regs) 888 855 debugfs_create_file("diff_regs", 0444, tmp_dir, 889 856 qm, &hzip_diff_regs_fops); 857 + 858 + debugfs_create_file("cap_regs", CAP_FILE_PERMISSION, 859 + qm->debug.debug_root, qm, &zip_cap_regs_fops); 890 860 } 891 861 892 862 static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm) ··· 948 912 /* hisi_zip_debug_regs_clear() - clear the zip debug regs */ 949 913 static void 
hisi_zip_debug_regs_clear(struct hisi_qm *qm) 950 914 { 951 - u32 zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val; 915 + u32 zip_core_info; 916 + u8 zip_core_num; 952 917 int i, j; 918 + 919 + zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val; 920 + zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) & 921 + zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask; 953 922 954 923 /* enable register read_clear bit */ 955 924 writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE); ··· 987 946 int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs); 988 947 struct qm_debug *debug = &qm->debug; 989 948 void __iomem *io_base; 949 + u32 zip_core_info; 990 950 u32 zip_core_num; 991 951 int i, j, idx; 992 952 993 - zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val; 953 + zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val; 954 + zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) & 955 + zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask; 994 956 995 957 debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num, 996 958 sizeof(unsigned int), GFP_KERNEL); ··· 1035 991 u32 zip_core_num, zip_comp_core_num; 1036 992 struct qm_debug *debug = &qm->debug; 1037 993 char buf[HZIP_BUF_SIZE]; 994 + u32 zip_core_info; 1038 995 void __iomem *base; 1039 996 int i, j, idx; 1040 997 u32 val; ··· 1050 1005 hzip_com_dfx_regs[i].name, debug->last_words[i], val); 1051 1006 } 1052 1007 1053 - zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val; 1054 - zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val; 1008 + zip_core_info = qm->cap_tables.dev_cap_table[ZIP_CORE_INFO].cap_val; 1009 + zip_core_num = (zip_core_info >> zip_basic_cap_info[ZIP_CORE_NUM_CAP].shift) & 1010 + zip_basic_cap_info[ZIP_CORE_NUM_CAP].mask; 1011 + zip_comp_core_num = (zip_core_info >> 
zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].shift) & 1012 + zip_basic_cap_info[ZIP_CLUSTER_COMP_NUM_CAP].mask; 1055 1013 1056 1014 for (i = 0; i < zip_core_num; i++) { 1057 1015 if (i < zip_comp_core_num) ··· 1215 1167 struct pci_dev *pdev = qm->pdev; 1216 1168 size_t i, size; 1217 1169 1218 - size = ARRAY_SIZE(zip_pre_store_caps); 1170 + size = ARRAY_SIZE(zip_cap_query_info); 1219 1171 zip_cap = devm_kzalloc(&pdev->dev, sizeof(*zip_cap) * size, GFP_KERNEL); 1220 1172 if (!zip_cap) 1221 1173 return -ENOMEM; 1222 1174 1223 1175 for (i = 0; i < size; i++) { 1224 - zip_cap[i].type = zip_pre_store_caps[i]; 1225 - zip_cap[i].cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info, 1226 - zip_pre_store_caps[i], qm->cap_ver); 1176 + zip_cap[i].type = zip_cap_query_info[i].type; 1177 + zip_cap[i].name = zip_cap_query_info[i].name; 1178 + zip_cap[i].cap_val = hisi_qm_get_cap_value(qm, zip_cap_query_info, 1179 + i, qm->cap_ver); 1227 1180 } 1228 1181 1229 1182 qm->cap_tables.dev_cap_table = zip_cap; 1183 + qm->cap_tables.dev_cap_size = size; 1230 1184 1231 1185 return 0; 1232 1186 } ··· 1280 1230 return ret; 1281 1231 } 1282 1232 1283 - alg_msk = qm->cap_tables.dev_cap_table[ZIP_DEV_ALG_BITMAP_IDX].cap_val; 1233 + alg_msk = qm->cap_tables.dev_cap_table[ZIP_ALG_BITMAP].cap_val; 1284 1234 ret = hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs)); 1285 1235 if (ret) { 1286 1236 pci_err(qm->pdev, "Failed to set zip algs!\n");
+15
include/linux/hisi_acc_qm.h
··· 274 274 u32 v3_val; 275 275 }; 276 276 277 + struct hisi_qm_cap_query_info { 278 + u32 type; 279 + const char *name; 280 + u32 offset; 281 + u32 v1_val; 282 + u32 v2_val; 283 + u32 v3_val; 284 + }; 285 + 277 286 struct hisi_qm_cap_record { 278 287 u32 type; 288 + const char *name; 279 289 u32 cap_val; 280 290 }; 281 291 282 292 struct hisi_qm_cap_tables { 293 + u32 qm_cap_size; 283 294 struct hisi_qm_cap_record *qm_cap_table; 295 + u32 dev_cap_size; 284 296 struct hisi_qm_cap_record *dev_cap_table; 285 297 }; 286 298 ··· 565 553 void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset); 566 554 u32 hisi_qm_get_hw_info(struct hisi_qm *qm, 567 555 const struct hisi_qm_cap_info *info_table, 556 + u32 index, bool is_read); 557 + u32 hisi_qm_get_cap_value(struct hisi_qm *qm, 558 + const struct hisi_qm_cap_query_info *info_table, 568 559 u32 index, bool is_read); 569 560 int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs, 570 561 u32 dev_algs_size);