Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd/pm: add smu_13_0_6 mca dump support

v1:
implement smu_v13_0_6 mca bank interface.

v2:
- remove unnecessary lock
- move MCMP1_* macros to mp_13_0_6_sh_mask.h file

Signed-off-by: Yang Wang <kevinyang.wang@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Yang Wang and committed by Alex Deucher.
25396684 bcd8dc49

+559
+28
drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_sh_mask.h
··· 670 670 #define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L 671 671 #define MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL 672 672 673 + //MCMP1_IPIDT0 674 + #define MCMP1_IPIDT0__InstanceIdLo__SHIFT 0x0 675 + #define MCMP1_IPIDT0__HardwareID__SHIFT 0x20 676 + #define MCMP1_IPIDT0__InstanceIdHi__SHIFT 0x2c 677 + #define MCMP1_IPIDT0__McaType__SHIFT 0x30 678 + 679 + #define MCMP1_IPIDT0__InstanceIdLo_MASK 0x00000000FFFFFFFFL 680 + #define MCMP1_IPIDT0__HardwareID_MASK 0x00000FFF00000000L 681 + #define MCMP1_IPIDT0__InstanceIdHi_MASK 0x0000F00000000000L 682 + #define MCMP1_IPIDT0__McaType_MASK 0xFFFF000000000000L 683 + 684 + //MCMP1_STATUST0 685 + #define MCMP1_STATUST0__ErrorCode__SHIFT 0x0 686 + #define MCMP1_STATUST0__ErrorCodeExt__SHIFT 0x10 687 + #define MCMP1_STATUST0__PCC__SHIFT 0x39 688 + #define MCMP1_STATUST0__UC__SHIFT 0x3d 689 + #define MCMP1_STATUST0__Val__SHIFT 0x3f 690 + 691 + #define MCMP1_STATUST0__ErrorCode_MASK 0x000000000000FFFFL 692 + #define MCMP1_STATUST0__ErrorCodeExt_MASK 0x00000000003F0000L 693 + #define MCMP1_STATUST0__PCC_MASK 0x0200000000000000L 694 + #define MCMP1_STATUST0__UC_MASK 0x2000000000000000L 695 + #define MCMP1_STATUST0__Val_MASK 0x8000000000000000L 696 + 697 + //MCMP1_MISC0T0 698 + #define MCMP1_MISC0T0__ErrCnt__SHIFT 0x20 699 + 700 + #define MCMP1_MISC0T0__ErrCnt_MASK 0x00000FFF00000000L 673 701 674 702 #endif
+531
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
··· 44 44 #include "amdgpu_xgmi.h" 45 45 #include <linux/pci.h> 46 46 #include "amdgpu_ras.h" 47 + #include "amdgpu_mca.h" 47 48 #include "smu_cmn.h" 48 49 #include "mp/mp_13_0_6_offset.h" 49 50 #include "mp/mp_13_0_6_sh_mask.h" ··· 93 92 #define LINK_SPEED_MAX 4 94 93 95 94 #define SMU_13_0_6_DSCLK_THRESHOLD 100 95 + 96 + #define MCA_BANK_IPID(_ip, _hwid, _type) \ 97 + [AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, } 98 + 99 + enum mca_reg_idx { 100 + MCA_REG_IDX_CONTROL = 0, 101 + MCA_REG_IDX_STATUS = 1, 102 + MCA_REG_IDX_ADDR = 2, 103 + MCA_REG_IDX_MISC0 = 3, 104 + MCA_REG_IDX_CONFIG = 4, 105 + MCA_REG_IDX_IPID = 5, 106 + MCA_REG_IDX_SYND = 6, 107 + MCA_REG_IDX_COUNT = 16, 108 + }; 109 + 110 + struct mca_bank_ipid { 111 + enum amdgpu_mca_ip ip; 112 + uint16_t hwid; 113 + uint16_t mcatype; 114 + }; 115 + 116 + struct mca_ras_info { 117 + enum amdgpu_ras_block blkid; 118 + enum amdgpu_mca_ip ip; 119 + int *err_code_array; 120 + int err_code_count; 121 + int (*get_err_count)(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, 122 + enum amdgpu_mca_error_type type, int idx, uint32_t *count); 123 + }; 96 124 97 125 static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = { 98 126 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), ··· 2214 2184 return 0; 2215 2185 } 2216 2186 2187 + static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) 2188 + { 2189 + struct smu_context *smu = adev->powerplay.pp_handle; 2190 + 2191 + return smu_v13_0_6_mca_set_debug_mode(smu, enable); 2192 + } 2193 + 2194 + static int smu_v13_0_6_get_valid_mca_count(struct smu_context *smu, enum amdgpu_mca_error_type type, uint32_t *count) 2195 + { 2196 + uint32_t msg; 2197 + int ret; 2198 + 2199 + if (!count) 2200 + return -EINVAL; 2201 + 2202 + switch (type) { 2203 + case AMDGPU_MCA_ERROR_TYPE_UE: 2204 + msg = SMU_MSG_QueryValidMcaCount; 2205 + break; 2206 + case AMDGPU_MCA_ERROR_TYPE_CE: 2207 + msg = 
SMU_MSG_QueryValidMcaCeCount; 2208 + break; 2209 + default: 2210 + return -EINVAL; 2211 + } 2212 + 2213 + ret = smu_cmn_send_smc_msg(smu, msg, count); 2214 + if (ret) { 2215 + *count = 0; 2216 + return ret; 2217 + } 2218 + 2219 + return 0; 2220 + } 2221 + 2222 + static int __smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type, 2223 + int idx, int offset, uint32_t *val) 2224 + { 2225 + uint32_t msg, param; 2226 + 2227 + switch (type) { 2228 + case AMDGPU_MCA_ERROR_TYPE_UE: 2229 + msg = SMU_MSG_McaBankDumpDW; 2230 + break; 2231 + case AMDGPU_MCA_ERROR_TYPE_CE: 2232 + msg = SMU_MSG_McaBankCeDumpDW; 2233 + break; 2234 + default: 2235 + return -EINVAL; 2236 + } 2237 + 2238 + param = ((idx & 0xffff) << 16) | (offset & 0xfffc); 2239 + 2240 + return smu_cmn_send_smc_msg_with_param(smu, msg, param, val); 2241 + } 2242 + 2243 + static int smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type, 2244 + int idx, int offset, uint32_t *val, int count) 2245 + { 2246 + int ret, i; 2247 + 2248 + if (!val) 2249 + return -EINVAL; 2250 + 2251 + for (i = 0; i < count; i++) { 2252 + ret = __smu_v13_0_6_mca_dump_bank(smu, type, idx, offset + (i << 2), &val[i]); 2253 + if (ret) 2254 + return ret; 2255 + } 2256 + 2257 + return 0; 2258 + } 2259 + 2260 + static const struct mca_bank_ipid smu_v13_0_6_mca_ipid_table[AMDGPU_MCA_IP_COUNT] = { 2261 + MCA_BANK_IPID(UMC, 0x96, 0x0), 2262 + MCA_BANK_IPID(SMU, 0x01, 0x1), 2263 + MCA_BANK_IPID(MP5, 0x01, 0x2), 2264 + }; 2265 + 2266 + static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_bank_info *info) 2267 + { 2268 + uint64_t ipid = entry->regs[MCA_REG_IDX_IPID]; 2269 + uint32_t insthi; 2270 + 2271 + /* NOTE: All MCA IPID register share the same format, 2272 + * so the driver can share the MCMP1 register header file. 
2273 + * */ 2274 + 2275 + info->hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID); 2276 + info->mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType); 2277 + 2278 + insthi = REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi); 2279 + info->aid = ((insthi >> 2) & 0x03); 2280 + info->socket_id = insthi & 0x03; 2281 + } 2282 + 2283 + static int mca_bank_read_reg(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, 2284 + int idx, int reg_idx, uint64_t *val) 2285 + { 2286 + struct smu_context *smu = adev->powerplay.pp_handle; 2287 + uint32_t data[2] = {0, 0}; 2288 + int ret; 2289 + 2290 + if (!val || reg_idx >= MCA_REG_IDX_COUNT) 2291 + return -EINVAL; 2292 + 2293 + ret = smu_v13_0_6_mca_dump_bank(smu, type, idx, reg_idx * 8, data, ARRAY_SIZE(data)); 2294 + if (ret) 2295 + return ret; 2296 + 2297 + *val = (uint64_t)data[1] << 32 | data[0]; 2298 + 2299 + dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n", 2300 + type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val); 2301 + 2302 + return 0; 2303 + } 2304 + 2305 + static int mca_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, 2306 + int idx, struct mca_bank_entry *entry) 2307 + { 2308 + int i, ret; 2309 + 2310 + /* NOTE: populated all mca register by default */ 2311 + for (i = 0; i < ARRAY_SIZE(entry->regs); i++) { 2312 + ret = mca_bank_read_reg(adev, type, idx, i, &entry->regs[i]); 2313 + if (ret) 2314 + return ret; 2315 + } 2316 + 2317 + if (ret) 2318 + return ret; 2319 + 2320 + entry->idx = idx; 2321 + entry->type = type; 2322 + 2323 + mca_bank_entry_info_decode(entry, &entry->info); 2324 + 2325 + return 0; 2326 + } 2327 + 2328 + static int mca_decode_mca_ipid(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, int idx, int *ip) 2329 + { 2330 + const struct mca_bank_ipid *ipid; 2331 + uint64_t val; 2332 + uint16_t hwid, mcatype; 2333 + int i, ret; 2334 + 2335 + ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_IPID, &val); 
2336 + if (ret) 2337 + return ret; 2338 + 2339 + hwid = REG_GET_FIELD(val, MCMP1_IPIDT0, HardwareID); 2340 + mcatype = REG_GET_FIELD(val, MCMP1_IPIDT0, McaType); 2341 + 2342 + if (hwid) { 2343 + for (i = 0; i < ARRAY_SIZE(smu_v13_0_6_mca_ipid_table); i++) { 2344 + ipid = &smu_v13_0_6_mca_ipid_table[i]; 2345 + 2346 + if (!ipid->hwid) 2347 + continue; 2348 + 2349 + if (ipid->hwid == hwid && ipid->mcatype == mcatype) { 2350 + *ip = i; 2351 + return 0; 2352 + } 2353 + } 2354 + } 2355 + 2356 + *ip = AMDGPU_MCA_IP_UNKNOW; 2357 + 2358 + return 0; 2359 + } 2360 + 2361 + static int mca_normal_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, 2362 + enum amdgpu_mca_error_type type, int idx, uint32_t *count) 2363 + { 2364 + uint64_t status0; 2365 + int ret; 2366 + 2367 + ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_STATUS, &status0); 2368 + if (ret) 2369 + return ret; 2370 + 2371 + if (REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) 2372 + *count = 1; 2373 + else 2374 + *count = 0; 2375 + 2376 + return 0; 2377 + } 2378 + 2379 + static bool mca_smu_check_error_code(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras, 2380 + uint32_t errcode) 2381 + { 2382 + int i; 2383 + 2384 + if (!mca_ras->err_code_count || !mca_ras->err_code_array) 2385 + return true; 2386 + 2387 + for (i = 0; i < mca_ras->err_code_count; i++) { 2388 + if (errcode == mca_ras->err_code_array[i]) 2389 + return true; 2390 + } 2391 + 2392 + return false; 2393 + } 2394 + 2395 + static int mca_mp5_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, 2396 + enum amdgpu_mca_error_type type, int idx, uint32_t *count) 2397 + { 2398 + uint64_t status0 = 0, misc0 = 0; 2399 + uint32_t errcode; 2400 + int ret; 2401 + 2402 + if (mca_ras->ip != AMDGPU_MCA_IP_MP5) 2403 + return -EINVAL; 2404 + 2405 + ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_STATUS, &status0); 2406 + if (ret) 2407 + return ret; 2408 + 2409 + if (!REG_GET_FIELD(status0, 
MCMP1_STATUST0, Val)) { 2410 + *count = 0; 2411 + return 0; 2412 + } 2413 + 2414 + errcode = REG_GET_FIELD(status0, MCMP1_STATUST0, ErrorCode); 2415 + if (!mca_smu_check_error_code(adev, mca_ras, errcode)) 2416 + return 0; 2417 + 2418 + if (type == AMDGPU_MCA_ERROR_TYPE_UE && 2419 + REG_GET_FIELD(status0, MCMP1_STATUST0, UC) == 1 && 2420 + REG_GET_FIELD(status0, MCMP1_STATUST0, PCC) == 1) { 2421 + if (count) 2422 + *count = 1; 2423 + return 0; 2424 + } 2425 + 2426 + ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_MISC0, &misc0); 2427 + if (ret) 2428 + return ret; 2429 + 2430 + if (count) 2431 + *count = REG_GET_FIELD(misc0, MCMP1_MISC0T0, ErrCnt); 2432 + 2433 + return 0; 2434 + } 2435 + 2436 + static int mca_smu_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev, 2437 + enum amdgpu_mca_error_type type, int idx, uint32_t *count) 2438 + { 2439 + uint64_t status0 = 0, misc0 = 0; 2440 + uint32_t errcode; 2441 + int ret; 2442 + 2443 + if (mca_ras->ip != AMDGPU_MCA_IP_SMU) 2444 + return -EINVAL; 2445 + 2446 + ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_STATUS, &status0); 2447 + if (ret) 2448 + return ret; 2449 + 2450 + if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) { 2451 + *count = 0; 2452 + return 0; 2453 + } 2454 + 2455 + errcode = REG_GET_FIELD(status0, MCMP1_STATUST0, ErrorCode); 2456 + if (!mca_smu_check_error_code(adev, mca_ras, errcode)) 2457 + return 0; 2458 + 2459 + if (type == AMDGPU_MCA_ERROR_TYPE_UE && 2460 + REG_GET_FIELD(status0, MCMP1_STATUST0, UC) == 1 && 2461 + REG_GET_FIELD(status0, MCMP1_STATUST0, PCC) == 1) { 2462 + if (count) 2463 + *count = 1; 2464 + return 0; 2465 + } 2466 + 2467 + ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_MISC0, &misc0); 2468 + if (ret) 2469 + return ret; 2470 + 2471 + if (count) 2472 + *count = REG_GET_FIELD(misc0, MCMP1_MISC0T0, ErrCnt); 2473 + 2474 + return 0; 2475 + } 2476 + 2477 + static int sdma_err_codes[] = { CODE_SDMA0, CODE_SDMA1, CODE_SDMA2, CODE_SDMA3 }; 2478 + 
static int mmhub_err_codes[] = { 2479 + CODE_DAGB0, CODE_DAGB0 + 1, CODE_DAGB0 + 2, CODE_DAGB0 + 3, CODE_DAGB0 + 4, /* DAGB0-4 */ 2480 + CODE_EA0, CODE_EA0 + 1, CODE_EA0 + 2, CODE_EA0 + 3, CODE_EA0 + 4, /* MMEA0-4*/ 2481 + CODE_VML2, CODE_VML2_WALKER, CODE_MMCANE, 2482 + }; 2483 + 2484 + static const struct mca_ras_info mca_ras_table[] = { 2485 + { 2486 + .blkid = AMDGPU_RAS_BLOCK__UMC, 2487 + .ip = AMDGPU_MCA_IP_UMC, 2488 + .get_err_count = mca_normal_mca_get_err_count, 2489 + }, { 2490 + .blkid = AMDGPU_RAS_BLOCK__GFX, 2491 + .ip = AMDGPU_MCA_IP_MP5, 2492 + .get_err_count = mca_mp5_mca_get_err_count, 2493 + }, { 2494 + .blkid = AMDGPU_RAS_BLOCK__SDMA, 2495 + .ip = AMDGPU_MCA_IP_SMU, 2496 + .err_code_array = sdma_err_codes, 2497 + .err_code_count = ARRAY_SIZE(sdma_err_codes), 2498 + .get_err_count = mca_smu_mca_get_err_count, 2499 + }, { 2500 + .blkid = AMDGPU_RAS_BLOCK__MMHUB, 2501 + .ip = AMDGPU_MCA_IP_SMU, 2502 + .err_code_array = mmhub_err_codes, 2503 + .err_code_count = ARRAY_SIZE(mmhub_err_codes), 2504 + .get_err_count = mca_smu_mca_get_err_count, 2505 + }, 2506 + }; 2507 + 2508 + static const struct mca_ras_info *mca_get_mca_ras_info(struct amdgpu_device *adev, enum amdgpu_ras_block blkid) 2509 + { 2510 + int i; 2511 + 2512 + for (i = 0; i < ARRAY_SIZE(mca_ras_table); i++) { 2513 + if (mca_ras_table[i].blkid == blkid) 2514 + return &mca_ras_table[i]; 2515 + } 2516 + 2517 + return NULL; 2518 + } 2519 + 2520 + static int mca_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count) 2521 + { 2522 + struct smu_context *smu = adev->powerplay.pp_handle; 2523 + int ret; 2524 + 2525 + switch (type) { 2526 + case AMDGPU_MCA_ERROR_TYPE_UE: 2527 + case AMDGPU_MCA_ERROR_TYPE_CE: 2528 + ret = smu_v13_0_6_get_valid_mca_count(smu, type, count); 2529 + break; 2530 + default: 2531 + ret = -EINVAL; 2532 + break; 2533 + } 2534 + 2535 + return ret; 2536 + } 2537 + 2538 + static bool mca_bank_is_valid(struct amdgpu_device *adev, const 
struct mca_ras_info *mca_ras, 2539 + enum amdgpu_mca_error_type type, int idx) 2540 + { 2541 + int ret, ip = AMDGPU_MCA_IP_UNKNOW; 2542 + 2543 + ret = mca_decode_mca_ipid(adev, type, idx, &ip); 2544 + if (ret) 2545 + return false; 2546 + 2547 + if (ip == AMDGPU_MCA_IP_UNKNOW) 2548 + return false; 2549 + 2550 + return ip == mca_ras->ip; 2551 + } 2552 + 2553 + static int mca_get_valid_mca_idx(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras, 2554 + enum amdgpu_mca_error_type type, 2555 + uint32_t mca_cnt, int *idx_array, int idx_array_size) 2556 + { 2557 + int i, idx_cnt = 0; 2558 + 2559 + for (i = 0; i < mca_cnt; i++) { 2560 + if (!mca_bank_is_valid(adev, mca_ras, type, i)) 2561 + continue; 2562 + 2563 + if (idx_array) { 2564 + if (idx_cnt < idx_array_size) 2565 + idx_array[idx_cnt] = i; 2566 + else 2567 + return -EINVAL; 2568 + } 2569 + 2570 + idx_cnt++; 2571 + } 2572 + 2573 + return idx_cnt; 2574 + } 2575 + 2576 + static int __mca_smu_get_error_count(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras, enum amdgpu_mca_error_type type, uint32_t *count) 2577 + { 2578 + uint32_t result, mca_cnt, total = 0; 2579 + int idx_array[16]; 2580 + int i, ret, idx_cnt = 0; 2581 + 2582 + ret = mca_get_valid_mca_count(adev, type, &mca_cnt); 2583 + if (ret) 2584 + return ret; 2585 + 2586 + /* if valid mca bank count is 0, the driver can return 0 directly */ 2587 + if (!mca_cnt) { 2588 + *count = 0; 2589 + return 0; 2590 + } 2591 + 2592 + if (!mca_ras->get_err_count) 2593 + return -EINVAL; 2594 + 2595 + idx_cnt = mca_get_valid_mca_idx(adev, mca_ras, type, mca_cnt, idx_array, ARRAY_SIZE(idx_array)); 2596 + if (idx_cnt < 0) 2597 + return -EINVAL; 2598 + 2599 + for (i = 0; i < idx_cnt; i++) { 2600 + result = 0; 2601 + ret = mca_ras->get_err_count(mca_ras, adev, type, idx_array[i], &result); 2602 + if (ret) 2603 + return ret; 2604 + 2605 + total += result; 2606 + } 2607 + 2608 + *count = total; 2609 + 2610 + return 0; 2611 + } 2612 + 2613 + static int 
mca_smu_get_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, 2614 + enum amdgpu_mca_error_type type, uint32_t *count) 2615 + { 2616 + const struct mca_ras_info *mca_ras; 2617 + 2618 + if (!count) 2619 + return -EINVAL; 2620 + 2621 + mca_ras = mca_get_mca_ras_info(adev, blk); 2622 + if (!mca_ras) 2623 + return -EOPNOTSUPP; 2624 + 2625 + return __mca_smu_get_error_count(adev, mca_ras, type, count); 2626 + } 2627 + 2628 + static int __mca_smu_get_ras_mca_idx_array(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras, 2629 + enum amdgpu_mca_error_type type, int *idx_array, int *idx_array_size) 2630 + { 2631 + uint32_t mca_cnt = 0; 2632 + int ret, idx_cnt = 0; 2633 + 2634 + ret = mca_get_valid_mca_count(adev, type, &mca_cnt); 2635 + if (ret) 2636 + return ret; 2637 + 2638 + /* if valid mca bank count is 0, the driver can return 0 directly */ 2639 + if (!mca_cnt) { 2640 + *idx_array_size = 0; 2641 + return 0; 2642 + } 2643 + 2644 + idx_cnt = mca_get_valid_mca_idx(adev, mca_ras, type, mca_cnt, idx_array, *idx_array_size); 2645 + if (idx_cnt < 0) 2646 + return -EINVAL; 2647 + 2648 + *idx_array_size = idx_cnt; 2649 + 2650 + return 0; 2651 + } 2652 + 2653 + static int mca_smu_get_ras_mca_idx_array(struct amdgpu_device *adev, enum amdgpu_ras_block blk, 2654 + enum amdgpu_mca_error_type type, int *idx_array, int *idx_array_size) 2655 + { 2656 + const struct mca_ras_info *mca_ras; 2657 + 2658 + mca_ras = mca_get_mca_ras_info(adev, blk); 2659 + if (!mca_ras) 2660 + return -EOPNOTSUPP; 2661 + 2662 + return __mca_smu_get_ras_mca_idx_array(adev, mca_ras, type, idx_array, idx_array_size); 2663 + } 2664 + 2665 + static int mca_smu_get_mca_entry(struct amdgpu_device *adev, 2666 + enum amdgpu_mca_error_type type, int idx, struct mca_bank_entry *entry) 2667 + { 2668 + return mca_get_mca_entry(adev, type, idx, entry); 2669 + } 2670 + 2671 + static int mca_smu_get_valid_mca_count(struct amdgpu_device *adev, 2672 + enum amdgpu_mca_error_type type, uint32_t 
*count) 2673 + { 2674 + return mca_get_valid_mca_count(adev, type, count); 2675 + } 2676 + 2677 + static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs = { 2678 + .max_ue_count = 12, 2679 + .max_ce_count = 12, 2680 + .mca_set_debug_mode = mca_smu_set_debug_mode, 2681 + .mca_get_error_count = mca_smu_get_error_count, 2682 + .mca_get_mca_entry = mca_smu_get_mca_entry, 2683 + .mca_get_valid_mca_count = mca_smu_get_valid_mca_count, 2684 + .mca_get_ras_mca_idx_array = mca_smu_get_ras_mca_idx_array, 2685 + }; 2686 + 2217 2687 static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { 2218 2688 /* init dpm */ 2219 2689 .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask, ··· 2779 2249 smu->table_map = smu_v13_0_6_table_map; 2780 2250 smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION; 2781 2251 smu_v13_0_set_smu_mailbox_registers(smu); 2252 + amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs); 2782 2253 }