Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * Copyright 2023 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/list.h>
25#include "amdgpu.h"
26#include "amdgpu_aca.h"
27#include "amdgpu_ras.h"
28
/* Build one HWID/MCATYPE table entry, indexed by ACA HWIP type. */
#define ACA_BANK_HWID(type, hwid, mcatype) [ACA_HWIP_TYPE_##type] = {hwid, mcatype}

/* Per-bank callback invoked while dispatching banks to registered handles. */
typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data);

/*
 * Maps each known HWIP to the (hardware id, MCA type) pair expected in a
 * bank's IPID register; used by aca_bank_hwip_is_matched().
 */
static struct aca_hwip aca_hwid_mcatypes[ACA_HWIP_TYPE_COUNT] = {
	ACA_BANK_HWID(SMU, 0x01, 0x01),
	ACA_BANK_HWID(PCS_XGMI, 0x50, 0x00),
	ACA_BANK_HWID(UMC, 0x96, 0x00),
};
38
39static void aca_banks_init(struct aca_banks *banks)
40{
41 if (!banks)
42 return;
43
44 memset(banks, 0, sizeof(*banks));
45 INIT_LIST_HEAD(&banks->list);
46}
47
48static int aca_banks_add_bank(struct aca_banks *banks, struct aca_bank *bank)
49{
50 struct aca_bank_node *node;
51
52 if (!bank)
53 return -EINVAL;
54
55 node = kvzalloc(sizeof(*node), GFP_KERNEL);
56 if (!node)
57 return -ENOMEM;
58
59 memcpy(&node->bank, bank, sizeof(*bank));
60
61 INIT_LIST_HEAD(&node->node);
62 list_add_tail(&node->node, &banks->list);
63
64 banks->nr_banks++;
65
66 return 0;
67}
68
69static void aca_banks_release(struct aca_banks *banks)
70{
71 struct aca_bank_node *node, *tmp;
72
73 if (list_empty(&banks->list))
74 return;
75
76 list_for_each_entry_safe(node, tmp, &banks->list, node) {
77 list_del(&node->node);
78 kvfree(node);
79 banks->nr_banks--;
80 }
81}
82
83static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, enum aca_smu_type type, u32 *count)
84{
85 struct amdgpu_aca *aca = &adev->aca;
86 const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
87
88 if (!count)
89 return -EINVAL;
90
91 if (!smu_funcs || !smu_funcs->get_valid_aca_count)
92 return -EOPNOTSUPP;
93
94 return smu_funcs->get_valid_aca_count(adev, type, count);
95}
96
/* Register names and indices dumped for each bank in the RAS event log. */
static struct aca_regs_dump {
	const char *name;	/* label printed in the dump */
	int reg_idx;		/* index into aca_bank::regs[] */
} aca_regs[] = {
	{"CONTROL", ACA_REG_IDX_CTL},
	{"STATUS", ACA_REG_IDX_STATUS},
	{"ADDR", ACA_REG_IDX_ADDR},
	{"MISC", ACA_REG_IDX_MISC0},
	{"CONFIG", ACA_REG_IDX_CONFIG},
	{"IPID", ACA_REG_IDX_IPID},
	{"SYND", ACA_REG_IDX_SYND},
	{"DESTAT", ACA_REG_IDX_DESTAT},
	{"DEADDR", ACA_REG_IDX_DEADDR},
	{"CONTROL_MASK", ACA_REG_IDX_CTL_MASK},
};
112
/*
 * Dump one ACA bank's registers to the RAS event log.
 *
 * @idx/@total give the bank's position for the "ACA[xx/yy]" output format.
 * Plain CE banks are suppressed when debug_disable_ce_logs is set; deferred
 * errors are always logged.
 */
static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, struct aca_bank *bank,
			      struct ras_query_context *qctx)
{
	u64 event_id = qctx ? qctx->evid.event_id : RAS_EVENT_INVALID_ID;
	int i;

	/* skip plain correctable errors when CE logging is disabled */
	if (adev->debug_disable_ce_logs &&
	    bank->smu_err_type == ACA_SMU_TYPE_CE &&
	    !ACA_BANK_ERR_IS_DEFFERED(bank))
		return;

	RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
	/* plus 1 for output format, e.g: ACA[08/08]: xxxx */
	for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
		RAS_EVENT_LOG(adev, event_id, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
			      idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);

	if (ACA_REG__STATUS__SCRUB(bank->regs[ACA_REG_IDX_STATUS]))
		RAS_EVENT_LOG(adev, event_id, HW_ERR "hardware error logged by the scrubber\n");
}
133
134static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
135{
136
137 struct aca_hwip *hwip;
138 int hwid, mcatype;
139 u64 ipid;
140
141 if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
142 return false;
143
144 hwip = &aca_hwid_mcatypes[type];
145 if (!hwip->hwid)
146 return false;
147
148 ipid = bank->regs[ACA_REG_IDX_IPID];
149 hwid = ACA_REG__IPID__HARDWAREID(ipid);
150 mcatype = ACA_REG__IPID__MCATYPE(ipid);
151
152 return hwip->hwid == hwid && hwip->mcatype == mcatype;
153}
154
155static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type,
156 int start, int count,
157 struct aca_banks *banks, struct ras_query_context *qctx)
158{
159 struct amdgpu_aca *aca = &adev->aca;
160 const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
161 struct aca_bank bank;
162 int i, max_count, ret;
163
164 if (!count)
165 return 0;
166
167 if (!smu_funcs || !smu_funcs->get_valid_aca_bank)
168 return -EOPNOTSUPP;
169
170 switch (type) {
171 case ACA_SMU_TYPE_UE:
172 max_count = smu_funcs->max_ue_bank_count;
173 break;
174 case ACA_SMU_TYPE_CE:
175 max_count = smu_funcs->max_ce_bank_count;
176 break;
177 default:
178 return -EINVAL;
179 }
180
181 if (start + count > max_count)
182 return -EINVAL;
183
184 count = min_t(int, count, max_count);
185 for (i = 0; i < count; i++) {
186 memset(&bank, 0, sizeof(bank));
187 ret = smu_funcs->get_valid_aca_bank(adev, type, start + i, &bank);
188 if (ret)
189 return ret;
190
191 bank.smu_err_type = type;
192
193 /*
194 * Poison being consumed when injecting a UE while running background workloads,
195 * which are unexpected.
196 */
197 if (type == ACA_SMU_TYPE_UE &&
198 ACA_REG__STATUS__POISON(bank.regs[ACA_REG_IDX_STATUS]) &&
199 !aca_bank_hwip_is_matched(&bank, ACA_HWIP_TYPE_UMC))
200 continue;
201
202 aca_smu_bank_dump(adev, i, count, &bank, qctx);
203
204 ret = aca_banks_add_bank(banks, &bank);
205 if (ret)
206 return ret;
207 }
208
209 return 0;
210}
211
212static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type)
213{
214 const struct aca_bank_ops *bank_ops = handle->bank_ops;
215
216 /* Parse all deferred errors with UMC aca handle */
217 if (ACA_BANK_ERR_IS_DEFFERED(bank))
218 return handle->hwip == ACA_HWIP_TYPE_UMC;
219
220 if (!aca_bank_hwip_is_matched(bank, handle->hwip))
221 return false;
222
223 if (!bank_ops->aca_bank_is_valid)
224 return true;
225
226 return bank_ops->aca_bank_is_valid(handle, bank, type, handle->data);
227}
228
229static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_bank_info *info)
230{
231 struct aca_bank_error *bank_error;
232
233 bank_error = kvzalloc(sizeof(*bank_error), GFP_KERNEL);
234 if (!bank_error)
235 return NULL;
236
237 INIT_LIST_HEAD(&bank_error->node);
238 memcpy(&bank_error->info, info, sizeof(*info));
239
240 mutex_lock(&aerr->lock);
241 list_add_tail(&bank_error->node, &aerr->list);
242 aerr->nr_errors++;
243 mutex_unlock(&aerr->lock);
244
245 return bank_error;
246}
247
248static struct aca_bank_error *find_bank_error(struct aca_error *aerr, struct aca_bank_info *info)
249{
250 struct aca_bank_error *bank_error = NULL;
251 struct aca_bank_info *tmp_info;
252 bool found = false;
253
254 mutex_lock(&aerr->lock);
255 list_for_each_entry(bank_error, &aerr->list, node) {
256 tmp_info = &bank_error->info;
257 if (tmp_info->socket_id == info->socket_id &&
258 tmp_info->die_id == info->die_id) {
259 found = true;
260 goto out_unlock;
261 }
262 }
263
264out_unlock:
265 mutex_unlock(&aerr->lock);
266
267 return found ? bank_error : NULL;
268}
269
270static void aca_bank_error_remove(struct aca_error *aerr, struct aca_bank_error *bank_error)
271{
272 if (!aerr || !bank_error)
273 return;
274
275 list_del(&bank_error->node);
276 aerr->nr_errors--;
277
278 kvfree(bank_error);
279}
280
281static struct aca_bank_error *get_bank_error(struct aca_error *aerr, struct aca_bank_info *info)
282{
283 struct aca_bank_error *bank_error;
284
285 if (!aerr || !info)
286 return NULL;
287
288 bank_error = find_bank_error(aerr, info);
289 if (bank_error)
290 return bank_error;
291
292 return new_bank_error(aerr, info);
293}
294
295int aca_error_cache_log_bank_error(struct aca_handle *handle, struct aca_bank_info *info,
296 enum aca_error_type type, u64 count)
297{
298 struct aca_error_cache *error_cache = &handle->error_cache;
299 struct aca_bank_error *bank_error;
300 struct aca_error *aerr;
301
302 if (!handle || !info || type >= ACA_ERROR_TYPE_COUNT)
303 return -EINVAL;
304
305 if (!count)
306 return 0;
307
308 aerr = &error_cache->errors[type];
309 bank_error = get_bank_error(aerr, info);
310 if (!bank_error)
311 return -ENOMEM;
312
313 bank_error->count += count;
314
315 return 0;
316}
317
318static int aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type)
319{
320 const struct aca_bank_ops *bank_ops = handle->bank_ops;
321
322 if (!bank)
323 return -EINVAL;
324
325 if (!bank_ops->aca_bank_parser)
326 return -EOPNOTSUPP;
327
328 return bank_ops->aca_bank_parser(handle, bank, type,
329 handle->data);
330}
331
332static int handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank *bank,
333 enum aca_smu_type type, void *data)
334{
335 int ret;
336
337 ret = aca_bank_parser(handle, bank, type);
338 if (ret)
339 return ret;
340
341 return 0;
342}
343
344static int aca_dispatch_bank(struct aca_handle_manager *mgr, struct aca_bank *bank,
345 enum aca_smu_type type, bank_handler_t handler, void *data)
346{
347 struct aca_handle *handle;
348 int ret;
349
350 if (list_empty(&mgr->list))
351 return 0;
352
353 list_for_each_entry(handle, &mgr->list, node) {
354 if (!aca_bank_is_valid(handle, bank, type))
355 continue;
356
357 ret = handler(handle, bank, type, data);
358 if (ret)
359 return ret;
360 }
361
362 return 0;
363}
364
365static int aca_dispatch_banks(struct aca_handle_manager *mgr, struct aca_banks *banks,
366 enum aca_smu_type type, bank_handler_t handler, void *data)
367{
368 struct aca_bank_node *node;
369 struct aca_bank *bank;
370 int ret;
371
372 if (!mgr || !banks)
373 return -EINVAL;
374
375 /* pre check to avoid unnecessary operations */
376 if (list_empty(&mgr->list) || list_empty(&banks->list))
377 return 0;
378
379 list_for_each_entry(node, &banks->list, node) {
380 bank = &node->bank;
381
382 ret = aca_dispatch_bank(mgr, bank, type, handler, data);
383 if (ret)
384 return ret;
385 }
386
387 return 0;
388}
389
/*
 * Decide whether ACA banks should be (re)read for @type.
 *
 * CE queries always update.  For UE queries during GPU recovery the banks
 * are read exactly once (ue_update_flag 0 -> 1 wins the cmpxchg); outside
 * recovery the flag is re-armed for the next recovery cycle.
 */
static bool aca_bank_should_update(struct amdgpu_device *adev, enum aca_smu_type type)
{
	struct amdgpu_aca *aca = &adev->aca;
	bool ret = true;

	/*
	 * Because the UE Valid MCA count will only be cleared after reset,
	 * in order to avoid repeated counting of the error count,
	 * the aca bank is only updated once during the gpu recovery stage.
	 */
	if (type == ACA_SMU_TYPE_UE) {
		if (amdgpu_ras_intr_triggered())
			ret = atomic_cmpxchg(&aca->ue_update_flag, 0, 1) == 0;
		else
			atomic_set(&aca->ue_update_flag, 0);
	}

	return ret;
}
409
/*
 * Encode the collected banks into CPER records when CPER is enabled.
 *
 * For UE queries, each true UE becomes its own CPER entry, while deferred
 * errors are collected into a temporary list and emitted together through
 * the CE-record path.  For CE queries all banks go into one CE record.
 */
static void aca_banks_generate_cper(struct amdgpu_device *adev,
				    enum aca_smu_type type,
				    struct aca_banks *banks,
				    int count)
{
	struct aca_bank_node *node;
	struct aca_bank *bank;
	int r;

	if (!adev->cper.enabled)
		return;

	if (!banks || !count) {
		dev_warn(adev->dev, "fail to generate cper records\n");
		return;
	}

	/* UEs must be encoded into separate CPER entries */
	if (type == ACA_SMU_TYPE_UE) {
		struct aca_banks de_banks;

		aca_banks_init(&de_banks);
		list_for_each_entry(node, &banks->list, node) {
			bank = &node->bank;
			if (bank->aca_err_type == ACA_ERROR_TYPE_DEFERRED) {
				/* deferred errors are batched and emitted below */
				r = aca_banks_add_bank(&de_banks, bank);
				if (r)
					dev_warn(adev->dev, "fail to add de banks, ret = %d\n", r);
			} else {
				if (amdgpu_cper_generate_ue_record(adev, bank))
					dev_warn(adev->dev, "fail to generate ue cper records\n");
			}
		}

		if (!list_empty(&de_banks.list)) {
			if (amdgpu_cper_generate_ce_records(adev, &de_banks, de_banks.nr_banks))
				dev_warn(adev->dev, "fail to generate de cper records\n");
		}

		aca_banks_release(&de_banks);
	} else {
		/*
		 * SMU_TYPE_CE banks are combined into 1 CPER entries,
		 * they could be CEs or DEs or both
		 */
		if (amdgpu_cper_generate_ce_records(adev, banks, count))
			dev_warn(adev->dev, "fail to generate ce cper records\n");
	}
}
459
/*
 * Core update path: read all valid banks of @type from the SMU, dispatch
 * them to the registered handles via @handler, then emit CPER records.
 *
 * The local bank list is released on every exit path via the goto label.
 * Returns 0 on success (including "nothing to do") or a negative errno.
 */
static int aca_banks_update(struct amdgpu_device *adev, enum aca_smu_type type,
			    bank_handler_t handler, struct ras_query_context *qctx, void *data)
{
	struct amdgpu_aca *aca = &adev->aca;
	struct aca_banks banks;
	u32 count = 0;
	int ret;

	/* no handles registered - nothing can consume the banks */
	if (list_empty(&aca->mgr.list))
		return 0;

	/* UE banks are read only once per recovery cycle */
	if (!aca_bank_should_update(adev, type))
		return 0;

	ret = aca_smu_get_valid_aca_count(adev, type, &count);
	if (ret)
		return ret;

	if (!count)
		return 0;

	aca_banks_init(&banks);

	ret = aca_smu_get_valid_aca_banks(adev, type, 0, count, &banks, qctx);
	if (ret)
		goto err_release_banks;

	if (list_empty(&banks.list)) {
		ret = 0;
		goto err_release_banks;
	}

	ret = aca_dispatch_banks(&aca->mgr, &banks, type,
				 handler, data);
	if (ret)
		goto err_release_banks;

	aca_banks_generate_cper(adev, type, &banks, count);

err_release_banks:
	aca_banks_release(&banks);

	return ret;
}
504
505static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_error_type type, struct ras_err_data *err_data)
506{
507 struct aca_bank_info *info;
508 struct amdgpu_smuio_mcm_config_info mcm_info;
509 u64 count;
510
511 if (type >= ACA_ERROR_TYPE_COUNT)
512 return -EINVAL;
513
514 count = bank_error->count;
515 if (!count)
516 return 0;
517
518 info = &bank_error->info;
519 mcm_info.die_id = info->die_id;
520 mcm_info.socket_id = info->socket_id;
521
522 switch (type) {
523 case ACA_ERROR_TYPE_UE:
524 amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, count);
525 break;
526 case ACA_ERROR_TYPE_CE:
527 amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, count);
528 break;
529 case ACA_ERROR_TYPE_DEFERRED:
530 amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, count);
531 break;
532 default:
533 break;
534 }
535
536 return 0;
537}
538
/*
 * Drain @handle's cached errors of @type into @err_data.
 *
 * Each cached record is converted to RAS statistics and then removed,
 * so reading the cache clears it.  The per-type lock is held across the
 * whole drain; aca_bank_error_remove() relies on that.
 */
static int aca_log_aca_error(struct aca_handle *handle, enum aca_error_type type, struct ras_err_data *err_data)
{
	struct aca_error_cache *error_cache = &handle->error_cache;
	struct aca_error *aerr = &error_cache->errors[type];
	struct aca_bank_error *bank_error, *tmp;

	mutex_lock(&aerr->lock);

	if (list_empty(&aerr->list))
		goto out_unlock;

	list_for_each_entry_safe(bank_error, tmp, &aerr->list, node) {
		aca_log_aca_error_data(bank_error, type, err_data);
		aca_bank_error_remove(aerr, bank_error);
	}

out_unlock:
	mutex_unlock(&aerr->lock);

	return 0;
}
560
/*
 * Refresh the error cache from the SMU for the SMU bank type backing
 * @type, then drain the matching cached errors into @err_data.
 *
 * Deferred errors are carried inside CE-type SMU banks, so CE and
 * DEFERRED queries both trigger a CE bank update; non-deferred queries
 * additionally drain the deferred cache since DEs may accompany CEs/UEs.
 */
static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, enum aca_error_type type,
				struct ras_err_data *err_data, struct ras_query_context *qctx)
{
	enum aca_smu_type smu_type;
	int ret;

	switch (type) {
	case ACA_ERROR_TYPE_UE:
		smu_type = ACA_SMU_TYPE_UE;
		break;
	case ACA_ERROR_TYPE_CE:
	case ACA_ERROR_TYPE_DEFERRED:
		smu_type = ACA_SMU_TYPE_CE;
		break;
	default:
		return -EINVAL;
	}

	/* update aca bank to aca source error_cache first */
	ret = aca_banks_update(adev, smu_type, handler_aca_log_bank_error, qctx, NULL);
	if (ret)
		return ret;

	/* DEs may contain in CEs or UEs */
	if (type != ACA_ERROR_TYPE_DEFERRED)
		aca_log_aca_error(handle, ACA_ERROR_TYPE_DEFERRED, err_data);

	return aca_log_aca_error(handle, type, err_data);
}
590
591static bool aca_handle_is_valid(struct aca_handle *handle)
592{
593 if (!handle->mask || !list_empty(&handle->node))
594 return false;
595
596 return true;
597}
598
/**
 * amdgpu_aca_get_error_data - query RAS error data of @type for one handle
 *
 * Returns -EINVAL on NULL @handle/@err_data, -EOPNOTSUPP when
 * aca_handle_is_valid() flags the handle (masked but unregistered),
 * 0 when @type is outside the handle's mask, otherwise the result of
 * the internal query path.
 */
int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle,
			      enum aca_error_type type, struct ras_err_data *err_data,
			      struct ras_query_context *qctx)
{
	if (!handle || !err_data)
		return -EINVAL;

	if (aca_handle_is_valid(handle))
		return -EOPNOTSUPP;

	/* silently succeed for types this handle does not track */
	if ((type < 0) || (!(BIT(type) & handle->mask)))
		return 0;

	return __aca_get_error_data(adev, handle, type, err_data, qctx);
}
614
615static void aca_error_init(struct aca_error *aerr, enum aca_error_type type)
616{
617 mutex_init(&aerr->lock);
618 INIT_LIST_HEAD(&aerr->list);
619 aerr->type = type;
620 aerr->nr_errors = 0;
621}
622
623static void aca_init_error_cache(struct aca_handle *handle)
624{
625 struct aca_error_cache *error_cache = &handle->error_cache;
626 int type;
627
628 for (type = ACA_ERROR_TYPE_UE; type < ACA_ERROR_TYPE_COUNT; type++)
629 aca_error_init(&error_cache->errors[type], type);
630}
631
632static void aca_error_fini(struct aca_error *aerr)
633{
634 struct aca_bank_error *bank_error, *tmp;
635
636 mutex_lock(&aerr->lock);
637 if (list_empty(&aerr->list))
638 goto out_unlock;
639
640 list_for_each_entry_safe(bank_error, tmp, &aerr->list, node)
641 aca_bank_error_remove(aerr, bank_error);
642
643out_unlock:
644 mutex_destroy(&aerr->lock);
645}
646
647static void aca_fini_error_cache(struct aca_handle *handle)
648{
649 struct aca_error_cache *error_cache = &handle->error_cache;
650 int type;
651
652 for (type = ACA_ERROR_TYPE_UE; type < ACA_ERROR_TYPE_COUNT; type++)
653 aca_error_fini(&error_cache->errors[type]);
654}
655
656static int add_aca_handle(struct amdgpu_device *adev, struct aca_handle_manager *mgr, struct aca_handle *handle,
657 const char *name, const struct aca_info *ras_info, void *data)
658{
659 memset(handle, 0, sizeof(*handle));
660
661 handle->adev = adev;
662 handle->mgr = mgr;
663 handle->name = name;
664 handle->hwip = ras_info->hwip;
665 handle->mask = ras_info->mask;
666 handle->bank_ops = ras_info->bank_ops;
667 handle->data = data;
668 aca_init_error_cache(handle);
669
670 INIT_LIST_HEAD(&handle->node);
671 list_add_tail(&handle->node, &mgr->list);
672 mgr->nr_handles++;
673
674 return 0;
675}
676
/*
 * sysfs show hook for a handle's "aca_<name>" attribute.
 *
 * NOTE: the aca cache is auto-cleared once read, so the driver should
 * unify the query entry point and forward the request to the RAS query
 * interface directly.
 */
static ssize_t aca_sysfs_read(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct aca_handle *handle = container_of(attr, struct aca_handle, aca_attr);

	return amdgpu_ras_aca_sysfs_read(dev, attr, handle, buf, handle->data);
}
686
687static int add_aca_sysfs(struct amdgpu_device *adev, struct aca_handle *handle)
688{
689 struct device_attribute *aca_attr = &handle->aca_attr;
690
691 snprintf(handle->attr_name, sizeof(handle->attr_name) - 1, "aca_%s", handle->name);
692 aca_attr->show = aca_sysfs_read;
693 aca_attr->attr.name = handle->attr_name;
694 aca_attr->attr.mode = S_IRUGO;
695 sysfs_attr_init(&aca_attr->attr);
696
697 return sysfs_add_file_to_group(&adev->dev->kobj,
698 &aca_attr->attr,
699 "ras");
700}
701
/**
 * amdgpu_aca_add_handle - register an ACA handle and its sysfs attribute
 *
 * No-op (returns 0) when ACA is disabled.
 *
 * NOTE(review): if add_aca_sysfs() fails, the handle stays registered on
 * the manager list while the error is propagated — confirm callers treat
 * a failure here as fatal or unwind via amdgpu_aca_remove_handle().
 */
int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle,
			  const char *name, const struct aca_info *ras_info, void *data)
{
	struct amdgpu_aca *aca = &adev->aca;
	int ret;

	if (!amdgpu_aca_is_enabled(adev))
		return 0;

	ret = add_aca_handle(adev, &aca->mgr, handle, name, ras_info, data);
	if (ret)
		return ret;

	return add_aca_sysfs(adev, handle);
}
717
718static void remove_aca_handle(struct aca_handle *handle)
719{
720 struct aca_handle_manager *mgr = handle->mgr;
721
722 aca_fini_error_cache(handle);
723 list_del(&handle->node);
724 mgr->nr_handles--;
725}
726
727static void remove_aca_sysfs(struct aca_handle *handle)
728{
729 struct amdgpu_device *adev = handle->adev;
730 struct device_attribute *aca_attr = &handle->aca_attr;
731
732 if (adev->dev->kobj.sd)
733 sysfs_remove_file_from_group(&adev->dev->kobj,
734 &aca_attr->attr,
735 "ras");
736}
737
738void amdgpu_aca_remove_handle(struct aca_handle *handle)
739{
740 if (!handle || list_empty(&handle->node))
741 return;
742
743 remove_aca_sysfs(handle);
744 remove_aca_handle(handle);
745}
746
747static int aca_manager_init(struct aca_handle_manager *mgr)
748{
749 INIT_LIST_HEAD(&mgr->list);
750 mgr->nr_handles = 0;
751
752 return 0;
753}
754
755static void aca_manager_fini(struct aca_handle_manager *mgr)
756{
757 struct aca_handle *handle, *tmp;
758
759 if (list_empty(&mgr->list))
760 return;
761
762 list_for_each_entry_safe(handle, tmp, &mgr->list, node)
763 amdgpu_aca_remove_handle(handle);
764}
765
766bool amdgpu_aca_is_enabled(struct amdgpu_device *adev)
767{
768 return (adev->aca.is_enabled ||
769 adev->debug_enable_ras_aca);
770}
771
772int amdgpu_aca_init(struct amdgpu_device *adev)
773{
774 struct amdgpu_aca *aca = &adev->aca;
775 int ret;
776
777 atomic_set(&aca->ue_update_flag, 0);
778
779 ret = aca_manager_init(&aca->mgr);
780 if (ret)
781 return ret;
782
783 return 0;
784}
785
786void amdgpu_aca_fini(struct amdgpu_device *adev)
787{
788 struct amdgpu_aca *aca = &adev->aca;
789
790 aca_manager_fini(&aca->mgr);
791
792 atomic_set(&aca->ue_update_flag, 0);
793}
794
795int amdgpu_aca_reset(struct amdgpu_device *adev)
796{
797 struct amdgpu_aca *aca = &adev->aca;
798
799 atomic_set(&aca->ue_update_flag, 0);
800
801 return 0;
802}
803
804void amdgpu_aca_set_smu_funcs(struct amdgpu_device *adev, const struct aca_smu_funcs *smu_funcs)
805{
806 struct amdgpu_aca *aca = &adev->aca;
807
808 WARN_ON(aca->smu_funcs);
809 aca->smu_funcs = smu_funcs;
810}
811
/*
 * Decode @bank's IPID register into hwid/mcatype and the socket/die ids.
 * Returns -EINVAL on NULL arguments, 0 otherwise.
 */
int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info)
{
	u64 ipid;
	u32 instidhi, instidlo;

	if (!bank || !info)
		return -EINVAL;

	ipid = bank->regs[ACA_REG_IDX_IPID];
	info->hwid = ACA_REG__IPID__HARDWAREID(ipid);
	info->mcatype = ACA_REG__IPID__MCATYPE(ipid);
	/*
	 * Unified DieID Format: SAASS. A:AID, S:Socket.
	 * Unified DieID[4:4] = InstanceId[0:0]
	 * Unified DieID[0:3] = InstanceIdHi[0:3]
	 *
	 * NOTE(review): the bit extraction below (die from InstanceIdHi>>2,
	 * socket from InstanceIdLo bit 0 plus InstanceIdHi low bits) does
	 * not obviously match the mapping above — verify against the
	 * hardware spec before relying on this comment.
	 */
	instidhi = ACA_REG__IPID__INSTANCEIDHI(ipid);
	instidlo = ACA_REG__IPID__INSTANCEIDLO(ipid);
	info->die_id = ((instidhi >> 2) & 0x03);
	info->socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03);

	return 0;
}
835
836static int aca_bank_get_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
837{
838 struct amdgpu_aca *aca = &adev->aca;
839 const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
840
841 if (!smu_funcs || !smu_funcs->parse_error_code)
842 return -EOPNOTSUPP;
843
844 return smu_funcs->parse_error_code(adev, bank);
845}
846
847int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size)
848{
849 int i, error_code;
850
851 if (!bank || !err_codes)
852 return -EINVAL;
853
854 error_code = aca_bank_get_error_code(adev, bank);
855 if (error_code < 0)
856 return error_code;
857
858 for (i = 0; i < size; i++) {
859 if (err_codes[i] == error_code)
860 return 0;
861 }
862
863 return -EINVAL;
864}
865
866int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en)
867{
868 struct amdgpu_aca *aca = &adev->aca;
869 const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
870
871 if (!smu_funcs || !smu_funcs->set_debug_mode)
872 return -EOPNOTSUPP;
873
874 return smu_funcs->set_debug_mode(adev, en);
875}
876
877#if defined(CONFIG_DEBUG_FS)
878static int amdgpu_aca_smu_debug_mode_set(void *data, u64 val)
879{
880 struct amdgpu_device *adev = (struct amdgpu_device *)data;
881 int ret;
882
883 ret = amdgpu_ras_set_aca_debug_mode(adev, val ? true : false);
884 if (ret)
885 return ret;
886
887 dev_info(adev->dev, "amdgpu set smu aca debug mode %s success\n", val ? "on" : "off");
888
889 return 0;
890}
891
/*
 * Print one decoded bank (type, topology info, raw registers) to the
 * debugfs seq_file.  Banks whose IPID fails to decode are skipped.
 */
static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_smu_type type, int idx)
{
	struct aca_bank_info info;
	int i, ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return;

	seq_printf(m, "aca entry[%d].type: %s\n", idx, type == ACA_SMU_TYPE_UE ? "UE" : "CE");
	seq_printf(m, "aca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n",
		   idx, info.socket_id, info.die_id, info.hwid, info.mcatype);

	for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
		seq_printf(m, "aca entry[%d].regs[%d]: 0x%016llx\n", idx, aca_regs[i].reg_idx, bank->regs[aca_regs[i].reg_idx]);
}
908
/* Carries the seq_file and running entry index through the bank dispatcher. */
struct aca_dump_context {
	struct seq_file *m;
	int idx;
};

/*
 * bank_handler_t used by the debugfs dump: print the bank, then feed it
 * through the normal error-logging path so the cache stays consistent.
 */
static int handler_aca_bank_dump(struct aca_handle *handle, struct aca_bank *bank,
				 enum aca_smu_type type, void *data)
{
	struct aca_dump_context *ctx = (struct aca_dump_context *)data;

	aca_dump_entry(ctx->m, bank, type, ctx->idx++);

	return handler_aca_log_bank_error(handle, bank, type, NULL);
}
923
924static int aca_dump_show(struct seq_file *m, enum aca_smu_type type)
925{
926 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
927 struct aca_dump_context context = {
928 .m = m,
929 .idx = 0,
930 };
931
932 return aca_banks_update(adev, type, handler_aca_bank_dump, NULL, (void *)&context);
933}
934
/* debugfs "aca_ce_dump": show all correctable-error banks. */
static int aca_dump_ce_show(struct seq_file *m, void *unused)
{
	return aca_dump_show(m, ACA_SMU_TYPE_CE);
}

static int aca_dump_ce_open(struct inode *inode, struct file *file)
{
	return single_open(file, aca_dump_ce_show, inode->i_private);
}

static const struct file_operations aca_ce_dump_debug_fops = {
	.owner = THIS_MODULE,
	.open = aca_dump_ce_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
952
/* debugfs "aca_ue_dump": show all uncorrectable-error banks. */
static int aca_dump_ue_show(struct seq_file *m, void *unused)
{
	return aca_dump_show(m, ACA_SMU_TYPE_UE);
}

static int aca_dump_ue_open(struct inode *inode, struct file *file)
{
	return single_open(file, aca_dump_ue_show, inode->i_private);
}

static const struct file_operations aca_ue_dump_debug_fops = {
	.owner = THIS_MODULE,
	.open = aca_dump_ue_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Write-only attribute backing the "aca_debug_mode" debugfs file. */
DEFINE_DEBUGFS_ATTRIBUTE(aca_debug_mode_fops, NULL, amdgpu_aca_smu_debug_mode_set, "%llu\n");
972#endif
973
/*
 * Create the ACA debugfs entries under @root: a write-only debug-mode
 * toggle and read-only UE/CE bank dumps.  No-op without CONFIG_DEBUG_FS
 * or a NULL root.
 */
void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
{
#if defined(CONFIG_DEBUG_FS)
	if (!root)
		return;

	debugfs_create_file("aca_debug_mode", 0200, root, adev, &aca_debug_mode_fops);
	debugfs_create_file("aca_ue_dump", 0400, root, adev, &aca_ue_dump_debug_fops);
	debugfs_create_file("aca_ce_dump", 0400, root, adev, &aca_ce_dump_debug_fops);
#endif
}