/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/sort.h>
#include "amdgpu.h"
#include "umc_v6_7.h"

#define MAX_UMC_POISON_POLLING_TIME_SYNC	20  /* ms */

#define MAX_UMC_HASH_STRING_SIZE	256
static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev,
				struct ras_err_data *err_data, uint64_t err_addr,
				uint32_t ch_inst, uint32_t umc_inst)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(6, 7, 0):
		umc_v6_7_convert_error_address(adev,
				err_data, err_addr, ch_inst, umc_inst);
		break;
	default:
		dev_warn(adev->dev,
			 "UMC address to physical address translation is not supported\n");
		return AMDGPU_RAS_FAIL;
	}

	return AMDGPU_RAS_SUCCESS;
}

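/*
 * amdgpu_umc_page_retirement_mca - retire pages for a UMC error reported
 * through an MCA notifier.
 *
 * Translates the MCA bank address (@err_addr on @ch_inst/@umc_inst) to
 * physical pages and, when bad page retirement is enabled
 * (amdgpu_bad_page_threshold != 0), appends them to the bad page list
 * and saves the list to EEPROM.
 */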
int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
			uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst)
{
	struct ras_err_data err_data;
	int ret;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	err_data.err_addr =
		kcalloc(adev->umc.max_ras_err_cnt_per_query,
			sizeof(struct eeprom_table_record), GFP_KERNEL);
	if (!err_data.err_addr) {
		dev_warn(adev->dev,
			 "Failed to alloc memory for umc error record in MCA notifier!\n");
		ret = AMDGPU_RAS_FAIL;
		goto out_fini_err_data;
	}

	err_data.err_addr_len = adev->umc.max_ras_err_cnt_per_query;

	/* translate UMC channel address to physical address */
	ret = amdgpu_umc_convert_error_address(adev, &err_data, err_addr,
					       ch_inst, umc_inst);
	if (ret)
		goto out_free_err_addr;

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					 err_data.err_addr_cnt, false);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

out_free_err_addr:
	kfree(err_data.err_addr);

out_fini_err_data:
	amdgpu_ras_error_data_fini(&err_data);

	return ret;
}

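/*
 * amdgpu_umc_handle_bad_pages - query UMC error counts and addresses,
 * then retire the affected pages.
 *
 * Depending on the error query mode, counts and addresses are read
 * either directly from hardware (ras_block.hw_ops callbacks) or from
 * the ECC info table maintained by firmware (ecc_info callbacks). The
 * address query also clears the hardware error status, so it is issued
 * even when the address buffer allocation fails. Any pages found are
 * added to the bad page list, saved to EEPROM, and reported to the SMU.
 */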
void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
			void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	unsigned int error_query_mode;
	int ret = 0;
	unsigned long err_count;

	amdgpu_ras_get_error_query_mode(adev, &error_query_mode);

	mutex_lock(&con->page_retirement_lock);
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
	if (ret == -EOPNOTSUPP &&
	    error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);

		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
		    adev->umc.max_ras_err_cnt_per_query) {
			err_data->err_addr =
				kcalloc(adev->umc.max_ras_err_cnt_per_query,
					sizeof(struct eeprom_table_record), GFP_KERNEL);

			/* still call query_ras_error_address to clear error status
			 * even if a NOMEM error is encountered
			 */
			if (!err_data->err_addr)
				dev_warn(adev->dev,
					 "Failed to alloc memory for umc error address record!\n");
			else
				err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;

			/* umc query_ras_error_address is also responsible for clearing
			 * error status
			 */
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
		}
	} else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
		   (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address &&
		    adev->umc.max_ras_err_cnt_per_query) {
			err_data->err_addr =
				kcalloc(adev->umc.max_ras_err_cnt_per_query,
					sizeof(struct eeprom_table_record), GFP_KERNEL);

			/* still call query_ras_error_address to clear error status
			 * even if a NOMEM error is encountered
			 */
			if (!err_data->err_addr)
				dev_warn(adev->dev,
					 "Failed to alloc memory for umc error address record!\n");
			else
				err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;

			/* umc query_ras_error_address is also responsible for clearing
			 * error status
			 */
			adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status);
		}
	}

	/* only uncorrectable and deferred errors need gpu reset */
	if (err_data->ue_count || err_data->de_count) {
		err_count = err_data->ue_count + err_data->de_count;
		if ((amdgpu_bad_page_threshold != 0) &&
		    err_data->err_addr_cnt) {
			amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
						 err_data->err_addr_cnt, false);
			amdgpu_ras_save_bad_pages(adev, &err_count);

			amdgpu_dpm_send_hbm_bad_pages_num(adev,
					con->eeprom_control.ras_num_bad_pages);

			if (con->update_channel_flag) {
				amdgpu_dpm_send_hbm_bad_channel_flag(adev,
						con->eeprom_control.bad_channel_bitmap);
				con->update_channel_flag = false;
			}
		}
	}

	kfree(err_data->err_addr);
	err_data->err_addr = NULL;

	mutex_unlock(&con->page_retirement_lock);
}

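/*
 * amdgpu_umc_do_page_retirement - handle bad pages and schedule a GPU
 * reset when necessary.
 *
 * A reset is triggered only if uncorrectable or deferred errors were
 * found and either the caller requested a reset or the device has
 * already reached the bad page threshold (RMA state).
 */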
static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
		void *ras_error_status,
		struct amdgpu_iv_entry *entry,
		uint32_t reset)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	amdgpu_umc_handle_bad_pages(adev, ras_error_status);

	if ((err_data->ue_count || err_data->de_count) &&
	    (reset || amdgpu_ras_is_rma(adev))) {
		con->gpu_reset_flags |= reset;
		amdgpu_ras_reset_gpu(adev);
	}

	return AMDGPU_RAS_SUCCESS;
}

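/*
 * amdgpu_umc_pasid_poison_handler - entry point for poison consumption
 * handling.
 *
 * On devices where the UMC is visible to the CPU (connected_to_cpu or
 * app APU), page retirement is left to the MCA notifier and only a GPU
 * reset is triggered here. On bare metal, UMC IPs before v12.0 perform
 * page retirement synchronously, while newer IPs queue a poison request
 * for the page retirement worker. Under SRIOV the event is forwarded to
 * the host through the virt ras_poison_handler interface.
 */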
int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, uint16_t pasid,
			pasid_notify pasid_fn, void *data, uint32_t reset)
{
	int ret = AMDGPU_RAS_SUCCESS;

	if (adev->gmc.xgmi.connected_to_cpu ||
	    adev->gmc.is_app_apu) {
		if (reset) {
			/* MCA poison handler is only responsible for GPU reset,
			 * let MCA notifier do page retirement.
			 */
			kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
			amdgpu_ras_reset_gpu(adev);
		}
		return ret;
	}

	if (!amdgpu_sriov_vf(adev)) {
		if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
			struct ras_err_data err_data;
			struct ras_common_if head = {
				.block = AMDGPU_RAS_BLOCK__UMC,
			};
			struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);

			ret = amdgpu_ras_error_data_init(&err_data);
			if (ret)
				return ret;

			ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);

			if (ret == AMDGPU_RAS_SUCCESS && obj) {
				obj->err_data.ue_count += err_data.ue_count;
				obj->err_data.ce_count += err_data.ce_count;
				obj->err_data.de_count += err_data.de_count;
			}

			amdgpu_ras_error_data_fini(&err_data);
		} else {
			struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
			int ret;

			ret = amdgpu_ras_put_poison_req(adev,
					block, pasid, pasid_fn, data, reset);
			if (!ret) {
				atomic_inc(&con->page_retirement_req_cnt);
				atomic_inc(&con->poison_consumption_count);
				wake_up(&con->page_retirement_wq);
			}
		}
	} else {
		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
			adev->virt.ops->ras_poison_handler(adev, block);
		else
			dev_warn(adev->dev,
				 "No ras_poison_handler interface in SRIOV!\n");
	}

	return ret;
}

int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, uint32_t reset)
{
	return amdgpu_umc_pasid_poison_handler(adev,
				block, 0, NULL, NULL, reset);
}

int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
		void *ras_error_status,
		struct amdgpu_iv_entry *entry)
{
	return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry,
				AMDGPU_RAS_GPU_RESET_MODE1_RESET);
}

int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_umc_ras *ras;

	if (!adev->umc.ras)
		return 0;

	ras = adev->umc.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register umc ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "umc");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->umc.ras_if = &ras->ras_block.ras_comm;

	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;

	if (!ras->ras_block.ras_cb)
		ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;

	return 0;
}

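/*
 * amdgpu_umc_ras_late_init - common late init for the UMC RAS block.
 *
 * Runs the generic RAS block late init, enables the ECC interrupt on
 * bare metal when RAS is supported, and invokes the per-IP error
 * counter init callback if one is provided.
 */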
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_sriov_vf(adev))
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
		if (r)
			goto late_fini;
	}

	/* ras init of specific umc version */
	if (adev->umc.ras &&
	    adev->umc.ras->err_cnt_init)
		adev->umc.ras->err_cnt_init(adev);

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

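/*
 * amdgpu_umc_process_ecc_irq - ECC interrupt handler that dispatches
 * the IV entry to the RAS interrupt infrastructure for the UMC block.
 */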
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->umc.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

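/*
 * amdgpu_umc_fill_error_record - append one retired page to err_data.
 *
 * Fills the next free eeprom_table_record with the raw error address,
 * the page frame number of the retired page, a timestamp, and the
 * channel/UMC instance. Returns -EINVAL if err_data has no address
 * buffer or the buffer is already full.
 */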
int amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
		uint64_t err_addr,
		uint64_t retired_page,
		uint32_t channel_index,
		uint32_t umc_inst)
{
	struct eeprom_table_record *err_rec;

	if (!err_data ||
	    !err_data->err_addr ||
	    (err_data->err_addr_cnt >= err_data->err_addr_len))
		return -EINVAL;

	err_rec = &err_data->err_addr[err_data->err_addr_cnt];

	err_rec->address = err_addr;
	/* page frame address is saved */
	err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
	err_rec->ts = (uint64_t)ktime_get_real_seconds();
	err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
	err_rec->cu = 0;
	err_rec->mem_channel = channel_index;
	err_rec->mcumc_id = umc_inst;

	err_data->err_addr_cnt++;

	return 0;
}

static int amdgpu_umc_loop_all_aid(struct amdgpu_device *adev, umc_func func,
				   void *data)
{
	uint32_t umc_node_inst;
	uint32_t node_inst;
	uint32_t umc_inst;
	uint32_t ch_inst;
	int ret;

	/*
	 * This loop is done based on the following:
	 * umc.active_mask = mask of active umc instances across all nodes
	 * umc.umc_inst_num = maximum number of umc instances per node
	 * umc.node_inst_num = maximum number of node instances
	 * Channel instances are not assumed to be harvested.
	 */
	dev_dbg(adev->dev, "active umcs: %lx, umc_inst per node: %d\n",
		adev->umc.active_mask, adev->umc.umc_inst_num);
	for_each_set_bit(umc_node_inst, &(adev->umc.active_mask),
			 adev->umc.node_inst_num * adev->umc.umc_inst_num) {
		node_inst = umc_node_inst / adev->umc.umc_inst_num;
		umc_inst = umc_node_inst % adev->umc.umc_inst_num;
		LOOP_UMC_CH_INST(ch_inst) {
			dev_dbg(adev->dev,
				"node_inst: %d, umc_inst: %d, ch_inst: %d\n",
				node_inst, umc_inst, ch_inst);
			ret = func(adev, node_inst, umc_inst, ch_inst, data);
			if (ret) {
				dev_err(adev->dev,
					"Node %d umc %d ch %d func returns %d\n",
					node_inst, umc_inst, ch_inst, ret);
				return ret;
			}
		}
	}

	return 0;
}

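/*
 * amdgpu_umc_loop_channels - invoke @func for every UMC channel.
 *
 * Three layouts are handled: multi-AID devices (aid_mask set) walk the
 * active instance bitmap, devices with node instances iterate over
 * node/umc/channel, and legacy devices iterate over umc/channel with a
 * node instance of 0. Iteration stops at the first non-zero return.
 */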
int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
			umc_func func, void *data)
{
	uint32_t node_inst = 0;
	uint32_t umc_inst = 0;
	uint32_t ch_inst = 0;
	int ret = 0;

	if (adev->aid_mask)
		return amdgpu_umc_loop_all_aid(adev, func, data);

	if (adev->umc.node_inst_num) {
		LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
			ret = func(adev, node_inst, umc_inst, ch_inst, data);
			if (ret) {
				dev_err(adev->dev, "Node %d umc %d ch %d func returns %d\n",
					node_inst, umc_inst, ch_inst, ret);
				return ret;
			}
		}
	} else {
		LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
			ret = func(adev, 0, umc_inst, ch_inst, data);
			if (ret) {
				dev_err(adev->dev, "Umc %d ch %d func returns %d\n",
					umc_inst, ch_inst, ret);
				return ret;
			}
		}
	}

	return 0;
}

int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
			uint64_t status, uint64_t ipid, uint64_t addr)
{
	if (adev->umc.ras && adev->umc.ras->update_ecc_status)
		return adev->umc.ras->update_ecc_status(adev,
					status, ipid, addr);
	return 0;
}

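/*
 * amdgpu_umc_logs_ecc_err - log a newly detected ECC error.
 *
 * The error is inserted into @ecc_tree keyed by the PFN of its physical
 * address and tagged UMC_ECC_NEW_DETECTED_TAG so that later processing
 * can find entries that have not been handled yet; inserting a PFN that
 * is already logged returns -EEXIST from radix_tree_insert.
 */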
int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
		struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_ecc_log_info *ecc_log;
	int ret;

	ecc_log = &con->umc_ecc_log;

	mutex_lock(&ecc_log->lock);
	ret = radix_tree_insert(ecc_tree, ecc_err->pa_pfn, ecc_err);
	if (!ret)
		radix_tree_tag_set(ecc_tree,
			ecc_err->pa_pfn, UMC_ECC_NEW_DETECTED_TAG);
	mutex_unlock(&ecc_log->lock);

	return ret;
}

int amdgpu_umc_pages_in_a_row(struct amdgpu_device *adev,
			struct ras_err_data *err_data, uint64_t pa_addr)
{
	struct ta_ras_query_address_output addr_out;

	/* reinit err_data */
	err_data->err_addr_cnt = 0;
	err_data->err_addr_len = adev->umc.retire_unit;

	addr_out.pa.pa = pa_addr;
	if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
		return adev->umc.ras->convert_ras_err_addr(adev, err_data, NULL,
				&addr_out, false);
	else
		return -EINVAL;
}

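/*
 * amdgpu_umc_lookup_bad_pages_in_a_row - expand one physical address
 * into the PFNs of all pages retired along with it.
 *
 * Converts @pa_addr with a temporary buffer of retire_unit records and
 * copies the resulting PFNs into @pfns. Returns the number of PFNs
 * copied when the whole retire unit fits, a negative error code if the
 * conversion fails, and 0 if the temporary buffer cannot be allocated
 * or @pfns is too small.
 */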
int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
			uint64_t pa_addr, uint64_t *pfns, int len)
{
	int i, ret;
	struct ras_err_data err_data;

	err_data.err_addr = kcalloc(adev->umc.retire_unit,
			sizeof(struct eeprom_table_record), GFP_KERNEL);
	if (!err_data.err_addr) {
		dev_warn(adev->dev, "Failed to alloc memory in bad page lookup!\n");
		return 0;
	}

	ret = amdgpu_umc_pages_in_a_row(adev, &err_data, pa_addr);
	if (ret)
		goto out;

	for (i = 0; i < adev->umc.retire_unit; i++) {
		if (i >= len)
			goto out;

		pfns[i] = err_data.err_addr[i].retired_page;
	}
	ret = i;
	adev->umc.err_addr_cnt = err_data.err_addr_cnt;

out:
	kfree(err_data.err_addr);
	return ret;
}

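/*
 * amdgpu_umc_mca_to_addr - translate an MCA error address to physical
 * addresses through the per-IP convert_ras_err_addr callback; the
 * @dump_addr flag is passed through to the callback. Devices without
 * the callback return 0 without performing any translation.
 */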
int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
			uint64_t err_addr, uint32_t ch, uint32_t umc,
			uint32_t node, uint32_t socket,
			struct ta_ras_query_address_output *addr_out, bool dump_addr)
{
	struct ta_ras_query_address_input addr_in;

	memset(&addr_in, 0, sizeof(addr_in));
	addr_in.ma.err_addr = err_addr;
	addr_in.ma.ch_inst = ch;
	addr_in.ma.umc_inst = umc;
	addr_in.ma.node_inst = node;
	addr_in.ma.socket_id = socket;

	if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
		return adev->umc.ras->convert_ras_err_addr(adev, NULL, &addr_in,
				addr_out, dump_addr);

	return 0;
}

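/*
 * amdgpu_umc_pa2mca - translate a physical address back to an MCA
 * address via the RAS TA, with the NPS (memory partition) mode encoded
 * in the upper bits of the address before the TA_RAS_PA_TO_MCA query.
 */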
int amdgpu_umc_pa2mca(struct amdgpu_device *adev,
		uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps)
{
	struct ta_ras_query_address_input addr_in;
	struct ta_ras_query_address_output addr_out;
	int ret;

	memset(&addr_in, 0, sizeof(addr_in));
	/* encode the NPS mode the pa belongs to in the high bits */
	addr_in.pa.pa = pa | ((uint64_t)nps << 58);
	addr_in.addr_type = TA_RAS_PA_TO_MCA;
	ret = psp_ras_query_address(&adev->psp, &addr_in, &addr_out);
	if (ret) {
		dev_warn(adev->dev, "Failed to query RAS MCA address for 0x%llx\n",
			 pa);

		return ret;
	}

	*mca = addr_out.ma.err_addr;

	return 0;
}