/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "vega10_enum.h"

#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"

#include "gfx_v9_4_3.h"
#include "amdgpu_xcp.h"
#include "amdgpu_aca.h"

MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");

#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L

#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301

#define mmSMNAID_XCD0_MCA_SMU 0x36430400	/* SMN AID XCD0 */
#define mmSMNAID_XCD1_MCA_SMU 0x38430400	/* SMN AID XCD1 */
#define mmSMNXCD_XCD0_MCA_SMU 0x40430400	/* SMN XCD XCD0 */

#define XCC_REG_RANGE_0_LOW  0x2000	/* XCC gfxdec0 lower bound */
#define XCC_REG_RANGE_0_HIGH 0x3400	/* XCC gfxdec0 upper bound */
#define XCC_REG_RANGE_1_LOW  0xA000	/* XCC gfxdec1 lower bound */
#define XCC_REG_RANGE_1_HIGH 0x10000	/* XCC gfxdec1 upper bound */

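/*
 * The two ranges above are the XCC-local ("gfxdec0"/"gfxdec1") register
 * decode windows; NORMALIZE_XCC_REG_OFFSET() below keeps only the low
 * 16 bits of an offset so that a packet executed on any XCC addresses
 * its own instance of the register.
 */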
#define NORMALIZE_XCC_REG_OFFSET(offset) \
	(offset & 0xFFFF)

struct amdgpu_gfx_ras gfx_v9_4_3_ras;

static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				  struct amdgpu_cu_info *cu_info);

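/*
 * KIQ PM4 packet builders. The *_size fields in gfx_v9_4_3_kiq_pm4_funcs
 * further down record the dword count each helper emits: one PACKET3
 * header plus the payload dwords written here (e.g. set_resources is
 * 1 + 7 = 8 dwords, invalidate_tlbs is 1 + 1 = 2).
 */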
static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
					 uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  /* vmid_mask: 0, queue_type: 0 (KIQ) */
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			  lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			  upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				      struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			  /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			  /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  /* num_queues: must be 1 */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
					struct amdgpu_ring *ring,
					enum amdgpu_unmap_queues_action action,
					u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
					struct amdgpu_ring *ring,
					u64 addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
	.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_4_3_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
}

static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
{
	int i, num_xcc, dev_inst;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		dev_inst = GET_INST(GC, i);

		WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
			     GOLDEN_GB_ADDR_CONFIG);
		/* Golden settings applied by driver for ASICs with rev_id 0 */
		if (adev->rev_id == 0) {
			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
					      REDUCE_FIFO_DEPTH_BY_2, 2);
		} else {
			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
					      SPARE, 0x1);
		}
	}
}

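/*
 * Worked example (hypothetical numbers): an absolute offset of 0x12345
 * masks down to 0x2345, which lies inside gfxdec0 (0x2000-0x3400), so the
 * normalized, XCC-local value is returned; an offset whose low 16 bits
 * fall outside both windows is passed through unchanged.
 */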
static uint32_t gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg)
{
	uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);

	/*
	 * If it is an XCC reg, normalize the reg to keep only the
	 * lower 16 bits, i.e. the offset local to the XCC.
	 */
	if (((normalized_reg >= XCC_REG_RANGE_0_LOW) &&
	     (normalized_reg < XCC_REG_RANGE_0_HIGH)) ||
	    ((normalized_reg >= XCC_REG_RANGE_1_LOW) &&
	     (normalized_reg < XCC_REG_RANGE_1_HIGH)))
		return normalized_reg;
	else
		return reg;
}

static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					 bool wc, uint32_t reg, uint32_t val)
{
	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				    int mem_space, int opt, uint32_t addr0,
				    uint32_t addr1, uint32_t ref, uint32_t mask,
				    uint32_t inv)
{
	/* Only do the normalization on regspace */
	if (mem_space == 0) {
		addr0 = gfx_v9_4_3_normalize_xcc_reg_offset(addr0);
		addr1 = gfx_v9_4_3_normalize_xcc_reg_offset(addr1);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |	 /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

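/*
 * Ring test: seed SCRATCH_REG0 with 0xCAFEDEAD over MMIO, emit a
 * SET_UCONFIG_REG packet that writes 0xDEADBEEF to the same register,
 * then poll up to adev->usec_timeout microseconds for the value to land.
 */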
static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
{
	uint32_t scratch_reg0_offset, xcc_offset;
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* Use a register offset which is local to the XCC in the packet */
	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id),
					       regSCRATCH_REG0);
	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
	tmp = RREG32(scratch_reg0_offset);

	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch_reg0_offset);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

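/*
 * IB test: seed a writeback slot with 0xCAFEDEAD, then submit a small
 * indirect buffer whose WRITE_DATA packet (dst_sel 5, i.e. memory) stores
 * 0xDEADBEEF there; finding that value after the fence signals proves the
 * engine fetched and executed the IB.
 */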
static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));

	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/* This value may differ per partition */
static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);

	return clock;
}

static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.ce_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);
	amdgpu_ucode_release(&adev->gfx.mec2_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
					 const char *chip_name)
{
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
				   "amdgpu/%s_rlc.bin", chip_name);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.rlc_fw);

	return err;
}

static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
{
	return true;
}

static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
{
	if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
}

static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
						const char *chip_name)
{
	int err;

	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
				   "amdgpu/%s_mec.bin", chip_name);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);

	adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
	adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;

	gfx_v9_4_3_check_if_need_gfxoff(adev);

out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	return err;
}

static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
{
	char ucode_prefix[15];
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));

	r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
	if (r)
		return r;

	r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
	if (r)
		return r;

	return r;
}

static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

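/*
 * One 4 KiB EOP buffer (GFX9_MEC_HPD_SIZE) is carved out of a single BO
 * per compute ring on every XCC, hence the
 * num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE allocation below. On
 * the emulator the clear loop sleeps every 50 dwords to avoid hogging
 * the model.
 */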
static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
{
	int r, i, num_xcc;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;
	const struct gfx_firmware_header_v1_0 *mec_hdr;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
			    AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size =
		adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM |
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v9_4_3_mec_fini(adev);
			return r;
		}

		if (amdgpu_emu_mode == 1) {
			for (i = 0; i < mec_hpd_size / 4; i++) {
				memset((void *)(hpd + i), 0, 4);
				if (i % 50 == 0)
					msleep(1);
			}
		} else {
			memset(hpd, 0, mec_hpd_size);
		}

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_4_3_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

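/*
 * GRBM_GFX_INDEX steers subsequent register accesses to one SE/SH/
 * instance combination within an XCC; passing 0xffffffff for a field
 * selects broadcast writes instead. Callers serialize select + access
 * through adev->grbm_idx_mutex (see the serdes wait further down).
 */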
static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
					u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id),
				   regGRBM_GFX_INDEX, data);
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id,
			      uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
			 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
			 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
			 (address << SQ_IND_INDEX__INDEX__SHIFT) |
			 (SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id,
			   uint32_t simd, uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
			 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
			 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
			 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
			 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
			 (SQ_IND_INDEX__FORCE_READ_MASK) |
			 (SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t *dst,
				      int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
}

static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev,
				       uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t start,
				       uint32_t size, uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, 0,
		       start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev,
				       uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t thread,
				       uint32_t start, uint32_t size,
				       uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, thread,
		       start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
					u32 me, u32 pipe, u32 q, u32 vm,
					u32 xcc_id)
{
	soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
}

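/*
 * Compute partition plumbing: CP_HYP_XCP_CTL holds both the number of
 * XCCs in this XCC's partition and its virtual ID within it. Reads go
 * through instance 0 since the value is mirrored on all instances.
 */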
static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev)
{
	u32 xcp_ctl;

	/* Value is expected to be the same on all, fetch from first instance */
	xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL);

	return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP);
}

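/*
 * When PSP is available the partition switch is requested from firmware
 * via psp_spatial_partition(), asking for NUM_XCC / num_xccs_per_xcp
 * partitions; otherwise the driver programs CP_HYP_XCP_CTL on each XCC
 * directly. E.g. with 8 XCCs and 2 per partition, XCCs 0/1 get virtual
 * IDs 0/1 in partition 0, XCCs 2/3 form partition 1, and so on.
 */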
static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
					       int num_xccs_per_xcp)
{
	int ret, i, num_xcc;
	u32 tmp = 0;

	if (adev->psp.funcs) {
		ret = psp_spatial_partition(&adev->psp,
					    NUM_XCC(adev->gfx.xcc_mask) /
					    num_xccs_per_xcp);
		if (ret)
			return ret;
	} else {
		num_xcc = NUM_XCC(adev->gfx.xcc_mask);

		for (i = 0; i < num_xcc; i++) {
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
					    num_xccs_per_xcp);
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
					    i % num_xccs_per_xcp);
			WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
				     tmp);
		}
		ret = 0;
	}

	adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;

	return ret;
}

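/*
 * IH node to logical XCC: count the enabled XCCs at or below the node's
 * position. Example: xcc_mask = 0b1011 and ih_node = 4 gives
 * GENMASK(2, 0) = 0b111, hweight8(0b011) = 2, so the node maps to
 * logical XCC 1 (zero-based).
 */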
static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
{
	int xcc;

	xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
	if (!xcc) {
		dev_err(adev->dev, "Couldn't find xcc mapping from IH node\n");
		return -EINVAL;
	}

	return xcc - 1;
}

static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
	.read_wave_data = &gfx_v9_4_3_read_wave_data,
	.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
	.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
	.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
	.get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
};

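/*
 * ACA (error bank) handling: the INSTANCEIDLO field of the IPID bank
 * register identifies the reporting AID/XCD, so banks whose instance ID
 * is not one of the GFX SMN addresses defined at the top of this file
 * are filtered out in gfx_v9_4_3_aca_bank_is_valid(). UEs are logged
 * with a fixed count of one; CEs use the error count from MISC0.
 */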
static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
				      struct aca_bank *bank,
				      enum aca_smu_type type, void *data)
{
	struct aca_bank_info info;
	u64 misc0;
	u32 instlo;
	int ret;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	/* NOTE: overwrite info.die_id with xcd id for gfx */
	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);
	info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;

	misc0 = bank->regs[ACA_REG_IDX_MISC0];

	switch (type) {
	case ACA_SMU_TYPE_UE:
		ret = aca_error_cache_log_bank_error(handle, &info,
						     ACA_ERROR_TYPE_UE, 1ULL);
		break;
	case ACA_SMU_TYPE_CE:
		ret = aca_error_cache_log_bank_error(handle, &info,
						     ACA_ERROR_TYPE_CE,
						     ACA_REG__MISC0__ERRCNT(misc0));
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle,
					 struct aca_bank *bank,
					 enum aca_smu_type type, void *data)
{
	u32 instlo;

	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);
	switch (instlo) {
	case mmSMNAID_XCD0_MCA_SMU:
	case mmSMNAID_XCD1_MCA_SMU:
	case mmSMNXCD_XCD0_MCA_SMU:
		return true;
	default:
		break;
	}

	return false;
}

static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
	.aca_bank_parser = gfx_v9_4_3_aca_bank_parser,
	.aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
};

static const struct aca_info gfx_v9_4_3_aca_info = {
	.hwip = ACA_HWIP_TYPE_SMU,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
	.bank_ops = &gfx_v9_4_3_aca_bank_ops,
};

static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
	adev->gfx.ras = &gfx_v9_4_3_ras;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0),
					      regGB_ADDR_CONFIG);
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));

	return 0;
}

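/*
 * Each XCC owns a contiguous doorbell window of xcc_doorbell_range slots
 * starting at mec_ring0, and a matching slice of the shared EOP BO; the
 * << 1 below converts the slot index into a dword-granular doorbell
 * index, since compute queues use 64-bit doorbells.
 */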
static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
					int xcc_id, int mec, int pipe, int queue)
{
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;
	uint32_t xcc_doorbell_start;

	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
				       ring_id];

	/* mec0 is me1 */
	ring->xcc_id = xcc_id;
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
			     GFX9_MEC_HPD_SIZE;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	sprintf(ring->name, "comp_%d.%d.%d.%d",
		ring->xcc_id, ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		   + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		   + ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
		  AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
				hw_prio, NULL);
}

static int gfx_v9_4_3_sw_init(void *handle)
{
	int i, j, k, r, ring_id, xcc_id, num_xcc;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.mec.num_mec = 2;
	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_9_0__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	r = adev->gfx.rlc.funcs->init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_4_3_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		ring_id = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
				     k++) {
					if (!amdgpu_gfx_is_mec_queue_enabled(
						adev, xcc_id, i, k, j))
						continue;

					r = gfx_v9_4_3_compute_ring_init(adev,
									 ring_id,
									 xcc_id,
									 i, k, j);
					if (r)
						return r;

					ring_id++;
				}
			}
		}

		r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
		if (r)
			return r;

		/* create MQDs for all compute queues as well as the KIQ for SRIOV case */
		r = amdgpu_gfx_mqd_sw_init(adev,
					   sizeof(struct v9_mqd_allocation),
					   xcc_id);
		if (r)
			return r;
	}

	r = gfx_v9_4_3_gpu_early_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_ras_sw_init(adev);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev))
		r = amdgpu_gfx_sysfs_init(adev);

	return r;
}

static int gfx_v9_4_3_sw_fini(void *handle)
{
	int i, num_xcc;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	for (i = 0; i < num_xcc; i++) {
		amdgpu_gfx_mqd_sw_fini(adev, i);
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
		amdgpu_gfx_kiq_fini(adev, i);
	}

	gfx_v9_4_3_mec_fini(adev);
	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
	gfx_v9_4_3_free_microcode(adev);
	if (!amdgpu_sriov_vf(adev))
		amdgpu_gfx_sysfs_fini(adev);

	return 0;
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
					     int xcc_id)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:		0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:	0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:	0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG,
				 sh_mem_config);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES,
				 sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
				    regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
				 regSPI_GDBG_PER_VMID_CNTL, data);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);

	/*
	 * Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs.
	 */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
	}
}

static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
	}
}

static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 tmp;
	int i;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		if (i == 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, 0);
		} else {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
					    (adev->gmc.private_aperture_start >>
					     48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
					    (adev->gmc.shared_aperture_start >>
					     48));
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, tmp);
		}
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
	gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
}

static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
	adev->gfx.config.db_debug2 =
		RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);

	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_constants_init(adev, i);
}

static void
gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
					   int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
}

static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
{
	/*
	 * The RLC save/restore list is supported from RLC v2.1 onwards,
	 * and it is required by the GFXOFF feature.
	 */
	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
}

static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
	data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
}

static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_setting;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return false;

	return true;
}

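/*
 * RLC safe-mode handshake: writing CMD with MESSAGE = 1 asks the RLC to
 * enter safe mode, and the RLC acknowledges by clearing the CMD bit,
 * which set_safe_mode polls for; unset_safe_mode just writes CMD with
 * MESSAGE = 0 and does not wait.
 */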
static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;
	unsigned i;

	data = RLC_SAFE_MODE__CMD_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE),
				   RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
					   int xcc_id)
{
	uint32_t data;

	data = RLC_SAFE_MODE__CMD_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
}

static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	int xcc_id, num_xcc;
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
		reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
		reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
		reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
		reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
		reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
		reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
		reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
	}
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
{
	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);

	return 0;
}

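/*
 * Wait for the RLC serdes to go idle: first the per-CU masters on every
 * SE/SH combination (selected through GRBM_GFX_INDEX under
 * grbm_idx_mutex), then the non-CU masters (SE, GC, TC0, TC1), each
 * polled for up to adev->usec_timeout microseconds.
 */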
static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
					       int xcc_id)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
						    xcc_id);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
			if (k == adev->usec_timeout) {
				gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
							    0xffffffff,
							    0xffffffff, xcc_id);
				mutex_unlock(&adev->grbm_idx_mutex);
				DRM_INFO("Timeout waiting for RLC serdes %u,%u\n",
					 i, j);
				return;
			}
		}
	}
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
	       RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
	       RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
	       RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}

static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						     bool enable, int xcc_id)
{
	u32 tmp;

	/* These interrupts should be enabled to drive DS clock */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
}

static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
			      RLC_ENABLE_F32, 0);
	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
	gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
}

static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_rlc_stop(adev, i);
}

static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
			      SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
			      SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_rlc_reset(adev, i);
}

static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
			      RLC_ENABLE_F32, 1);
	udelay(50);

	/* APUs (e.g. carrizo) enable the CP interrupt only after CP init */
	if (!(adev->flags & AMD_IS_APU)) {
		gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
		udelay(50);
	}
}

static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
{
#ifdef AMDGPU_RLC_DEBUG_RETRY
	u32 rlc_ucode_ver;
#endif
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		gfx_v9_4_3_xcc_rlc_start(adev, i);
#ifdef AMDGPU_RLC_DEBUG_RETRY
		/* RLC_GPM_GENERAL_6 : RLC ucode version */
		rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
		if (rlc_ucode_ver == 0x108) {
			dev_info(adev->dev,
				 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
				 rlc_ucode_ver, adev->gfx.rlc_fw_version);
			/*
			 * RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
			 * default is 0x9C4 to create a 100us interval
			 */
			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
			/*
			 * RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
			 * to disable the page fault retry interrupts, default is
			 * 0x100 (256)
			 */
			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
		}
#endif
	}
}

static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
					     int xcc_id)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
				   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
		     RLCG_UCODE_LOADING_START_ADDRESS);
	for (i = 0; i < fw_size; i++) {
		if (amdgpu_emu_mode == 1 && i % 100 == 0) {
			dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
			msleep(1);
		}
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA,
			     le32_to_cpup(fw_data++));
	}
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
		     adev->gfx.rlc_fw_version);

	return 0;
}

static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
{
	int r;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
		/* legacy rlc firmware loading */
		r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
		if (r)
			return r;
		gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
	}

	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	/* disable CG */
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
	gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);

	return 0;
}

static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
{
	int r, i, num_xcc;

	if (amdgpu_sriov_vf(adev))
		return 0;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
		if (r)
			return r;
	}

	return 0;
}

static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev,
				       struct amdgpu_ring *ring, unsigned vmid)
{
	u32 reg, pre_data, data;

	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL);
	if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
		pre_data = RREG32_NO_KIQ(reg);
	else
		pre_data = RREG32(reg);

	data = pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK);
	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

	if (pre_data != data) {
		if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
			WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
		else
			WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
	}
}

static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
	{SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
	{SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
};

static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
					uint32_t offset,
					struct soc15_reg_rlcg *entries,
					int arr_size)
{
	int i, inst;
	uint32_t reg;

	if (!entries)
		return false;

	for (i = 0; i < arr_size; i++) {
		const struct soc15_reg_rlcg *entry;

		entry = &entries[i];
		inst = adev->ip_map.logical_to_dev_inst ?
		       adev->ip_map.logical_to_dev_inst(adev, entry->hwip,
							entry->instance) :
		       entry->instance;
		reg = adev->reg_offset[entry->hwip][inst][entry->segment] +
		      entry->reg;
		if (offset == reg)
			return true;
	}

	return false;
}

static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
{
	return gfx_v9_4_3_check_rlcg_range(adev, offset,
					   (void *)rlcg_access_gc_9_4_3,
					   ARRAY_SIZE(rlcg_access_gc_9_4_3));
}

static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
					     bool enable, int xcc_id)
{
	if (enable) {
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
	} else {
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
				 (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
				  CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		adev->gfx.kiq[xcc_id].ring.sched.ready = false;
	}
	udelay(50);
}

static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev,
						    int xcc_id)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i;
	u32 tmp;
	u32 mec_ucode_addr_offset;
	u32 mec_ucode_data_offset;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
		     adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
		     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));

	mec_ucode_addr_offset =
		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR);
	mec_ucode_data_offset =
		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA);

	/* MEC1 */
	WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset);
	for (i = 0; i < mec_hdr->jt_size; i++)
		WREG32(mec_ucode_data_offset,
		       le32_to_cpup(fw_data + mec_hdr->jt_offset + i));

	WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version);
	/* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */

	return 0;
}

1553/* KIQ functions */
static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is the KIQ queue */
	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp);
}

static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring,
					struct v9_mqd *mqd)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
			mqd->cp_hqd_queue_priority =
				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
		}
	}
}

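/*
 * The MQD (memory queue descriptor) mirrors the CP_HQD_* register state
 * of one hardware queue. gfx_v9_4_3_xcc_mqd_init() fills the in-memory
 * copy; for the KIQ itself, gfx_v9_4_3_xcc_kiq_init_register() then
 * writes those values straight into the registers instead of going
 * through a map_queues packet.
 */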
1581static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id)
1582{
1583 struct amdgpu_device *adev = ring->adev;
1584 struct v9_mqd *mqd = ring->mqd_ptr;
1585 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
1586 uint32_t tmp;
1587
1588 mqd->header = 0xC0310800;
1589 mqd->compute_pipelinestat_enable = 0x00000001;
1590 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
1591 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
1592 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
1593 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
1594 mqd->compute_misc_reserved = 0x00000003;
1595
1596 mqd->dynamic_cu_mask_addr_lo =
1597 lower_32_bits(ring->mqd_gpu_addr
1598 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1599 mqd->dynamic_cu_mask_addr_hi =
1600 upper_32_bits(ring->mqd_gpu_addr
1601 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1602
1603 eop_base_addr = ring->eop_gpu_addr >> 8;
1604 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
1605 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
1606
1607 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1608 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL);
1609 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
1610 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
1611
1612 mqd->cp_hqd_eop_control = tmp;
1613
1614 /* enable doorbell? */
1615 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL);
1616
1617 if (ring->use_doorbell) {
1618 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1619 DOORBELL_OFFSET, ring->doorbell_index);
1620 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1621 DOORBELL_EN, 1);
1622 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1623 DOORBELL_SOURCE, 0);
1624 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1625 DOORBELL_HIT, 0);
1626 if (amdgpu_sriov_vf(adev))
1627 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1628 DOORBELL_MODE, 1);
1629 } else {
1630 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1631 DOORBELL_EN, 0);
1632 }
1633
1634 mqd->cp_hqd_pq_doorbell_control = tmp;
1635
1636 /* disable the queue if it's active */
1637 ring->wptr = 0;
1638 mqd->cp_hqd_dequeue_request = 0;
1639 mqd->cp_hqd_pq_rptr = 0;
1640 mqd->cp_hqd_pq_wptr_lo = 0;
1641 mqd->cp_hqd_pq_wptr_hi = 0;
1642
1643 /* set the pointer to the MQD */
1644 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
1645 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
1646
1647 /* set MQD vmid to 0 */
1648 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL);
1649 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
1650 mqd->cp_mqd_control = tmp;
1651
1652 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
1653 hqd_gpu_addr = ring->gpu_addr >> 8;
1654 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
1655 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
1656
1657 /* set up the HQD, this is similar to CP_RB0_CNTL */
1658 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL);
1659 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
1660 (order_base_2(ring->ring_size / 4) - 1));
1661 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
1662 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
1663#ifdef __BIG_ENDIAN
1664 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
1665#endif
1666 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
1667 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
1668 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
1669 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
1670 mqd->cp_hqd_pq_control = tmp;
1671
1672 /* set the wb address whether it's enabled or not */
1673 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
1674 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
1675 mqd->cp_hqd_pq_rptr_report_addr_hi =
1676 upper_32_bits(wb_gpu_addr) & 0xffff;
1677
1678 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
1679 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
1680 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
1681 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
1682
1683 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
1684 ring->wptr = 0;
1685 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR);
1686
1687 /* set the vmid for the queue */
1688 mqd->cp_hqd_vmid = 0;
1689
1690 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE);
1691 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
1692 mqd->cp_hqd_persistent_state = tmp;
1693
1694 /* set MIN_IB_AVAIL_SIZE */
1695 tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL);
1696 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
1697 mqd->cp_hqd_ib_control = tmp;
1698
1699 /* set static priority for a queue/ring */
1700 gfx_v9_4_3_mqd_set_priority(ring, mqd);
1701 mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM);
1702
1703 /* The map_queues packet doesn't need to activate the queue,
1704 * so only the KIQ needs to set this field.
1705 */
1706 if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
1707 mqd->cp_hqd_active = 1;
1708
1709 return 0;
1710}
1711
1712static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
1713 int xcc_id)
1714{
1715 struct amdgpu_device *adev = ring->adev;
1716 struct v9_mqd *mqd = ring->mqd_ptr;
1717 int j;
1718
1719 /* disable wptr polling */
1720 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
1721
1722 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
1723 mqd->cp_hqd_eop_base_addr_lo);
1724 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
1725 mqd->cp_hqd_eop_base_addr_hi);
1726
1727 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1728 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
1729 mqd->cp_hqd_eop_control);
1730
1731 /* enable doorbell? */
1732 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
1733 mqd->cp_hqd_pq_doorbell_control);
1734
1735 /* disable the queue if it's active */
1736 if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
1737 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
1738 for (j = 0; j < adev->usec_timeout; j++) {
1739 if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
1740 break;
1741 udelay(1);
1742 }
1743 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
1744 mqd->cp_hqd_dequeue_request);
1745 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
1746 mqd->cp_hqd_pq_rptr);
1747 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
1748 mqd->cp_hqd_pq_wptr_lo);
1749 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
1750 mqd->cp_hqd_pq_wptr_hi);
1751 }
1752
1753 /* set the pointer to the MQD */
1754 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
1755 mqd->cp_mqd_base_addr_lo);
1756 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
1757 mqd->cp_mqd_base_addr_hi);
1758
1759 /* set MQD vmid to 0 */
1760 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
1761 mqd->cp_mqd_control);
1762
1763 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
1764 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
1765 mqd->cp_hqd_pq_base_lo);
1766 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
1767 mqd->cp_hqd_pq_base_hi);
1768
1769 /* set up the HQD, this is similar to CP_RB0_CNTL */
1770 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
1771 mqd->cp_hqd_pq_control);
1772
1773 /* set the wb address whether it's enabled or not */
1774 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
1775 mqd->cp_hqd_pq_rptr_report_addr_lo);
1776 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
1777 mqd->cp_hqd_pq_rptr_report_addr_hi);
1778
1779 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
1780 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
1781 mqd->cp_hqd_pq_wptr_poll_addr_lo);
1782 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
1783 mqd->cp_hqd_pq_wptr_poll_addr_hi);
1784
1785 /* enable the doorbell if requested */
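/* The doorbell range registers take byte offsets: doorbell_index values
 * count 64-bit doorbell slots, so "* 2" converts to dword units and
 * "<< 2" to bytes; each XCC's window is shifted by xcc_doorbell_range.
 */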
1786 if (ring->use_doorbell) {
1787 WREG32_SOC15(
1788 GC, GET_INST(GC, xcc_id),
1789 regCP_MEC_DOORBELL_RANGE_LOWER,
1790 ((adev->doorbell_index.kiq +
1791 xcc_id * adev->doorbell_index.xcc_doorbell_range) *
1792 2) << 2);
1793 WREG32_SOC15(
1794 GC, GET_INST(GC, xcc_id),
1795 regCP_MEC_DOORBELL_RANGE_UPPER,
1796 ((adev->doorbell_index.userqueue_end +
1797 xcc_id * adev->doorbell_index.xcc_doorbell_range) *
1798 2) << 2);
1799 }
1800
1801 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
1802 mqd->cp_hqd_pq_doorbell_control);
1803
1804 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
1805 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
1806 mqd->cp_hqd_pq_wptr_lo);
1807 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
1808 mqd->cp_hqd_pq_wptr_hi);
1809
1810 /* set the vmid for the queue */
1811 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
1812
1813 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
1814 mqd->cp_hqd_persistent_state);
1815
1816 /* activate the queue */
1817 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
1818 mqd->cp_hqd_active);
1819
1820 if (ring->use_doorbell)
1821 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
1822
1823 return 0;
1824}
1825
1826static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
1827 int xcc_id)
1828{
1829 struct amdgpu_device *adev = ring->adev;
1830 int j;
1831
1832 /* disable the queue if it's active */
1833 if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
1834
1835 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
1836
1837 for (j = 0; j < adev->usec_timeout; j++) {
1838 if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
1839 break;
1840 udelay(1);
1841 }
1842
1843 if (j == adev->usec_timeout) {
1844 DRM_DEBUG("%s dequeue request failed.\n", ring->name);
1845
1846 /* Manual disable if dequeue request times out */
1847 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
1848 }
1849
1850 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
1851 0);
1852 }
1853
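/* Reset the HQD registers to benign defaults so that a later re-init
 * starts from a clean state.
 */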
1854 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
1855 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
1856 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
1857 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
1858 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
1859 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
1860 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0);
1861 WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0);
1862
1863 return 0;
1864}
1865
1866static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
1867{
1868 struct amdgpu_device *adev = ring->adev;
1869 struct v9_mqd *mqd = ring->mqd_ptr;
1870 struct v9_mqd *tmp_mqd;
1871
1872 gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id);
1873
1874 /* The GPU could be in a bad state during probe, and the driver may
1875 * trigger a reset after loading the SMU; in that case the MQD has not
1876 * been initialized and needs to be re-initialized here.
1877 * Check mqd->cp_hqd_pq_control, since that value should never be 0.
1878 */
1879 tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup;
1880 if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
1881 /* for the GPU_RESET case, reset the MQD to a clean state */
1882 if (adev->gfx.kiq[xcc_id].mqd_backup)
1883 memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation));
1884
1885 /* reset ring buffer */
1886 ring->wptr = 0;
1887 amdgpu_ring_clear_ring(ring);
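/* soc15_grbm_select() points the following HQD register accesses at
 * this ring's me/pipe/queue; srbm_mutex serializes the selection, and
 * selecting 0/0/0/0 afterwards restores the default.
 */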
1888 mutex_lock(&adev->srbm_mutex);
1889 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
1890 gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
1891 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1892 mutex_unlock(&adev->srbm_mutex);
1893 } else {
1894 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
1895 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
1896 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
1897 mutex_lock(&adev->srbm_mutex);
1898 if (amdgpu_sriov_vf(adev) && adev->in_suspend)
1899 amdgpu_ring_clear_ring(ring);
1900 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
1901 gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
1902 gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
1903 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1904 mutex_unlock(&adev->srbm_mutex);
1905
1906 if (adev->gfx.kiq[xcc_id].mqd_backup)
1907 memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
1908 }
1909
1910 return 0;
1911}
1912
1913static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id)
1914{
1915 struct amdgpu_device *adev = ring->adev;
1916 struct v9_mqd *mqd = ring->mqd_ptr;
1917 int mqd_idx = ring - &adev->gfx.compute_ring[0];
1918 struct v9_mqd *tmp_mqd;
1919
1920 /* Same as the KIQ init above: the driver needs to re-init the MQD if
1921 * mqd->cp_hqd_pq_control was never initialized.
1922 */
1923 tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
1924
1925 if (!tmp_mqd->cp_hqd_pq_control ||
1926 (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
1927 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
1928 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
1929 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
1930 mutex_lock(&adev->srbm_mutex);
1931 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
1932 gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
1933 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1934 mutex_unlock(&adev->srbm_mutex);
1935
1936 if (adev->gfx.mec.mqd_backup[mqd_idx])
1937 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
1938 } else {
1939 /* restore MQD to a clean status */
1940 if (adev->gfx.mec.mqd_backup[mqd_idx])
1941 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
1942 /* reset ring buffer */
1943 ring->wptr = 0;
1944 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
1945 amdgpu_ring_clear_ring(ring);
1946 }
1947
1948 return 0;
1949}
1950
1951static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
1952{
1953 struct amdgpu_ring *ring;
1954 int j;
1955
1956 for (j = 0; j < adev->gfx.num_compute_rings; j++) {
1957 ring = &adev->gfx.compute_ring[j + xcc_id * adev->gfx.num_compute_rings];
1958 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
1959 mutex_lock(&adev->srbm_mutex);
1960 soc15_grbm_select(adev, ring->me,
1961 ring->pipe,
1962 ring->queue, 0, GET_INST(GC, xcc_id));
1963 gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id);
1964 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1965 mutex_unlock(&adev->srbm_mutex);
1966 }
1967 }
1968
1969 return 0;
1970}
1971
1972static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
1973{
1974 struct amdgpu_ring *ring;
1975 int r;
1976
1977 ring = &adev->gfx.kiq[xcc_id].ring;
1978
1979 r = amdgpu_bo_reserve(ring->mqd_obj, false);
1980 if (unlikely(r != 0))
1981 return r;
1982
1983 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
1984 if (unlikely(r != 0)) {
1985 amdgpu_bo_unreserve(ring->mqd_obj);
1986 return r;
1987 }
1988
1989 gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id);
1990 amdgpu_bo_kunmap(ring->mqd_obj);
1991 ring->mqd_ptr = NULL;
1992 amdgpu_bo_unreserve(ring->mqd_obj);
1993 return 0;
1994}
1995
1996static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
1997{
1998 struct amdgpu_ring *ring = NULL;
1999 int r = 0, i;
2000
2001 gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
2002
2003 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2004 ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
2005
2006 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2007 if (unlikely(r != 0))
2008 goto done;
2009 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2010 if (!r) {
2011 r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id);
2012 amdgpu_bo_kunmap(ring->mqd_obj);
2013 ring->mqd_ptr = NULL;
2014 }
2015 amdgpu_bo_unreserve(ring->mqd_obj);
2016 if (r)
2017 goto done;
2018 }
2019
2020 r = amdgpu_gfx_enable_kcq(adev, xcc_id);
2021done:
2022 return r;
2023}
2024
2025static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
2026{
2027 struct amdgpu_ring *ring;
2028 int r, j;
2029
2030 gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
2031
2032 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2033 gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id);
2034
2035 r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id);
2036 if (r)
2037 return r;
2038 }
2039
2040 r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
2041 if (r)
2042 return r;
2043
2044 r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id);
2045 if (r)
2046 return r;
2047
2048 for (j = 0; j < adev->gfx.num_compute_rings; j++) {
2049 ring = &adev->gfx.compute_ring
2050 [j + xcc_id * adev->gfx.num_compute_rings];
2051 r = amdgpu_ring_test_helper(ring);
2052 if (r)
2053 return r;
2054 }
2055
2056 gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
2057
2058 return 0;
2059}
2060
2061static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
2062{
2063 int r = 0, i, num_xcc, num_xcp, num_xcc_per_xcp;
2064
2065 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2066 if (amdgpu_sriov_vf(adev)) {
2067 enum amdgpu_gfx_partition mode;
2068
2069 mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2070 AMDGPU_XCP_FL_NONE);
2071 if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2072 return -EINVAL;
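/* Each XCP groups num_xcc_per_xcp XCCs, so the partition count for the
 * queried mode is simply num_xcc / num_xcc_per_xcp.
 */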
2073 num_xcc_per_xcp = gfx_v9_4_3_get_xccs_per_xcp(adev);
2074 adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
2075 num_xcp = num_xcc / num_xcc_per_xcp;
2076 r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
2077
2078 } else {
2079 if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2080 AMDGPU_XCP_FL_NONE) ==
2081 AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2082 r = amdgpu_xcp_switch_partition_mode(
2083 adev->xcp_mgr, amdgpu_user_partt_mode);
2084 }
2085 if (r)
2086 return r;
2087
2088 for (i = 0; i < num_xcc; i++) {
2089 r = gfx_v9_4_3_xcc_cp_resume(adev, i);
2090 if (r)
2091 return r;
2092 }
2093
2094 return 0;
2095}
2096
2097static void gfx_v9_4_3_xcc_cp_enable(struct amdgpu_device *adev, bool enable,
2098 int xcc_id)
2099{
2100 gfx_v9_4_3_xcc_cp_compute_enable(adev, enable, xcc_id);
2101}
2102
2103static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
2104{
2105 if (amdgpu_gfx_disable_kcq(adev, xcc_id))
2106 DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id);
2107
2108 if (amdgpu_sriov_vf(adev)) {
2109 /* Polling must be disabled for SR-IOV once the hw has finished;
2110 * otherwise the CPC engine may keep fetching a WB address that is
2111 * already invalid after sw teardown, triggering a DMAR read error
2112 * on the hypervisor side.
2113 */
2114 WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
2115 return;
2116 }
2117
2118 /* Use the deinitialize sequence from CAIL when unbinding the device
2119 * from the driver; otherwise the KIQ hangs when binding it back.
2120 */
2121 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2122 mutex_lock(&adev->srbm_mutex);
2123 soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me,
2124 adev->gfx.kiq[xcc_id].ring.pipe,
2125 adev->gfx.kiq[xcc_id].ring.queue, 0,
2126 GET_INST(GC, xcc_id));
2127 gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring,
2128 xcc_id);
2129 soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2130 mutex_unlock(&adev->srbm_mutex);
2131 }
2132
2133 gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
2134 gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id);
2135}
2136
2137static int gfx_v9_4_3_hw_init(void *handle)
2138{
2139 int r;
2140 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2141
2142 if (!amdgpu_sriov_vf(adev))
2143 gfx_v9_4_3_init_golden_registers(adev);
2144
2145 gfx_v9_4_3_constants_init(adev);
2146
2147 r = adev->gfx.rlc.funcs->resume(adev);
2148 if (r)
2149 return r;
2150
2151 r = gfx_v9_4_3_cp_resume(adev);
2152 if (r)
2153 return r;
2154
2155 return r;
2156}
2157
2158static int gfx_v9_4_3_hw_fini(void *handle)
2159{
2160 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2161 int i, num_xcc;
2162
2163 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2164 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2165
2166 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2167 for (i = 0; i < num_xcc; i++) {
2168 gfx_v9_4_3_xcc_fini(adev, i);
2169 }
2170
2171 return 0;
2172}
2173
2174static int gfx_v9_4_3_suspend(void *handle)
2175{
2176 return gfx_v9_4_3_hw_fini(handle);
2177}
2178
2179static int gfx_v9_4_3_resume(void *handle)
2180{
2181 return gfx_v9_4_3_hw_init(handle);
2182}
2183
2184static bool gfx_v9_4_3_is_idle(void *handle)
2185{
2186 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2187 int i, num_xcc;
2188
2189 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2190 for (i = 0; i < num_xcc; i++) {
2191 if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS),
2192 GRBM_STATUS, GUI_ACTIVE))
2193 return false;
2194 }
2195 return true;
2196}
2197
2198static int gfx_v9_4_3_wait_for_idle(void *handle)
2199{
2200 unsigned i;
2201 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2202
2203 for (i = 0; i < adev->usec_timeout; i++) {
2204 if (gfx_v9_4_3_is_idle(handle))
2205 return 0;
2206 udelay(1);
2207 }
2208 return -ETIMEDOUT;
2209}
2210
2211static int gfx_v9_4_3_soft_reset(void *handle)
2212{
2213 u32 grbm_soft_reset = 0;
2214 u32 tmp;
2215 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2216
2217 /* GRBM_STATUS */
2218 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS);
2219 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
2220 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
2221 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
2222 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
2223 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
2224 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
2225 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2226 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2227 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2228 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
2229 }
2230
2231 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
2232 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2233 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2234 }
2235
2236 /* GRBM_STATUS2 */
2237 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2);
2238 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
2239 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2240 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2241
2242
2243 if (grbm_soft_reset) {
2244 /* stop the rlc */
2245 adev->gfx.rlc.funcs->stop(adev);
2246
2247 /* Disable MEC parsing/prefetching */
2248 gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
2249
2251 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2252 tmp |= grbm_soft_reset;
2253 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2254 WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2255 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2256
2257 udelay(50);
2258
2259 tmp &= ~grbm_soft_reset;
2260 WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2261 tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2263
2264 /* Wait a little for things to settle down */
2265 udelay(50);
2266 }
2267 return 0;
2268}
2269
2270static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
2271 uint32_t vmid,
2272 uint32_t gds_base, uint32_t gds_size,
2273 uint32_t gws_base, uint32_t gws_size,
2274 uint32_t oa_base, uint32_t oa_size)
2275{
2276 struct amdgpu_device *adev = ring->adev;
2277
2278 /* GDS Base */
2279 gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2280 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid,
2281 gds_base);
2282
2283 /* GDS Size */
2284 gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2285 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid,
2286 gds_size);
2287
2288 /* GWS */
2289 gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2290 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid,
2291 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
2292
2293 /* OA */
2294 gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2295 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid,
2296 (1 << (oa_size + oa_base)) - (1 << oa_base));
2297}
2298
2299static int gfx_v9_4_3_early_init(void *handle)
2300{
2301 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2302
2303 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
2304 AMDGPU_MAX_COMPUTE_RINGS);
2305 gfx_v9_4_3_set_kiq_pm4_funcs(adev);
2306 gfx_v9_4_3_set_ring_funcs(adev);
2307 gfx_v9_4_3_set_irq_funcs(adev);
2308 gfx_v9_4_3_set_gds_init(adev);
2309 gfx_v9_4_3_set_rlc_funcs(adev);
2310
2311 /* init rlcg reg access ctrl */
2312 gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev);
2313
2314 return gfx_v9_4_3_init_microcode(adev);
2315}
2316
2317static int gfx_v9_4_3_late_init(void *handle)
2318{
2319 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2320 int r;
2321
2322 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2323 if (r)
2324 return r;
2325
2326 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2327 if (r)
2328 return r;
2329
2330 if (adev->gfx.ras &&
2331 adev->gfx.ras->enable_watchdog_timer)
2332 adev->gfx.ras->enable_watchdog_timer(adev);
2333
2334 return 0;
2335}
2336
2337static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
2338 bool enable, int xcc_id)
2339{
2340 uint32_t def, data;
2341
2342 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
2343 return;
2344
2345 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2346 regRLC_CGTT_MGCG_OVERRIDE);
2347
2348 if (enable)
2349 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2350 else
2351 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2352
2353 if (def != data)
2354 WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2355 regRLC_CGTT_MGCG_OVERRIDE, data);
2357}
2358
2359static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
2360 bool enable, int xcc_id)
2361{
2362 uint32_t def, data;
2363
2364 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
2365 return;
2366
2367 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2368 regRLC_CGTT_MGCG_OVERRIDE);
2369
2370 if (enable)
2371 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2372 else
2373 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2374
2375 if (def != data)
2376 WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2377 regRLC_CGTT_MGCG_OVERRIDE, data);
2378}
2379
2380static void
2381gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
2382 bool enable, int xcc_id)
2383{
2384 uint32_t data, def;
2385
2386 /* It is disabled by HW by default */
2387 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
2388 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
2389 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2390
2391 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2392 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2393 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2394 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2395
2396 if (def != data)
2397 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2398
2399 /* MGLS is a global flag to control all MGLS in GFX */
2400 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
2401 /* 2 - RLC memory Light sleep */
2402 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
2403 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2404 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2405 if (def != data)
2406 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2407 }
2408 /* 3 - CP memory Light sleep */
2409 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
2410 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2411 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2412 if (def != data)
2413 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2414 }
2415 }
2416 } else {
2417 /* 1 - MGCG_OVERRIDE */
2418 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2419
2420 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2421 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2422 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2423 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2424
2425 if (def != data)
2426 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2427
2428 /* 2 - disable MGLS in RLC */
2429 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2430 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
2431 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2432 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2433 }
2434
2435 /* 3 - disable MGLS in CP */
2436 data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2437 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
2438 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2439 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2440 }
2441 }
2443}
2444
2445static void
2446gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
2447 bool enable, int xcc_id)
2448{
2449 uint32_t def, data;
2450
2451 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
2452
2453 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2454 /* unset CGCG override */
2455 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
2456 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2457 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2458 else
2459 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2460 /* update CGCG and CGLS override bits */
2461 if (def != data)
2462 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2463
2464 /* CGCG Hysteresis: 400us */
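/* 0x2710 = 10000 idle-threshold units; the ~400us figure above implies
 * a tick of roughly 40ns, though the exact period is hardware-defined.
 */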
2465 def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2466
2467 data = (0x2710
2468 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
2469 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
2470 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2471 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
2472 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2473 if (def != data)
2474 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2475
2476 /* set IDLE_POLL_COUNT (0x33450100) */
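/* 0x33450100 decodes as IDLE_POLL_COUNT = 0x3345 in the upper field and
 * POLL_FREQUENCY = 0x0100 in the lower field, matching the shifts below.
 */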
2477 def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
2478 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
2479 (0x3345 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2480 if (def != data)
2481 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
2482 } else {
2483 def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2484 /* reset CGCG/CGLS bits */
2485 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
2486 /* disable cgcg and cgls in FSM */
2487 if (def != data)
2488 WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2489 }
2491}
2492
2493static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
2494 bool enable, int xcc_id)
2495{
2496 amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
2497
2498 if (enable) {
2499 /* FGCG */
2500 gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2501 gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2502
2503 /* CGCG/CGLS should be enabled after MGCG/MGLS
2504 * === MGCG + MGLS ===
2505 */
2506 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2507 xcc_id);
2508 /* === CGCG + CGLS === */
2509 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2510 xcc_id);
2511 } else {
2512 /* CGCG/CGLS should be disabled before MGCG/MGLS
2513 * === CGCG + CGLS ===
2514 */
2515 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2516 xcc_id);
2517 /* === MGCG + MGLS === */
2518 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2519 xcc_id);
2520
2521 /* FGCG */
2522 gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2523 gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2524 }
2525
2526 amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
2527
2528 return 0;
2529}
2530
2531static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
2532 .is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled,
2533 .set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode,
2534 .unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode,
2535 .init = gfx_v9_4_3_rlc_init,
2536 .resume = gfx_v9_4_3_rlc_resume,
2537 .stop = gfx_v9_4_3_rlc_stop,
2538 .reset = gfx_v9_4_3_rlc_reset,
2539 .start = gfx_v9_4_3_rlc_start,
2540 .update_spm_vmid = gfx_v9_4_3_update_spm_vmid,
2541 .is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
2542};
2543
2544static int gfx_v9_4_3_set_powergating_state(void *handle,
2545 enum amd_powergating_state state)
2546{
2547 return 0;
2548}
2549
2550static int gfx_v9_4_3_set_clockgating_state(void *handle,
2551 enum amd_clockgating_state state)
2552{
2553 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2554 int i, num_xcc;
2555
2556 if (amdgpu_sriov_vf(adev))
2557 return 0;
2558
2559 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2560 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2561 case IP_VERSION(9, 4, 3):
2562 case IP_VERSION(9, 4, 4):
2563 for (i = 0; i < num_xcc; i++)
2564 gfx_v9_4_3_xcc_update_gfx_clock_gating(
2565 adev, state == AMD_CG_STATE_GATE, i);
2566 break;
2567 default:
2568 break;
2569 }
2570 return 0;
2571}
2572
2573static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags)
2574{
2575 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2576 int data;
2577
2578 if (amdgpu_sriov_vf(adev))
2579 *flags = 0;
2580
2581 /* AMD_CG_SUPPORT_GFX_MGCG */
2582 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE));
2583 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
2584 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
2585
2586 /* AMD_CG_SUPPORT_GFX_CGCG */
2587 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL));
2588 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
2589 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
2590
2591 /* AMD_CG_SUPPORT_GFX_CGLS */
2592 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
2593 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
2594
2595 /* AMD_CG_SUPPORT_GFX_RLC_LS */
2596 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL));
2597 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
2598 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
2599
2600 /* AMD_CG_SUPPORT_GFX_CP_LS */
2601 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL));
2602 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
2603 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
2604}
2605
2606static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2607{
2608 struct amdgpu_device *adev = ring->adev;
2609 u32 ref_and_mask, reg_mem_engine;
2610 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
2611
2612 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2613 switch (ring->me) {
2614 case 1:
2615 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
2616 break;
2617 case 2:
2618 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
2619 break;
2620 default:
2621 return;
2622 }
2623 reg_mem_engine = 0;
2624 } else {
2625 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
2626 reg_mem_engine = 1; /* pfp */
2627 }
2628
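/* Write the engine's flush-request bit to the HDP req register, then
 * poll the HDP done register until the same bit reads back, i.e. until
 * the flush has completed.
 */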
2629 gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
2630 adev->nbio.funcs->get_hdp_flush_req_offset(adev),
2631 adev->nbio.funcs->get_hdp_flush_done_offset(adev),
2632 ref_and_mask, ref_and_mask, 0x20);
2633}
2634
2635static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring,
2636 struct amdgpu_job *job,
2637 struct amdgpu_ib *ib,
2638 uint32_t flags)
2639{
2640 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2641 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
2642
2643 /* Currently there is a high likelihood of a wave ID mismatch
2644 * between ME and GDS, leading to a hw deadlock, because ME generates
2645 * different wave IDs than the GDS expects. This situation happens
2646 * randomly when at least 5 compute pipes use GDS ordered append.
2647 * The wave IDs generated by ME are also wrong after suspend/resume.
2648 * Those are probably bugs somewhere else in the kernel driver.
2649 *
2650 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
2651 * GDS to 0 for this ring (me/pipe).
2652 */
2653 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
2654 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2655 amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
2656 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2657 }
2658
2659 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2660 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
2661 amdgpu_ring_write(ring,
2662#ifdef __BIG_ENDIAN
2663 (2 << 0) |
2664#endif
2665 lower_32_bits(ib->gpu_addr));
2666 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
2667 amdgpu_ring_write(ring, control);
2668}
2669
2670static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
2671 u64 seq, unsigned flags)
2672{
2673 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2674 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2675 bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
2676
2677 /* RELEASE_MEM - flush caches, send int */
2678 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
2679 amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
2680 EOP_TC_NC_ACTION_EN) :
2681 (EOP_TCL1_ACTION_EN |
2682 EOP_TC_ACTION_EN |
2683 EOP_TC_WB_ACTION_EN |
2684 EOP_TC_MD_ACTION_EN)) |
2685 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2686 EVENT_INDEX(5)));
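/* DATA_SEL 1 sends the low 32 bits of seq, 2 sends all 64 bits;
 * INT_SEL 2 raises the EOP interrupt once the data write is confirmed.
 */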
2687 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2688
2689 /*
2690 * The address must be qword-aligned for a 64-bit write, and dword-
2691 * aligned when only the low 32 bits are sent (data high discarded).
2692 */
2693 if (write64bit)
2694 BUG_ON(addr & 0x7);
2695 else
2696 BUG_ON(addr & 0x3);
2697 amdgpu_ring_write(ring, lower_32_bits(addr));
2698 amdgpu_ring_write(ring, upper_32_bits(addr));
2699 amdgpu_ring_write(ring, lower_32_bits(seq));
2700 amdgpu_ring_write(ring, upper_32_bits(seq));
2701 amdgpu_ring_write(ring, 0);
2702}
2703
2704static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
2705{
2706 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
2707 uint32_t seq = ring->fence_drv.sync_seq;
2708 uint64_t addr = ring->fence_drv.gpu_addr;
2709
2710 gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0,
2711 lower_32_bits(addr), upper_32_bits(addr),
2712 seq, 0xffffffff, 4);
2713}
2714
2715static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring,
2716 unsigned vmid, uint64_t pd_addr)
2717{
2718 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
2719}
2720
2721static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring)
2722{
2723 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
2724}
2725
2726static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring)
2727{
2728 u64 wptr;
2729
2730 /* XXX check if swapping is necessary on BE */
2731 if (ring->use_doorbell)
2732 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
2733 else
2734 BUG();
2735 return wptr;
2736}
2737
2738static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring)
2739{
2740 struct amdgpu_device *adev = ring->adev;
2741
2742 /* XXX check if swapping is necessary on BE */
2743 if (ring->use_doorbell) {
2744 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
2745 WDOORBELL64(ring->doorbell_index, ring->wptr);
2746 } else {
2747 BUG(); /* only DOORBELL method supported on gfx9 now */
2748 }
2749}
2750
2751static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
2752 u64 seq, unsigned int flags)
2753{
2754 struct amdgpu_device *adev = ring->adev;
2755
2756 /* we only allocate 32bit for each seq wb address */
2757 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
2758
2759 /* write fence seq to the "addr" */
2760 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2761 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2762 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
2763 amdgpu_ring_write(ring, lower_32_bits(addr));
2764 amdgpu_ring_write(ring, upper_32_bits(addr));
2765 amdgpu_ring_write(ring, lower_32_bits(seq));
2766
2767 if (flags & AMDGPU_FENCE_FLAG_INT) {
2768 /* set register to trigger INT */
2769 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2770 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2771 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
2772 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
2773 amdgpu_ring_write(ring, 0);
2774 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
2775 }
2776}
2777
2778static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
2779 uint32_t reg_val_offs)
2780{
2781 struct amdgpu_device *adev = ring->adev;
2782
2783 reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
2784
2785 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
2786 amdgpu_ring_write(ring, 0 | /* src: register*/
2787 (5 << 8) | /* dst: memory */
2788 (1 << 20)); /* write confirm */
2789 amdgpu_ring_write(ring, reg);
2790 amdgpu_ring_write(ring, 0);
2791 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
2792 reg_val_offs * 4));
2793 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
2794 reg_val_offs * 4));
2795}
2796
2797static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
2798 uint32_t val)
2799{
2800 uint32_t cmd = 0;
2801
2802 reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
2803
2804 switch (ring->funcs->type) {
2805 case AMDGPU_RING_TYPE_GFX:
2806 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
2807 break;
2808 case AMDGPU_RING_TYPE_KIQ:
2809 cmd = (1 << 16); /* no inc addr */
2810 break;
2811 default:
2812 cmd = WR_CONFIRM;
2813 break;
2814 }
2815 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2816 amdgpu_ring_write(ring, cmd);
2817 amdgpu_ring_write(ring, reg);
2818 amdgpu_ring_write(ring, 0);
2819 amdgpu_ring_write(ring, val);
2820}
2821
2822static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
2823 uint32_t val, uint32_t mask)
2824{
2825 gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
2826}
2827
2828static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
2829 uint32_t reg0, uint32_t reg1,
2830 uint32_t ref, uint32_t mask)
2831{
2832 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
2833 ref, mask);
2834}
2835
2836static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2837 struct amdgpu_device *adev, int me, int pipe,
2838 enum amdgpu_interrupt_state state, int xcc_id)
2839{
2840 u32 mec_int_cntl, mec_int_cntl_reg;
2841
2842 /*
2843 * amdgpu controls only the first MEC. That's why this function only
2844 * handles the setting of interrupts for this specific MEC. All other
2845 * pipes' interrupts are set by amdkfd.
2846 */
2847
2848 if (me == 1) {
2849 switch (pipe) {
2850 case 0:
2851 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
2852 break;
2853 case 1:
2854 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
2855 break;
2856 case 2:
2857 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
2858 break;
2859 case 3:
2860 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
2861 break;
2862 default:
2863 DRM_DEBUG("invalid pipe %d\n", pipe);
2864 return;
2865 }
2866 } else {
2867 DRM_DEBUG("invalid me %d\n", me);
2868 return;
2869 }
2870
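/* The CP_ME1_PIPEn_INT_CNTL registers share a common field layout,
 * which is why REG_SET_FIELD can use the PIPE0 definitions for any pipe.
 */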
2871 switch (state) {
2872 case AMDGPU_IRQ_STATE_DISABLE:
2873 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
2874 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
2875 TIME_STAMP_INT_ENABLE, 0);
2876 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
2877 break;
2878 case AMDGPU_IRQ_STATE_ENABLE:
2879 mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
2880 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
2881 TIME_STAMP_INT_ENABLE, 1);
2882 WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
2883 break;
2884 default:
2885 break;
2886 }
2887}
2888
2889static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
2890 struct amdgpu_irq_src *source,
2891 unsigned type,
2892 enum amdgpu_interrupt_state state)
2893{
2894 int i, num_xcc;
2895
2896 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2897 switch (state) {
2898 case AMDGPU_IRQ_STATE_DISABLE:
2899 case AMDGPU_IRQ_STATE_ENABLE:
2900 for (i = 0; i < num_xcc; i++)
2901 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
2902 PRIV_REG_INT_ENABLE,
2903 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
2904 break;
2905 default:
2906 break;
2907 }
2908
2909 return 0;
2910}
2911
2912static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev,
2913 struct amdgpu_irq_src *source,
2914 unsigned type,
2915 enum amdgpu_interrupt_state state)
2916{
2917 int i, num_xcc;
2918
2919 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2920 switch (state) {
2921 case AMDGPU_IRQ_STATE_DISABLE:
2922 case AMDGPU_IRQ_STATE_ENABLE:
2923 for (i = 0; i < num_xcc; i++)
2924 WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
2925 PRIV_INSTR_INT_ENABLE,
2926 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
2927 break;
2928 default:
2929 break;
2930 }
2931
2932 return 0;
2933}
2934
2935static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev,
2936 struct amdgpu_irq_src *src,
2937 unsigned type,
2938 enum amdgpu_interrupt_state state)
2939{
2940 int i, num_xcc;
2941
2942 num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2943 for (i = 0; i < num_xcc; i++) {
2944 switch (type) {
2945 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
2946 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2947 adev, 1, 0, state, i);
2948 break;
2949 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
2950 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2951 adev, 1, 1, state, i);
2952 break;
2953 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
2954 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2955 adev, 1, 2, state, i);
2956 break;
2957 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
2958 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2959 adev, 1, 3, state, i);
2960 break;
2961 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
2962 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2963 adev, 2, 0, state, i);
2964 break;
2965 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
2966 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2967 adev, 2, 1, state, i);
2968 break;
2969 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
2970 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2971 adev, 2, 2, state, i);
2972 break;
2973 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
2974 gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
2975 adev, 2, 3, state, i);
2976 break;
2977 default:
2978 break;
2979 }
2980 }
2981
2982 return 0;
2983}
2984
2985static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev,
2986 struct amdgpu_irq_src *source,
2987 struct amdgpu_iv_entry *entry)
2988{
2989 int i, xcc_id;
2990 u8 me_id, pipe_id, queue_id;
2991 struct amdgpu_ring *ring;
2992
2993 DRM_DEBUG("IH: CP EOP\n");
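/* ring_id packs the queue identity: bits [3:2] = ME, [1:0] = pipe,
 * [6:4] = queue, as decoded below.
 */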
2994 me_id = (entry->ring_id & 0x0c) >> 2;
2995 pipe_id = (entry->ring_id & 0x03) >> 0;
2996 queue_id = (entry->ring_id & 0x70) >> 4;
2997
2998 xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
2999
3000 if (xcc_id == -EINVAL)
3001 return -EINVAL;
3002
3003 switch (me_id) {
3004 case 0:
3005 case 1:
3006 case 2:
3007 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3008 ring = &adev->gfx.compute_ring
3009 [i +
3010 xcc_id * adev->gfx.num_compute_rings];
3011 /* Per-queue interrupt is supported for MEC starting from VI.
3012 * The interrupt can only be enabled/disabled per pipe instead of per queue.
3013 */
3014
3015 if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
3016 amdgpu_fence_process(ring);
3017 }
3018 break;
3019 }
3020 return 0;
3021}
3022
3023static void gfx_v9_4_3_fault(struct amdgpu_device *adev,
3024 struct amdgpu_iv_entry *entry)
3025{
3026 u8 me_id, pipe_id, queue_id;
3027 struct amdgpu_ring *ring;
3028 int i, xcc_id;
3029
3030 me_id = (entry->ring_id & 0x0c) >> 2;
3031 pipe_id = (entry->ring_id & 0x03) >> 0;
3032 queue_id = (entry->ring_id & 0x70) >> 4;
3033
3034 xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
3035
3036 if (xcc_id == -EINVAL)
3037 return;
3038
3039 switch (me_id) {
3040 case 0:
3041 case 1:
3042 case 2:
3043 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3044 ring = &adev->gfx.compute_ring
3045 [i +
3046 xcc_id * adev->gfx.num_compute_rings];
3047 if (ring->me == me_id && ring->pipe == pipe_id &&
3048 ring->queue == queue_id)
3049 drm_sched_fault(&ring->sched);
3050 }
3051 break;
3052 }
3053}
3054
3055static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
3056 struct amdgpu_irq_src *source,
3057 struct amdgpu_iv_entry *entry)
3058{
3059 DRM_ERROR("Illegal register access in command stream\n");
3060 gfx_v9_4_3_fault(adev, entry);
3061 return 0;
3062}
3063
3064static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
3065 struct amdgpu_irq_src *source,
3066 struct amdgpu_iv_entry *entry)
3067{
3068 DRM_ERROR("Illegal instruction in command stream\n");
3069 gfx_v9_4_3_fault(adev, entry);
3070 return 0;
3071}
3072
3073static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring)
3074{
3075 const unsigned int cp_coher_cntl =
3076 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
3077 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
3078 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
3079 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
3080 PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
3081
3082 /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
3083 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
3084 amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
3085 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
3086 amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
3087 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
3088 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
3089 amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
3090}
3091
3092static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring,
3093 uint32_t pipe, bool enable)
3094{
3095 struct amdgpu_device *adev = ring->adev;
3096 uint32_t val;
3097 uint32_t wcl_cs_reg;
3098
3099 /* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
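/* 0x1 throttles the CS pipe to the minimum wave multiplier while the
 * limit is active; 0x7f (the 7-bit maximum, i.e. the default) restores
 * full throughput.
 */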
3100 val = enable ? 0x1 : 0x7f;
3101
3102 switch (pipe) {
3103 case 0:
3104 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0);
3105 break;
3106 case 1:
3107 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1);
3108 break;
3109 case 2:
3110 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2);
3111 break;
3112 case 3:
3113 wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3);
3114 break;
3115 default:
3116 DRM_DEBUG("invalid pipe %d\n", pipe);
3117 return;
3118 }
3119
3120 amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
3121
3123}

3124static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
3125 struct amdgpu_device *adev = ring->adev;
3126 uint32_t val;
3127 int i;
3128
3129 /* regSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register that
3130 * limits the number of gfx waves. Setting the low 5 bits ensures gfx
3131 * only gets around 25% of gpu resources.
3132 */
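/* 0x1f out of the 7-bit full scale 0x7f is roughly 25%, which appears
 * to be where the "around 25%" figure above comes from.
 */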
3133 val = enable ? 0x1f : 0x07ffffff;
3134 amdgpu_ring_emit_wreg(ring,
3135 SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX),
3136 val);
3137
3138 /* Restrict waves for normal/low priority compute queues as well
3139 * to get best QoS for high priority compute jobs.
3140 *
3141 * amdgpu controls only 1st ME(0-3 CS pipes).
3142 */
3143 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3144 if (i != ring->pipe)
3145 gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable);
3147 }
3148}
3149
3150enum amdgpu_gfx_cp_ras_mem_id {
3151 AMDGPU_GFX_CP_MEM1 = 1,
3152 AMDGPU_GFX_CP_MEM2,
3153 AMDGPU_GFX_CP_MEM3,
3154 AMDGPU_GFX_CP_MEM4,
3155 AMDGPU_GFX_CP_MEM5,
3156};
3157
3158enum amdgpu_gfx_gcea_ras_mem_id {
3159 AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4,
3160 AMDGPU_GFX_GCEA_IORD_CMDMEM,
3161 AMDGPU_GFX_GCEA_GMIWR_CMDMEM,
3162 AMDGPU_GFX_GCEA_GMIRD_CMDMEM,
3163 AMDGPU_GFX_GCEA_DRAMWR_CMDMEM,
3164 AMDGPU_GFX_GCEA_DRAMRD_CMDMEM,
3165 AMDGPU_GFX_GCEA_MAM_DMEM0,
3166 AMDGPU_GFX_GCEA_MAM_DMEM1,
3167 AMDGPU_GFX_GCEA_MAM_DMEM2,
3168 AMDGPU_GFX_GCEA_MAM_DMEM3,
3169 AMDGPU_GFX_GCEA_MAM_AMEM0,
3170 AMDGPU_GFX_GCEA_MAM_AMEM1,
3171 AMDGPU_GFX_GCEA_MAM_AMEM2,
3172 AMDGPU_GFX_GCEA_MAM_AMEM3,
3173 AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER,
3174 AMDGPU_GFX_GCEA_WRET_TAGMEM,
3175 AMDGPU_GFX_GCEA_RRET_TAGMEM,
3176 AMDGPU_GFX_GCEA_IOWR_DATAMEM,
3177 AMDGPU_GFX_GCEA_GMIWR_DATAMEM,
3178 AMDGPU_GFX_GCEA_DRAM_DATAMEM,
3179};
3180
3181enum amdgpu_gfx_gc_cane_ras_mem_id {
3182 AMDGPU_GFX_GC_CANE_MEM0 = 0,
3183};
3184
3185enum amdgpu_gfx_gcutcl2_ras_mem_id {
3186 AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160,
3187};
3188
3189enum amdgpu_gfx_gds_ras_mem_id {
3190 AMDGPU_GFX_GDS_MEM0 = 0,
3191};
3192
3193enum amdgpu_gfx_lds_ras_mem_id {
3194 AMDGPU_GFX_LDS_BANK0 = 0,
3195 AMDGPU_GFX_LDS_BANK1,
3196 AMDGPU_GFX_LDS_BANK2,
3197 AMDGPU_GFX_LDS_BANK3,
3198 AMDGPU_GFX_LDS_BANK4,
3199 AMDGPU_GFX_LDS_BANK5,
3200 AMDGPU_GFX_LDS_BANK6,
3201 AMDGPU_GFX_LDS_BANK7,
3202 AMDGPU_GFX_LDS_BANK8,
3203 AMDGPU_GFX_LDS_BANK9,
3204 AMDGPU_GFX_LDS_BANK10,
3205 AMDGPU_GFX_LDS_BANK11,
3206 AMDGPU_GFX_LDS_BANK12,
3207 AMDGPU_GFX_LDS_BANK13,
3208 AMDGPU_GFX_LDS_BANK14,
3209 AMDGPU_GFX_LDS_BANK15,
3210 AMDGPU_GFX_LDS_BANK16,
3211 AMDGPU_GFX_LDS_BANK17,
3212 AMDGPU_GFX_LDS_BANK18,
3213 AMDGPU_GFX_LDS_BANK19,
3214 AMDGPU_GFX_LDS_BANK20,
3215 AMDGPU_GFX_LDS_BANK21,
3216 AMDGPU_GFX_LDS_BANK22,
3217 AMDGPU_GFX_LDS_BANK23,
3218 AMDGPU_GFX_LDS_BANK24,
3219 AMDGPU_GFX_LDS_BANK25,
3220 AMDGPU_GFX_LDS_BANK26,
3221 AMDGPU_GFX_LDS_BANK27,
3222 AMDGPU_GFX_LDS_BANK28,
3223 AMDGPU_GFX_LDS_BANK29,
3224 AMDGPU_GFX_LDS_BANK30,
3225 AMDGPU_GFX_LDS_BANK31,
3226 AMDGPU_GFX_LDS_SP_BUFFER_A,
3227 AMDGPU_GFX_LDS_SP_BUFFER_B,
3228};
3229
3230enum amdgpu_gfx_rlc_ras_mem_id {
3231 AMDGPU_GFX_RLC_GPMF32 = 1,
3232 AMDGPU_GFX_RLC_RLCVF32,
3233 AMDGPU_GFX_RLC_SCRATCH,
3234 AMDGPU_GFX_RLC_SRM_ARAM,
3235 AMDGPU_GFX_RLC_SRM_DRAM,
3236 AMDGPU_GFX_RLC_TCTAG,
3237 AMDGPU_GFX_RLC_SPM_SE,
3238 AMDGPU_GFX_RLC_SPM_GRBMT,
3239};
3240
3241enum amdgpu_gfx_sp_ras_mem_id {
3242 AMDGPU_GFX_SP_SIMDID0 = 0,
3243};
3244
3245enum amdgpu_gfx_spi_ras_mem_id {
3246 AMDGPU_GFX_SPI_MEM0 = 0,
3247 AMDGPU_GFX_SPI_MEM1,
3248 AMDGPU_GFX_SPI_MEM2,
3249 AMDGPU_GFX_SPI_MEM3,
3250};
3251
3252enum amdgpu_gfx_sqc_ras_mem_id {
3253 AMDGPU_GFX_SQC_INST_CACHE_A = 100,
3254 AMDGPU_GFX_SQC_INST_CACHE_B = 101,
3255 AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102,
3256 AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103,
3257 AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104,
3258 AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105,
3259 AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106,
3260 AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107,
3261 AMDGPU_GFX_SQC_DATA_CACHE_A = 200,
3262 AMDGPU_GFX_SQC_DATA_CACHE_B = 201,
3263 AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202,
3264 AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203,
3265 AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204,
3266 AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205,
3267 AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206,
3268 AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207,
3269 AMDGPU_GFX_SQC_DIRTY_BIT_A = 208,
3270 AMDGPU_GFX_SQC_DIRTY_BIT_B = 209,
3271 AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210,
3272 AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211,
3273 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212,
3274 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213,
3275 AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108,
3276};
3277
3278enum amdgpu_gfx_sq_ras_mem_id {
3279 AMDGPU_GFX_SQ_SGPR_MEM0 = 0,
3280 AMDGPU_GFX_SQ_SGPR_MEM1,
3281 AMDGPU_GFX_SQ_SGPR_MEM2,
3282 AMDGPU_GFX_SQ_SGPR_MEM3,
3283};
3284
3285enum amdgpu_gfx_ta_ras_mem_id {
3286 AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1,
3287 AMDGPU_GFX_TA_FS_AFIFO_RAM_HI,
3288 AMDGPU_GFX_TA_FS_CFIFO_RAM,
3289 AMDGPU_GFX_TA_FSX_LFIFO,
3290 AMDGPU_GFX_TA_FS_DFIFO_RAM,
3291};
3292
3293enum amdgpu_gfx_tcc_ras_mem_id {
3294 AMDGPU_GFX_TCC_MEM1 = 1,
3295};
3296
3297enum amdgpu_gfx_tca_ras_mem_id {
3298 AMDGPU_GFX_TCA_MEM1 = 1,
3299};
3300
3301enum amdgpu_gfx_tci_ras_mem_id {
3302 AMDGPU_GFX_TCIW_MEM = 1,
3303};
3304
3305enum amdgpu_gfx_tcp_ras_mem_id {
3306 AMDGPU_GFX_TCP_LFIFO0 = 1,
3307 AMDGPU_GFX_TCP_SET0BANK0_RAM,
3308 AMDGPU_GFX_TCP_SET0BANK1_RAM,
3309 AMDGPU_GFX_TCP_SET0BANK2_RAM,
3310 AMDGPU_GFX_TCP_SET0BANK3_RAM,
3311 AMDGPU_GFX_TCP_SET1BANK0_RAM,
3312 AMDGPU_GFX_TCP_SET1BANK1_RAM,
3313 AMDGPU_GFX_TCP_SET1BANK2_RAM,
3314 AMDGPU_GFX_TCP_SET1BANK3_RAM,
3315 AMDGPU_GFX_TCP_SET2BANK0_RAM,
3316 AMDGPU_GFX_TCP_SET2BANK1_RAM,
3317 AMDGPU_GFX_TCP_SET2BANK2_RAM,
3318 AMDGPU_GFX_TCP_SET2BANK3_RAM,
3319 AMDGPU_GFX_TCP_SET3BANK0_RAM,
3320 AMDGPU_GFX_TCP_SET3BANK1_RAM,
3321 AMDGPU_GFX_TCP_SET3BANK2_RAM,
3322 AMDGPU_GFX_TCP_SET3BANK3_RAM,
3323 AMDGPU_GFX_TCP_VM_FIFO,
3324 AMDGPU_GFX_TCP_DB_TAGRAM0,
3325 AMDGPU_GFX_TCP_DB_TAGRAM1,
3326 AMDGPU_GFX_TCP_DB_TAGRAM2,
3327 AMDGPU_GFX_TCP_DB_TAGRAM3,
3328 AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0,
3329 AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1,
3330 AMDGPU_GFX_TCP_CMD_FIFO,
3331};
3332
3333enum amdgpu_gfx_td_ras_mem_id {
3334 AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1,
3335 AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM,
3336 AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM,
3337};
3338
enum amdgpu_gfx_tcx_ras_mem_id {
	AMDGPU_GFX_TCX_FIFOD0 = 0,
	AMDGPU_GFX_TCX_FIFOD1,
	AMDGPU_GFX_TCX_FIFOD2,
	AMDGPU_GFX_TCX_FIFOD3,
	AMDGPU_GFX_TCX_FIFOD4,
	AMDGPU_GFX_TCX_FIFOD5,
	AMDGPU_GFX_TCX_FIFOD6,
	AMDGPU_GFX_TCX_FIFOD7,
	AMDGPU_GFX_TCX_FIFOB0,
	AMDGPU_GFX_TCX_FIFOB1,
	AMDGPU_GFX_TCX_FIFOB2,
	AMDGPU_GFX_TCX_FIFOB3,
	AMDGPU_GFX_TCX_FIFOB4,
	AMDGPU_GFX_TCX_FIFOB5,
	AMDGPU_GFX_TCX_FIFOB6,
	AMDGPU_GFX_TCX_FIFOB7,
	AMDGPU_GFX_TCX_FIFOA0,
	AMDGPU_GFX_TCX_FIFOA1,
	AMDGPU_GFX_TCX_FIFOA2,
	AMDGPU_GFX_TCX_FIFOA3,
	AMDGPU_GFX_TCX_FIFOA4,
	AMDGPU_GFX_TCX_FIFOA5,
	AMDGPU_GFX_TCX_FIFOA6,
	AMDGPU_GFX_TCX_FIFOA7,
	AMDGPU_GFX_TCX_CFIFO0,
	AMDGPU_GFX_TCX_CFIFO1,
	AMDGPU_GFX_TCX_CFIFO2,
	AMDGPU_GFX_TCX_CFIFO3,
	AMDGPU_GFX_TCX_CFIFO4,
	AMDGPU_GFX_TCX_CFIFO5,
	AMDGPU_GFX_TCX_CFIFO6,
	AMDGPU_GFX_TCX_CFIFO7,
	AMDGPU_GFX_TCX_FIFO_ACKB0,
	AMDGPU_GFX_TCX_FIFO_ACKB1,
	AMDGPU_GFX_TCX_FIFO_ACKB2,
	AMDGPU_GFX_TCX_FIFO_ACKB3,
	AMDGPU_GFX_TCX_FIFO_ACKB4,
	AMDGPU_GFX_TCX_FIFO_ACKB5,
	AMDGPU_GFX_TCX_FIFO_ACKB6,
	AMDGPU_GFX_TCX_FIFO_ACKB7,
	AMDGPU_GFX_TCX_FIFO_ACKD0,
	AMDGPU_GFX_TCX_FIFO_ACKD1,
	AMDGPU_GFX_TCX_FIFO_ACKD2,
	AMDGPU_GFX_TCX_FIFO_ACKD3,
	AMDGPU_GFX_TCX_FIFO_ACKD4,
	AMDGPU_GFX_TCX_FIFO_ACKD5,
	AMDGPU_GFX_TCX_FIFO_ACKD6,
	AMDGPU_GFX_TCX_FIFO_ACKD7,
	AMDGPU_GFX_TCX_DST_FIFOA0,
	AMDGPU_GFX_TCX_DST_FIFOA1,
	AMDGPU_GFX_TCX_DST_FIFOA2,
	AMDGPU_GFX_TCX_DST_FIFOA3,
	AMDGPU_GFX_TCX_DST_FIFOA4,
	AMDGPU_GFX_TCX_DST_FIFOA5,
	AMDGPU_GFX_TCX_DST_FIFOA6,
	AMDGPU_GFX_TCX_DST_FIFOA7,
	AMDGPU_GFX_TCX_DST_FIFOB0,
	AMDGPU_GFX_TCX_DST_FIFOB1,
	AMDGPU_GFX_TCX_DST_FIFOB2,
	AMDGPU_GFX_TCX_DST_FIFOB3,
	AMDGPU_GFX_TCX_DST_FIFOB4,
	AMDGPU_GFX_TCX_DST_FIFOB5,
	AMDGPU_GFX_TCX_DST_FIFOB6,
	AMDGPU_GFX_TCX_DST_FIFOB7,
	AMDGPU_GFX_TCX_DST_FIFOD0,
	AMDGPU_GFX_TCX_DST_FIFOD1,
	AMDGPU_GFX_TCX_DST_FIFOD2,
	AMDGPU_GFX_TCX_DST_FIFOD3,
	AMDGPU_GFX_TCX_DST_FIFOD4,
	AMDGPU_GFX_TCX_DST_FIFOD5,
	AMDGPU_GFX_TCX_DST_FIFOD6,
	AMDGPU_GFX_TCX_DST_FIFOD7,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB0,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB1,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB2,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB3,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB4,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB5,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB6,
	AMDGPU_GFX_TCX_DST_FIFO_ACKB7,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD0,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD1,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD2,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD3,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD4,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD5,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD6,
	AMDGPU_GFX_TCX_DST_FIFO_ACKD7,
};

enum amdgpu_gfx_atc_l2_ras_mem_id {
	AMDGPU_GFX_ATC_L2_MEM0 = 0,
};

enum amdgpu_gfx_utcl2_ras_mem_id {
	AMDGPU_GFX_UTCL2_MEM0 = 0,
};

enum amdgpu_gfx_vml2_ras_mem_id {
	AMDGPU_GFX_VML2_MEM0 = 0,
};

enum amdgpu_gfx_vml2_walker_ras_mem_id {
	AMDGPU_GFX_VML2_WALKER_MEM0 = 0,
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = {
	{AMDGPU_GFX_CP_MEM1, "CP_MEM1"},
	{AMDGPU_GFX_CP_MEM2, "CP_MEM2"},
	{AMDGPU_GFX_CP_MEM3, "CP_MEM3"},
	{AMDGPU_GFX_CP_MEM4, "CP_MEM4"},
	{AMDGPU_GFX_CP_MEM5, "CP_MEM5"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = {
	{AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"},
	{AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"},
	{AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"},
	{AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"},
	{AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"},
	{AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"},
	{AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"},
	{AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"},
	{AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"},
	{AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"},
	{AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"},
	{AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"},
	{AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"},
	{AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"},
	{AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"},
	{AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"},
	{AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"},
	{AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"},
	{AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"},
	{AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = {
	{AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = {
	{AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = {
	{AMDGPU_GFX_GDS_MEM0, "GDS_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = {
	{AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"},
	{AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"},
	{AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"},
	{AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"},
	{AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"},
	{AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"},
	{AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"},
	{AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"},
	{AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"},
	{AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"},
	{AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"},
	{AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"},
	{AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"},
	{AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"},
	{AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"},
	{AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"},
	{AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"},
	{AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"},
	{AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"},
	{AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"},
	{AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"},
	{AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"},
	{AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"},
	{AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"},
	{AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"},
	{AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"},
	{AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"},
	{AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"},
	{AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"},
	{AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"},
	{AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"},
	{AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"},
	{AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"},
	{AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = {
	{AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"},
	{AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"},
	{AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"},
	{AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"},
	{AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"},
	{AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"},
	{AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"},
	{AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = {
	{AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = {
	{AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"},
	{AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"},
	{AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"},
	{AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = {
	{AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"},
	{AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"},
	{AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"},
	{AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"},
	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"},
	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"},
	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"},
	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"},
	{AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"},
	{AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"},
	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"},
	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"},
	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"},
	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"},
	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"},
	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"},
	{AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"},
	{AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"},
	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"},
	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"},
	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"},
	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"},
	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = {
	{AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"},
	{AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"},
	{AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"},
	{AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = {
	{AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"},
	{AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"},
	{AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"},
	{AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"},
	{AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = {
	{AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = {
	{AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = {
	{AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = {
	{AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"},
	{AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"},
	{AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"},
	{AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"},
	{AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"},
	{AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"},
	{AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"},
	{AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"},
	{AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"},
	{AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"},
	{AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"},
	{AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"},
	{AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"},
	{AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"},
	{AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"},
	{AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"},
	{AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"},
	{AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"},
	{AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"},
	{AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"},
	{AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"},
	{AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"},
	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"},
	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"},
	{AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = {
	{AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"},
	{AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"},
	{AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = {
	{AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"},
	{AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"},
	{AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"},
	{AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"},
	{AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"},
	{AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"},
	{AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"},
	{AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"},
	{AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"},
	{AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"},
	{AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"},
	{AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"},
	{AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"},
	{AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"},
	{AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"},
	{AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"},
	{AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"},
	{AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"},
	{AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"},
	{AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"},
	{AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"},
	{AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"},
	{AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"},
	{AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"},
	{AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"},
	{AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"},
	{AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"},
	{AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"},
	{AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"},
	{AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"},
	{AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"},
	{AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"},
	{AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"},
	{AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"},
	{AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"},
	{AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"},
	{AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"},
	{AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"},
	{AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"},
	{AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"},
	{AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"},
	{AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"},
	{AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"},
	{AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"},
	{AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"},
	{AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"},
	{AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"},
	{AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"},
	{AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"},
	{AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"},
	{AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"},
	{AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"},
	{AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"},
	{AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"},
	{AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"},
	{AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"},
	{AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"},
	{AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"},
	{AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"},
	{AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"},
	{AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"},
	{AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"},
	{AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"},
	{AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"},
	{AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"},
	{AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"},
	{AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"},
	{AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"},
	{AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"},
	{AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"},
	{AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"},
	{AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"},
	{AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = {
	{AMDGPU_GFX_ATC_L2_MEM, "ATC_L2_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = {
	{AMDGPU_GFX_UTCL2_MEM, "UTCL2_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = {
	{AMDGPU_GFX_VML2_MEM, "VML2_MEM"},
};

static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = {
	{AMDGPU_GFX_VML2_WALKER_MEM, "VML2_WALKER_MEM"},
};

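/*
 * Lookup table mapping each AMDGPU_GFX_MEM_TYPE to its memory-ID name
 * list above; RAS error decoding uses it to turn a raw hardware memory
 * ID into a human-readable block name.
 */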
static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = {
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list)
	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list)
};

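/*
 * Correctable-error (CE) status registers. Each entry gives the LO/HI
 * status register pair, the register instance count, the owning block
 * name, the memory-ID table used to decode reported IDs, and the number
 * of shader engines to iterate over.
 */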
static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = {
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
	    AMDGPU_GFX_RLC_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
	    AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
	    AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
	    AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
	    AMDGPU_GFX_GDS_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
	    AMDGPU_GFX_GC_CANE_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
	    AMDGPU_GFX_SPI_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
	    AMDGPU_GFX_SP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
	    AMDGPU_GFX_SP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
	    AMDGPU_GFX_SQ_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI),
	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
	    AMDGPU_GFX_SQC_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI),
	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
	    AMDGPU_GFX_TCX_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI),
	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
	    AMDGPU_GFX_TCC_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
	    AMDGPU_GFX_TA_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG),
	    27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
	    AMDGPU_GFX_TCI_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
	    AMDGPU_GFX_TCP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
	    AMDGPU_GFX_TD_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI),
	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
	    AMDGPU_GFX_GCEA_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
	    AMDGPU_GFX_LDS_MEM, 4},
};

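/*
 * Uncorrectable-error (UE) status registers. This list mirrors the CE
 * list above but carries an extra TCA entry, which is why the query and
 * reset loops below walk the CE-sized prefix in lockstep and then handle
 * the remaining UE-only entries separately.
 */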
static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = {
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
	    AMDGPU_GFX_RLC_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
	    AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
	    AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
	    AMDGPU_GFX_CP_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
	    AMDGPU_GFX_GDS_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
	    AMDGPU_GFX_GC_CANE_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI),
	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
	    AMDGPU_GFX_SPI_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
	    AMDGPU_GFX_SP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
	    AMDGPU_GFX_SP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
	    AMDGPU_GFX_SQ_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI),
	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
	    AMDGPU_GFX_SQC_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI),
	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
	    AMDGPU_GFX_TCX_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI),
	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
	    AMDGPU_GFX_TCC_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
	    AMDGPU_GFX_TA_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG),
	    27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
	    AMDGPU_GFX_TCI_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
	    AMDGPU_GFX_TCP_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
	    AMDGPU_GFX_TD_MEM, 4},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI),
	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"},
	    AMDGPU_GFX_TCA_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI),
	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
	    AMDGPU_GFX_GCEA_MEM, 1},
	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI),
	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
	    AMDGPU_GFX_LDS_MEM, 4},
};

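/*
 * Query CE/UE counts for one XCC: walk both register lists, selecting
 * each SE/instance through GRBM where needed, then attribute the totals
 * to the socket/die derived from xcc_id.
 */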
static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
					void *ras_error_status, int xcc_id)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	unsigned long ce_count = 0, ue_count = 0;
	uint32_t i, j, k;

	/* NOTE: convert xcc_id to physical XCD ID (XCD0 or XCD1) */
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = adev->smuio.funcs->get_socket_id(adev),
		.die_id = xcc_id & 0x01 ? 1 : 0,
	};

	mutex_lock(&adev->grbm_idx_mutex);

	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_query_ras_error_count(adev,
					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
					1,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size,
					GET_INST(GC, xcc_id),
					AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
					&ce_count);

				amdgpu_ras_inst_query_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
					GET_INST(GC, xcc_id),
					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
					&ue_count);
			}
		}
	}

	/* handle extra register entries of UE */
	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_query_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
					GET_INST(GC, xcc_id),
					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
					&ue_count);
			}
		}
	}

	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);

	/* the caller is expected to initialize
	 * err_data->ue_count and err_data->ce_count
	 */
	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
}

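/* Clear the CE/UE status registers of one XCC using the same walk as the query path. */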
static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
					void *ras_error_status, int xcc_id)
{
	uint32_t i, j, k;

	mutex_lock(&adev->grbm_idx_mutex);

	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_reset_ras_error_count(adev,
					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
					1,
					GET_INST(GC, xcc_id));

				amdgpu_ras_inst_reset_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					GET_INST(GC, xcc_id));
			}
		}
	}

	/* handle extra register entries of UE */
	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_reset_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					GET_INST(GC, xcc_id));
			}
		}
	}

	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);
}

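/*
 * Program the SQ watchdog of one XCC: SQ_TIMEOUT_CONFIG is written to
 * every shader engine, and when fatal timeouts are disabled the
 * module-supplied period is clamped to the valid 1..0x23 range first.
 */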
static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
					void *ras_error_status, int xcc_id)
{
	uint32_t i;
	uint32_t data;

	if (amdgpu_sriov_vf(adev))
		return;

	data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG);
	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
			     amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);

	if (amdgpu_watchdog_timer.timeout_fatal_disable &&
	    (amdgpu_watchdog_timer.period < 1 ||
	     amdgpu_watchdog_timer.period > 0x23)) {
		dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
		amdgpu_watchdog_timer.period = 0x23;
	}
	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
			     amdgpu_watchdog_timer.period);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data);
	}
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev,
					void *ras_error_status)
{
	amdgpu_gfx_ras_error_func(adev, ras_error_status,
			gfx_v9_4_3_inst_query_ras_err_count);
}

static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev)
{
	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
}

static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
{
	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
}

static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
	.name = "gfx_v9_4_3",
	.early_init = gfx_v9_4_3_early_init,
	.late_init = gfx_v9_4_3_late_init,
	.sw_init = gfx_v9_4_3_sw_init,
	.sw_fini = gfx_v9_4_3_sw_fini,
	.hw_init = gfx_v9_4_3_hw_init,
	.hw_fini = gfx_v9_4_3_hw_fini,
	.suspend = gfx_v9_4_3_suspend,
	.resume = gfx_v9_4_3_resume,
	.is_idle = gfx_v9_4_3_is_idle,
	.wait_for_idle = gfx_v9_4_3_wait_for_idle,
	.soft_reset = gfx_v9_4_3_soft_reset,
	.set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
	.set_powergating_state = gfx_v9_4_3_set_powergating_state,
	.get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
	.dump_ip_state = NULL,
	.print_ip_state = NULL,
};

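/*
 * emit_frame_size below is the worst-case dword budget reserved on the
 * ring per frame; the per-term comments name the emit helper each count
 * accounts for.
 */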
static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
		8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
		7 + /* gfx_v9_4_3_emit_mem_sync */
		5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
		15, /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
	.emit_ib_size =	7, /* gfx_v9_4_3_ring_emit_ib_compute */
	.emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
	.emit_fence = gfx_v9_4_3_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
	.test_ring = gfx_v9_4_3_ring_test_ring,
	.test_ib = gfx_v9_4_3_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
	.emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
	.emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
};

static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size =	7, /* gfx_v9_4_3_ring_emit_ib_compute */
	.emit_fence = gfx_v9_4_3_ring_emit_fence_kiq,
	.test_ring = gfx_v9_4_3_ring_test_ring,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v9_4_3_ring_emit_rreg,
	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
};

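/*
 * Compute rings are stored contiguously per XCC, so ring j of XCC i
 * lives at index j + i * num_compute_rings.
 */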
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
{
	int i, j, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq;

		for (j = 0; j < adev->gfx.num_compute_rings; j++)
			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs
					= &gfx_v9_4_3_ring_funcs_compute;
	}
}

static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = {
	.set = gfx_v9_4_3_set_eop_interrupt_state,
	.process = gfx_v9_4_3_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
	.set = gfx_v9_4_3_set_priv_reg_fault_state,
	.process = gfx_v9_4_3_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
	.set = gfx_v9_4_3_set_priv_inst_fault_state,
	.process = gfx_v9_4_3_priv_inst_irq,
};

static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
}

static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs;
}

static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		/* 9.4.3 removed all GDS internal memory;
		 * only GWS opcodes such as barrier and semaphore
		 * are supported in the kernel.
		 */
		adev->gds.gds_size = 0;
		break;
	default:
		adev->gds.gds_size = 0x10000;
		break;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		/* deprecated for 9.4.3, no usage at all */
		adev->gds.gds_compute_max_wave_id = 0;
		break;
	default:
		/* this really depends on the chip */
		adev->gds.gds_compute_max_wave_id = 0x7ff;
		break;
	}

	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}

static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						   u32 bitmap, int xcc_id)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
}

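/*
 * Active CUs = all CUs minus those marked inactive by either the fuse
 * (CC_GC_SHADER_ARRAY_CONFIG) or user (GC_USER_SHADER_ARRAY_CONFIG) mask.
 */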
static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}

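/*
 * Build per-XCC CU bitmaps and totals. When every shader engine of an
 * XCC reports the same CU count, CPC harvesting relaunch/dispatch is
 * disabled for that XCC.
 */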
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				  struct amdgpu_cu_info *cu_info)
{
	int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
	unsigned disable_masks[4 * 4];
	bool is_symmetric_cus;

	if (!adev || !cu_info)
		return -EINVAL;

	/*
	 * 16 comes from the 4x4 bitmap array size, which covers all gfx9 ASICs
	 */
	if (adev->gfx.config.max_shader_engines *
		adev->gfx.config.max_sh_per_se > 16)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks,
				    adev->gfx.config.max_shader_engines,
				    adev->gfx.config.max_sh_per_se);

	mutex_lock(&adev->grbm_idx_mutex);
	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
		is_symmetric_cus = true;
		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
				mask = 1;
				ao_bitmap = 0;
				counter = 0;
				gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
				gfx_v9_4_3_set_user_cu_inactive_bitmap(
					adev,
					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
					xcc_id);
				bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);

				cu_info->bitmap[xcc_id][i][j] = bitmap;

				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
					if (bitmap & mask) {
						if (counter < adev->gfx.config.max_cu_per_sh)
							ao_bitmap |= mask;
						counter++;
					}
					mask <<= 1;
				}
				active_cu_number += counter;
				if (i < 2 && j < 2)
					ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
				cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
			}
			if (i && is_symmetric_cus && prev_counter != counter)
				is_symmetric_cus = false;
			prev_counter = counter;
		}
		if (is_symmetric_cus) {
			tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
		}
		gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
					    xcc_id);
	}
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 4,
	.rev = 3,
	.funcs = &gfx_v9_4_3_ip_funcs,
};

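/*
 * Partition (XCP) resume: re-initialize constants, then RLC (bare metal
 * only) and CP on each XCC instance selected by inst_mask.
 */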
static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp_mask;
	int i, r;

	/* TODO: initialize golden registers */
	/* gfx_v9_4_3_init_golden_registers(adev); */

	tmp_mask = inst_mask;
	for_each_inst(i, tmp_mask)
		gfx_v9_4_3_xcc_constants_init(adev, i);

	if (!amdgpu_sriov_vf(adev)) {
		tmp_mask = inst_mask;
		for_each_inst(i, tmp_mask) {
			r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
			if (r)
				return r;
		}
	}

	tmp_mask = inst_mask;
	for_each_inst(i, tmp_mask) {
		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
		if (r)
			return r;
	}

	return 0;
}

static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for_each_inst(i, inst_mask)
		gfx_v9_4_3_xcc_fini(adev, i);

	return 0;
}

struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
	.suspend = &gfx_v9_4_3_xcp_suspend,
	.resume = &gfx_v9_4_3_xcp_resume
};

struct amdgpu_ras_block_hw_ops  gfx_v9_4_3_ras_ops = {
	.query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
	.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
};

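/*
 * Late RAS init: perform the common RAS block setup, then bind the GFX
 * ACA handle (gfx_v9_4_3_aca_info is defined earlier in this file);
 * tear the common state back down if the bind fails.
 */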
static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX,
				&gfx_v9_4_3_aca_info,
				NULL);
	if (r)
		goto late_fini;

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);

	return r;
}

struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
	.ras_block = {
		.hw_ops = &gfx_v9_4_3_ras_ops,
		.ras_late_init = &gfx_v9_4_3_ras_late_init,
	},
	.enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
};