1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include <linux/firmware.h>
24#include <linux/slab.h>
25#include <linux/module.h>
26#include <linux/pci.h>
27
28#include "amdgpu.h"
29#include "amdgpu_atombios.h"
30#include "amdgpu_ih.h"
31#include "amdgpu_uvd.h"
32#include "amdgpu_vce.h"
33#include "amdgpu_ucode.h"
34#include "amdgpu_psp.h"
35#include "atom.h"
36#include "amd_pcie.h"
37
38#include "uvd/uvd_7_0_offset.h"
39#include "gc/gc_9_0_offset.h"
40#include "gc/gc_9_0_sh_mask.h"
41#include "sdma0/sdma0_4_0_offset.h"
42#include "sdma1/sdma1_4_0_offset.h"
43#include "hdp/hdp_4_0_offset.h"
44#include "hdp/hdp_4_0_sh_mask.h"
45#include "smuio/smuio_9_0_offset.h"
46#include "smuio/smuio_9_0_sh_mask.h"
47#include "nbio/nbio_7_0_default.h"
48#include "nbio/nbio_7_0_offset.h"
49#include "nbio/nbio_7_0_sh_mask.h"
50#include "nbio/nbio_7_0_smn.h"
51#include "mp/mp_9_0_offset.h"
52
53#include "soc15.h"
54#include "soc15_common.h"
55#include "gfx_v9_0.h"
56#include "gmc_v9_0.h"
57#include "gfxhub_v1_0.h"
58#include "mmhub_v1_0.h"
59#include "df_v1_7.h"
60#include "df_v3_6.h"
61#include "nbio_v6_1.h"
62#include "nbio_v7_0.h"
63#include "nbio_v7_4.h"
64#include "vega10_ih.h"
65#include "sdma_v4_0.h"
66#include "uvd_v7_0.h"
67#include "vce_v4_0.h"
68#include "vcn_v1_0.h"
69#include "vcn_v2_0.h"
70#include "vcn_v2_5.h"
71#include "dce_virtual.h"
72#include "mxgpu_ai.h"
73#include "amdgpu_smu.h"
74#include "amdgpu_ras.h"
75#include "amdgpu_xgmi.h"
76#include <uapi/linux/kfd_ioctl.h>
77
78#define mmMP0_MISC_CGTT_CTRL0 0x01b9
79#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
80#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
81#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
82
83/* for Vega20 register name change */
84#define mmHDP_MEM_POWER_CTRL 0x00d4
85#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
86#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
87#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
88#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
89#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
90/*
91 * Indirect register accessors
92 */
93static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
94{
95 unsigned long flags, address, data;
96 u32 r;
97 address = adev->nbio.funcs->get_pcie_index_offset(adev);
98 data = adev->nbio.funcs->get_pcie_data_offset(adev);
99
100 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
101 WREG32(address, reg);
102 (void)RREG32(address);
103 r = RREG32(data);
104 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
105 return r;
106}
107
108static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
109{
110 unsigned long flags, address, data;
111
112 address = adev->nbio.funcs->get_pcie_index_offset(adev);
113 data = adev->nbio.funcs->get_pcie_data_offset(adev);
114
115 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
116 WREG32(address, reg);
117 (void)RREG32(address);
118 WREG32(data, v);
119 (void)RREG32(data);
120 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
121}
122
123static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
124{
125 unsigned long flags, address, data;
126 u64 r;
127 address = adev->nbio.funcs->get_pcie_index_offset(adev);
128 data = adev->nbio.funcs->get_pcie_data_offset(adev);
129
130 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
131 /* read low 32 bit */
132 WREG32(address, reg);
133 (void)RREG32(address);
134 r = RREG32(data);
135
136 /* read high 32 bit */
137 WREG32(address, reg + 4);
138 (void)RREG32(address);
139 r |= ((u64)RREG32(data) << 32);
140 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
141 return r;
142}
143
144static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
145{
146 unsigned long flags, address, data;
147
148 address = adev->nbio.funcs->get_pcie_index_offset(adev);
149 data = adev->nbio.funcs->get_pcie_data_offset(adev);
150
151 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
152 /* write low 32 bit */
153 WREG32(address, reg);
154 (void)RREG32(address);
155 WREG32(data, (u32)(v & 0xffffffffULL));
156 (void)RREG32(data);
157
158 /* write high 32 bit */
159 WREG32(address, reg + 4);
160 (void)RREG32(address);
161 WREG32(data, (u32)(v >> 32));
162 (void)RREG32(data);
163 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
164}
165
166static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
167{
168 unsigned long flags, address, data;
169 u32 r;
170
171 address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
172 data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
173
174 spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
175 WREG32(address, ((reg) & 0x1ff));
176 r = RREG32(data);
177 spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
178 return r;
179}
180
181static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
182{
183 unsigned long flags, address, data;
184
185 address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
186 data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
187
188 spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
189 WREG32(address, ((reg) & 0x1ff));
190 WREG32(data, (v));
191 spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
192}
193
194static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
195{
196 unsigned long flags, address, data;
197 u32 r;
198
199 address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
200 data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
201
202 spin_lock_irqsave(&adev->didt_idx_lock, flags);
203 WREG32(address, (reg));
204 r = RREG32(data);
205 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
206 return r;
207}
208
209static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
210{
211 unsigned long flags, address, data;
212
213 address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
214 data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
215
216 spin_lock_irqsave(&adev->didt_idx_lock, flags);
217 WREG32(address, (reg));
218 WREG32(data, (v));
219 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
220}
221
222static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
223{
224 unsigned long flags;
225 u32 r;
226
227 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
228 WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
229 r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
230 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
231 return r;
232}
233
234static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
235{
236 unsigned long flags;
237
238 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
239 WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
240 WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
241 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
242}
243
244static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
245{
246 unsigned long flags;
247 u32 r;
248
249 spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
250 WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
251 r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
252 spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
253 return r;
254}
255
256static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
257{
258 unsigned long flags;
259
260 spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
261 WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
262 WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
263 spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
264}
265
266static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
267{
268 return adev->nbio.funcs->get_memsize(adev);
269}
270
271static u32 soc15_get_xclk(struct amdgpu_device *adev)
272{
273 return adev->clock.spll.reference_freq;
274}
275
276
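/* Select which ME/pipe/queue/VMID subsequent GRBM register accesses apply to */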
277void soc15_grbm_select(struct amdgpu_device *adev,
278 u32 me, u32 pipe, u32 queue, u32 vmid)
279{
280 u32 grbm_gfx_cntl = 0;
281 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
282 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
283 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
284 grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
285
286 WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
287}
288
289static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
290{
291 /* todo */
292}
293
294static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
295{
296 /* todo */
297 return false;
298}
299
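/*
 * Read the VBIOS image dword by dword through the SMUIO ROM_INDEX/ROM_DATA
 * registers. Not used on APUs, whose VBIOS is part of the system BIOS image.
 */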
300static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
301 u8 *bios, u32 length_bytes)
302{
303 u32 *dw_ptr;
304 u32 i, length_dw;
305
306 if (bios == NULL)
307 return false;
308 if (length_bytes == 0)
309 return false;
310 /* APU vbios image is part of sbios image */
311 if (adev->flags & AMD_IS_APU)
312 return false;
313
314 dw_ptr = (u32 *)bios;
315 length_dw = ALIGN(length_bytes, 4) / 4;
316
317 /* set rom index to 0 */
318 WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
319 /* read out the rom data */
320 for (i = 0; i < length_dw; i++)
321 dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
322
323 return true;
324}
325
326static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
327 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
328 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
329 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
330 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
331 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
332 { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
333 { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
334 { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
335 { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
336 { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
337 { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
338 { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
339 { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
340 { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
341 { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
342 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
343 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
344 { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
345 { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
346 { SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
347};
348
349static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
350 u32 sh_num, u32 reg_offset)
351{
352 uint32_t val;
353
354 mutex_lock(&adev->grbm_idx_mutex);
355 if (se_num != 0xffffffff || sh_num != 0xffffffff)
356 amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
357
358 val = RREG32(reg_offset);
359
360 if (se_num != 0xffffffff || sh_num != 0xffffffff)
361 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
362 mutex_unlock(&adev->grbm_idx_mutex);
363 return val;
364}
365
366static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
367 bool indexed, u32 se_num,
368 u32 sh_num, u32 reg_offset)
369{
370 if (indexed) {
371 return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
372 } else {
373 if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
374 return adev->gfx.config.gb_addr_config;
375 else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
376 return adev->gfx.config.db_debug2;
377 return RREG32(reg_offset);
378 }
379}
380
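/*
 * Read a register on behalf of the caller, but only if it is listed in
 * soc15_allowed_read_registers; grbm_indexed entries are read under the
 * requested SE/SH selection.
 */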
381static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
382 u32 sh_num, u32 reg_offset, u32 *value)
383{
384 uint32_t i;
385 struct soc15_allowed_register_entry *en;
386
387 *value = 0;
388 for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
389 en = &soc15_allowed_read_registers[i];
390 if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
391 + en->reg_offset))
392 continue;
393
394 *value = soc15_get_register_value(adev,
395 soc15_allowed_read_registers[i].grbm_indexed,
396 se_num, sh_num, reg_offset);
397 return 0;
398 }
399 return -EINVAL;
400}
401
402
403/**
404 * soc15_program_register_sequence - program an array of registers.
405 *
406 * @adev: amdgpu_device pointer
407 * @regs: pointer to the register array
408 * @array_size: size of the register array
409 *
410 * Programs an array of registers with AND and OR masks.
411 * This is a helper for setting golden registers.
412 */
413
414void soc15_program_register_sequence(struct amdgpu_device *adev,
415 const struct soc15_reg_golden *regs,
416 const u32 array_size)
417{
418 const struct soc15_reg_golden *entry;
419 u32 tmp, reg;
420 int i;
421
422 for (i = 0; i < array_size; ++i) {
423 entry = &regs[i];
424 reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
425
426 if (entry->and_mask == 0xffffffff) {
427 tmp = entry->or_mask;
428 } else {
429 tmp = RREG32(reg);
430 tmp &= ~(entry->and_mask);
431 tmp |= (entry->or_mask & entry->and_mask);
432 }
433
434 if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
435 reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
436 reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
437 reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
438 WREG32_RLC(reg, tmp);
439 else
440 WREG32(reg, tmp);
441
442 }
443
444}
445
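/*
 * Mode1 (full chip) reset through the PSP: disable bus mastering, save the
 * PCI config state, ask the PSP to reset the ASIC, then restore the config
 * state and wait for the memory controller to report a valid memory size.
 */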
446static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
447{
448 u32 i;
449 int ret = 0;
450
451 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
452
453 dev_info(adev->dev, "GPU mode1 reset\n");
454
455 /* disable BM */
456 pci_clear_master(adev->pdev);
457
458 pci_save_state(adev->pdev);
459
460 ret = psp_gpu_reset(adev);
461 if (ret)
462 dev_err(adev->dev, "GPU mode1 reset failed\n");
463
464 pci_restore_state(adev->pdev);
465
466 /* wait for asic to come out of reset */
467 for (i = 0; i < adev->usec_timeout; i++) {
468 u32 memsize = adev->nbio.funcs->get_memsize(adev);
469
470 if (memsize != 0xffffffff)
471 break;
472 udelay(1);
473 }
474
475 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
476
477 return ret;
478}
479
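/* Query whether BACO (Bus Active, Chip Off) reset is supported, via the SW SMU or powerplay */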
480static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
481{
482 if (is_support_sw_smu(adev)) {
483 struct smu_context *smu = &adev->smu;
484
485 *cap = smu_baco_is_support(smu);
486 return 0;
487 } else {
488 void *pp_handle = adev->powerplay.pp_handle;
489 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
490
491 if (!pp_funcs || !pp_funcs->get_asic_baco_capability) {
492 *cap = false;
493 return -ENOENT;
494 }
495
496 return pp_funcs->get_asic_baco_capability(pp_handle, cap);
497 }
498}
499
500static int soc15_asic_baco_reset(struct amdgpu_device *adev)
501{
502 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
503
504 /* avoid NBIF getting stuck when doing RAS recovery in BACO reset */
505 if (ras && ras->supported)
506 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
507
508 dev_info(adev->dev, "GPU BACO reset\n");
509
510 if (is_support_sw_smu(adev)) {
511 struct smu_context *smu = &adev->smu;
512
513 if (smu_baco_reset(smu))
514 return -EIO;
515 } else {
516 void *pp_handle = adev->powerplay.pp_handle;
517 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
518
519 if (!pp_funcs || !pp_funcs->get_asic_baco_state || !pp_funcs->set_asic_baco_state)
520 return -ENOENT;
521
522 /* enter BACO state */
523 if (pp_funcs->set_asic_baco_state(pp_handle, 1))
524 return -EIO;
525
526 /* exit BACO state */
527 if (pp_funcs->set_asic_baco_state(pp_handle, 0))
528 return -EIO;
529 }
530
531 /* re-enable doorbell interrupt after BACO exit */
532 if (ras && ras->supported)
533 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
534
535 return 0;
536}
537
538static int soc15_mode2_reset(struct amdgpu_device *adev)
539{
540 if (is_support_sw_smu(adev))
541 return smu_mode2_reset(&adev->smu);
542 if (!adev->powerplay.pp_funcs ||
543 !adev->powerplay.pp_funcs->asic_reset_mode_2)
544 return -ENOENT;
545
546 return adev->powerplay.pp_funcs->asic_reset_mode_2(adev->powerplay.pp_handle);
547}
548
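/*
 * Pick the reset method: Raven/Renoir use mode2, Vega10/12/20 use BACO when
 * the firmware supports it (and, on Vega20, only when the device is neither
 * part of an XGMI hive nor running with RAS enabled), otherwise fall back to
 * a mode1 reset.
 */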
549static enum amd_reset_method
550soc15_asic_reset_method(struct amdgpu_device *adev)
551{
552 bool baco_reset;
553
554 switch (adev->asic_type) {
555 case CHIP_RAVEN:
556 case CHIP_RENOIR:
557 return AMD_RESET_METHOD_MODE2;
558 case CHIP_VEGA10:
559 case CHIP_VEGA12:
560 soc15_asic_get_baco_capability(adev, &baco_reset);
561 break;
562 case CHIP_VEGA20:
563 if (adev->psp.sos_fw_version >= 0x80067)
564 soc15_asic_get_baco_capability(adev, &baco_reset);
565 else
566 baco_reset = false;
567 if (baco_reset) {
568 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
569 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
570
571 if (hive || (ras && ras->supported))
572 baco_reset = false;
573 }
574 break;
575 default:
576 baco_reset = false;
577 break;
578 }
579
580 if (baco_reset)
581 return AMD_RESET_METHOD_BACO;
582 else
583 return AMD_RESET_METHOD_MODE1;
584}
585
586static int soc15_asic_reset(struct amdgpu_device *adev)
587{
588 switch (soc15_asic_reset_method(adev)) {
589 case AMD_RESET_METHOD_BACO:
590 if (!adev->in_suspend)
591 amdgpu_inc_vram_lost(adev);
592 return soc15_asic_baco_reset(adev);
593 case AMD_RESET_METHOD_MODE2:
594 return soc15_mode2_reset(adev);
595 default:
596 if (!adev->in_suspend)
597 amdgpu_inc_vram_lost(adev);
598 return soc15_asic_mode1_reset(adev);
599 }
600}
601
602/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
603 u32 cntl_reg, u32 status_reg)
604{
605 return 0;
606}*/
607
608static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
609{
610 /*int r;
611
612 r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
613 if (r)
614 return r;
615
616 r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
617 */
618 return 0;
619}
620
621static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
622{
623 /* todo */
624
625 return 0;
626}
627
628static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
629{
630 if (pci_is_root_bus(adev->pdev->bus))
631 return;
632
633 if (amdgpu_pcie_gen2 == 0)
634 return;
635
636 if (adev->flags & AMD_IS_APU)
637 return;
638
639 if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
640 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
641 return;
642
643 /* todo */
644}
645
646static void soc15_program_aspm(struct amdgpu_device *adev)
647{
648
649 if (amdgpu_aspm == 0)
650 return;
651
652 /* todo */
653}
654
655static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
656 bool enable)
657{
658 adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
659 adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
660}
661
662static const struct amdgpu_ip_block_version vega10_common_ip_block =
663{
664 .type = AMD_IP_BLOCK_TYPE_COMMON,
665 .major = 2,
666 .minor = 0,
667 .rev = 0,
668 .funcs = &soc15_common_ip_funcs,
669};
670
671static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
672{
673 return adev->nbio.funcs->get_rev_id(adev);
674}
675
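/*
 * Set the per-ASIC register bases and NBIO/DF callbacks, then add the IP
 * blocks (common, GMC, IH, PSP, GFX, SDMA, SMU, display and multimedia)
 * in the order they are initialized.
 */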
676int soc15_set_ip_blocks(struct amdgpu_device *adev)
677{
678 /* Set IP register base before any HW register access */
679 switch (adev->asic_type) {
680 case CHIP_VEGA10:
681 case CHIP_VEGA12:
682 case CHIP_RAVEN:
683 case CHIP_RENOIR:
684 vega10_reg_base_init(adev);
685 break;
686 case CHIP_VEGA20:
687 vega20_reg_base_init(adev);
688 break;
689 case CHIP_ARCTURUS:
690 arct_reg_base_init(adev);
691 break;
692 default:
693 return -EINVAL;
694 }
695
696 if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
697 adev->gmc.xgmi.supported = true;
698
699 if (adev->flags & AMD_IS_APU) {
700 adev->nbio.funcs = &nbio_v7_0_funcs;
701 adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
702 } else if (adev->asic_type == CHIP_VEGA20 ||
703 adev->asic_type == CHIP_ARCTURUS) {
704 adev->nbio.funcs = &nbio_v7_4_funcs;
705 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
706 } else {
707 adev->nbio.funcs = &nbio_v6_1_funcs;
708 adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
709 }
710
711 if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
712 adev->df_funcs = &df_v3_6_funcs;
713 else
714 adev->df_funcs = &df_v1_7_funcs;
715
716 adev->rev_id = soc15_get_rev_id(adev);
717 adev->nbio.funcs->detect_hw_virt(adev);
718
719 if (amdgpu_sriov_vf(adev))
720 adev->virt.ops = &xgpu_ai_virt_ops;
721
722 switch (adev->asic_type) {
723 case CHIP_VEGA10:
724 case CHIP_VEGA12:
725 case CHIP_VEGA20:
726 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
727 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
728
729 /* For Vega10 SR-IOV, PSP needs to be initialized before IH */
730 if (amdgpu_sriov_vf(adev)) {
731 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
732 if (adev->asic_type == CHIP_VEGA20)
733 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
734 else
735 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
736 }
737 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
738 } else {
739 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
740 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
741 if (adev->asic_type == CHIP_VEGA20)
742 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
743 else
744 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
745 }
746 }
747 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
748 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
749 if (!amdgpu_sriov_vf(adev)) {
750 if (is_support_sw_smu(adev))
751 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
752 else
753 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
754 }
755 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
756 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
757#if defined(CONFIG_DRM_AMD_DC)
758 else if (amdgpu_device_has_dc_support(adev))
759 amdgpu_device_ip_block_add(adev, &dm_ip_block);
760#endif
761 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
762 amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
763 amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
764 }
765 break;
766 case CHIP_RAVEN:
767 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
768 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
769 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
770 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
771 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
772 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
773 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
774 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
775 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
776 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
777#if defined(CONFIG_DRM_AMD_DC)
778 else if (amdgpu_device_has_dc_support(adev))
779 amdgpu_device_ip_block_add(adev, &dm_ip_block);
780#endif
781 amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
782 break;
783 case CHIP_ARCTURUS:
784 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
785 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
786
787 if (amdgpu_sriov_vf(adev)) {
788 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
789 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
790 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
791 } else {
792 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
793 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
794 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
795 }
796
797 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
798 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
799 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
800 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
801 if (!amdgpu_sriov_vf(adev))
802 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
803
804 if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT))
805 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
806 break;
807 case CHIP_RENOIR:
808 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
809 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
810 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
811 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
812 amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
813 if (is_support_sw_smu(adev))
814 amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
815 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
816 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
817 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
818 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
819#if defined(CONFIG_DRM_AMD_DC)
820 else if (amdgpu_device_has_dc_support(adev))
821 amdgpu_device_ip_block_add(adev, &dm_ip_block);
822#endif
823 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
824 break;
825 default:
826 return -EINVAL;
827 }
828
829 return 0;
830}
831
832static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
833{
834 adev->nbio.funcs->hdp_flush(adev, ring);
835}
836
837static void soc15_invalidate_hdp(struct amdgpu_device *adev,
838 struct amdgpu_ring *ring)
839{
840 if (!ring || !ring->funcs->emit_wreg)
841 WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
842 else
843 amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
844 HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
845}
846
847static bool soc15_need_full_reset(struct amdgpu_device *adev)
848{
849 /* change this when we implement soft reset */
850 return true;
851}
852static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
853 uint64_t *count1)
854{
855 uint32_t perfctr = 0;
856 uint64_t cnt0_of, cnt1_of;
857 int tmp;
858
859 /* This reports 0 on APUs, so return to avoid writing/reading registers
860 * that may or may not be different from their GPU counterparts
861 */
862 if (adev->flags & AMD_IS_APU)
863 return;
864
865 /* Set the 2 events that we wish to watch, defined above */
866 /* Reg 40 is # received msgs */
867 /* Reg 104 is # of posted requests sent */
868 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
869 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
870
871 /* Write to enable desired perf counters */
872 WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
873 /* Zero out and enable the perf counters
874 * Write 0x5:
875 * Bit 0 = Start all counters(1)
876 * Bit 2 = Global counter reset enable(1)
877 */
878 WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
879
880 msleep(1000);
881
882 /* Load the shadow and disable the perf counters
883 * Write 0x2:
884 * Bit 0 = Stop counters(0)
885 * Bit 1 = Load the shadow counters(1)
886 */
887 WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
888
889 /* Read register values to get any >32bit overflow */
890 tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
891 cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
892 cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
893
894 /* Get the values and add the overflow */
895 *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
896 *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
897}
898
899static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
900 uint64_t *count1)
901{
902 uint32_t perfctr = 0;
903 uint64_t cnt0_of, cnt1_of;
904 int tmp;
905
906 /* This reports 0 on APUs, so return to avoid writing/reading registers
907 * that may or may not be different from their GPU counterparts
908 */
909 if (adev->flags & AMD_IS_APU)
910 return;
911
912 /* Set the 2 events that we wish to watch, defined above */
913 /* Reg 40 is # received msgs */
914 /* Reg 108 is # of posted requests sent on VG20 */
915 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
916 EVENT0_SEL, 40);
917 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
918 EVENT1_SEL, 108);
919
920 /* Write to enable desired perf counters */
921 WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
922 /* Zero out and enable the perf counters
923 * Write 0x5:
924 * Bit 0 = Start all counters(1)
925 * Bit 2 = Global counter reset enable(1)
926 */
927 WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
928
929 msleep(1000);
930
931 /* Load the shadow and disable the perf counters
932 * Write 0x2:
933 * Bit 0 = Stop counters(0)
934 * Bit 1 = Load the shadow counters(1)
935 */
936 WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
937
938 /* Read register values to get any >32bit overflow */
939 tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
940 cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
941 cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);
942
943 /* Get the values and add the overflow */
944 *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
945 *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
946}
947
948static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
949{
950 u32 sol_reg;
951
952 /* Just return false for soc15 GPUs. Reset does not seem to
953 * be necessary.
954 */
955 if (!amdgpu_passthrough(adev))
956 return false;
957
958 if (adev->flags & AMD_IS_APU)
959 return false;
960
961 /* Check the sOS sign-of-life register to confirm the sys driver and sOS
962 * have already been loaded.
963 */
964 sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
965 if (sol_reg)
966 return true;
967
968 return false;
969}
970
971static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
972{
973 uint64_t nak_r, nak_g;
974
975 /* Get the number of NAKs received and generated */
976 nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
977 nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);
978
979 /* Add the total number of NAKs, i.e. the number of replays */
980 return (nak_r + nak_g);
981}
982
983static const struct amdgpu_asic_funcs soc15_asic_funcs =
984{
985 .read_disabled_bios = &soc15_read_disabled_bios,
986 .read_bios_from_rom = &soc15_read_bios_from_rom,
987 .read_register = &soc15_read_register,
988 .reset = &soc15_asic_reset,
989 .reset_method = &soc15_asic_reset_method,
990 .set_vga_state = &soc15_vga_set_state,
991 .get_xclk = &soc15_get_xclk,
992 .set_uvd_clocks = &soc15_set_uvd_clocks,
993 .set_vce_clocks = &soc15_set_vce_clocks,
994 .get_config_memsize = &soc15_get_config_memsize,
995 .flush_hdp = &soc15_flush_hdp,
996 .invalidate_hdp = &soc15_invalidate_hdp,
997 .need_full_reset = &soc15_need_full_reset,
998 .init_doorbell_index = &vega10_doorbell_index_init,
999 .get_pcie_usage = &soc15_get_pcie_usage,
1000 .need_reset_on_init = &soc15_need_reset_on_init,
1001 .get_pcie_replay_count = &soc15_get_pcie_replay_count,
1002};
1003
1004static const struct amdgpu_asic_funcs vega20_asic_funcs =
1005{
1006 .read_disabled_bios = &soc15_read_disabled_bios,
1007 .read_bios_from_rom = &soc15_read_bios_from_rom,
1008 .read_register = &soc15_read_register,
1009 .reset = &soc15_asic_reset,
1010 .set_vga_state = &soc15_vga_set_state,
1011 .get_xclk = &soc15_get_xclk,
1012 .set_uvd_clocks = &soc15_set_uvd_clocks,
1013 .set_vce_clocks = &soc15_set_vce_clocks,
1014 .get_config_memsize = &soc15_get_config_memsize,
1015 .flush_hdp = &soc15_flush_hdp,
1016 .invalidate_hdp = &soc15_invalidate_hdp,
1017 .need_full_reset = &soc15_need_full_reset,
1018 .init_doorbell_index = &vega20_doorbell_index_init,
1019 .get_pcie_usage = &vega20_get_pcie_usage,
1020 .need_reset_on_init = &soc15_need_reset_on_init,
1021 .get_pcie_replay_count = &soc15_get_pcie_replay_count,
1022 .reset_method = &soc15_asic_reset_method
1023};
1024
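/*
 * Hook up the indirect register accessors and set the clockgating/powergating
 * flags and external revision id for each ASIC.
 */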
1025static int soc15_common_early_init(void *handle)
1026{
1027#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
1028 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1029
1030 adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
1031 adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
1032 adev->smc_rreg = NULL;
1033 adev->smc_wreg = NULL;
1034 adev->pcie_rreg = &soc15_pcie_rreg;
1035 adev->pcie_wreg = &soc15_pcie_wreg;
1036 adev->pcie_rreg64 = &soc15_pcie_rreg64;
1037 adev->pcie_wreg64 = &soc15_pcie_wreg64;
1038 adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
1039 adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
1040 adev->didt_rreg = &soc15_didt_rreg;
1041 adev->didt_wreg = &soc15_didt_wreg;
1042 adev->gc_cac_rreg = &soc15_gc_cac_rreg;
1043 adev->gc_cac_wreg = &soc15_gc_cac_wreg;
1044 adev->se_cac_rreg = &soc15_se_cac_rreg;
1045 adev->se_cac_wreg = &soc15_se_cac_wreg;
1046
1047
1048 adev->external_rev_id = 0xFF;
1049 switch (adev->asic_type) {
1050 case CHIP_VEGA10:
1051 adev->asic_funcs = &soc15_asic_funcs;
1052 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1053 AMD_CG_SUPPORT_GFX_MGLS |
1054 AMD_CG_SUPPORT_GFX_RLC_LS |
1055 AMD_CG_SUPPORT_GFX_CP_LS |
1056 AMD_CG_SUPPORT_GFX_3D_CGCG |
1057 AMD_CG_SUPPORT_GFX_3D_CGLS |
1058 AMD_CG_SUPPORT_GFX_CGCG |
1059 AMD_CG_SUPPORT_GFX_CGLS |
1060 AMD_CG_SUPPORT_BIF_MGCG |
1061 AMD_CG_SUPPORT_BIF_LS |
1062 AMD_CG_SUPPORT_HDP_LS |
1063 AMD_CG_SUPPORT_DRM_MGCG |
1064 AMD_CG_SUPPORT_DRM_LS |
1065 AMD_CG_SUPPORT_ROM_MGCG |
1066 AMD_CG_SUPPORT_DF_MGCG |
1067 AMD_CG_SUPPORT_SDMA_MGCG |
1068 AMD_CG_SUPPORT_SDMA_LS |
1069 AMD_CG_SUPPORT_MC_MGCG |
1070 AMD_CG_SUPPORT_MC_LS;
1071 adev->pg_flags = 0;
1072 adev->external_rev_id = 0x1;
1073 break;
1074 case CHIP_VEGA12:
1075 adev->asic_funcs = &soc15_asic_funcs;
1076 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1077 AMD_CG_SUPPORT_GFX_MGLS |
1078 AMD_CG_SUPPORT_GFX_CGCG |
1079 AMD_CG_SUPPORT_GFX_CGLS |
1080 AMD_CG_SUPPORT_GFX_3D_CGCG |
1081 AMD_CG_SUPPORT_GFX_3D_CGLS |
1082 AMD_CG_SUPPORT_GFX_CP_LS |
1083 AMD_CG_SUPPORT_MC_LS |
1084 AMD_CG_SUPPORT_MC_MGCG |
1085 AMD_CG_SUPPORT_SDMA_MGCG |
1086 AMD_CG_SUPPORT_SDMA_LS |
1087 AMD_CG_SUPPORT_BIF_MGCG |
1088 AMD_CG_SUPPORT_BIF_LS |
1089 AMD_CG_SUPPORT_HDP_MGCG |
1090 AMD_CG_SUPPORT_HDP_LS |
1091 AMD_CG_SUPPORT_ROM_MGCG |
1092 AMD_CG_SUPPORT_VCE_MGCG |
1093 AMD_CG_SUPPORT_UVD_MGCG;
1094 adev->pg_flags = 0;
1095 adev->external_rev_id = adev->rev_id + 0x14;
1096 break;
1097 case CHIP_VEGA20:
1098 adev->asic_funcs = &vega20_asic_funcs;
1099 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1100 AMD_CG_SUPPORT_GFX_MGLS |
1101 AMD_CG_SUPPORT_GFX_CGCG |
1102 AMD_CG_SUPPORT_GFX_CGLS |
1103 AMD_CG_SUPPORT_GFX_3D_CGCG |
1104 AMD_CG_SUPPORT_GFX_3D_CGLS |
1105 AMD_CG_SUPPORT_GFX_CP_LS |
1106 AMD_CG_SUPPORT_MC_LS |
1107 AMD_CG_SUPPORT_MC_MGCG |
1108 AMD_CG_SUPPORT_SDMA_MGCG |
1109 AMD_CG_SUPPORT_SDMA_LS |
1110 AMD_CG_SUPPORT_BIF_MGCG |
1111 AMD_CG_SUPPORT_BIF_LS |
1112 AMD_CG_SUPPORT_HDP_MGCG |
1113 AMD_CG_SUPPORT_HDP_LS |
1114 AMD_CG_SUPPORT_ROM_MGCG |
1115 AMD_CG_SUPPORT_VCE_MGCG |
1116 AMD_CG_SUPPORT_UVD_MGCG;
1117 adev->pg_flags = 0;
1118 adev->external_rev_id = adev->rev_id + 0x28;
1119 break;
1120 case CHIP_RAVEN:
1121 adev->asic_funcs = &soc15_asic_funcs;
1122 if (adev->rev_id >= 0x8)
1123 adev->external_rev_id = adev->rev_id + 0x79;
1124 else if (adev->pdev->device == 0x15d8)
1125 adev->external_rev_id = adev->rev_id + 0x41;
1126 else if (adev->rev_id == 1)
1127 adev->external_rev_id = adev->rev_id + 0x20;
1128 else
1129 adev->external_rev_id = adev->rev_id + 0x01;
1130
1131 if (adev->rev_id >= 0x8) {
1132 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1133 AMD_CG_SUPPORT_GFX_MGLS |
1134 AMD_CG_SUPPORT_GFX_CP_LS |
1135 AMD_CG_SUPPORT_GFX_3D_CGCG |
1136 AMD_CG_SUPPORT_GFX_3D_CGLS |
1137 AMD_CG_SUPPORT_GFX_CGCG |
1138 AMD_CG_SUPPORT_GFX_CGLS |
1139 AMD_CG_SUPPORT_BIF_LS |
1140 AMD_CG_SUPPORT_HDP_LS |
1141 AMD_CG_SUPPORT_ROM_MGCG |
1142 AMD_CG_SUPPORT_MC_MGCG |
1143 AMD_CG_SUPPORT_MC_LS |
1144 AMD_CG_SUPPORT_SDMA_MGCG |
1145 AMD_CG_SUPPORT_SDMA_LS |
1146 AMD_CG_SUPPORT_VCN_MGCG;
1147
1148 adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1149 AMD_PG_SUPPORT_VCN |
1150 AMD_PG_SUPPORT_VCN_DPG;
1151 } else if (adev->pdev->device == 0x15d8) {
1152 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1153 AMD_CG_SUPPORT_GFX_MGLS |
1154 AMD_CG_SUPPORT_GFX_CP_LS |
1155 AMD_CG_SUPPORT_GFX_3D_CGCG |
1156 AMD_CG_SUPPORT_GFX_3D_CGLS |
1157 AMD_CG_SUPPORT_GFX_CGCG |
1158 AMD_CG_SUPPORT_GFX_CGLS |
1159 AMD_CG_SUPPORT_BIF_LS |
1160 AMD_CG_SUPPORT_HDP_LS |
1161 AMD_CG_SUPPORT_ROM_MGCG |
1162 AMD_CG_SUPPORT_MC_MGCG |
1163 AMD_CG_SUPPORT_MC_LS |
1164 AMD_CG_SUPPORT_SDMA_MGCG |
1165 AMD_CG_SUPPORT_SDMA_LS;
1166
1167 adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1168 AMD_PG_SUPPORT_MMHUB |
1169 AMD_PG_SUPPORT_VCN |
1170 AMD_PG_SUPPORT_VCN_DPG;
1171 } else {
1172 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1173 AMD_CG_SUPPORT_GFX_MGLS |
1174 AMD_CG_SUPPORT_GFX_RLC_LS |
1175 AMD_CG_SUPPORT_GFX_CP_LS |
1176 AMD_CG_SUPPORT_GFX_3D_CGCG |
1177 AMD_CG_SUPPORT_GFX_3D_CGLS |
1178 AMD_CG_SUPPORT_GFX_CGCG |
1179 AMD_CG_SUPPORT_GFX_CGLS |
1180 AMD_CG_SUPPORT_BIF_MGCG |
1181 AMD_CG_SUPPORT_BIF_LS |
1182 AMD_CG_SUPPORT_HDP_MGCG |
1183 AMD_CG_SUPPORT_HDP_LS |
1184 AMD_CG_SUPPORT_DRM_MGCG |
1185 AMD_CG_SUPPORT_DRM_LS |
1186 AMD_CG_SUPPORT_ROM_MGCG |
1187 AMD_CG_SUPPORT_MC_MGCG |
1188 AMD_CG_SUPPORT_MC_LS |
1189 AMD_CG_SUPPORT_SDMA_MGCG |
1190 AMD_CG_SUPPORT_SDMA_LS |
1191 AMD_CG_SUPPORT_VCN_MGCG;
1192
1193 adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1194 AMD_PG_SUPPORT_VCN |
1195 AMD_PG_SUPPORT_VCN_DPG;
1196 }
1197 break;
1198 case CHIP_ARCTURUS:
1199 adev->asic_funcs = &vega20_asic_funcs;
1200 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1201 AMD_CG_SUPPORT_GFX_MGLS |
1202 AMD_CG_SUPPORT_GFX_CGCG |
1203 AMD_CG_SUPPORT_GFX_CGLS |
1204 AMD_CG_SUPPORT_GFX_CP_LS |
1205 AMD_CG_SUPPORT_HDP_MGCG |
1206 AMD_CG_SUPPORT_HDP_LS |
1207 AMD_CG_SUPPORT_SDMA_MGCG |
1208 AMD_CG_SUPPORT_SDMA_LS |
1209 AMD_CG_SUPPORT_MC_MGCG |
1210 AMD_CG_SUPPORT_MC_LS |
1211 AMD_CG_SUPPORT_IH_CG;
1212 adev->pg_flags = 0;
1213 adev->external_rev_id = adev->rev_id + 0x32;
1214 break;
1215 case CHIP_RENOIR:
1216 adev->asic_funcs = &soc15_asic_funcs;
1217 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1218 AMD_CG_SUPPORT_GFX_MGLS |
1219 AMD_CG_SUPPORT_GFX_3D_CGCG |
1220 AMD_CG_SUPPORT_GFX_3D_CGLS |
1221 AMD_CG_SUPPORT_GFX_CGCG |
1222 AMD_CG_SUPPORT_GFX_CGLS |
1223 AMD_CG_SUPPORT_GFX_CP_LS |
1224 AMD_CG_SUPPORT_MC_MGCG |
1225 AMD_CG_SUPPORT_MC_LS |
1226 AMD_CG_SUPPORT_SDMA_MGCG |
1227 AMD_CG_SUPPORT_SDMA_LS |
1228 AMD_CG_SUPPORT_BIF_LS |
1229 AMD_CG_SUPPORT_HDP_LS |
1230 AMD_CG_SUPPORT_ROM_MGCG |
1231 AMD_CG_SUPPORT_VCN_MGCG |
1232 AMD_CG_SUPPORT_IH_CG |
1233 AMD_CG_SUPPORT_ATHUB_LS |
1234 AMD_CG_SUPPORT_ATHUB_MGCG |
1235 AMD_CG_SUPPORT_DF_MGCG;
1236 adev->pg_flags = AMD_PG_SUPPORT_SDMA |
1237 AMD_PG_SUPPORT_VCN |
1238 AMD_PG_SUPPORT_VCN_DPG;
1239 adev->external_rev_id = adev->rev_id + 0x91;
1240 break;
1241 default:
1242 /* FIXME: not supported yet */
1243 return -EINVAL;
1244 }
1245
1246 if (amdgpu_sriov_vf(adev)) {
1247 amdgpu_virt_init_setting(adev);
1248 xgpu_ai_mailbox_set_irq_funcs(adev);
1249 }
1250
1251 return 0;
1252}
1253
1254static int soc15_common_late_init(void *handle)
1255{
1256 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1257 int r = 0;
1258
1259 if (amdgpu_sriov_vf(adev))
1260 xgpu_ai_mailbox_get_irq(adev);
1261
1262 if (adev->nbio.funcs->ras_late_init)
1263 r = adev->nbio.funcs->ras_late_init(adev);
1264
1265 return r;
1266}
1267
1268static int soc15_common_sw_init(void *handle)
1269{
1270 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1271
1272 if (amdgpu_sriov_vf(adev))
1273 xgpu_ai_mailbox_add_irq_id(adev);
1274
1275 adev->df_funcs->sw_init(adev);
1276
1277 return 0;
1278}
1279
1280static int soc15_common_sw_fini(void *handle)
1281{
1282 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1283
1284 amdgpu_nbio_ras_fini(adev);
1285 adev->df_funcs->sw_fini(adev);
1286 return 0;
1287}
1288
1289static void soc15_doorbell_range_init(struct amdgpu_device *adev)
1290{
1291 int i;
1292 struct amdgpu_ring *ring;
1293
1294 /* sdma/ih doorbell ranges are programmed by the hypervisor */
1295 if (!amdgpu_sriov_vf(adev)) {
1296 for (i = 0; i < adev->sdma.num_instances; i++) {
1297 ring = &adev->sdma.instance[i].ring;
1298 adev->nbio.funcs->sdma_doorbell_range(adev, i,
1299 ring->use_doorbell, ring->doorbell_index,
1300 adev->doorbell_index.sdma_doorbell_range);
1301 }
1302
1303 adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
1304 adev->irq.ih.doorbell_index);
1305 }
1306}
1307
1308static int soc15_common_hw_init(void *handle)
1309{
1310 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1311
1312 /* enable pcie gen2/3 link */
1313 soc15_pcie_gen3_enable(adev);
1314 /* enable aspm */
1315 soc15_program_aspm(adev);
1316 /* setup nbio registers */
1317 adev->nbio.funcs->init_registers(adev);
1318 /* remap HDP registers to a hole in MMIO space,
1319 * in order to expose those registers
1320 * to process space
1321 */
1322 if (adev->nbio.funcs->remap_hdp_registers)
1323 adev->nbio.funcs->remap_hdp_registers(adev);
1324
1325 /* enable the doorbell aperture */
1326 soc15_enable_doorbell_aperture(adev, true);
1327 /* HW doorbell routing policy: doorbell writes not
1328 * in the SDMA/IH/MM/ACV ranges are routed to CP. So
1329 * we need to init the SDMA/IH/MM/ACV doorbell ranges prior
1330 * to CP IP block init and ring test.
1331 */
1332 soc15_doorbell_range_init(adev);
1333
1334 return 0;
1335}
1336
1337static int soc15_common_hw_fini(void *handle)
1338{
1339 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1340
1341 /* disable the doorbell aperture */
1342 soc15_enable_doorbell_aperture(adev, false);
1343 if (amdgpu_sriov_vf(adev))
1344 xgpu_ai_mailbox_put_irq(adev);
1345
1346 if (adev->nbio.ras_if &&
1347 amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
1348 if (adev->nbio.funcs->init_ras_controller_interrupt)
1349 amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
1350 if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
1351 amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
1352 }
1353
1354 return 0;
1355}
1356
1357static int soc15_common_suspend(void *handle)
1358{
1359 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1360
1361 return soc15_common_hw_fini(adev);
1362}
1363
1364static int soc15_common_resume(void *handle)
1365{
1366 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1367
1368 return soc15_common_hw_init(adev);
1369}
1370
1371static bool soc15_common_is_idle(void *handle)
1372{
1373 return true;
1374}
1375
1376static int soc15_common_wait_for_idle(void *handle)
1377{
1378 return 0;
1379}
1380
1381static int soc15_common_soft_reset(void *handle)
1382{
1383 return 0;
1384}
1385
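/*
 * Enable or disable HDP memory light sleep. Vega20 and Arcturus use the
 * renamed HDP_MEM_POWER_CTRL register, older ASICs use HDP_MEM_POWER_LS.
 */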
1386static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
1387{
1388 uint32_t def, data;
1389
1390 if (adev->asic_type == CHIP_VEGA20 ||
1391 adev->asic_type == CHIP_ARCTURUS) {
1392 def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
1393
1394 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1395 data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
1396 HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
1397 HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
1398 HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
1399 else
1400 data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
1401 HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
1402 HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
1403 HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
1404
1405 if (def != data)
1406 WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
1407 } else {
1408 def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
1409
1410 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1411 data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1412 else
1413 data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1414
1415 if (def != data)
1416 WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
1417 }
1418}
1419
1420static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
1421{
1422 uint32_t def, data;
1423
1424 def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1425
1426 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
1427 data &= ~(0x01000000 |
1428 0x02000000 |
1429 0x04000000 |
1430 0x08000000 |
1431 0x10000000 |
1432 0x20000000 |
1433 0x40000000 |
1434 0x80000000);
1435 else
1436 data |= (0x01000000 |
1437 0x02000000 |
1438 0x04000000 |
1439 0x08000000 |
1440 0x10000000 |
1441 0x20000000 |
1442 0x40000000 |
1443 0x80000000);
1444
1445 if (def != data)
1446 WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
1447}
1448
1449static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
1450{
1451 uint32_t def, data;
1452
1453 def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1454
1455 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1456 data |= 1;
1457 else
1458 data &= ~1;
1459
1460 if (def != data)
1461 WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
1462}
1463
1464static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1465 bool enable)
1466{
1467 uint32_t def, data;
1468
1469 def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
1470
1471 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
1472 data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1473 CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1474 else
1475 data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1476 CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1477
1478 if (def != data)
1479 WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
1480}
1481
1482static int soc15_common_set_clockgating_state(void *handle,
1483 enum amd_clockgating_state state)
1484{
1485 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1486
1487 if (amdgpu_sriov_vf(adev))
1488 return 0;
1489
1490 switch (adev->asic_type) {
1491 case CHIP_VEGA10:
1492 case CHIP_VEGA12:
1493 case CHIP_VEGA20:
1494 adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1495 state == AMD_CG_STATE_GATE ? true : false);
1496 adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1497 state == AMD_CG_STATE_GATE ? true : false);
1498 soc15_update_hdp_light_sleep(adev,
1499 state == AMD_CG_STATE_GATE ? true : false);
1500 soc15_update_drm_clock_gating(adev,
1501 state == AMD_CG_STATE_GATE ? true : false);
1502 soc15_update_drm_light_sleep(adev,
1503 state == AMD_CG_STATE_GATE ? true : false);
1504 soc15_update_rom_medium_grain_clock_gating(adev,
1505 state == AMD_CG_STATE_GATE ? true : false);
1506 adev->df_funcs->update_medium_grain_clock_gating(adev,
1507 state == AMD_CG_STATE_GATE ? true : false);
1508 break;
1509 case CHIP_RAVEN:
1510 case CHIP_RENOIR:
1511 adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1512 state == AMD_CG_STATE_GATE ? true : false);
1513 adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1514 state == AMD_CG_STATE_GATE ? true : false);
1515 soc15_update_hdp_light_sleep(adev,
1516 state == AMD_CG_STATE_GATE ? true : false);
1517 soc15_update_drm_clock_gating(adev,
1518 state == AMD_CG_STATE_GATE ? true : false);
1519 soc15_update_drm_light_sleep(adev,
1520 state == AMD_CG_STATE_GATE ? true : false);
1521 soc15_update_rom_medium_grain_clock_gating(adev,
1522 state == AMD_CG_STATE_GATE ? true : false);
1523 break;
1524 case CHIP_ARCTURUS:
1525 soc15_update_hdp_light_sleep(adev,
1526 state == AMD_CG_STATE_GATE ? true : false);
1527 break;
1528 default:
1529 break;
1530 }
1531 return 0;
1532}
1533
1534static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
1535{
1536 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1537 int data;
1538
1539 if (amdgpu_sriov_vf(adev))
1540 *flags = 0;
1541
1542 adev->nbio.funcs->get_clockgating_state(adev, flags);
1543
1544 /* AMD_CG_SUPPORT_HDP_LS */
1545 data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
1546 if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
1547 *flags |= AMD_CG_SUPPORT_HDP_LS;
1548
1549 /* AMD_CG_SUPPORT_DRM_MGCG */
1550 data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
1551 if (!(data & 0x01000000))
1552 *flags |= AMD_CG_SUPPORT_DRM_MGCG;
1553
1554 /* AMD_CG_SUPPORT_DRM_LS */
1555 data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
1556 if (data & 0x1)
1557 *flags |= AMD_CG_SUPPORT_DRM_LS;
1558
1559 /* AMD_CG_SUPPORT_ROM_MGCG */
1560 data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
1561 if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
1562 *flags |= AMD_CG_SUPPORT_ROM_MGCG;
1563
1564 adev->df_funcs->get_clockgating_state(adev, flags);
1565}
1566
1567static int soc15_common_set_powergating_state(void *handle,
1568 enum amd_powergating_state state)
1569{
1570 /* todo */
1571 return 0;
1572}
1573
1574const struct amd_ip_funcs soc15_common_ip_funcs = {
1575 .name = "soc15_common",
1576 .early_init = soc15_common_early_init,
1577 .late_init = soc15_common_late_init,
1578 .sw_init = soc15_common_sw_init,
1579 .sw_fini = soc15_common_sw_fini,
1580 .hw_init = soc15_common_hw_init,
1581 .hw_fini = soc15_common_hw_fini,
1582 .suspend = soc15_common_suspend,
1583 .resume = soc15_common_resume,
1584 .is_idle = soc15_common_is_idle,
1585 .wait_for_idle = soc15_common_wait_for_idle,
1586 .soft_reset = soc15_common_soft_reset,
1587 .set_clockgating_state = soc15_common_set_clockgating_state,
1588 .set_powergating_state = soc15_common_set_powergating_state,
1589 .get_clockgating_state = soc15_common_get_clockgating_state,
1590};