/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "navi10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "smuio_v11_0.h"
#include "smuio_v13_0.h"
#include "amdgpu_vkms.h"
#include "mxgpu_ai.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

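/*
 * These MP0 (PSP) registers are presumably not exported by the mp_9_0
 * register headers included above, so their offsets are defined locally.
 */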
#define mmMP0_MISC_CGTT_CTRL0			0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX		0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL		0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX	0

static const struct amd_ip_funcs soc15_common_ip_funcs;

/* Vega, Raven, Arcturus */
static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static const struct amdgpu_video_codecs vega_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(vega_video_codecs_encode_array),
	.codec_array = vega_video_codecs_encode_array,
};

/* Vega */
static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
};

static const struct amdgpu_video_codecs vega_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(vega_video_codecs_decode_array),
	.codec_array = vega_video_codecs_decode_array,
};

/* Raven */
static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
};

static const struct amdgpu_video_codecs rv_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(rv_video_codecs_decode_array),
	.codec_array = rv_video_codecs_decode_array,
};

/* Renoir, Arcturus */
static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs rn_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(rn_video_codecs_decode_array),
	.codec_array = rn_video_codecs_decode_array,
};

static const struct amdgpu_video_codec_info vcn_4_0_3_video_codecs_decode_array[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_decode = {
	.codec_count = ARRAY_SIZE(vcn_4_0_3_video_codecs_decode_array),
	.codec_array = vcn_4_0_3_video_codecs_decode_array,
};

static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_encode = {
	.codec_count = 0,
	.codec_array = NULL,
};

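/*
 * Report the video codec capabilities for this ASIC. Chips with VCE
 * (Vega) are keyed off the VCE IP version; everything else (Raven,
 * Renoir, Arcturus, VCN 4.0.3) is keyed off the UVD/VCN IP version.
 */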
static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
				    const struct amdgpu_video_codecs **codecs)
{
	if (adev->ip_versions[VCE_HWIP][0]) {
		switch (adev->ip_versions[VCE_HWIP][0]) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			if (encode)
				*codecs = &vega_video_codecs_encode;
			else
				*codecs = &vega_video_codecs_decode;
			return 0;
		default:
			return -EINVAL;
		}
	} else {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			if (encode)
				*codecs = &vega_video_codecs_encode;
			else
				*codecs = &rv_video_codecs_decode;
			return 0;
		case IP_VERSION(2, 5, 0):
		case IP_VERSION(2, 6, 0):
		case IP_VERSION(2, 2, 0):
			if (encode)
				*codecs = &vega_video_codecs_encode;
			else
				*codecs = &rn_video_codecs_decode;
			return 0;
		case IP_VERSION(4, 0, 3):
			if (encode)
				*codecs = &vcn_4_0_3_video_codecs_encode;
			else
				*codecs = &vcn_4_0_3_video_codecs_decode;
			return 0;
		default:
			return -EINVAL;
		}
	}
}

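/*
 * Indirect register accessors: each of the helpers below goes through
 * an INDEX/DATA register pair, so a spinlock is held across the pair
 * of MMIO accesses to keep the index and data writes atomic with
 * respect to each other.
 */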
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

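/*
 * Return the reference clock (xclk) in 10 kHz units. Note the
 * APU-specific quirks below: MP1 12.0.x parts report a fixed
 * 100 MHz reference, and MP1 10.0.x parts divide the SPLL
 * reference clock by four.
 */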
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;

	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) ||
	    adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1))
		return 10000;
	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
	    adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
		return reference_clock / 4;

	return reference_clock;
}

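/*
 * Select the GRBM target (ME/pipe/queue/VMID) for subsequent per-queue
 * register accesses on the given XCC instance. Callers are expected to
 * serialize this themselves, typically under adev->srbm_mutex.
 */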
void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid, int xcc_id)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, xcc_id, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

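/*
 * Registers that may be read through the read_register asic callback
 * (used to service userspace register-read queries); anything not in
 * this list is rejected by soc15_read_register() below.
 */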
static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
			return adev->gfx.config.db_debug2;
		return RREG32(reg_offset);
	}
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (!adev->reg_offset[en->hwip][en->inst])
			continue;
		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = (entry->hwip == GC_HWIP) ?
				RREG32_SOC15_IP(GC, reg) : RREG32(reg);

			tmp &= ~(entry->and_mask);
			tmp |= (entry->or_mask & entry->and_mask);
		}

		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
			WREG32_RLC(reg, tmp);
		else
			(entry->hwip == GC_HWIP) ?
				WREG32_SOC15_IP(GC, reg, tmp) : WREG32(reg, tmp);
	}
}

static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/* avoid NBIF getting stuck when doing RAS recovery in BACO reset */
	if (ras && adev->ras_enabled)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	ret = amdgpu_dpm_baco_reset(adev);
	if (ret)
		return ret;

	/* re-enable doorbell interrupt after BACO exit */
	if (ras && adev->ras_enabled)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}

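/*
 * Pick the reset method for this ASIC: honor a supported
 * amdgpu_reset_method module parameter if one was given, otherwise
 * choose between BACO, mode1 and mode2 based on the MP1 IP version,
 * RAS state and whether the GPU is XGMI-connected to the CPU.
 */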
static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset = false;
	bool connected_to_cpu = false;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)
		connected_to_cpu = true;

	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
	    amdgpu_reset_method == AMD_RESET_METHOD_PCI) {
		/* If connected to cpu, the driver only supports mode2 */
		if (connected_to_cpu)
			return AMD_RESET_METHOD_MODE2;
		return amdgpu_reset_method;
	}

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		return AMD_RESET_METHOD_MODE2;
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_VEGA20) {
			if (adev->psp.sos.fw_version >= 0x80067)
				baco_reset = amdgpu_dpm_is_baco_supported(adev);
			/*
			 * 1. PMFW version > 0x284300: all cases use baco
			 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
			 */
			if (ras && adev->ras_enabled &&
			    adev->pm.fw_version <= 0x283400)
				baco_reset = false;
		} else {
			baco_reset = amdgpu_dpm_is_baco_supported(adev);
		}
		break;
	case IP_VERSION(13, 0, 2):
		/*
		 * 1. connected to cpu: driver issues mode2 reset
		 * 2. discrete gpu: driver issues mode1 reset
		 */
		if (connected_to_cpu)
			return AMD_RESET_METHOD_MODE2;
		break;
	case IP_VERSION(13, 0, 6):
		/* Use gpu_recovery param to target a reset method.
		 * Enable triggering of GPU reset only if specified
		 * by module parameter.
		 */
		if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5)
			return AMD_RESET_METHOD_MODE2;
		else
			return AMD_RESET_METHOD_NONE;
	default:
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int soc15_asic_reset(struct amdgpu_device *adev)
{
	/* original raven doesn't have full asic reset */
	if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
	    (adev->apu_flags & AMD_APU_IS_RAVEN2))
		return 0;

	switch (soc15_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_PCI:
		dev_info(adev->dev, "PCI reset\n");
		return amdgpu_device_pci_reset(adev);
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");
		return soc15_asic_baco_reset(adev);
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		return amdgpu_dpm_mode2_reset(adev);
	default:
		dev_info(adev->dev, "MODE1 reset\n");
		return amdgpu_device_mode1_reset(adev);
	}
}

static bool soc15_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_VEGA20) {
			if (adev->psp.sos.fw_version >= 0x80067)
				return amdgpu_dpm_is_baco_supported(adev);
			return false;
		} else {
			return amdgpu_dpm_is_baco_supported(adev);
		}
	default:
		return false;
	}
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			       u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (!amdgpu_device_should_use_aspm(adev))
		return;

	if (!(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->program_aspm))
		adev->nbio.funcs->program_aspm(adev);
}

const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};

static void soc15_reg_base_init(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		break;
	default:
		DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
		break;
	}
}

void soc15_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_ai_virt_ops;

	/* init soc15 reg base early enough so we can
	 * request full access for sriov before
	 * set_ip_blocks.
	 */
	soc15_reg_base_init(adev);
}

static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}

static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

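/* Same as soc15_get_pcie_usage() above, except that Vega20 uses the
 * TXCLK3 perf counter block and event 108 for posted requests sent.
 */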
static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				  uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 108 is # of posted requests sent on VG20 */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT1_SEL, 108);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
}

static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	/* CP hangs in the IGT reloading test on RN; reset as a workaround */
	if (adev->asic_type == CHIP_RENOIR)
		return true;

	/* Just return false for soc15 GPUs. Reset does not seem to
	 * be necessary.
	 */
	if (!amdgpu_passthrough(adev))
		return false;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys
	 * driver and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

static void soc15_pre_asic_init(struct amdgpu_device *adev)
{
	gmc_v9_0_restore_registers(adev);
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega10_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
	.pre_asic_init = &soc15_pre_asic_init,
	.query_video_codecs = &soc15_query_video_codecs,
};

static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega20_doorbell_index_init,
	.get_pcie_usage = &vega20_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
	.pre_asic_init = &soc15_pre_asic_init,
	.query_video_codecs = &soc15_query_video_codecs,
};

static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &aqua_vanjaram_doorbell_index_init,
	.get_pcie_usage = &vega20_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
	.pre_asic_init = &soc15_pre_asic_init,
	.query_video_codecs = &soc15_query_video_codecs,
	.encode_ext_smn_addressing = &aqua_vanjaram_encode_ext_smn_addressing,
};

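/*
 * Early init runs before the other IP blocks: it wires up the register
 * accessor callbacks and derives the clock/power gating feature flags
 * (cg_flags/pg_flags) and external revision id from the GC IP version.
 */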
static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev)) {
		adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
		adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	}
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &amdgpu_device_indirect_rreg;
	adev->pcie_wreg = &amdgpu_device_indirect_wreg;
	adev->pcie_rreg_ext = &amdgpu_device_indirect_rreg_ext;
	adev->pcie_wreg_ext = &amdgpu_device_indirect_wreg_ext;
	adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
	adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->rev_id = amdgpu_device_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	/* TODO: split the CG and PG flags based on the IP version to which
	 * they apply.
	 */
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case IP_VERSION(9, 2, 1):
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case IP_VERSION(9, 4, 0):
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
		adev->asic_funcs = &soc15_asic_funcs;

		if (adev->rev_id >= 0x8)
			adev->apu_flags |= AMD_APU_IS_RAVEN2;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			adev->external_rev_id = adev->rev_id + 0x79;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			adev->external_rev_id = adev->rev_id + 0x41;
		else if (adev->rev_id == 1)
			adev->external_rev_id = adev->rev_id + 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		} else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			/*
			 * MMHUB PG needs to be disabled for Picasso for
			 * stability reasons.
			 */
			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_VCN;
		} else {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_RLC_LS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_MGCG |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_MGCG |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_DRM_MGCG |
				AMD_CG_SUPPORT_DRM_LS |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}
		break;
	case IP_VERSION(9, 4, 1):
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case IP_VERSION(9, 3, 0):
		adev->asic_funcs = &soc15_asic_funcs;

		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			adev->external_rev_id = adev->rev_id + 0x91;
		else
			adev->external_rev_id = adev->rev_id + 0xa1;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_DF_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		break;
	case IP_VERSION(9, 4, 2):
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case IP_VERSION(9, 4, 3):
		adev->asic_funcs = &aqua_vanjaram_asic_funcs;
		adev->cg_flags =
			AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_GFX_FGCG | AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags =
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		adev->external_rev_id = adev->rev_id + 0x46;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	/* Enable the selfring doorbell aperture late because the doorbell
	 * BAR aperture will change if BAR resizing succeeds in gmc sw_init.
	 */
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);

	return 0;
}

static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	if (adev->df.funcs &&
	    adev->df.funcs->sw_init)
		adev->df.funcs->sw_init(adev);

	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->df.funcs &&
	    adev->df.funcs->sw_fini)
		adev->df.funcs->sw_fini(adev);
	return 0;
}

static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;

	/* the sdma doorbell range is programmed by the hypervisor */
	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			adev->nbio.funcs->sdma_doorbell_range(adev, i,
				true, adev->doorbell_index.sdma_engine[i] << 1,
				adev->doorbell_index.sdma_doorbell_range);
		}
	}
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * in order to expose those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
		adev->nbio.funcs->remap_hdp_registers(adev);

	/* enable the doorbell aperture */
	adev->nbio.funcs->enable_doorbell_aperture(adev, true);

	/* HW doorbell routing policy: doorbell writes not
	 * in the SDMA/IH/MM/ACV ranges are routed to CP, so
	 * we need to init the SDMA doorbell range prior
	 * to the CP ip block init and ring test. IH already
	 * happens before CP.
	 */
	soc15_sdma_doorbell_range_init(adev);

	return 0;
}

static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Disable the doorbell aperture and selfring doorbell aperture
	 * separately in hw_fini because soc15_enable_doorbell_aperture
	 * has been removed and there is no need to delay disabling
	 * selfring doorbell.
	 */
	adev->nbio.funcs->enable_doorbell_aperture(adev, false);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	if (adev->nbio.ras_if &&
	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
		if (adev->nbio.ras &&
		    adev->nbio.ras->init_ras_controller_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
		if (adev->nbio.ras &&
		    adev->nbio.ras->init_ras_err_event_athub_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
	}

	return 0;
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}

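/*
 * The register headers included here apparently don't expose field
 * definitions for MP0_MISC_CGTT_CTRL0, so the mask below is written
 * out as raw bits: clearing bits 31:24 enables DRM medium-grain clock
 * gating, setting them forces the clocks on.
 */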
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static int soc15_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
	case IP_VERSION(7, 4, 0):
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->smuio.funcs->update_rom_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->df.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	adev->hdp.funcs->get_clock_gating_state(adev, flags);

	if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2)) {

		/* AMD_CG_SUPPORT_DRM_MGCG */
		data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
		if (!(data & 0x01000000))
			*flags |= AMD_CG_SUPPORT_DRM_MGCG;

		/* AMD_CG_SUPPORT_DRM_LS */
		data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
		if (data & 0x1)
			*flags |= AMD_CG_SUPPORT_DRM_LS;
	}

	/* AMD_CG_SUPPORT_ROM_MGCG */
	adev->smuio.funcs->get_clock_gating_state(adev, flags);

	adev->df.funcs->get_clockgating_state(adev, flags);
}

static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}

static const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};