/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "navi10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "smuio_v11_0.h"
#include "smuio_v13_0.h"
#include "amdgpu_vkms.h"
#include "mxgpu_ai.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

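/*
 * MP0 misc clock-gating register offsets, defined locally for use by the
 * DRM clock-gating and light-sleep helpers below (soc15_update_drm_*).
 */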
#define mmMP0_MISC_CGTT_CTRL0				0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX			0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL			0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX		0

static const struct amd_ip_funcs soc15_common_ip_funcs;

/* Vega, Raven, Arcturus */
static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static const struct amdgpu_video_codecs vega_video_codecs_encode =
{
	.codec_count = ARRAY_SIZE(vega_video_codecs_encode_array),
	.codec_array = vega_video_codecs_encode_array,
};

/* Vega */
static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
};

static const struct amdgpu_video_codecs vega_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(vega_video_codecs_decode_array),
	.codec_array = vega_video_codecs_decode_array,
};

/* Raven */
static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
};

static const struct amdgpu_video_codecs rv_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(rv_video_codecs_decode_array),
	.codec_array = rv_video_codecs_decode_array,
};

/* Renoir, Arcturus */
static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs rn_video_codecs_decode =
{
	.codec_count = ARRAY_SIZE(rn_video_codecs_decode_array),
	.codec_array = rn_video_codecs_decode_array,
};

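/*
 * Pick the codec caps reported to userspace based on the multimedia IP
 * version: ASICs with a VCE 4.x block (Vega) use the Vega tables, VCN 1.x
 * parts (Raven) add VP9 decode, and VCN 2.x parts (Renoir, Arcturus,
 * Aldebaran) raise the HEVC/VP9 decode limits to 8K.
 */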
static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
				    const struct amdgpu_video_codecs **codecs)
{
	if (adev->ip_versions[VCE_HWIP][0]) {
		switch (adev->ip_versions[VCE_HWIP][0]) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			if (encode)
				*codecs = &vega_video_codecs_encode;
			else
				*codecs = &vega_video_codecs_decode;
			return 0;
		default:
			return -EINVAL;
		}
	} else {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			if (encode)
				*codecs = &vega_video_codecs_encode;
			else
				*codecs = &rv_video_codecs_decode;
			return 0;
		case IP_VERSION(2, 5, 0):
		case IP_VERSION(2, 6, 0):
		case IP_VERSION(2, 2, 0):
			if (encode)
				*codecs = &vega_video_codecs_encode;
			else
				*codecs = &rn_video_codecs_decode;
			return 0;
		default:
			return -EINVAL;
		}
	}
}

/*
 * Indirect register accessors
 */
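/*
 * These go through the NBIO-provided PCIE_INDEX/PCIE_DATA register pair:
 * the target register offset is written to the index register and the
 * value is then read from or written to the data register.  The
 * amdgpu_device_indirect_* helpers handle the required locking.
 */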
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, address, data, reg);
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}

static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}

static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}

static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;

	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) ||
	    adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1))
		return 10000;
	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
	    adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
		return reference_clock / 4;

	return reference_clock;
}

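/**
 * soc15_grbm_select - select the GRBM context to access
 *
 * @adev: amdgpu_device pointer
 * @me: micro engine id
 * @pipe: pipe id
 * @queue: queue id
 * @vmid: VMID
 *
 * Programs GRBM_GFX_CNTL so that subsequent GFX register accesses target
 * the given ME/pipe/queue/VMID combination.
 */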
void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

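/*
 * Registers that userspace is allowed to read through the read_register
 * ASIC callback; soc15_read_register() rejects anything outside this
 * list with -EINVAL.
 */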
static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
			return adev->gfx.config.db_debug2;
		return RREG32(reg_offset);
	}
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (adev->reg_offset[en->hwip][en->inst] &&
		    reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
				   + en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = (entry->hwip == GC_HWIP) ?
				RREG32_SOC15_IP(GC, reg) : RREG32(reg);

			tmp &= ~(entry->and_mask);
			tmp |= (entry->or_mask & entry->and_mask);
		}

		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
			WREG32_RLC(reg, tmp);
		else
			(entry->hwip == GC_HWIP) ?
				WREG32_SOC15_IP(GC, reg, tmp) : WREG32(reg, tmp);
	}
}
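
/*
 * A typical golden-register entry, as found in the per-IP golden lists
 * (illustrative values only):
 *
 *   SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400)
 *
 * Bits set in the AND mask are replaced with the corresponding OR-mask
 * bits; an AND mask of 0xffffffff overwrites the whole register.
 */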

static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/* avoid NBIF getting stuck when doing RAS recovery in BACO reset */
	if (ras && adev->ras_enabled)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	ret = amdgpu_dpm_baco_reset(adev);
	if (ret)
		return ret;

	/* re-enable doorbell interrupt after BACO exit */
	if (ras && adev->ras_enabled)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}

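/*
 * Reset method selection: an explicit amdgpu_reset_method module parameter
 * takes precedence (with mode2 forced for CPU-connected XGMI parts);
 * otherwise a per-MP1-version default is chosen below.
 */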
static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset = false;
	bool connected_to_cpu = false;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)
		connected_to_cpu = true;

	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
	    amdgpu_reset_method == AMD_RESET_METHOD_PCI) {
		/* If connected to cpu, the driver only supports mode2 */
		if (connected_to_cpu)
			return AMD_RESET_METHOD_MODE2;
		return amdgpu_reset_method;
	}

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		return AMD_RESET_METHOD_MODE2;
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_VEGA20) {
			if (adev->psp.sos.fw_version >= 0x80067)
				baco_reset = amdgpu_dpm_is_baco_supported(adev);
			/*
			 * 1. PMFW version > 0x284300: all cases use baco
			 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
			 */
			if (ras && adev->ras_enabled &&
			    adev->pm.fw_version <= 0x283400)
				baco_reset = false;
		} else {
			baco_reset = amdgpu_dpm_is_baco_supported(adev);
		}
		break;
	case IP_VERSION(13, 0, 2):
		/*
		 * 1. connected to cpu: driver issues mode2 reset
		 * 2. discrete gpu: driver issues mode1 reset
		 */
		if (connected_to_cpu)
			return AMD_RESET_METHOD_MODE2;
		break;
	default:
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int soc15_asic_reset(struct amdgpu_device *adev)
{
	/* original raven doesn't have full asic reset */
	if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
	    (adev->apu_flags & AMD_APU_IS_RAVEN2))
		return 0;

	switch (soc15_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_PCI:
		dev_info(adev->dev, "PCI reset\n");
		return amdgpu_device_pci_reset(adev);
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");
		return soc15_asic_baco_reset(adev);
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		return amdgpu_dpm_mode2_reset(adev);
	default:
		dev_info(adev->dev, "MODE1 reset\n");
		return amdgpu_device_mode1_reset(adev);
	}
}

static bool soc15_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_VEGA20) {
			if (adev->psp.sos.fw_version >= 0x80067)
				return amdgpu_dpm_is_baco_supported(adev);
			return false;
		} else {
			return amdgpu_dpm_is_baco_supported(adev);
		}
		break;
	default:
		return false;
	}
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
				u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (!amdgpu_device_should_use_aspm(adev))
		return;

	if (!(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->program_aspm))
		adev->nbio.funcs->program_aspm(adev);
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static void soc15_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	case CHIP_RENOIR:
		/* It's safe to do IP discovery here for Renoir,
		 * as it doesn't support SRIOV. */
		if (amdgpu_discovery) {
			r = amdgpu_discovery_reg_base_init(adev);
			if (r == 0)
				break;
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
		}
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		break;
	default:
		DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
		break;
	}
}

void soc15_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_ai_virt_ops;

	/* init soc15 reg base early enough so we can
	 * request full access for sriov before
	 * set_ip_blocks. */
	soc15_reg_base_init(adev);
}

static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}

static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

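/*
 * Same counting protocol as soc15_get_pcie_usage() above, but Vega20 uses
 * the TXCLK3 counter block and event 108 for posted requests.
 */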
static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				  uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 108 is # of posted requests sent on VG20 */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT1_SEL, 108);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
}

static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	/* CP hangs in IGT reloading test on RN, reset as a workaround */
	if (adev->asic_type == CHIP_RENOIR)
		return true;

	/* Just return false for soc15 GPUs. Reset does not seem to
	 * be necessary.
	 */
	if (!amdgpu_passthrough(adev))
		return false;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e the number of replays */
	return (nak_r + nak_g);
}

static void soc15_pre_asic_init(struct amdgpu_device *adev)
{
	gmc_v9_0_restore_registers(adev);
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega10_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
	.pre_asic_init = &soc15_pre_asic_init,
	.query_video_codecs = &soc15_query_video_codecs,
};

static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega20_doorbell_index_init,
	.get_pcie_usage = &vega20_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
	.pre_asic_init = &soc15_pre_asic_init,
	.query_video_codecs = &soc15_query_video_codecs,
};

static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
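	/* MMIO_REG_HOLE_OFFSET is the last page of the first 512KiB of the
	 * register BAR; the HDP flush registers are remapped there in
	 * soc15_common_hw_init() so they can be exposed to process space.
	 */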
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev)) {
		adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
		adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	}
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->pcie_rreg64 = &soc15_pcie_rreg64;
	adev->pcie_wreg64 = &soc15_pcie_wreg64;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->rev_id = soc15_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	/* TODO: split the CG and PG flags based on the IP version to which
	 * they apply.
	 */
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case IP_VERSION(9, 2, 1):
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case IP_VERSION(9, 4, 0):
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
		adev->asic_funcs = &soc15_asic_funcs;

		if (adev->rev_id >= 0x8)
			adev->apu_flags |= AMD_APU_IS_RAVEN2;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			adev->external_rev_id = adev->rev_id + 0x79;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			adev->external_rev_id = adev->rev_id + 0x41;
		else if (adev->rev_id == 1)
			adev->external_rev_id = adev->rev_id + 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		} else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			/*
			 * MMHUB PG needs to be disabled for Picasso for
			 * stability reasons.
			 */
			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_VCN;
		} else {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_RLC_LS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_MGCG |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_MGCG |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_DRM_MGCG |
				AMD_CG_SUPPORT_DRM_LS |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}
		break;
	case IP_VERSION(9, 4, 1):
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case IP_VERSION(9, 3, 0):
		adev->asic_funcs = &soc15_asic_funcs;

		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			adev->external_rev_id = adev->rev_id + 0x91;
		else
			adev->external_rev_id = adev->rev_id + 0xa1;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_DF_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		break;
	case IP_VERSION(9, 4, 2):
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	return 0;
}

static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	if (adev->df.funcs &&
	    adev->df.funcs->sw_init)
		adev->df.funcs->sw_init(adev);

	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->df.funcs &&
	    adev->df.funcs->sw_fini)
		adev->df.funcs->sw_fini(adev);
	return 0;
}

static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring;

	/* sdma/ih doorbell ranges are programmed by the hypervisor */
	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			ring = &adev->sdma.instance[i].ring;
			adev->nbio.funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
		}

		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
						    adev->irq.ih.doorbell_index);
	}
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of exposing those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
		adev->nbio.funcs->remap_hdp_registers(adev);

	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);
	/* HW doorbell routing policy: doorbell writes not in the
	 * SDMA/IH/MM/ACV ranges are routed to CP, so we need to init
	 * the SDMA/IH/MM/ACV doorbell ranges prior to CP IP block
	 * init and ring test.
	 */
	soc15_doorbell_range_init(adev);

	return 0;
}

static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	if (adev->nbio.ras_if &&
	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
		if (adev->nbio.ras &&
		    adev->nbio.ras->init_ras_controller_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
		if (adev->nbio.ras &&
		    adev->nbio.ras->init_ras_err_event_athub_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
	}

	return 0;
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}

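/*
 * Bits 24-31 of MP0_MISC_CGTT_CTRL0 appear to be per-client clock-gating
 * override bits (the field layout is not in the headers included here):
 * clearing them enables DRM medium-grain clock gating, setting them
 * disables it.
 */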
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static int soc15_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
	case IP_VERSION(7, 4, 0):
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->smuio.funcs->update_rom_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->df.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	adev->hdp.funcs->get_clock_gating_state(adev, flags);

	if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2)) {
		/* AMD_CG_SUPPORT_DRM_MGCG */
		data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
		if (!(data & 0x01000000))
			*flags |= AMD_CG_SUPPORT_DRM_MGCG;

		/* AMD_CG_SUPPORT_DRM_LS */
		data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
		if (data & 0x1)
			*flags |= AMD_CG_SUPPORT_DRM_LS;
	}

	/* AMD_CG_SUPPORT_ROM_MGCG */
	adev->smuio.funcs->get_clock_gating_state(adev, flags);

	adev->df.funcs->get_clockgating_state(adev, flags);
}

static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}

static const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};