/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

#include "amdgpu_atombios.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v8_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");

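/*
 * Golden register tables are { register offset, and_mask, or_value }
 * triplets consumed by amdgpu_device_program_register_sequence(): the bits
 * in and_mask are cleared and or_value is applied on top (an all-ones mask
 * means a straight register write).
 */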
static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmATC_MISC_CG, 0xffffffff, 0x000c0200,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_stoney_common[] =
{
	mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004,
	mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000
};

static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_stoney_common,
							ARRAY_SIZE(golden_settings_stoney_common));
		break;
	default:
		break;
	}
}

static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v8_0_wait_for_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_POLARIS11:
		if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
		    ASICID_IS_P31(adev->pdev->device, adev->pdev->revision))
			chip_name = "polaris11_k";
		else
			chip_name = "polaris11";
		break;
	case CHIP_POLARIS10:
		if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision))
			chip_name = "polaris10_k";
		else
			chip_name = "polaris10";
		break;
	case CHIP_POLARIS12:
		if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
			chip_name = "polaris12_k";
		} else {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
			/* Polaris12 32bit ASIC needs a special MC firmware */
			if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
				chip_name = "polaris12_32";
			else
				chip_name = "polaris12";
		}
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_VEGAM:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gmc.fw);

out:
	if (err) {
		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gmc.fw);
		adev->gmc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v8_0_tonga_mc_load_microcode - load tonga MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards: the vbios
	 * programs it during asic_init there, and on a VF the
	 * hypervisor loads it for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
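	/* the io_debug section is a list of (index, data) register pairs,
	 * two 32-bit words per entry, hence the division by 4 * 2 below */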
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 data;
	int i, ucode_size, regs_size;

	/* Skip MC ucode loading on SR-IOV capable boards: the vbios
	 * programs it during asic_init there, and on a VF the
	 * hypervisor loads it for this adapter.
	 */
	if (amdgpu_sriov_bios(adev))
		return 0;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

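	/* clear bit 0x40 of MC_SEQ_MISC0 before loading; this appears to be
	 * a load-handshake flag, with completion signalled via bit 0x80
	 * polled at the end of this function */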
	data = RREG32(mmMC_SEQ_MISC0);
	data &= ~(0x40);
	WREG32(mmMC_SEQ_MISC0, data);

	/* load mc io regs */
	for (i = 0; i < regs_size; i++) {
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
	}

	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

	/* load the MC ucode */
	for (i = 0; i < ucode_size; i++)
		WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

	/* put the engine back into the active state */
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
	WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

	/* wait for training to complete */
	for (i = 0; i < adev->usec_timeout; i++) {
		data = RREG32(mmMC_SEQ_MISC0);
		if (data & 0x80)
			break;
		udelay(1);
	}

	return 0;
}

static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
	u64 base = 0;

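	/* the low 16 bits of MC_VM_FB_LOCATION hold the framebuffer base
	 * in 16 MB (1 << 24) units, hence the shift below; on an SR-IOV
	 * VF the register is owned by the host, so keep base at 0 */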
	if (!amdgpu_sriov_vf(adev))
		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
	base <<= 24;

	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v8_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);

	if (amdgpu_sriov_vf(adev)) {
		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
		WREG32(mmMC_VM_FB_LOCATION, tmp);
		/* XXX double check these! */
		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	}

	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (gmc_v8_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	int r;
	u32 tmp;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
			chansize = 64;
		} else {
			chansize = 32;
		}
		tmp = RREG32(mmMC_SHARED_CHMAP);
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
	/* size in MB on vi */
	tmp = RREG32(mmCONFIG_MEMSIZE);
	/* some boards may have garbage in the upper 16 bits */
	if (tmp & 0xffff0000) {
		DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
		if (tmp & 0xffff)
			tmp &= 0xffff;
	}
	adev->gmc.mc_vram_size = tmp * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_POLARIS10: /* all engines support GPUVM */
		case CHIP_POLARIS11: /* all engines support GPUVM */
		case CHIP_POLARIS12: /* all engines support GPUVM */
		case CHIP_VEGAM:     /* all engines support GPUVM */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

/**
 * gmc_v8_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: type of flush
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub)
{
	int vmid;
	unsigned int tmp;

	if (amdgpu_in_reset(adev))
		return -EIO;

	for (vmid = 1; vmid < 16; vmid++) {

		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
		    (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
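			/* request the invalidate for this VMID and read the
			 * response register back so the write is posted */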
			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
			RREG32(mmVM_INVALIDATE_RESPONSE);
			break;
		}
	}

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: type of flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	uint32_t reg;

	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

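/*
 * The IH block keeps a per-VMID PASID lookup table; writing it from the
 * ring lets later faults on this VMID be attributed to the right process.
 */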
static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					unsigned pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}

/*
 * PTE format on VI:
 * 63:40 reserved
 * 39:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 reserved
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on VI:
 * 63:59 block fragment size
 * 58:40 reserved
 * 39:1 physical base address of PTE
 * bits 5:1 must be 0.
 * 0 valid
 */
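/*
 * Worked example: a valid, readable, writable, snooped 4k system page at
 * physical address 0x1234000 encodes as
 * 0x1234000 | (1 << 6) | (1 << 5) | (1 << 2) | (1 << 1) | (1 << 0)
 * = 0x1234067.
 */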

static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
	*flags &= ~AMDGPU_PTE_PRT;
}

/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v8_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

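	/* with PRT enabled, the four PRT apertures span the whole VA range
	 * except the reserved pages at either end; when disabled they are
	 * collapsed to an empty range (low > high) */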
	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	int r, i;
	u32 tmp, field;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
	if (r)
		return r;

	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	/* the hw field is log2(pages per block) relative to the minimum
	 * 9-bit (512 page) block, hence the subtraction */
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_flush_gpu_tlb(adev, 0, 0, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VI PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: debug logging only - no functional use
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
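	/* the MCCLIENT register packs a four-character client tag, most
	 * significant byte first */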
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, pasid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}

static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gmc_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);
		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}

	return size;
}

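/* Fiji and VEGAM expose MC_SEQ_MISC0 at a different register offset */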
#define mmMC_SEQ_MISC0_FIJI 0xA71

static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->num_vmhubs = 1;

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if ((adev->asic_type == CHIP_FIJI) ||
		    (adev->asic_type == CHIP_VEGAM))
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * The minimum VM size requested here is 64 GB; the max GPUVM
	 * address space on VI is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		pr_warn("No suitable DMA available\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
					  GFP_KERNEL);
	if (!adev->gmc.vm_fault_info)
		return -ENOMEM;
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);

	return 0;
}

static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	release_firmware(adev->gmc.fw);
	adev->gmc.fw = NULL;

	return 0;
}

static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_tonga_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	} else if (adev->asic_type == CHIP_POLARIS11 ||
		   adev->asic_type == CHIP_POLARIS10 ||
		   adev->asic_type == CHIP_POLARIS12) {
		r = gmc_v8_0_polaris_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* poll the MC/VMC busy bits in SRBM_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool gmc_v8_0_check_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}
	if (srbm_soft_reset) {
		adev->gmc.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->gmc.srbm_soft_reset = 0;
		return false;
	}
}

static int gmc_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_stop(adev);
	if (gmc_v8_0_wait_for_idle(adev))
		dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

	return 0;
}

static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->gmc.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->gmc.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int gmc_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->gmc.srbm_soft_reset)
		return 0;

	gmc_v8_0_mc_resume(adev);
	return 0;
}

static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client, vmid;

	if (amdgpu_sriov_vf(adev)) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " Can't decode VM fault info here on SRIOV VF\n");
		return 0;
	}

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
			entry->src_id, entry->src_data[0], task_info.process_name,
			task_info.tgid, task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
			addr);
		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     VMID);
	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
		&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
		u32 protections = REG_GET_FIELD(status,
					VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);

		info->vmid = vmid;
		info->mc_id = REG_GET_FIELD(status,
					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					    MEMORY_CLIENT_ID);
		info->status = status;
		info->page_addr = addr;
		info->prot_valid = protections & 0x7 ? true : false;
		info->prot_read = protections & 0x8 ? true : false;
		info->prot_write = protections & 0x10 ? true : false;
		info->prot_exec = protections & 0x20 ? true : false;
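		/* make sure the fault info is fully written before
		 * signalling KFD via vm_fault_info_updated */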
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	return 0;
}

static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}

static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_MC_MGCG */
	data = RREG32(mmMC_HUB_MISC_HUB_CG);
	if (data & MC_HUB_MISC_HUB_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.name = "gmc_v8_0",
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.check_soft_reset = gmc_v8_0_check_soft_reset,
	.pre_soft_reset = gmc_v8_0_pre_soft_reset,
	.soft_reset = gmc_v8_0_soft_reset,
	.post_soft_reset = gmc_v8_0_post_soft_reset,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
};

static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v8_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
	.set_prt = gmc_v8_0_set_prt,
	.get_vm_pde = gmc_v8_0_get_vm_pde,
	.get_vm_pte = gmc_v8_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v8_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &gmc_v8_0_ip_funcs,
};