/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v11_0.h"
#include "umc_v8_10.h"
#include "athub/athub_3_0_0_sh_mask.h"
#include "athub/athub_3_0_0_offset.h"
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
#include "oss/osssys_6_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "nbio_v4_3.h"
#include "gfxhub_v3_0.h"
#include "gfxhub_v3_0_3.h"
#include "mmhub_v3_0.h"
#include "mmhub_v3_0_1.h"
#include "mmhub_v3_0_2.h"
#include "athub_v3_0.h"

static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int
gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;

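	/*
	 * Reassemble the 48-bit faulting address: src_data[0] carries bits
	 * 43:12 of the page address and the low nibble of src_data[1]
	 * carries bits 47:44.
	 */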
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v11_0_irq_funcs = {
	.set = gmc_v11_0_vm_fault_interrupt_state,
	.process = gmc_v11_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v11_0_ecc_funcs = {
	.set = gmc_v11_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs;
	}
}

/**
 * gmc_v11_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 * Return: true if the TLB invalidation should be guarded by the semaphore.
 */
static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0) &&
		(!amdgpu_sriov_vf(adev)));
}

static bool gmc_v11_0_get_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;

	return !!(*p_pasid);
}

/*
 * GART
 * VMID 0 holds the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use invalidation engine 17 for GART flushes */
	const unsigned eng = 17;
	unsigned int i;
	unsigned char hub_ip = 0;

	hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
		GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating cycle, so acquire the semaphore before the
	 * invalidation and release it afterwards to avoid entering a
	 * power-gated state in between, as a workaround for this issue.
	 */

	/* TODO: Keep debugging the semaphore path so it can be enabled for GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means the semaphore was acquired */
			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
						hub->eng_distance * eng, hub_ip);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
					hub->eng_distance * eng, hub_ip);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: Keep debugging the semaphore path so it can be enabled for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * Release the semaphore after the invalidation;
		 * writing 0 means semaphore release.
		 */
		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
				  hub->eng_distance * eng, 0, hub_ip);

	/* Issue an additional private vm invalidation to the MMHUB */
	if ((vmhub != AMDGPU_GFXHUB_0) &&
	    (hub->vm_l2_bank_select_reserved_cid2) &&
	    !amdgpu_sriov_vf(adev)) {
		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
		/* bit 25: RESERVED_CACHE_PRIVATE_INVALIDATION */
		inv_req |= (1 << 25);
		/* Issue the private invalidation */
		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
		/* Read back to ensure the invalidation is done */
		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
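	/*
	 * If the GFX hub is powered down, its TLBs are reinitialized on
	 * power-up, so there is nothing to flush here.
	 */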
	if ((vmhub == AMDGPU_GFXHUB_0) && !adev->gfx.is_poweron)
		return;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/*
	 * At SRIOV run time, the driver shouldn't access these registers
	 * through MMIO; use the KIQ to do the VM invalidation instead.
	 */
	if ((adev->gfx.kiq.ring.sched.ready || adev->mes.ring.sched.ready) &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);
	gmc_v11_0_flush_vm_hub(adev, vmid, vmhub, 0);
	mutex_unlock(&adev->mman.gtt_window_lock);
}

/**
 * gmc_v11_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

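	/*
	 * Preferred path: have the KIQ invalidate by PASID directly.
	 * Otherwise, fall back to scanning the VMID-to-PASID mapping
	 * registers below and flushing every VMID that matches.
	 */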
	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					      pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	for (vmid = 1; vmid < 16; vmid++) {
		ret = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
							    &queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v11_0_flush_gpu_tlb(adev, vmid,
								i, flush_type);
			} else {
				gmc_v11_0_flush_gpu_tlb(adev, vmid,
							AMDGPU_GFXHUB_0, flush_type);
			}
		}
	}

	return 0;
}

static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating cycle, so acquire the semaphore before the
	 * invalidation and release it afterwards to avoid entering a
	 * power-gated state in between, as a workaround for this issue.
	 */

	/* TODO: Keep debugging the semaphore path so it can be enabled for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means the semaphore was acquired */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: Keep debugging the semaphore path so it can be enabled for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * Release the semaphore after the invalidation;
		 * writing 0 means semaphore release.
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* MES fw manages IH_VMID_x_LUT updating */
	if (ring->is_mes_queue)
		return;

	if (ring->vm_hub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format:
 * 63:59 reserved
 * 58:57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}

static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
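	/* PDE addresses must be 64-byte aligned and fit within 48 bits. */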
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size: BFS(9) = 512 4K pages = 2MB */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	*flags &= ~AMDGPU_PTE_NOALLOC;
	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
			       AMDGPU_GEM_CREATE_UNCACHED))
		*flags = (*flags & ~AMDGPU_PTE_MTYPE_NV10_MASK) |
			 AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
}

static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

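		/*
		 * Estimate the scanout buffer size as viewport height *
		 * surface pitch * 4 bytes per pixel.
		 */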
		viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v11_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
	.map_mtype = gmc_v11_0_map_mtype,
	.get_vm_pde = gmc_v11_0_get_vm_pde,
	.get_vm_pte = gmc_v11_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
};

static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs;
}

static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(8, 10, 0):
		adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
		adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
		adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
		if (adev->umc.node_inst_num == 4)
			adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0];
		else
			adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0];
		adev->umc.ras = &umc_v8_10_ras;
		break;
	case IP_VERSION(8, 11, 0):
		break;
	default:
		break;
	}
}

static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(3, 0, 1):
		adev->mmhub.funcs = &mmhub_v3_0_1_funcs;
		break;
	case IP_VERSION(3, 0, 2):
		adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v3_0_funcs;
		break;
	}
}

static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 3):
		adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
		break;
	}
}

static int gmc_v11_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v11_0_set_gfxhub_funcs(adev);
	gmc_v11_0_set_mmhub_funcs(adev);
	gmc_v11_0_set_gmc_funcs(adev);
	gmc_v11_0_set_irq_funcs(adev);
	gmc_v11_0_set_umc_funcs(adev);

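	/* Reserve 4 GB shared and private apertures at fixed GPUVM addresses. */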
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v11_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->mmhub.funcs->get_fb_location(adev);

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	if (amdgpu_sriov_vf(adev))
		adev->vm_manager.vram_base_offset = 0;
	else
		adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}

/**
 * gmc_v11_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the VRAM size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size; -1 means auto-select, defaulting to 512 MiB */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v11_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

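	/* Each GART PTE is 8 bytes; GART pages are mapped uncached and executable. */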
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v11_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type, &vram_vendor);
	adev->gmc.vram_width = vram_width;

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		adev->num_vmhubs = 2;
		/*
		 * To support 4-level page tables, use the maximum vm size of
		 * 256 TB (48 bits) with a block size of 512 (9 bits).
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF */
		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

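	/*
	 * The GPU addresses system memory with a 44-bit DMA mask; pages
	 * outside that range need SWIOTLB bouncing.
	 */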
	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v11_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v11_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	r = amdgpu_gmc_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * gmc_v11_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v11_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v11_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];

		WREG32(hub->vm_contexts_disable, 0);
	}
}

/**
 * gmc_v11_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v11_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v11_0_init_golden_registers(adev);

	r = gmc_v11_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v11_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
{
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v11_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v11_0_gart_disable(adev);

	return 0;
}

static int gmc_v11_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v11_0_hw_fini(adev);

	return 0;
}

static int gmc_v11_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v11_0_hw_init(adev);
	if (r)
		return r;

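	/* VMID to PASID mappings were lost over suspend; reset all VMIDs. */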
	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v11_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v11. */
	return true;
}

static int gmc_v11_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v11. */
	return 0;
}

static int gmc_v11_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v11_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	return athub_v3_0_set_clockgating(adev, state);
}

static void gmc_v11_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v3_0_get_clockgating(adev, flags);
}

static int gmc_v11_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v11_0_ip_funcs = {
	.name = "gmc_v11_0",
	.early_init = gmc_v11_0_early_init,
	.sw_init = gmc_v11_0_sw_init,
	.hw_init = gmc_v11_0_hw_init,
	.late_init = gmc_v11_0_late_init,
	.sw_fini = gmc_v11_0_sw_fini,
	.hw_fini = gmc_v11_0_hw_fini,
	.suspend = gmc_v11_0_suspend,
	.resume = gmc_v11_0_resume,
	.is_idle = gmc_v11_0_is_idle,
	.wait_for_idle = gmc_v11_0_wait_for_idle,
	.soft_reset = gmc_v11_0_soft_reset,
	.set_clockgating_state = gmc_v11_0_set_clockgating_state,
	.set_powergating_state = gmc_v11_0_set_powergating_state,
	.get_clockgating_state = gmc_v11_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v11_0_ip_funcs,
};