/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"
#include "umc_v8_7.h"

#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "gfxhub_v2_1.h"
#include "mmhub_v2_0.h"
#include "mmhub_v2_3.h"
#include "athub_v2_0.h"
#include "athub_v2_1.h"

#include "amdgpu_reset.h"

#if 0
static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
{
	/* TODO add golden setting for hdp */
};
#endif

static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	bool write_fault = !!(entry->src_data[1] & 0x20);
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	struct amdgpu_task_info task_info;
	uint32_t status = 0;
	u64 addr;

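	/*
	 * The IV carries the page-aligned fault address split across two
	 * dwords: bits 43:12 in src_data[0] and bits 47:44 in the low
	 * nibble of src_data[1]. For example (illustrative values),
	 * src_data[0] = 0x12345678 with src_data[1] & 0xf = 0x3
	 * reassembles to the 48-bit address 0x312345678000.
	 */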
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault) {
		/* Returning 1 here also prevents sending the IV to the KFD */

		/* Process it only if it's the first fault for this address */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
					     entry->timestamp))
			return 1;

		/* Delegate it to a different ring if the hardware hasn't
		 * already done it.
		 */
		if (entry->ih == &adev->irq.ih) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* Try to handle the recoverable page faults by filling page
		 * tables
		 */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
			return 1;
	}

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
		    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (!printk_ratelimit())
		return 0;

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
		"for process %s pid %d thread %s pid %d)\n",
		entry->vmid_src ? "mmhub" : "gfxhub",
		entry->src_id, entry->ring_id, entry->vmid,
		entry->pasid, task_info.process_name, task_info.tgid,
		task_info.task_name, task_info.pid);
	dev_err(adev->dev, "  in page starting at address 0x%016llx from client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	if (!amdgpu_sriov_vf(adev))
		hub->vmhub_funcs->print_l2_protection_fault_status(adev,
								   status);

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
	.set = gmc_v10_0_vm_fault_interrupt_state,
	.process = gmc_v10_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
	.set = gmc_v10_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
	}
}

/**
 * gmc_v10_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)));
}

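/*
 * Reading the per-VMID ATC mapping register below yields the PASID in the
 * low bits plus a VALID flag. For illustration, assuming the usual layout
 * (PASID in bits 15:0, VALID in bit 31), a raw value of 0x80001234 reports
 * a valid mapping of the vmid to pasid 0x1234.
 */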
static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

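/**
 * gmc_v10_0_flush_vm_hub - flush the VM TLB of one hub via MMIO
 *
 * @adev: amdgpu_device pointer
 * @vmid: VM instance to flush
 * @vmhub: which hub (AMDGPU_GFXHUB_0 or AMDGPU_MMHUB_0/1)
 * @flush_type: the flush type
 *
 * Register sequence: optionally acquire the per-engine invalidation
 * semaphore, write the invalidate request, poll the per-VMID ACK bit,
 * then release the semaphore.
 */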
static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned int i;
	unsigned char hub_ip = 0;

	hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
		 GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPU can lose the gpuvm invalidate acknowledge state across a
	 * power-gating off cycle. To work around this, acquire the semaphore
	 * before the invalidation and release it afterwards, so the hub is
	 * not power gated in between.
	 */

	/* TODO: the semaphore path still needs debugging before it can be
	 * enabled for the GFXHUB as well.
	 */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
						hub->eng_distance * eng, hub_ip);

			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
			  hub->eng_distance * eng,
			  inv_req, hub_ip);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if ((vmhub == AMDGPU_GFXHUB_0) &&
	    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
		RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
				  hub->eng_distance * eng, hub_ip);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
					hub->eng_distance * eng, hub_ip);

		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: the semaphore path still needs debugging for the GFXHUB as well. */
	if (use_semaphore)
		/*
		 * Release the semaphore after the invalidation;
		 * writing 0 means semaphore release.
		 */
		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
				  hub->eng_distance * eng, 0, hub_ip);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush hub: %d!\n", vmhub);
}

/**
 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: vmhub type
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence;
	struct amdgpu_job *job;

	int r;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* During SRIOV runtime the driver shouldn't access registers through
	 * MMIO directly; use the KIQ to do the VM invalidation instead.
	 */
	if (adev->gfx.kiq.ring.sched.ready && !adev->enable_mes &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    down_read_trylock(&adev->reset_domain->sem)) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);

		up_read(&adev->reset_domain->sem);
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);

	if (vmhub == AMDGPU_MMHUB_0) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	BUG_ON(vmhub != AMDGPU_GFXHUB_0);

	if (!adev->mman.buffer_funcs_enabled ||
	    !adev->ib_pool_ready ||
	    amdgpu_in_reset(adev) ||
	    ring->sched.ready == false) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	/* The SDMA on Navi has a bug which can theoretically result in memory
	 * corruption if an invalidation happens at the same time as a VA
	 * translation. Avoid this by doing the invalidation from the SDMA
	 * itself.
	 */
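	/*
	 * The IB below is just a padded NOP; the invalidation itself happens
	 * as part of the job's VM flush (vm_needs_flush with the GART page
	 * directory) when the SDMA ring executes it.
	 */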
	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
				     &job);
	if (r)
		goto error_alloc;

	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
	job->vm_needs_flush = true;
	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_submit;

	mutex_unlock(&adev->mman.gtt_window_lock);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return;

error_submit:
	amdgpu_job_free(job);

error_alloc:
	mutex_unlock(&adev->mman.gtt_window_lock);
	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}

/**
 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: Used with PACKET3_INVALIDATE_TLBS_ALL_HUB()
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					      pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {

		ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
								&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v10_0_flush_gpu_tlb(adev, vmid,
								i, flush_type);
			} else {
				gmc_v10_0_flush_gpu_tlb(adev, vmid,
							AMDGPU_GFXHUB_0, flush_type);
			}
			break;
		}
	}

	return 0;
}

static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPU can lose the gpuvm invalidate acknowledge state across a
	 * power-gating off cycle. To work around this, emit a semaphore
	 * acquire before the invalidation and a semaphore release after it,
	 * so the hub is not power gated in between.
	 */

	/* TODO: the semaphore path still needs debugging before it can be
	 * enabled for the GFXHUB as well.
	 */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: the semaphore path still needs debugging for the GFXHUB as well. */
	if (use_semaphore)
		/*
		 * Release the semaphore after the invalidation;
		 * writing 0 means semaphore release.
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	/* MES fw manages IH_VMID_x_LUT updating */
	if (ring->is_mes_queue)
		return;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on NAVI 10:
 * 63:59 reserved
 * 58 reserved and for sienna_cichlid is used for MALL noalloc
 * 57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on NAVI 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */
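/*
 * Worked example (illustrative value): on the layout above, a PTE of
 * 0x0000000123456077 decodes as page base 0x123456 (physical address
 * 0x123456000), fragment 0, write | read | exe, snooped, system and valid.
 */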

static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}

static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size (9 -> 4KB << 9 = 2MB blocks) */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	*flags &= ~AMDGPU_PTE_NOALLOC;
	*flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);

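	/*
	 * PRT (partially resident texture) mappings have no backing pages:
	 * setting the PRT bit while clearing VALID lets the hardware handle
	 * accesses through the PRT path instead of raising a VM fault.
	 */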
	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}
}

static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;
		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.map_mtype = gmc_v10_0_map_mtype,
	.get_vm_pde = gmc_v10_0_get_vm_pde,
	.get_vm_pte = gmc_v10_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}

static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(8, 7, 0):
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v8_7_ras;
		break;
	default:
		break;
	}
	if (adev->umc.ras) {
		amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);

		strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
		adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
		adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;

		/* If no special ras_late_init function is defined, use the default */
		if (!adev->umc.ras->ras_block.ras_late_init)
			adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;

		/* If no special ras_cb function is defined, use the default */
		if (!adev->umc.ras->ras_block.ras_cb)
			adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
	}
}

static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 4, 0):
	case IP_VERSION(2, 4, 1):
		adev->mmhub.funcs = &mmhub_v2_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v2_0_funcs;
		break;
	}
}

static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
		break;
	}
}

static int gmc_v10_0_early_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_mmhub_funcs(adev);
	gmc_v10_0_set_gfxhub_funcs(adev);
	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);
	gmc_v10_0_set_umc_funcs(adev);

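	/*
	 * Fixed 4 GiB shared and private apertures at canonical GPUVM
	 * addresses (presumably matching the HSA/KFD flat addressing layout).
	 */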
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	r = amdgpu_gmc_ras_early_init(adev);
	if (r)
		return r;

	return 0;
}

static int gmc_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->gfxhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
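	/*
	 * e.g. on a hive, physical node 1 with a 256 GiB segment size would
	 * place its FB at a 256 GiB offset (illustrative numbers).
	 */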

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* add the xgmi offset of the physical node */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}

/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* the NBIO memsize register reports the VRAM size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size: default to 512MB unless overridden by the
	 * amdgpu.gartsize module parameter
	 */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

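	/* one 64-bit PTE per 4 KiB GPU page */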
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v10_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
		adev->gmc.vram_width = 64;
	} else if (amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
	} else {
		r = amdgpu_atomfirmware_get_vram_info(adev,
						      &vram_width, &vram_type, &vram_vendor);
		adev->gmc.vram_width = vram_width;

		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 3, 0):
		adev->gmc.mall_size = 128 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 2):
		adev->gmc.mall_size = 96 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 4):
		adev->gmc.mall_size = 32 * 1024 * 1024;
		break;
	case IP_VERSION(10, 3, 5):
		adev->gmc.mall_size = 16 * 1024 * 1024;
		break;
	default:
		adev->gmc.mall_size = 0;
		break;
	}

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		adev->num_vmhubs = 2;
		/*
		 * Set up 4-level page tables: a VM size of 256TB (48 bits),
		 * the maximum for Navi10/Navi14/Navi12, with a block size
		 * of 512 entries (9 bits).
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v10_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
}

/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->init_registers(adev);

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v10_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v10_0_init_golden_registers(adev);

	/*
	 * Harvestable groups in gc_utcl2 need to be programmed before any GFX
	 * block register setup within GMC; otherwise the system hangs when an
	 * SA is harvested.
	 */
	if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
		adev->gfxhub.funcs->utcl2_harvest(adev);

	r = gmc_v10_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1) {
		r = amdgpu_gmc_vram_checking(adev);
		if (r)
			return r;
	}

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	return 0;
}

static int gmc_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_hw_fini(adev);

	return 0;
}

static int gmc_v10_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v10_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v10_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v10. */
	return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v10. */
	return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * MMHUB failing to disconnect from the DF when MMHUB clock gating is
	 * disabled is a new problem observed on DF 3.0.3; the same suspend
	 * sequence has not shown any issue on the DF 3.0.2 series platforms.
	 */
	if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) {
		dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n");
		return 0;
	}

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
		return athub_v2_1_set_clockgating(adev, state);
	else
		return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3) ||
	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 4))
		return;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
		athub_v2_1_get_clockgating(adev, flags);
	else
		athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.soft_reset = gmc_v10_0_soft_reset,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};