/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v11_0.h"
#include "umc_v8_10.h"
#include "athub/athub_3_0_0_sh_mask.h"
#include "athub/athub_3_0_0_offset.h"
#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"
#include "oss/osssys_6_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "nbio_v4_3.h"
#include "gfxhub_v3_0.h"
#include "gfxhub_v3_0_3.h"
#include "gfxhub_v11_5_0.h"
#include "mmhub_v3_0.h"
#include "mmhub_v3_0_1.h"
#include "mmhub_v3_0_2.h"
#include "mmhub_v3_3.h"
#include "athub_v3_0.h"

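/* Nothing needs to be programmed for the ECC interrupt here; this stub
 * only satisfies the amdgpu_irq_src_funcs .set interface.
 */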
static int gmc_v11_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned int type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int
gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned int type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix && (adev->in_runpm || adev->in_suspend ||
				       amdgpu_in_reset(adev)))
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
		/* GFX HUB */
		/* This works because this interrupt is only
		 * enabled at init/resume and disabled in
		 * fini/suspend, so the overall state doesn't
		 * change over the course of suspend/resume.
		 */
		if (!adev->in_s0ix)
			amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
			       AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
	uint32_t status = 0;
	u64 addr;

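	/* Reconstruct the faulting page address: src_data[0] carries bits
	 * 43:12 of the 4K-aligned address, and the low nibble of
	 * src_data[1] carries bits 47:44.
	 */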
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB(0))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

		amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
					     entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info *task_info;

		dev_err(adev->dev,
			"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
			entry->vmid_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
		task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
		if (task_info) {
			amdgpu_vm_print_task_info(adev, task_info);
			amdgpu_vm_put_task_info(task_info);
		}

		dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);

		/* Only print the L2 fault status if the status register could
		 * be read and contains useful information.
		 */
		if (status != 0)
			hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v11_0_irq_funcs = {
	.set = gmc_v11_0_vm_fault_interrupt_state,
	.process = gmc_v11_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v11_0_ecc_funcs = {
	.set = gmc_v11_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v11_0_ecc_funcs;
	}
}

/**
 * gmc_v11_0_use_invalidate_semaphore - determine whether to use the
 * invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 */
static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB0(0)) &&
		(!amdgpu_sriov_vf(adev)));
}

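/**
 * gmc_v11_0_get_vmid_pasid_mapping_info - look up the PASID mapped to a VMID
 *
 * @adev: amdgpu_device pointer
 * @vmid: VMID to look up
 * @p_pasid: returns the PASID read from the IH VMID LUT
 *
 * Returns true if the VMID has a non-zero PASID mapping.
 */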
static bool gmc_v11_0_get_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;

	return !!(*p_pasid);
}

/**
 * gmc_v11_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	/* Use register 17 for GART */
	const unsigned int eng = 17;
	unsigned char hub_ip;
	u32 sem, req, ack;
	unsigned int i;
	u32 tmp;

	if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
		return;

	sem = hub->vm_inv_eng0_sem + hub->eng_distance * eng;
	req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
	ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

	/* flush hdp cache */
	amdgpu_device_flush_hdp(adev, NULL);

	/* This is necessary for SRIOV as well as for GFXOFF to function
	 * properly under bare metal.
	 */
	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
						 1 << vmid, GET_INST(GC, 0));
		return;
	}

	/* This path is needed before KIQ/MES/GFXOFF are set up */
	hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating cycle, so acquire a semaphore before the invalidation
	 * and release it afterwards to keep the hub from entering the
	 * power-gated state and work around the issue.
	 */

	/* TODO: Semaphore handling for the GFXHUB still needs to be debugged. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means the semaphore was acquired */
			tmp = RREG32_RLC_NO_KIQ(sem, hub_ip);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(req, inv_req, hub_ip);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(ack, hub_ip);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: Semaphore handling for the GFXHUB still needs to be debugged. */
	if (use_semaphore)
		WREG32_RLC_NO_KIQ(sem, 0, hub_ip);

	/* Issue an additional private vm invalidation to the MMHUB */
	if ((vmhub != AMDGPU_GFXHUB(0)) &&
	    (hub->vm_l2_bank_select_reserved_cid2) &&
	    !amdgpu_sriov_vf(adev)) {
		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
		/* bit 25: RSERVED_CACHE_PRIVATE_INVALIDATION */
		inv_req |= (1 << 25);
		/* Issue private invalidation */
		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
		/* Read back to ensure the invalidation is done */
		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i >= adev->usec_timeout)
		dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v11_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					  uint16_t pasid, uint32_t flush_type,
					  bool all_hub, uint32_t inst)
{
	uint16_t queried;
	int vmid, i;

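	/* Walk VMIDs 1-15 (VMID 0 is reserved for the system) and flush
	 * every VMID currently mapped to this PASID.
	 */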
	for (vmid = 1; vmid < 16; vmid++) {
		bool valid;

		valid = gmc_v11_0_get_vmid_pasid_mapping_info(adev, vmid,
							      &queried);
		if (!valid || queried != pasid)
			continue;

		if (all_hub) {
			for_each_set_bit(i, adev->vmhubs_mask,
					 AMDGPU_MAX_VMHUBS)
				gmc_v11_0_flush_gpu_tlb(adev, vmid, i,
							flush_type);
		} else {
			gmc_v11_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
						flush_type);
		}
	}
}

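/**
 * gmc_v11_0_emit_flush_gpu_tlb - emit a TLB flush on a ring
 *
 * @ring: ring to emit the flush on
 * @vmid: vm instance to flush
 * @pd_addr: page directory base address for @vmid
 *
 * Emits the register writes and waits that update the page table base
 * address and invalidate the TLB from within a command stream.
 * Returns the page directory address.
 */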
static uint64_t gmc_v11_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned int vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v11_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned int eng = ring->vm_inv_eng;

	/*
	 * The GPUVM invalidate acknowledge state may be lost across a
	 * power-gating cycle, so acquire a semaphore before the invalidation
	 * and release it afterwards to keep the hub from entering the
	 * power-gated state and work around the issue.
	 */

	/* TODO: Semaphore handling for the GFXHUB still needs to be debugged. */
	if (use_semaphore)
		/* a read return value of 1 means the semaphore was acquired */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: Semaphore handling for the GFXHUB still needs to be debugged. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

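/**
 * gmc_v11_0_emit_pasid_mapping - emit a VMID-to-PASID mapping update
 *
 * @ring: ring to emit the write on
 * @vmid: VMID being mapped
 * @pasid: PASID to map to @vmid
 *
 * Writes @pasid into the IH VMID LUT of the hub the ring belongs to.
 */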
static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
					 unsigned int pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format:
 * 63:59 reserved
 * 58:57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

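/*
 * For example (illustrative only), a valid, snooped, readable and writable
 * system page would set bits 0 (valid), 1 (system), 2 (snooped), 5 (read)
 * and 6 (write), with the MTYPE encoded in bits 50:48 and the 4K-aligned
 * physical address in bits 47:12.
 */
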
static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_bo *bo,
				 uint32_t vm_flags,
				 uint64_t *flags)
{
	if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
		*flags |= AMDGPU_PTE_EXECUTABLE;
	else
		*flags &= ~AMDGPU_PTE_EXECUTABLE;

	switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
	case AMDGPU_VM_MTYPE_NC:
	default:
		*flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_WC:
		*flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC);
		break;
	case AMDGPU_VM_MTYPE_CC:
		*flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_CC);
		break;
	case AMDGPU_VM_MTYPE_UC:
		*flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
		break;
	}

	if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
		*flags |= AMDGPU_PTE_NOALLOC;
	else
		*flags &= ~AMDGPU_PTE_NOALLOC;

	if (vm_flags & AMDGPU_VM_PAGE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	if (bo && bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
			       AMDGPU_GEM_CREATE_EXT_COHERENT |
			       AMDGPU_GEM_CREATE_UNCACHED))
		*flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
}

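/**
 * gmc_v11_0_get_vbios_fb_size - size of the pre-OS framebuffer in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * If VGA mode is enabled, return the fixed VGA allocation; otherwise
 * compute the size of the scanout surface the VBIOS left behind as
 * viewport height * pitch * 4 bytes per pixel.
 */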
static unsigned int gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL);
	unsigned int size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v11_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
	.get_vm_pde = gmc_v11_0_get_vm_pde,
	.get_vm_pte = gmc_v11_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
};

static void gmc_v11_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v11_0_gmc_funcs;
}

static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(8, 10, 0):
		adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
		adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
		adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM;
		if (adev->umc.node_inst_num == 4)
			adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl_ext0[0][0][0];
		else
			adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0];
		adev->umc.ras = &umc_v8_10_ras;
		break;
	case IP_VERSION(8, 11, 0):
		break;
	default:
		break;
	}
}

static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(3, 0, 1):
		adev->mmhub.funcs = &mmhub_v3_0_1_funcs;
		break;
	case IP_VERSION(3, 0, 2):
		adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
		break;
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
		adev->mmhub.funcs = &mmhub_v3_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v3_0_funcs;
		break;
	}
}

static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 3):
		adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs;
		break;
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
		adev->gfxhub.funcs = &gfxhub_v11_5_0_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v3_0_funcs;
		break;
	}
}

static int gmc_v11_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	gmc_v11_0_set_gfxhub_funcs(adev);
	gmc_v11_0_set_mmhub_funcs(adev);
	gmc_v11_0_set_gmc_funcs(adev);
	gmc_v11_0_set_irq_funcs(adev);
	gmc_v11_0_set_umc_funcs(adev);

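	/* The shared and private apertures each span 4 GB of the GPU's
	 * 64-bit virtual address space.
	 */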
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

	return 0;
}

static int gmc_v11_0_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->mmhub.funcs->get_fb_location(adev);

	amdgpu_gmc_set_agp_default(adev, mc);
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_HIGH);
	if (!amdgpu_sriov_vf(adev) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)) &&
	    (amdgpu_agp == 1))
		amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	if (amdgpu_sriov_vf(adev))
		adev->vm_manager.vram_base_offset = 0;
	else
		adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}

/**
 * gmc_v11_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v11_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the VRAM size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v11_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v11_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

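	/* Each GART page table entry is 8 bytes */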
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = ip_block->adev;

	adev->mmhub.funcs->init(adev);

	adev->gfxhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type, &vram_vendor);
	adev->gmc.vram_width = vram_width;

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;

	/* The mall_size is already calculated as mall_size_per_umc * num_umc.
	 * However, for gfx1151, which features a 2-to-1 UMC mapping,
	 * the result must be multiplied by 2 to determine the actual mall size.
	 */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 5, 1):
		adev->gmc.mall_size *= 2;
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 5, 3):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is the VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v11_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v11_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = adev->gfx.disable_kq ? 1 : 8;

	amdgpu_vm_manager_init(adev);

	r = amdgpu_gmc_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * gmc_v11_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v11_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_vm_manager_fini(adev);
	gmc_v11_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];

		WREG32(hub->vm_contexts_disable, 0);
		return;
	}
}

/**
 * gmc_v11_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	/* Flush HDP after it is initialized */
	amdgpu_device_flush_hdp(adev, NULL);

	value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;

	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned int)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}

static int gmc_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	adev->gmc.flush_pasid_uses_kiq = !amdgpu_emu_mode;

	/* The sequence of these two function calls matters. */
	gmc_v11_0_init_golden_registers(adev);

	r = gmc_v11_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v11_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v11_0_gart_disable(struct amdgpu_device *adev)
{
	adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	if (adev->gmc.ecc_irq.funcs &&
	    amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);

	gmc_v11_0_gart_disable(adev);

	return 0;
}

static int gmc_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
	gmc_v11_0_hw_fini(ip_block);

	return 0;
}

static int gmc_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = gmc_v11_0_hw_init(ip_block);
	if (r)
		return r;

	amdgpu_vmid_reset_all(ip_block->adev);

	return 0;
}

static bool gmc_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	/* MC is always ready in GMC v11. */
	return true;
}

static int gmc_v11_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	/* There is no need to wait for MC idle in GMC v11. */
	return 0;
}

static int gmc_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	return athub_v3_0_set_clockgating(adev, state);
}

static void gmc_v11_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v3_0_get_clockgating(adev, flags);
}

static int gmc_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v11_0_ip_funcs = {
	.name = "gmc_v11_0",
	.early_init = gmc_v11_0_early_init,
	.sw_init = gmc_v11_0_sw_init,
	.hw_init = gmc_v11_0_hw_init,
	.late_init = gmc_v11_0_late_init,
	.sw_fini = gmc_v11_0_sw_fini,
	.hw_fini = gmc_v11_0_hw_fini,
	.suspend = gmc_v11_0_suspend,
	.resume = gmc_v11_0_resume,
	.is_idle = gmc_v11_0_is_idle,
	.wait_for_idle = gmc_v11_0_wait_for_idle,
	.set_clockgating_state = gmc_v11_0_set_clockgating_state,
	.set_powergating_state = gmc_v11_0_set_powergating_state,
	.get_clockgating_state = gmc_v11_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v11_0_ip_funcs,
};