Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * Copyright 2022 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23#include "amdgpu.h"
24#include "gfxhub_v1_2.h"
25#include "gfxhub_v1_1.h"
26
27#include "gc/gc_9_4_3_offset.h"
28#include "gc/gc_9_4_3_sh_mask.h"
29#include "vega10_enum.h"
30
31#include "soc15_common.h"
32
/* Power-on default values used as the base when programming the VM L2
 * control registers in gfxhub_v1_2_init_cache_regs().
 */
#define regVM_L2_CNTL3_DEFAULT 0x80100007
#define regVM_L2_CNTL4_DEFAULT 0x000000c1
35
36static u64 gfxhub_v1_2_get_mc_fb_offset(struct amdgpu_device *adev)
37{
38 return (u64)RREG32_SOC15(GC, 0, regMC_VM_FB_OFFSET) << 24;
39}
40
/*
 * gfxhub_v1_2_setup_vm_pt_regs - program the page table base for one VMID
 * @adev: amdgpu device pointer
 * @vmid: VM context whose base address registers are written
 * @page_table_base: 64-bit GPU address of the root page directory
 *
 * Splits the 64-bit base into the LO32/HI32 register pair.  The CONTEXT0
 * registers serve as the array base; hub->ctx_addr_distance is the
 * per-VMID register stride.
 */
static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev,
					 uint32_t vmid,
					 uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];

	WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    hub->ctx_addr_distance * vmid,
			    lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    hub->ctx_addr_distance * vmid,
			    upper_32_bits(page_table_base));
}
55
56static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev)
57{
58 uint64_t pt_base;
59
60 if (adev->gmc.pdb0_bo)
61 pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
62 else
63 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
64
65 gfxhub_v1_2_setup_vm_pt_regs(adev, 0, pt_base);
66
67 /* If use GART for FB translation, vmid0 page table covers both
68 * vram and system memory (gart)
69 */
70 if (adev->gmc.pdb0_bo) {
71 WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
72 (u32)(adev->gmc.fb_start >> 12));
73 WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
74 (u32)(adev->gmc.fb_start >> 44));
75
76 WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
77 (u32)(adev->gmc.gart_end >> 12));
78 WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
79 (u32)(adev->gmc.gart_end >> 44));
80 } else {
81 WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
82 (u32)(adev->gmc.gart_start >> 12));
83 WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
84 (u32)(adev->gmc.gart_start >> 44));
85
86 WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
87 (u32)(adev->gmc.gart_end >> 12));
88 WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
89 (u32)(adev->gmc.gart_end >> 44));
90 }
91}
92
/*
 * gfxhub_v1_2_init_system_aperture_regs - program AGP and system apertures
 * @adev: amdgpu device pointer
 *
 * Sets up the AGP aperture bounds, the system aperture window, the default
 * scratch page address, the protection fault default address, and (when
 * the GART covers VRAM) disables the FB/AGP apertures entirely.
 */
static void gfxhub_v1_2_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;
	uint32_t tmp;

	/* Program the AGP BAR; BOT/TOP are in 16 MB (>> 24) units. */
	WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_BASE, 0);
	WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
	WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

	/* Skipped for SRIOV VFs on ASICs newer than VEGA10 — presumably
	 * these registers are owned/programmed by the host there; confirm
	 * against the SRIOV register ownership rules.
	 */
	if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
		/* Program the system aperture low logical page number
		 * (256 KB, >> 18, granularity).
		 */
		WREG32_SOC15_RLC(GC, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
			min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			/*
			 * Raven2 has a HW issue that it is unable to use the
			 * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
			 * So here is the workaround that increases the system
			 * aperture high address (by 1) to get rid of the VM
			 * fault and hardware hang.
			 */
			WREG32_SOC15_RLC(GC, 0,
					 regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
					 max((adev->gmc.fb_end >> 18) + 0x1,
					     adev->gmc.agp_end >> 18));
		else
			WREG32_SOC15_RLC(GC, 0,
				regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

		/* Set default page address (MC address of mem_scratch). */
		value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
		WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			     (u32)(value >> 12));
		WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			     (u32)(value >> 44));

		/* Program "protection fault" default address (dummy page). */
		WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			     (u32)(adev->dummy_page_addr >> 12));
		WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			     (u32)((u64)adev->dummy_page_addr >> 44));

		tmp = RREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
				    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
		WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
	}

	/* In the case squeezing vram into GART aperture, we don't use
	 * FB aperture and AGP aperture. Disable them (TOP < BASE / BOT
	 * makes the range empty).
	 */
	if (adev->gmc.pdb0_bo) {
		WREG32_SOC15(GC, 0, regMC_VM_FB_LOCATION_TOP, 0);
		WREG32_SOC15(GC, 0, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
		WREG32_SOC15(GC, 0, regMC_VM_AGP_TOP, 0);
		WREG32_SOC15(GC, 0, regMC_VM_AGP_BOT, 0xFFFFFF);
		WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
		WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
	}
}
156
/*
 * gfxhub_v1_2_init_tlb_regs - set up the MC L1 TLB control register
 * @adev: amdgpu device pointer
 *
 * Enables the L1 TLB, the advanced driver model and ATC; memory type is
 * forced to UC (marked as an emulation setting by the original authors).
 */
static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(GC, 0, regMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC);/* XXX for emulation. */
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

	WREG32_SOC15_RLC(GC, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
}
176
/*
 * gfxhub_v1_2_init_cache_regs - program the VM L2 cache control registers
 * @adev: amdgpu device pointer
 *
 * Enables the L2 cache and fragment processing, invalidates the L1 TLBs
 * and L2 cache, then selects bank/fragment sizes based on
 * gmc.translate_further, and physical PDE/PTE tapping based on whether
 * the GPU is xGMI-connected to a CPU.
 */
static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	/* XXX for emulation, Refer to closed source code.*/
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
			    0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL, tmp);

	/* Flush stale translations before the new config takes effect. */
	tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL2, tmp);

	/* Larger bank select / big-K fragment size when a further
	 * translation level is in use.
	 */
	tmp = regVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL3, tmp);

	/* With a CPU-connected xGMI link, PDE/PTE requests tap physical
	 * addresses directly.
	 */
	tmp = regVM_L2_CNTL4_DEFAULT;
	if (adev->gmc.xgmi.connected_to_cpu) {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	}
	WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL4, tmp);
}
220
/*
 * gfxhub_v1_2_enable_system_domain - enable VM context 0 (system domain)
 * @adev: amdgpu device pointer
 *
 * Enables context 0 with the VMID0 page table depth/block size from
 * adev->gmc and retry-on-fault disabled.
 */
static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(GC, 0, regVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
			    adev->gmc.vmid0_page_table_depth);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->gmc.vmid0_page_table_block_size);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15(GC, 0, regVM_CONTEXT0_CNTL, tmp);
}
235
236static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev)
237{
238 WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
239 0XFFFFFFFF);
240 WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
241 0x0000000F);
242
243 WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
244 0);
245 WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
246 0);
247
248 WREG32_SOC15(GC, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
249 WREG32_SOC15(GC, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
250
251}
252
/*
 * gfxhub_v1_2_setup_vmid_config - program VM contexts 1..15
 * @adev: amdgpu device pointer
 *
 * Enables each user VMID context with the VM manager's page table depth
 * and block size, all protection fault "redirect to default page" bits
 * set, and a page table aperture covering 0 .. max_pfn - 1.
 */
static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	unsigned num_level, block_size;
	uint32_t tmp;
	int i;

	/* translate_further trades one page-table level for a larger
	 * block; otherwise 9 bits come out of the block size.
	 */
	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	for (i = 0; i <= 14; i++) {
		/* NOTE(review): the read uses raw offset i while the writes
		 * below use i * hub->ctx_distance — equivalent only if
		 * ctx_distance is 1 for these registers; confirm.
		 */
		tmp = RREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT1_CNTL, i);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    num_level);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    block_size);
		/* Send no-retry XNACK on fault to suppress VM fault storm.
		 * On Aldebaran, XNACK can be enabled in the SQ per-process.
		 * Retry faults need to be enabled for that to work.
		 */
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !adev->gmc.noretry ||
				    adev->asic_type == CHIP_ALDEBARAN);
		WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT1_CNTL,
				    i * hub->ctx_distance, tmp);
		/* Aperture spans the whole address space handled by the
		 * VM manager.
		 */
		WREG32_SOC15_OFFSET(GC, 0,
				    regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(GC, 0,
				    regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(GC, 0,
				    regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * hub->ctx_addr_distance,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(GC, 0,
				    regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * hub->ctx_addr_distance,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}
}
316
317static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev)
318{
319 struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
320 unsigned i;
321
322 for (i = 0 ; i < 18; ++i) {
323 WREG32_SOC15_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
324 i * hub->eng_addr_distance, 0xffffffff);
325 WREG32_SOC15_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
326 i * hub->eng_addr_distance, 0x1f);
327 }
328}
329
/*
 * gfxhub_v1_2_gart_enable - bring up GART translation on the GC hub
 * @adev: amdgpu device pointer
 *
 * Programs, in order: FB location (SRIOV only), GART aperture, system
 * aperture, TLB, L2 caches, system domain, identity aperture, user VMID
 * contexts and the invalidation engines.
 *
 * Returns: always 0.
 */
static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev) && adev->asic_type != CHIP_ARCTURUS) {
		/*
		 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
		 * VF copy registers so vbios post doesn't program them, for
		 * SRIOV driver need to program them
		 */
		WREG32_SOC15_RLC(GC, 0, regMC_VM_FB_LOCATION_BASE,
				 adev->gmc.vram_start >> 24);
		WREG32_SOC15_RLC(GC, 0, regMC_VM_FB_LOCATION_TOP,
				 adev->gmc.vram_end >> 24);
	}

	/* GART Enable. */
	gfxhub_v1_2_init_gart_aperture_regs(adev);
	gfxhub_v1_2_init_system_aperture_regs(adev);
	gfxhub_v1_2_init_tlb_regs(adev);
	/* Cache/identity-aperture setup is skipped under SRIOV —
	 * presumably handled by the host; confirm against the SRIOV
	 * register ownership rules.
	 */
	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_init_cache_regs(adev);

	gfxhub_v1_2_enable_system_domain(adev);
	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_disable_identity_aperture(adev);
	gfxhub_v1_2_setup_vmid_config(adev);
	gfxhub_v1_2_program_invalidation(adev);

	return 0;
}
359
/*
 * gfxhub_v1_2_gart_disable - tear down GART translation on the GC hub
 * @adev: amdgpu device pointer
 *
 * Disables all 16 VM contexts, then turns off the L1 TLB and the L2
 * cache.
 */
static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < 16; i++)
		WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_CNTL,
				    i * hub->ctx_distance, 0);

	/* Setup TLB control */
	tmp = RREG32_SOC15(GC, 0, regMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp,
			    MC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL,
			    0);
	WREG32_SOC15_RLC(GC, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);

	/* Setup L2 cache */
	tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32_SOC15(GC, 0, regVM_L2_CNTL, tmp);
	WREG32_SOC15(GC, 0, regVM_L2_CNTL3, 0);
}
386
387/**
388 * gfxhub_v1_2_set_fault_enable_default - update GART/VM fault handling
389 *
390 * @adev: amdgpu_device pointer
391 * @value: true redirects VM faults to the default page
392 */
393static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev,
394 bool value)
395{
396 u32 tmp;
397 tmp = RREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL);
398 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
399 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
400 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
401 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
402 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
403 PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
404 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
405 PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
406 tmp = REG_SET_FIELD(tmp,
407 VM_L2_PROTECTION_FAULT_CNTL,
408 TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
409 value);
410 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
411 NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
412 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
413 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
414 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
415 VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
416 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
417 READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
418 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
419 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
420 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
421 EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
422 if (!value) {
423 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
424 CRASH_ON_NO_RETRY_FAULT, 1);
425 tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
426 CRASH_ON_RETRY_FAULT, 1);
427 }
428 WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
429}
430
/*
 * gfxhub_v1_2_init - cache GC hub register offsets in the vmhub struct
 * @adev: amdgpu device pointer
 *
 * Fills adev->vmhub[AMDGPU_GFXHUB_0] with the SOC15 register offsets and
 * the per-context / per-engine register strides used by the common GMC
 * and invalidation code.
 */
static void gfxhub_v1_2_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(GC, 0,
				 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(GC, 0,
				 regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(GC, 0, regVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL);

	/* Strides between consecutive per-context / per-engine registers. */
	hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = regVM_INVALIDATE_ENG1_REQ - regVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}
461
462
/* GC hub callback table for gfxhub v1.2; the xGMI info query is shared
 * with the v1.1 implementation.
 */
const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
	.get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset,
	.setup_vm_pt_regs = gfxhub_v1_2_setup_vm_pt_regs,
	.gart_enable = gfxhub_v1_2_gart_enable,
	.gart_disable = gfxhub_v1_2_gart_disable,
	.set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default,
	.init = gfxhub_v1_2_init,
	.get_xgmi_info = gfxhub_v1_1_get_xgmi_info,
};