1/*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/delay.h>
25#include <linux/firmware.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28
29#include "amdgpu.h"
30#include "amdgpu_ucode.h"
31#include "amdgpu_trace.h"
32
33#include "gc/gc_10_1_0_offset.h"
34#include "gc/gc_10_1_0_sh_mask.h"
35#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
36#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
37
38#include "soc15_common.h"
39#include "soc15.h"
40#include "navi10_sdma_pkt_open.h"
41#include "nbio_v2_3.h"
42#include "sdma_common.h"
43#include "sdma_v5_0.h"
44
45MODULE_FIRMWARE("amdgpu/navi10_sdma.bin");
46MODULE_FIRMWARE("amdgpu/navi10_sdma1.bin");
47
48MODULE_FIRMWARE("amdgpu/navi14_sdma.bin");
49MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin");
50
51MODULE_FIRMWARE("amdgpu/navi12_sdma.bin");
52MODULE_FIRMWARE("amdgpu/navi12_sdma1.bin");
53
54MODULE_FIRMWARE("amdgpu/cyan_skillfish_sdma.bin");
55MODULE_FIRMWARE("amdgpu/cyan_skillfish_sdma1.bin");
56
57MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma.bin");
58MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma1.bin");
59
60#define SDMA1_REG_OFFSET 0x600
61#define SDMA0_HYP_DEC_REG_START 0x5880
62#define SDMA0_HYP_DEC_REG_END 0x5893
63#define SDMA1_HYP_DEC_REG_OFFSET 0x20
64
65static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
66static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
67static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
68static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
69
70static const struct soc15_reg_golden golden_settings_sdma_5[] = {
71 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
72 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
73 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
74 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
75 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
76 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
77 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
78 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
79 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
80 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
81 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
82 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x00ffffff, 0x000c5c00),
83 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
84 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
85 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
86 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
87 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
88 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
89 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
90 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
91 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
92 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
93 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
94 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
95};
96
97static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = {
98 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
99 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
100 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
101 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
102 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
103 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
104 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
105 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
106 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
107 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
108 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
109 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
110 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
111 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
112 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
113 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
114 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
115 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
116 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
117 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
118};
119
120static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
121 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
122 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
123};
124
125static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
126 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
127 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
128};
129
130static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
131 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
132 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
133 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
134 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
135 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
136 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
137};
138
139static const struct soc15_reg_golden golden_settings_sdma_cyan_skillfish[] = {
140 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
141 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
142 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
143 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
144 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
145 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
146 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
147 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
148 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
149 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
150 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
151 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
152 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
153 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x007fffff, 0x004c5c00),
154 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
155 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
156 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
157 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
158 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
159 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
160 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
161 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
162 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
163 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
164 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
165 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
166 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
167 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x007fffff, 0x004c5c00)
168};
169
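/**
 * sdma_v5_0_get_reg_offset - compute the absolute offset of an SDMA register
 *
 * @adev: amdgpu_device pointer
 * @instance: SDMA instance (0 or 1)
 * @internal_offset: register offset relative to the SDMA0 aperture
 *
 * Registers in the HYP_DEC range use a separate base and a smaller
 * per-instance stride (SDMA1_HYP_DEC_REG_OFFSET); all other registers
 * are shifted by SDMA1_REG_OFFSET for instance 1.
 */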
170static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
171{
172 u32 base;
173
174 if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
175 internal_offset <= SDMA0_HYP_DEC_REG_END) {
176 base = adev->reg_offset[GC_HWIP][0][1];
177 if (instance == 1)
178 internal_offset += SDMA1_HYP_DEC_REG_OFFSET;
179 } else {
180 base = adev->reg_offset[GC_HWIP][0][0];
181 if (instance == 1)
182 internal_offset += SDMA1_REG_OFFSET;
183 }
184
185 return base + internal_offset;
186}
187
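/**
 * sdma_v5_0_init_golden_registers - program the golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Apply the common and per-ASIC golden register sequences (and the
 * SR-IOV variant on Navi12 virtual functions) based on the SDMA IP version.
 */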
188static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
189{
190 switch (adev->ip_versions[SDMA0_HWIP][0]) {
191 case IP_VERSION(5, 0, 0):
192 soc15_program_register_sequence(adev,
193 golden_settings_sdma_5,
194 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
195 soc15_program_register_sequence(adev,
196 golden_settings_sdma_nv10,
197 (const u32)ARRAY_SIZE(golden_settings_sdma_nv10));
198 break;
199 case IP_VERSION(5, 0, 2):
200 soc15_program_register_sequence(adev,
201 golden_settings_sdma_5,
202 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
203 soc15_program_register_sequence(adev,
204 golden_settings_sdma_nv14,
205 (const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
206 break;
207 case IP_VERSION(5, 0, 5):
208 if (amdgpu_sriov_vf(adev))
209 soc15_program_register_sequence(adev,
210 golden_settings_sdma_5_sriov,
211 (const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov));
212 else
213 soc15_program_register_sequence(adev,
214 golden_settings_sdma_5,
215 (const u32)ARRAY_SIZE(golden_settings_sdma_5));
216 soc15_program_register_sequence(adev,
217 golden_settings_sdma_nv12,
218 (const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
219 break;
220 case IP_VERSION(5, 0, 1):
221 soc15_program_register_sequence(adev,
222 golden_settings_sdma_cyan_skillfish,
223 (const u32)ARRAY_SIZE(golden_settings_sdma_cyan_skillfish));
224 break;
225 default:
226 break;
227 }
228}
229
230/**
231 * sdma_v5_0_init_microcode - load ucode images from disk
232 *
233 * @adev: amdgpu_device pointer
234 *
235 * Use the firmware interface to load the ucode images into
236 * the driver (not loaded into hw).
237 * Returns 0 on success, error on failure.
238 */
239
240// Emulation only; this won't work on a real chip.
241// A real navi10 chip needs to use the PSP to load firmware.
242static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
243{
244 const char *chip_name;
245 char fw_name[40];
246 int err = 0, i;
247 struct amdgpu_firmware_info *info = NULL;
248 const struct common_firmware_header *header = NULL;
249 const struct sdma_firmware_header_v1_0 *hdr;
250
251 if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5)))
252 return 0;
253
254 DRM_DEBUG("\n");
255
256 switch (adev->ip_versions[SDMA0_HWIP][0]) {
257 case IP_VERSION(5, 0, 0):
258 chip_name = "navi10";
259 break;
260 case IP_VERSION(5, 0, 2):
261 chip_name = "navi14";
262 break;
263 case IP_VERSION(5, 0, 5):
264 chip_name = "navi12";
265 break;
266 case IP_VERSION(5, 0, 1):
267 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
268 chip_name = "cyan_skillfish2";
269 else
270 chip_name = "cyan_skillfish";
271 break;
272 default:
273 BUG();
274 }
275
276 for (i = 0; i < adev->sdma.num_instances; i++) {
277 if (i == 0)
278 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
279 else
280 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
281 err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
282 if (err)
283 goto out;
284 err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
285 if (err)
286 goto out;
287 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
288 adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
289 adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
290 if (adev->sdma.instance[i].feature_version >= 20)
291 adev->sdma.instance[i].burst_nop = true;
292 DRM_DEBUG("psp_load == '%s'\n",
293 adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
294
295 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
296 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
297 info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
298 info->fw = adev->sdma.instance[i].fw;
299 header = (const struct common_firmware_header *)info->fw->data;
300 adev->firmware.fw_size +=
301 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
302 }
303 }
304out:
305 if (err) {
306 DRM_ERROR("sdma_v5_0: Failed to load firmware \"%s\"\n", fw_name);
307 for (i = 0; i < adev->sdma.num_instances; i++) {
308 release_firmware(adev->sdma.instance[i].fw);
309 adev->sdma.instance[i].fw = NULL;
310 }
311 }
312 return err;
313}
314
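/**
 * sdma_v5_0_ring_init_cond_exec - emit a conditional execute packet
 *
 * @ring: amdgpu ring pointer
 *
 * Emit a COND_EXE packet whose dword count is a dummy value (0x55aa55aa)
 * and return the ring offset of that dword so that
 * sdma_v5_0_ring_patch_cond_exec() can patch it later.
 */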
315static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring)
316{
317 unsigned ret;
318
319 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
320 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
321 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
322 amdgpu_ring_write(ring, 1);
323 ret = ring->wptr & ring->buf_mask; /* this is the offset we need to patch later */
324 amdgpu_ring_write(ring, 0x55aa55aa); /* insert a dummy here and patch it later */
325
326 return ret;
327}
328
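/**
 * sdma_v5_0_ring_patch_cond_exec - patch a previously emitted COND_EXE packet
 *
 * @ring: amdgpu ring pointer
 * @offset: ring offset returned by sdma_v5_0_ring_init_cond_exec()
 *
 * Replace the dummy dword with the number of dwords between the packet and
 * the current write pointer, taking ring buffer wrap-around into account.
 */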
329static void sdma_v5_0_ring_patch_cond_exec(struct amdgpu_ring *ring,
330 unsigned offset)
331{
332 unsigned cur;
333
334 BUG_ON(offset > ring->buf_mask);
335 BUG_ON(ring->ring[offset] != 0x55aa55aa);
336
337 cur = (ring->wptr - 1) & ring->buf_mask;
338 if (cur > offset)
339 ring->ring[offset] = cur - offset;
340 else
341 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
342}
343
344/**
345 * sdma_v5_0_ring_get_rptr - get the current read pointer
346 *
347 * @ring: amdgpu ring pointer
348 *
349 * Get the current rptr from the hardware (NAVI10+).
350 */
351static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
352{
353 u64 *rptr;
354
355 /* XXX check if swapping is necessary on BE */
356 rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);
357
358 DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
359 return ((*rptr) >> 2);
360}
361
362/**
363 * sdma_v5_0_ring_get_wptr - get the current write pointer
364 *
365 * @ring: amdgpu ring pointer
366 *
367 * Get the current wptr from the hardware (NAVI10+).
368 */
369static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
370{
371 struct amdgpu_device *adev = ring->adev;
372 u64 wptr;
373
374 if (ring->use_doorbell) {
375 /* XXX check if swapping is necessary on BE */
376 wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
377 DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
378 } else {
379 wptr = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
380 wptr = wptr << 32;
381 wptr |= RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
382 DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr);
383 }
384
385 return wptr >> 2;
386}
387
388/**
389 * sdma_v5_0_ring_set_wptr - commit the write pointer
390 *
391 * @ring: amdgpu ring pointer
392 *
393 * Write the wptr back to the hardware (NAVI10+).
394 */
395static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
396{
397 struct amdgpu_device *adev = ring->adev;
398
399 DRM_DEBUG("Setting write pointer\n");
400 if (ring->use_doorbell) {
401 DRM_DEBUG("Using doorbell -- "
402 "wptr_offs == 0x%08x "
403 "lower_32_bits(ring->wptr) << 2 == 0x%08x "
404 "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
405 ring->wptr_offs,
406 lower_32_bits(ring->wptr << 2),
407 upper_32_bits(ring->wptr << 2));
408 /* XXX check if swapping is necessary on BE */
409 adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2);
410 adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2);
411 DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
412 ring->doorbell_index, ring->wptr << 2);
413 WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
414 } else {
415 DRM_DEBUG("Not using doorbell -- "
416 "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
417 "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
418 ring->me,
419 lower_32_bits(ring->wptr << 2),
420 ring->me,
421 upper_32_bits(ring->wptr << 2));
422 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
423 lower_32_bits(ring->wptr << 2));
424 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
425 upper_32_bits(ring->wptr << 2));
426 }
427}
428
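/**
 * sdma_v5_0_ring_insert_nop - insert NOP packets in the ring
 *
 * @ring: amdgpu ring pointer
 * @count: number of NOP dwords to insert
 *
 * When the loaded firmware supports burst NOPs, the first dword is a
 * burst NOP header whose count field covers the remaining dwords;
 * otherwise each dword is written as an individual NOP packet.
 */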
429static void sdma_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
430{
431 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
432 int i;
433
434 for (i = 0; i < count; i++)
435 if (sdma && sdma->burst_nop && (i == 0))
436 amdgpu_ring_write(ring, ring->funcs->nop |
437 SDMA_PKT_NOP_HEADER_COUNT(count - 1));
438 else
439 amdgpu_ring_write(ring, ring->funcs->nop);
440}
441
442/**
443 * sdma_v5_0_ring_emit_ib - Schedule an IB on the DMA engine
444 *
445 * @ring: amdgpu ring pointer
446 * @job: job to retrieve vmid from
447 * @ib: IB object to schedule
448 * @flags: unused
449 *
450 * Schedule an IB in the DMA ring (NAVI10).
451 */
452static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
453 struct amdgpu_job *job,
454 struct amdgpu_ib *ib,
455 uint32_t flags)
456{
457 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
458 uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
459
460 /* An IB packet must end on an 8-DW boundary -- the next dword
461 * must be on an 8-dword boundary. Our IB packet below is 6
462 * dwords long, so add x NOPs such that, in
463 * modular arithmetic,
464 * wptr + 6 + x = 8k, k >= 0, which in C is
465 * (wptr + 6 + x) % 8 = 0.
466 * The expression below is a solution for x.
467 */
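 /* For example, if lower_32_bits(ring->wptr) % 8 == 5, then
  * x = (2 - 5) & 7 = 5, and 5 + 6 + 5 = 16, a multiple of 8.
  */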
468 sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
469
470 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
471 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
472 /* base must be 32 byte aligned */
473 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
474 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
475 amdgpu_ring_write(ring, ib->length_dw);
476 amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
477 amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
478}
479
480/**
481 * sdma_v5_0_ring_emit_mem_sync - flush the IB by graphics cache rinse
482 *
483 * @ring: amdgpu ring pointer
484 *
485 * flush the IB by graphics cache rinse.
486 */
487static void sdma_v5_0_ring_emit_mem_sync(struct amdgpu_ring *ring)
488{
489 uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
490 SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
491 SDMA_GCR_GLI_INV(1);
492
493 /* flush the entire L0/L1/L2 cache; this can be tuned based on performance requirements */
494 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
495 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
496 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
497 SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
498 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
499 SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
500 amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
501 SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
502}
503
504/**
505 * sdma_v5_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
506 *
507 * @ring: amdgpu ring pointer
508 *
509 * Emit an hdp flush packet on the requested DMA ring.
510 */
511static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
512{
513 struct amdgpu_device *adev = ring->adev;
514 u32 ref_and_mask = 0;
515 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
516
517 if (ring->me == 0)
518 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
519 else
520 ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
521
522 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
523 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
524 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
525 amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
526 amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
527 amdgpu_ring_write(ring, ref_and_mask); /* reference */
528 amdgpu_ring_write(ring, ref_and_mask); /* mask */
529 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
530 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
531}
532
533/**
534 * sdma_v5_0_ring_emit_fence - emit a fence on the DMA ring
535 *
536 * @ring: amdgpu ring pointer
537 * @addr: address
538 * @seq: sequence number
539 * @flags: fence related flags
540 *
541 * Add a DMA fence packet to the ring to write
542 * the fence seq number and DMA trap packet to generate
543 * an interrupt if needed (NAVI10).
544 */
545static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
546 unsigned flags)
547{
548 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
549 /* write the fence */
550 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
551 SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Uncached (UC) */
552 /* zero in first two bits */
553 BUG_ON(addr & 0x3);
554 amdgpu_ring_write(ring, lower_32_bits(addr));
555 amdgpu_ring_write(ring, upper_32_bits(addr));
556 amdgpu_ring_write(ring, lower_32_bits(seq));
557
558 /* optionally write high bits as well */
559 if (write64bit) {
560 addr += 4;
561 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
562 SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
563 /* zero in first two bits */
564 BUG_ON(addr & 0x3);
565 amdgpu_ring_write(ring, lower_32_bits(addr));
566 amdgpu_ring_write(ring, upper_32_bits(addr));
567 amdgpu_ring_write(ring, upper_32_bits(seq));
568 }
569
570 if (flags & AMDGPU_FENCE_FLAG_INT) {
571 /* generate an interrupt */
572 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
573 amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
574 }
575}
576
577
578/**
579 * sdma_v5_0_gfx_stop - stop the gfx async dma engines
580 *
581 * @adev: amdgpu_device pointer
582 *
583 * Stop the gfx async dma ring buffers (NAVI10).
584 */
585static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
586{
587 struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
588 struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
589 u32 rb_cntl, ib_cntl;
590 int i;
591
592 if ((adev->mman.buffer_funcs_ring == sdma0) ||
593 (adev->mman.buffer_funcs_ring == sdma1))
594 amdgpu_ttm_set_buffer_funcs_status(adev, false);
595
596 for (i = 0; i < adev->sdma.num_instances; i++) {
597 rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
598 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
599 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
600 ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
601 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
602 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
603 }
604}
605
606/**
607 * sdma_v5_0_rlc_stop - stop the compute async dma engines
608 *
609 * @adev: amdgpu_device pointer
610 *
611 * Stop the compute async dma queues (NAVI10).
612 */
613static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
614{
615 /* XXX todo */
616}
617
618/**
619 * sdma_v5_0_ctx_switch_enable - enable/disable the async dma engines context switch
620 *
621 * @adev: amdgpu_device pointer
622 * @enable: enable/disable the DMA MEs context switch.
623 *
624 * Halt or unhalt the async dma engines context switch (NAVI10).
625 */
626static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
627{
628 u32 f32_cntl = 0, phase_quantum = 0;
629 int i;
630
631 if (amdgpu_sdma_phase_quantum) {
632 unsigned value = amdgpu_sdma_phase_quantum;
633 unsigned unit = 0;
634
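  /* The PHASE0_QUANTUM VALUE field cannot hold an arbitrarily large
   * quantum, so halve the requested value (rounding up) and bump the
   * UNIT exponent until it fits; the effective quantum then scales
   * roughly as value << unit.
   */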
635 while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
636 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
637 value = (value + 1) >> 1;
638 unit++;
639 }
640 if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
641 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
642 value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
643 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
644 unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
645 SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
646 WARN_ONCE(1,
647 "clamping sdma_phase_quantum to %uK clock cycles\n",
648 value << unit);
649 }
650 phase_quantum =
651 value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
652 unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
653 }
654
655 for (i = 0; i < adev->sdma.num_instances; i++) {
656 if (!amdgpu_sriov_vf(adev)) {
657 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
658 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
659 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
660 }
661
662 if (enable && amdgpu_sdma_phase_quantum) {
663 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
664 phase_quantum);
665 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
666 phase_quantum);
667 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
668 phase_quantum);
669 }
670 if (!amdgpu_sriov_vf(adev))
671 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
672 }
673
674}
675
676/**
677 * sdma_v5_0_enable - halt or unhalt the async dma engines
678 *
679 * @adev: amdgpu_device pointer
680 * @enable: enable/disable the DMA MEs.
681 *
682 * Halt or unhalt the async dma engines (NAVI10).
683 */
684static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
685{
686 u32 f32_cntl;
687 int i;
688
689 if (!enable) {
690 sdma_v5_0_gfx_stop(adev);
691 sdma_v5_0_rlc_stop(adev);
692 }
693
694 if (amdgpu_sriov_vf(adev))
695 return;
696
697 for (i = 0; i < adev->sdma.num_instances; i++) {
698 f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
699 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
700 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
701 }
702}
703
704/**
705 * sdma_v5_0_gfx_resume - setup and start the async dma engines
706 *
707 * @adev: amdgpu_device pointer
708 *
709 * Set up the gfx DMA ring buffers and enable them (NAVI10).
710 * Returns 0 for success, error for failure.
711 */
712static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
713{
714 struct amdgpu_ring *ring;
715 u32 rb_cntl, ib_cntl;
716 u32 rb_bufsz;
717 u32 wb_offset;
718 u32 doorbell;
719 u32 doorbell_offset;
720 u32 temp;
721 u32 wptr_poll_cntl;
722 u64 wptr_gpu_addr;
723 int i, r;
724
725 for (i = 0; i < adev->sdma.num_instances; i++) {
726 ring = &adev->sdma.instance[i].ring;
727 wb_offset = (ring->rptr_offs * 4);
728
729 if (!amdgpu_sriov_vf(adev))
730 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
731
732 /* Set ring buffer size in dwords */
733 rb_bufsz = order_base_2(ring->ring_size / 4);
734 rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
735 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
736#ifdef __BIG_ENDIAN
737 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
738 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
739 RPTR_WRITEBACK_SWAP_ENABLE, 1);
740#endif
741 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
742
743 /* Initialize the ring buffer's read and write pointers */
744 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
745 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
746 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
747 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
748
749 /* setup the wptr shadow polling */
750 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
751 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
752 lower_32_bits(wptr_gpu_addr));
753 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
754 upper_32_bits(wptr_gpu_addr));
755 wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
756 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
757 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
758 SDMA0_GFX_RB_WPTR_POLL_CNTL,
759 F32_POLL_ENABLE, 1);
760 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
761 wptr_poll_cntl);
762
763 /* set the wb address whether it's enabled or not */
764 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
765 upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
766 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
767 lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
768
769 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
770
771 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE),
772 ring->gpu_addr >> 8);
773 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI),
774 ring->gpu_addr >> 40);
775
776 ring->wptr = 0;
777
778 /* before programming wptr to a smaller value, minor_ptr_update must be set first */
779 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
780
781 if (!amdgpu_sriov_vf(adev)) { /* only bare-metal uses register writes for wptr */
782 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR),
783 lower_32_bits(ring->wptr) << 2);
784 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI),
785 upper_32_bits(ring->wptr) << 2);
786 }
787
788 doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
789 doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
790 mmSDMA0_GFX_DOORBELL_OFFSET));
791
792 if (ring->use_doorbell) {
793 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
794 doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
795 OFFSET, ring->doorbell_index);
796 } else {
797 doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
798 }
799 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
800 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET),
801 doorbell_offset);
802
803 adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
804 ring->doorbell_index, 20);
805
806 if (amdgpu_sriov_vf(adev))
807 sdma_v5_0_ring_set_wptr(ring);
808
809 /* set minor_ptr_update to 0 after wptr is programmed */
810 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
811
812 if (!amdgpu_sriov_vf(adev)) {
813 /* always set the UTC L1 enable flag to 1 */
814 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
815 temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
816
817 /* enable MCBP */
818 temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
819 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
820
821 /* Set up RESP_MODE to non-copy addresses */
822 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
823 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
824 temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
825 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
826
827 /* program default cache read and write policy */
828 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
829 /* clear the read policy and write policy bits */
830 temp &= 0xFF0FFF;
831 temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
832 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
833 }
834
835 if (!amdgpu_sriov_vf(adev)) {
836 /* unhalt engine */
837 temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
838 temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
839 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
840 }
841
842 /* enable DMA RB */
843 rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
844 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
845
846 ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
847 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
848#ifdef __BIG_ENDIAN
849 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
850#endif
851 /* enable DMA IBs */
852 WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
853
854 ring->sched.ready = true;
855
856 if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
857 sdma_v5_0_ctx_switch_enable(adev, true);
858 sdma_v5_0_enable(adev, true);
859 }
860
861 r = amdgpu_ring_test_helper(ring);
862 if (r)
863 return r;
864
865 if (adev->mman.buffer_funcs_ring == ring)
866 amdgpu_ttm_set_buffer_funcs_status(adev, true);
867 }
868
869 return 0;
870}
871
872/**
873 * sdma_v5_0_rlc_resume - setup and start the async dma engines
874 *
875 * @adev: amdgpu_device pointer
876 *
877 * Set up the compute DMA queues and enable them (NAVI10).
878 * Returns 0 for success, error for failure.
879 */
880static int sdma_v5_0_rlc_resume(struct amdgpu_device *adev)
881{
882 return 0;
883}
884
885/**
886 * sdma_v5_0_load_microcode - load the sDMA ME ucode
887 *
888 * @adev: amdgpu_device pointer
889 *
890 * Loads the sDMA0/1 ucode.
891 * Returns 0 for success, -EINVAL if the ucode is not available.
892 */
893static int sdma_v5_0_load_microcode(struct amdgpu_device *adev)
894{
895 const struct sdma_firmware_header_v1_0 *hdr;
896 const __le32 *fw_data;
897 u32 fw_size;
898 int i, j;
899
900 /* halt the MEs */
901 sdma_v5_0_enable(adev, false);
902
903 for (i = 0; i < adev->sdma.num_instances; i++) {
904 if (!adev->sdma.instance[i].fw)
905 return -EINVAL;
906
907 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
908 amdgpu_ucode_print_sdma_hdr(&hdr->header);
909 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
910
911 fw_data = (const __le32 *)
912 (adev->sdma.instance[i].fw->data +
913 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
914
915 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
916
917 for (j = 0; j < fw_size; j++) {
918 if (amdgpu_emu_mode == 1 && j % 500 == 0)
919 msleep(1);
920 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
921 }
922
923 WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
924 }
925
926 return 0;
927}
928
929/**
930 * sdma_v5_0_start - setup and start the async dma engines
931 *
932 * @adev: amdgpu_device pointer
933 *
934 * Set up the DMA engines and enable them (NAVI10).
935 * Returns 0 for success, error for failure.
936 */
937static int sdma_v5_0_start(struct amdgpu_device *adev)
938{
939 int r = 0;
940
941 if (amdgpu_sriov_vf(adev)) {
942 sdma_v5_0_ctx_switch_enable(adev, false);
943 sdma_v5_0_enable(adev, false);
944
945 /* set RB registers */
946 r = sdma_v5_0_gfx_resume(adev);
947 return r;
948 }
949
950 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
951 r = sdma_v5_0_load_microcode(adev);
952 if (r)
953 return r;
954 }
955
956 /* unhalt the MEs */
957 sdma_v5_0_enable(adev, true);
958 /* enable sdma ring preemption */
959 sdma_v5_0_ctx_switch_enable(adev, true);
960
961 /* start the gfx rings and rlc compute queues */
962 r = sdma_v5_0_gfx_resume(adev);
963 if (r)
964 return r;
965 r = sdma_v5_0_rlc_resume(adev);
966
967 return r;
968}
969
970/**
971 * sdma_v5_0_ring_test_ring - simple async dma engine test
972 *
973 * @ring: amdgpu_ring structure holding ring information
974 *
975 * Test the DMA engine by using it to write a
976 * value to memory (NAVI10).
977 * Returns 0 for success, error for failure.
978 */
979static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
980{
981 struct amdgpu_device *adev = ring->adev;
982 unsigned i;
983 unsigned index;
984 int r;
985 u32 tmp;
986 u64 gpu_addr;
987
988 r = amdgpu_device_wb_get(adev, &index);
989 if (r) {
990 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
991 return r;
992 }
993
994 gpu_addr = adev->wb.gpu_addr + (index * 4);
995 tmp = 0xCAFEDEAD;
996 adev->wb.wb[index] = cpu_to_le32(tmp);
997
998 r = amdgpu_ring_alloc(ring, 5);
999 if (r) {
1000 DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
1001 amdgpu_device_wb_free(adev, index);
1002 return r;
1003 }
1004
1005 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1006 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
1007 amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
1008 amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
1009 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
1010 amdgpu_ring_write(ring, 0xDEADBEEF);
1011 amdgpu_ring_commit(ring);
1012
1013 for (i = 0; i < adev->usec_timeout; i++) {
1014 tmp = le32_to_cpu(adev->wb.wb[index]);
1015 if (tmp == 0xDEADBEEF)
1016 break;
1017 if (amdgpu_emu_mode == 1)
1018 msleep(1);
1019 else
1020 udelay(1);
1021 }
1022
1023 if (i >= adev->usec_timeout)
1024 r = -ETIMEDOUT;
1025
1026 amdgpu_device_wb_free(adev, index);
1027
1028 return r;
1029}
1030
1031/**
1032 * sdma_v5_0_ring_test_ib - test an IB on the DMA engine
1033 *
1034 * @ring: amdgpu_ring structure holding ring information
1035 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
1036 *
1037 * Test a simple IB in the DMA ring (NAVI10).
1038 * Returns 0 on success, error on failure.
1039 */
1040static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1041{
1042 struct amdgpu_device *adev = ring->adev;
1043 struct amdgpu_ib ib;
1044 struct dma_fence *f = NULL;
1045 unsigned index;
1046 long r;
1047 u32 tmp = 0;
1048 u64 gpu_addr;
1049
1050 r = amdgpu_device_wb_get(adev, &index);
1051 if (r) {
1052 dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
1053 return r;
1054 }
1055
1056 gpu_addr = adev->wb.gpu_addr + (index * 4);
1057 tmp = 0xCAFEDEAD;
1058 adev->wb.wb[index] = cpu_to_le32(tmp);
1059 memset(&ib, 0, sizeof(ib));
1060 r = amdgpu_ib_get(adev, NULL, 256,
1061 AMDGPU_IB_POOL_DIRECT, &ib);
1062 if (r) {
1063 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
1064 goto err0;
1065 }
1066
1067 ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1068 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1069 ib.ptr[1] = lower_32_bits(gpu_addr);
1070 ib.ptr[2] = upper_32_bits(gpu_addr);
1071 ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
1072 ib.ptr[4] = 0xDEADBEEF;
1073 ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1074 ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1075 ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1076 ib.length_dw = 8;
1077
1078 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1079 if (r)
1080 goto err1;
1081
1082 r = dma_fence_wait_timeout(f, false, timeout);
1083 if (r == 0) {
1084 DRM_ERROR("amdgpu: IB test timed out\n");
1085 r = -ETIMEDOUT;
1086 goto err1;
1087 } else if (r < 0) {
1088 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
1089 goto err1;
1090 }
1091 tmp = le32_to_cpu(adev->wb.wb[index]);
1092 if (tmp == 0xDEADBEEF)
1093 r = 0;
1094 else
1095 r = -EINVAL;
1096
1097err1:
1098 amdgpu_ib_free(adev, &ib, NULL);
1099 dma_fence_put(f);
1100err0:
1101 amdgpu_device_wb_free(adev, index);
1102 return r;
1103}
1104
1105
1106/**
1107 * sdma_v5_0_vm_copy_pte - update PTEs by copying them from the GART
1108 *
1109 * @ib: indirect buffer to fill with commands
1110 * @pe: addr of the page entry
1111 * @src: src addr to copy from
1112 * @count: number of page entries to update
1113 *
1114 * Update PTEs by copying them from the GART using sDMA (NAVI10).
1115 */
1116static void sdma_v5_0_vm_copy_pte(struct amdgpu_ib *ib,
1117 uint64_t pe, uint64_t src,
1118 unsigned count)
1119{
1120 unsigned bytes = count * 8;
1121
1122 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1123 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1124 ib->ptr[ib->length_dw++] = bytes - 1;
1125 ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1126 ib->ptr[ib->length_dw++] = lower_32_bits(src);
1127 ib->ptr[ib->length_dw++] = upper_32_bits(src);
1128 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1129 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1130
1131}
1132
1133/**
1134 * sdma_v5_0_vm_write_pte - update PTEs by writing them manually
1135 *
1136 * @ib: indirect buffer to fill with commands
1137 * @pe: addr of the page entry
1138 * @value: dst addr to write into pe
1139 * @count: number of page entries to update
1140 * @incr: increase next addr by incr bytes
1141 *
1142 * Update PTEs by writing them manually using sDMA (NAVI10).
1143 */
1144static void sdma_v5_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1145 uint64_t value, unsigned count,
1146 uint32_t incr)
1147{
1148 unsigned ndw = count * 2;
1149
1150 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1151 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1152 ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1153 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1154 ib->ptr[ib->length_dw++] = ndw - 1;
1155 for (; ndw > 0; ndw -= 2) {
1156 ib->ptr[ib->length_dw++] = lower_32_bits(value);
1157 ib->ptr[ib->length_dw++] = upper_32_bits(value);
1158 value += incr;
1159 }
1160}
1161
1162/**
1163 * sdma_v5_0_vm_set_pte_pde - update the page tables using sDMA
1164 *
1165 * @ib: indirect buffer to fill with commands
1166 * @pe: addr of the page entry
1167 * @addr: dst addr to write into pe
1168 * @count: number of page entries to update
1169 * @incr: increase next addr by incr bytes
1170 * @flags: access flags
1171 *
1172 * Update the page tables using sDMA (NAVI10).
1173 */
1174static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
1175 uint64_t pe,
1176 uint64_t addr, unsigned count,
1177 uint32_t incr, uint64_t flags)
1178{
1179 /* for physically contiguous pages (vram) */
1180 ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
1181 ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1182 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1183 ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1184 ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1185 ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1186 ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1187 ib->ptr[ib->length_dw++] = incr; /* increment size */
1188 ib->ptr[ib->length_dw++] = 0;
1189 ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1190}
1191
1192/**
1193 * sdma_v5_0_ring_pad_ib - pad the IB
1194 * @ring: amdgpu_ring structure holding ring information
1195 * @ib: indirect buffer to fill with padding
1196 *
1197 * Pad the IB with NOPs so its length is a multiple of 8 dwords.
1198 */
1199static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1200{
1201 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1202 u32 pad_count;
1203 int i;
1204
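 /* (-length_dw) & 7 is the number of dwords needed to round the IB
  * length up to the next multiple of 8.
  */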
1205 pad_count = (-ib->length_dw) & 0x7;
1206 for (i = 0; i < pad_count; i++)
1207 if (sdma && sdma->burst_nop && (i == 0))
1208 ib->ptr[ib->length_dw++] =
1209 SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1210 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1211 else
1212 ib->ptr[ib->length_dw++] =
1213 SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1214}
1215
1216
1217/**
1218 * sdma_v5_0_ring_emit_pipeline_sync - sync the pipeline
1219 *
1220 * @ring: amdgpu_ring pointer
1221 *
1222 * Make sure all previous operations are completed (NAVI10).
1223 */
1224static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1225{
1226 uint32_t seq = ring->fence_drv.sync_seq;
1227 uint64_t addr = ring->fence_drv.gpu_addr;
1228
1229 /* wait for idle */
1230 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1231 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1232 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
1233 SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
1234 amdgpu_ring_write(ring, addr & 0xfffffffc);
1235 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1236 amdgpu_ring_write(ring, seq); /* reference */
1237 amdgpu_ring_write(ring, 0xffffffff); /* mask */
1238 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1239 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1240}
1241
1242
1243/**
1244 * sdma_v5_0_ring_emit_vm_flush - vm flush using sDMA
1245 *
1246 * @ring: amdgpu_ring pointer
1247 * @vmid: vmid number to use
1248 * @pd_addr: address
1249 *
1250 * Update the page table base and flush the VM TLB
1251 * using sDMA (NAVI10).
1252 */
1253static void sdma_v5_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1254 unsigned vmid, uint64_t pd_addr)
1255{
1256 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1257}
1258
1259static void sdma_v5_0_ring_emit_wreg(struct amdgpu_ring *ring,
1260 uint32_t reg, uint32_t val)
1261{
1262 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1263 SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1264 amdgpu_ring_write(ring, reg);
1265 amdgpu_ring_write(ring, val);
1266}
1267
1268static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1269 uint32_t val, uint32_t mask)
1270{
1271 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1272 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1273 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
1274 amdgpu_ring_write(ring, reg << 2);
1275 amdgpu_ring_write(ring, 0);
1276 amdgpu_ring_write(ring, val); /* reference */
1277 amdgpu_ring_write(ring, mask); /* mask */
1278 amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1279 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
1280}
1281
1282static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
1283 uint32_t reg0, uint32_t reg1,
1284 uint32_t ref, uint32_t mask)
1285{
1286 amdgpu_ring_emit_wreg(ring, reg0, ref);
1287 /* wait for a cycle to reset vm_inv_eng*_ack */
1288 amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
1289 amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
1290}
1291
1292static int sdma_v5_0_early_init(void *handle)
1293{
1294 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1295
1296 sdma_v5_0_set_ring_funcs(adev);
1297 sdma_v5_0_set_buffer_funcs(adev);
1298 sdma_v5_0_set_vm_pte_funcs(adev);
1299 sdma_v5_0_set_irq_funcs(adev);
1300
1301 return 0;
1302}
1303
1304
1305static int sdma_v5_0_sw_init(void *handle)
1306{
1307 struct amdgpu_ring *ring;
1308 int r, i;
1309 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1310
1311 /* SDMA trap event */
1312 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0,
1313 SDMA0_5_0__SRCID__SDMA_TRAP,
1314 &adev->sdma.trap_irq);
1315 if (r)
1316 return r;
1317
1318 /* SDMA trap event */
1319 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1,
1320 SDMA1_5_0__SRCID__SDMA_TRAP,
1321 &adev->sdma.trap_irq);
1322 if (r)
1323 return r;
1324
1325 r = sdma_v5_0_init_microcode(adev);
1326 if (r) {
1327 DRM_ERROR("Failed to load sdma firmware!\n");
1328 return r;
1329 }
1330
1331 for (i = 0; i < adev->sdma.num_instances; i++) {
1332 ring = &adev->sdma.instance[i].ring;
1333 ring->ring_obj = NULL;
1334 ring->use_doorbell = true;
1335
1336 DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
1337 ring->use_doorbell?"true":"false");
1338
1339 ring->doorbell_index = (i == 0) ?
1340 (adev->doorbell_index.sdma_engine[0] << 1) //get DWORD offset
1341 : (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset
1342
1343 sprintf(ring->name, "sdma%d", i);
1344 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
1345 (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
1346 AMDGPU_SDMA_IRQ_INSTANCE1,
1347 AMDGPU_RING_PRIO_DEFAULT, NULL);
1348 if (r)
1349 return r;
1350 }
1351
1352 return r;
1353}
1354
1355static int sdma_v5_0_sw_fini(void *handle)
1356{
1357 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1358 int i;
1359
1360 for (i = 0; i < adev->sdma.num_instances; i++) {
1361 release_firmware(adev->sdma.instance[i].fw);
1362 adev->sdma.instance[i].fw = NULL;
1363
1364 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1365 }
1366
1367 return 0;
1368}
1369
1370static int sdma_v5_0_hw_init(void *handle)
1371{
1372 int r;
1373 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1374
1375 sdma_v5_0_init_golden_registers(adev);
1376
1377 r = sdma_v5_0_start(adev);
1378
1379 return r;
1380}
1381
1382static int sdma_v5_0_hw_fini(void *handle)
1383{
1384 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1385
1386 if (amdgpu_sriov_vf(adev))
1387 return 0;
1388
1389 sdma_v5_0_ctx_switch_enable(adev, false);
1390 sdma_v5_0_enable(adev, false);
1391
1392 return 0;
1393}
1394
1395static int sdma_v5_0_suspend(void *handle)
1396{
1397 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1398
1399 return sdma_v5_0_hw_fini(adev);
1400}
1401
1402static int sdma_v5_0_resume(void *handle)
1403{
1404 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1405
1406 return sdma_v5_0_hw_init(adev);
1407}
1408
1409static bool sdma_v5_0_is_idle(void *handle)
1410{
1411 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1412 u32 i;
1413
1414 for (i = 0; i < adev->sdma.num_instances; i++) {
1415 u32 tmp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
1416
1417 if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
1418 return false;
1419 }
1420
1421 return true;
1422}
1423
1424static int sdma_v5_0_wait_for_idle(void *handle)
1425{
1426 unsigned i;
1427 u32 sdma0, sdma1;
1428 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1429
1430 for (i = 0; i < adev->usec_timeout; i++) {
1431 sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
1432 sdma1 = RREG32(sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
1433
1434 if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
1435 return 0;
1436 udelay(1);
1437 }
1438 return -ETIMEDOUT;
1439}
1440
1441static int sdma_v5_0_soft_reset(void *handle)
1442{
1443 /* todo */
1444
1445 return 0;
1446}
1447
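/**
 * sdma_v5_0_ring_preempt_ib - preempt the IB currently executing on the ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit a trailing fence, assert the GFX_PREEMPT register for the ring's
 * instance and poll the fence until the engine acknowledges the
 * preemption, then deassert the request and the preemption condition.
 * Returns 0 on success, -EINVAL if the ring could not be preempted in time.
 */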
1448static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
1449{
1450 int i, r = 0;
1451 struct amdgpu_device *adev = ring->adev;
1452 u32 index = 0;
1453 u64 sdma_gfx_preempt;
1454
1455 amdgpu_sdma_get_index_from_ring(ring, &index);
1456 if (index == 0)
1457 sdma_gfx_preempt = mmSDMA0_GFX_PREEMPT;
1458 else
1459 sdma_gfx_preempt = mmSDMA1_GFX_PREEMPT;
1460
1461 /* assert preemption condition */
1462 amdgpu_ring_set_preempt_cond_exec(ring, false);
1463
1464 /* emit the trailing fence */
1465 ring->trail_seq += 1;
1466 amdgpu_ring_alloc(ring, 10);
1467 sdma_v5_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
1468 ring->trail_seq, 0);
1469 amdgpu_ring_commit(ring);
1470
1471 /* assert IB preemption */
1472 WREG32(sdma_gfx_preempt, 1);
1473
1474 /* poll the trailing fence */
1475 for (i = 0; i < adev->usec_timeout; i++) {
1476 if (ring->trail_seq ==
1477 le32_to_cpu(*(ring->trail_fence_cpu_addr)))
1478 break;
1479 udelay(1);
1480 }
1481
1482 if (i >= adev->usec_timeout) {
1483 r = -EINVAL;
1484 DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
1485 }
1486
1487 /* deassert IB preemption */
1488 WREG32(sdma_gfx_preempt, 0);
1489
1490 /* deassert the preemption condition */
1491 amdgpu_ring_set_preempt_cond_exec(ring, true);
1492 return r;
1493}
1494
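/**
 * sdma_v5_0_set_trap_irq_state - enable/disable the SDMA trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: irq source structure
 * @type: SDMA instance (AMDGPU_SDMA_IRQ_INSTANCE0 or INSTANCE1)
 * @state: requested interrupt state
 *
 * Program the TRAP_ENABLE bit in SDMA0_CNTL for the selected instance.
 * The register is not touched on SR-IOV virtual functions.
 */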
1495static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
1496 struct amdgpu_irq_src *source,
1497 unsigned type,
1498 enum amdgpu_interrupt_state state)
1499{
1500 u32 sdma_cntl;
1501
1502 if (!amdgpu_sriov_vf(adev)) {
1503 u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
1504 sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
1505 sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
1506
1507 sdma_cntl = RREG32(reg_offset);
1508 sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
1509 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
1510 WREG32(reg_offset, sdma_cntl);
1511 }
1512
1513 return 0;
1514}
1515
1516static int sdma_v5_0_process_trap_irq(struct amdgpu_device *adev,
1517 struct amdgpu_irq_src *source,
1518 struct amdgpu_iv_entry *entry)
1519{
1520 DRM_DEBUG("IH: SDMA trap\n");
1521 switch (entry->client_id) {
1522 case SOC15_IH_CLIENTID_SDMA0:
1523 switch (entry->ring_id) {
1524 case 0:
1525 amdgpu_fence_process(&adev->sdma.instance[0].ring);
1526 break;
1527 case 1:
1528 /* XXX compute */
1529 break;
1530 case 2:
1531 /* XXX compute */
1532 break;
1533 case 3:
1534 /* XXX page queue*/
1535 break;
1536 }
1537 break;
1538 case SOC15_IH_CLIENTID_SDMA1:
1539 switch (entry->ring_id) {
1540 case 0:
1541 amdgpu_fence_process(&adev->sdma.instance[1].ring);
1542 break;
1543 case 1:
1544 /* XXX compute */
1545 break;
1546 case 2:
1547 /* XXX compute */
1548 break;
1549 case 3:
1550 /* XXX page queue*/
1551 break;
1552 }
1553 break;
1554 }
1555 return 0;
1556}
1557
1558static int sdma_v5_0_process_illegal_inst_irq(struct amdgpu_device *adev,
1559 struct amdgpu_irq_src *source,
1560 struct amdgpu_iv_entry *entry)
1561{
1562 return 0;
1563}
1564
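/**
 * sdma_v5_0_update_medium_grain_clock_gating - toggle medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable clock gating
 *
 * Clear the SOFT_OVERRIDE bits in SDMA0_CLK_CTRL so the SDMA clocks can
 * be gated, or set them to keep the clocks forced on.
 */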
static void sdma_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t data, def;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
			/* Enable sdma clock gating */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
			data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
		} else {
			/* Disable sdma clock gating */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
			data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
		}
	}
}

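/**
 * sdma_v5_0_update_medium_grain_light_sleep - toggle SDMA memory light sleep
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable memory light sleep, false to disable it
 *
 * Sets MEM_POWER_OVERRIDE in SDMA0_POWER_CNTL to enable memory light
 * sleep (when supported) or clears it to disable it, for every SDMA
 * instance.  The register is only written back on a change.
 */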
static void sdma_v5_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data, def;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
			/* Enable sdma mem light sleep */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
			data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);

		} else {
			/* Disable sdma mem light sleep */
			def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
			data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);

		}
	}
}

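/**
 * sdma_v5_0_set_clockgating_state - apply the requested clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: AMD_CG_STATE_GATE or AMD_CG_STATE_UNGATE
 *
 * Updates medium grain clock gating and memory light sleep for the
 * supported SDMA 5.0 IP versions according to @state.  No-op for
 * SR-IOV virtual functions.
 */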
static int sdma_v5_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->ip_versions[SDMA0_HWIP][0]) {
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		sdma_v5_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		sdma_v5_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static int sdma_v5_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

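/**
 * sdma_v5_0_get_clockgating_state - report the current clockgating state
 *
 * @handle: amdgpu_device pointer
 * @flags: AMD_CG_SUPPORT_* bitmask to update
 *
 * Reads SDMA0_CLK_CTRL and SDMA0_POWER_CNTL on instance 0 and sets
 * AMD_CG_SUPPORT_SDMA_MGCG and/or AMD_CG_SUPPORT_SDMA_LS in @flags
 * when the corresponding feature is currently enabled.  For SR-IOV
 * virtual functions the mask is cleared first.
 */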
static void sdma_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CLK_CTRL));
	if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL));
	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
	.name = "sdma_v5_0",
	.early_init = sdma_v5_0_early_init,
	.late_init = NULL,
	.sw_init = sdma_v5_0_sw_init,
	.sw_fini = sdma_v5_0_sw_fini,
	.hw_init = sdma_v5_0_hw_init,
	.hw_fini = sdma_v5_0_hw_fini,
	.suspend = sdma_v5_0_suspend,
	.resume = sdma_v5_0_resume,
	.is_idle = sdma_v5_0_is_idle,
	.wait_for_idle = sdma_v5_0_wait_for_idle,
	.soft_reset = sdma_v5_0_soft_reset,
	.set_clockgating_state = sdma_v5_0_set_clockgating_state,
	.set_powergating_state = sdma_v5_0_set_powergating_state,
	.get_clockgating_state = sdma_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = sdma_v5_0_ring_get_rptr,
	.get_wptr = sdma_v5_0_ring_get_wptr,
	.set_wptr = sdma_v5_0_ring_set_wptr,
	.emit_frame_size =
		5 + /* sdma_v5_0_ring_init_cond_exec */
		6 + /* sdma_v5_0_ring_emit_hdp_flush */
		3 + /* hdp_invalidate */
		6 + /* sdma_v5_0_ring_emit_pipeline_sync */
		/* sdma_v5_0_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
		10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
	.emit_ib = sdma_v5_0_ring_emit_ib,
	.emit_mem_sync = sdma_v5_0_ring_emit_mem_sync,
	.emit_fence = sdma_v5_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v5_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v5_0_ring_emit_hdp_flush,
	.test_ring = sdma_v5_0_ring_test_ring,
	.test_ib = sdma_v5_0_ring_test_ib,
	.insert_nop = sdma_v5_0_ring_insert_nop,
	.pad_ib = sdma_v5_0_ring_pad_ib,
	.emit_wreg = sdma_v5_0_ring_emit_wreg,
	.emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
	.init_cond_exec = sdma_v5_0_ring_init_cond_exec,
	.patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
	.preempt_ib = sdma_v5_0_ring_preempt_ib,
};

static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v5_0_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
	}
}

static const struct amdgpu_irq_src_funcs sdma_v5_0_trap_irq_funcs = {
	.set = sdma_v5_0_set_trap_irq_state,
	.process = sdma_v5_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v5_0_illegal_inst_irq_funcs = {
	.process = sdma_v5_0_process_illegal_inst_irq,
};

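/*
 * One trap interrupt type is registered per SDMA instance; illegal
 * instruction interrupts share a single source.
 */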
static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
					adev->sdma.num_instances;
	adev->sdma.trap_irq.funcs = &sdma_v5_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v5_0_illegal_inst_irq_funcs;
}

/**
 * sdma_v5_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @tmz: if a secure copy should be used
 *
 * Copy GPU buffers using the DMA engine (NAVI10).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       bool tmz)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
		SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
	ib->ptr[ib->length_dw++] = byte_count - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
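
/*
 * Worked example (illustrative values): copying 0x1000 bytes from GPU
 * address 0x100001000 to 0x200002000 produces the seven dwords
 * accounted for by copy_num_dw below:
 *   dw0: OP_COPY | SUBOP_COPY_LINEAR header (TMZ bit set for secure copies)
 *   dw1: 0x00000fff  byte_count - 1
 *   dw2: 0x00000000  src/dst endian swap
 *   dw3: 0x00001000  src address low
 *   dw4: 0x00000001  src address high
 *   dw5: 0x00002000  dst address low
 *   dw6: 0x00000002  dst address high
 */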

/**
 * sdma_v5_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (NAVI10).
 */
static void sdma_v5_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count - 1;
}
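
/*
 * Worked example (illustrative values): filling 256 bytes at GPU
 * address 0x100000000 with the pattern 0xdeadbeef produces the five
 * dwords accounted for by fill_num_dw below:
 *   dw0: OP_CONST_FILL header
 *   dw1: 0x00000000  dst address low
 *   dw2: 0x00000001  dst address high
 *   dw3: 0xdeadbeef  fill pattern
 *   dw4: 0x000000ff  byte_count - 1
 */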

static const struct amdgpu_buffer_funcs sdma_v5_0_buffer_funcs = {
	.copy_max_bytes = 0x400000,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v5_0_emit_copy_buffer,

	.fill_max_bytes = 0x400000,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v5_0_emit_fill_buffer,
};

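/*
 * Register SDMA instance 0 as the TTM buffer move engine, unless
 * another IP block has already installed buffer funcs.
 */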
static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v5_0_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v5_0_vm_copy_pte,
	.write_pte = sdma_v5_0_vm_write_pte,
	.set_pte_pde = sdma_v5_0_vm_set_pte_pde,
};

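/*
 * Use the SDMA rings for GPUVM page table updates, unless other
 * vm_pte funcs were installed first; every instance contributes a
 * scheduler.
 */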
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++) {
			adev->vm_manager.vm_pte_scheds[i] =
				&adev->sdma.instance[i].ring.sched;
		}
		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version sdma_v5_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &sdma_v5_0_ip_funcs,
};