/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"

#include "sdma0/sdma0_4_0_offset.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "sdma1/sdma1_4_0_sh_mask.h"
#include "hdp/hdp_4_0_offset.h"
#include "sdma0/sdma0_4_1_default.h"

#include "soc15_common.h"
#include "soc15.h"
#include "vega10_sdma_pkt_open.h"

MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega20_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin");
MODULE_FIRMWARE("amdgpu/raven_sdma.bin");

#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L

static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);

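/*
 * "Golden" register settings: (mask, value) pairs recommended by the
 * hardware team to override the reset defaults at init time.
 */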
static const struct soc15_reg_golden golden_settings_sdma_4[] = {
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xff000ff0, 0x3f000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0100, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003ff006, 0x0003c000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_POWER_CNTL, 0x003ff000, 0x0003c000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
};

static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
};

static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
};

static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
{
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0xfc3fffff, 0x40000051),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
};

static const struct soc15_reg_golden golden_settings_sdma_4_2[] =
{
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
	SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
};

static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002)
};

static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
		u32 instance, u32 offset)
{
	return (instance == 0 ? (adev->reg_offset[SDMA0_HWIP][0][0] + offset) :
		(adev->reg_offset[SDMA1_HWIP][0][0] + offset));
}

static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		soc15_program_register_sequence(adev,
						golden_settings_sdma_4,
						ARRAY_SIZE(golden_settings_sdma_4));
		soc15_program_register_sequence(adev,
						golden_settings_sdma_vg10,
						ARRAY_SIZE(golden_settings_sdma_vg10));
		break;
	case CHIP_VEGA12:
		soc15_program_register_sequence(adev,
						golden_settings_sdma_4,
						ARRAY_SIZE(golden_settings_sdma_4));
		soc15_program_register_sequence(adev,
						golden_settings_sdma_vg12,
						ARRAY_SIZE(golden_settings_sdma_vg12));
		break;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_sdma_4_2,
						ARRAY_SIZE(golden_settings_sdma_4_2));
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev,
						golden_settings_sdma_4_1,
						ARRAY_SIZE(golden_settings_sdma_4_1));
		soc15_program_register_sequence(adev,
						golden_settings_sdma_rv1,
						ARRAY_SIZE(golden_settings_sdma_rv1));
		break;
	default:
		break;
	}
}

/**
 * sdma_v4_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */

/*
 * Emulation only; this path won't work on a real chip.
 * A real Vega10 chip needs to use the PSP to load firmware.
 */
static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	default:
		BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;
		DRM_DEBUG("psp_load == '%s'\n",
			  adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}
out:
	if (err) {
		DRM_ERROR("sdma_v4_0: Failed to load firmware \"%s\"\n", fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}

/**
 * sdma_v4_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	u64 *rptr;

	/* XXX check if swapping is necessary on BE */
	rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);

	DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
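	/* the engine writes rptr back as a byte offset; the ring code tracks dwords */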
	return ((*rptr) >> 2);
}

/**
 * sdma_v4_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 wptr;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
	} else {
		u32 lowbit, highbit;
		int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

		lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
		highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;

		DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
			  me, highbit, lowbit);
		wptr = highbit;
		wptr = wptr << 32;
		wptr |= lowbit;
	}

	return wptr >> 2;
}

/**
 * sdma_v4_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VEGA10+).
 */
static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	DRM_DEBUG("Setting write pointer\n");
	if (ring->use_doorbell) {
		u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];

		DRM_DEBUG("Using doorbell -- "
			  "wptr_offs == 0x%08x "
			  "lower_32_bits(ring->wptr) << 2 == 0x%08x "
			  "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
			  ring->wptr_offs,
			  lower_32_bits(ring->wptr << 2),
			  upper_32_bits(ring->wptr << 2));
		/* XXX check if swapping is necessary on BE */
		WRITE_ONCE(*wb, (ring->wptr << 2));
		DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
			  ring->doorbell_index, ring->wptr << 2);
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
	} else {
		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

		DRM_DEBUG("Not using doorbell -- "
			  "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
			  "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
			  me,
			  lower_32_bits(ring->wptr << 2),
			  me,
			  upper_32_bits(ring->wptr << 2));
		WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
		WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
	}
}

static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	int i;

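	/*
	 * A burst NOP encodes the number of NOP dwords that follow in its
	 * header, so the engine can skip the whole run in one fetch.
	 */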
	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * sdma_v4_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (VEGA10).
 */
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib,
				   unsigned vmid, bool ctx_switch)
{
	/* An IB packet must end on an 8-DW boundary */
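	/*
	 * The INDIRECT packet below is 6 dwords, so pad until wptr + 6 is a
	 * multiple of 8: (10 - (wptr & 7)) % 8 == (8 - ((wptr + 6) & 7)) % 8.
	 */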
	sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}

static void sdma_v4_0_wait_reg_mem(struct amdgpu_ring *ring,
				   int mem_space, int hdp,
				   uint32_t addr0, uint32_t addr1,
				   uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	if (mem_space) {
		/* memory */
		amdgpu_ring_write(ring, addr0);
		amdgpu_ring_write(ring, addr1);
	} else {
		/* registers */
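		/* register offsets are dword-based; the packet expects byte addresses, hence << 2 */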
		amdgpu_ring_write(ring, addr0 << 2);
		amdgpu_ring_write(ring, addr1 << 2);
	}
	amdgpu_ring_write(ring, ref); /* reference */
	amdgpu_ring_write(ring, mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
}

/**
 * sdma_v4_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask = 0;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;

	if (ring == &ring->adev->sdma.instance[0].ring)
		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
	else
		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;

	sdma_v4_0_wait_reg_mem(ring, 0, 1,
			       adev->nbio_funcs->get_hdp_flush_done_offset(adev),
			       adev->nbio_funcs->get_hdp_flush_req_offset(adev),
			       ref_and_mask, ref_and_mask, 10);
}

/**
 * sdma_v4_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address where the fence seq number is written
 * @seq: fence sequence number
 * @flags: fence flags
 *
 * Add a DMA fence packet to the ring to write the fence seq number,
 * and a DMA trap packet to generate an interrupt if needed (VEGA10).
 */
static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	/* zero in first two bits */
	BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		/* zero in first two bits */
		BUG_ON(addr & 0x3);
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}


/**
 * sdma_v4_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VEGA10).
 */
static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_buffer_funcs_status(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
		ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
	}

	sdma0->ready = false;
	sdma1->ready = false;
}

/**
 * sdma_v4_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VEGA10).
 */
static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * sdma_v4_0_ctx_switch_enable - enable/disable the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VEGA10).
 */
static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl, phase_quantum = 0;
	int i;

	if (amdgpu_sdma_phase_quantum) {
		unsigned value = amdgpu_sdma_phase_quantum;
		unsigned unit = 0;

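		/*
		 * Convert the quantum into the register's mantissa/exponent
		 * form: halve value (rounding up) and bump unit until value
		 * fits in the VALUE field, clamping at the field maximums.
		 */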
		while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
			value = (value + 1) >> 1;
			unit++;
		}
		if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
			    SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
			value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
			unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
				SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
			WARN_ONCE(1,
				  "clamping sdma_phase_quantum to %uK clock cycles\n",
				  value << unit);
		}
		phase_quantum =
			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
			unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
		if (enable && amdgpu_sdma_phase_quantum) {
			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
			       phase_quantum);
			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
			       phase_quantum);
			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
			       phase_quantum);
		}
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
	}
}

/**
 * sdma_v4_0_enable - enable/disable the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VEGA10).
 */
static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v4_0_gfx_stop(adev);
		sdma_v4_0_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
	}
}

/**
 * sdma_v4_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VEGA10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	u32 doorbell;
	u32 doorbell_offset;
	u32 temp;
	u64 wptr_gpu_addr;
	int i, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);

		/* set the wb address whether it's enabled or not */
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);

		ring->wptr = 0;

		/* before programming wptr to a smaller value, minor_ptr_update must be set first */
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);

		if (!amdgpu_sriov_vf(adev)) { /* only bare metal uses register writes for wptr */
			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
		}

		doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
		doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));

		if (ring->use_doorbell) {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
			doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
							OFFSET, ring->doorbell_index);
		} else {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
		}
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
		adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
						      ring->doorbell_index);

		if (amdgpu_sriov_vf(adev))
			sdma_v4_0_ring_set_wptr(ring);

		/* set minor_ptr_update to 0 after wptr is programmed */
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);

		/* set utc l1 enable flag always to 1 */
		temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
		temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);

		if (!amdgpu_sriov_vf(adev)) {
			/* unhalt engine */
			temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
			temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
		}

		/* setup the wptr shadow polling */
		wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
		       lower_32_bits(wptr_gpu_addr));
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
		       upper_32_bits(wptr_gpu_addr));
		wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
		if (amdgpu_sriov_vf(adev))
			wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
		else
			wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);

		ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);

		ring->ready = true;

		if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
			sdma_v4_0_ctx_switch_enable(adev, true);
			sdma_v4_0_enable(adev, true);
		}

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}

	return 0;
}

static void
sdma_v4_1_update_power_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_SDMA)) {
		/* enable idle interrupt */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
		data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;

		if (data != def)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
	} else {
		/* disable idle interrupt */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
		data &= ~SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
		if (data != def)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
	}
}

static void sdma_v4_1_init_power_gating(struct amdgpu_device *adev)
{
	uint32_t def, data;

	/* Enable HW based PG. */
	def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
	data |= SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK;
	if (data != def)
		WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);

	/* enable interrupt */
	def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
	data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
	if (data != def)
		WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);

	/*
	 * Configure hold time to filter out invalid power on/off requests.
	 * Use the default for now.
	 */
	def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
	data &= ~SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK;
	data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK);
	/* Configure switch time for hysteresis purposes. Use the default for now. */
	data &= ~SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK;
	data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK);
	if (data != def)
		WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
}

static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
{
	if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA))
		return;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		sdma_v4_1_init_power_gating(adev);
		sdma_v4_1_update_power_gating(adev, true);
		break;
	default:
		break;
	}
}

/**
 * sdma_v4_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VEGA10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_0_rlc_resume(struct amdgpu_device *adev)
{
	sdma_v4_0_init_pg(adev);

	return 0;
}

/**
 * sdma_v4_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	sdma_v4_0_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;

		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data +
			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);

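		/* UCODE_ADDR auto-increments as each dword is written to UCODE_DATA */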
		for (j = 0; j < fw_size; j++)
			WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));

		WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
	}

	return 0;
}

/**
 * sdma_v4_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VEGA10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_0_start(struct amdgpu_device *adev)
{
	int r = 0;

	if (amdgpu_sriov_vf(adev)) {
		sdma_v4_0_ctx_switch_enable(adev, false);
		sdma_v4_0_enable(adev, false);

		/* set RB registers */
		r = sdma_v4_0_gfx_resume(adev);
		return r;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		r = sdma_v4_0_load_microcode(adev);
		if (r)
			return r;
	}

	/* unhalt the MEs */
	sdma_v4_0_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v4_0_ctx_switch_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v4_0_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v4_0_rlc_resume(adev);

	return r;
}

/**
 * sdma_v4_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VEGA10).
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

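	/* seed the writeback slot with a dummy value; the ring write below must replace it */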
	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_device_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_device_wb_free(adev, index);

	return r;
}

/**
 * sdma_v4_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (VEGA10).
 * Returns 0 on success, error on failure.
 */
static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	long r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF) {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}


/**
 * sdma_v4_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (VEGA10).
 */
static void sdma_v4_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
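	/* each PTE is 8 bytes, so the linear copy moves count * 8 bytes */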
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * sdma_v4_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: data to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (VEGA10).
 */
static void sdma_v4_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw - 1;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * sdma_v4_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (VEGA10).
 */
static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
}

/**
 * sdma_v4_0_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 */
static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	u32 pad_count;
	int i;

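	/* pad with NOPs out to the next multiple of 8 dwords */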
	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}


/**
 * sdma_v4_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (VEGA10).
 */
static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	sdma_v4_0_wait_reg_mem(ring, 1, 0,
			       addr & 0xfffffffc,
			       upper_32_bits(addr) & 0xffffffff,
			       seq, 0xffffffff, 4);
}


/**
 * sdma_v4_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: page directory address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VEGA10).
 */
static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}

static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}

static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{
	sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
}

static int sdma_v4_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_RAVEN)
		adev->sdma.num_instances = 1;
	else
		adev->sdma.num_instances = 2;

	sdma_v4_0_set_ring_funcs(adev);
	sdma_v4_0_set_buffer_funcs(adev);
	sdma_v4_0_set_vm_pte_funcs(adev);
	sdma_v4_0_set_irq_funcs(adev);

	return 0;
}


static int sdma_v4_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	r = sdma_v4_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = true;

		DRM_INFO("use_doorbell being set to: [%s]\n",
			 ring->use_doorbell ? "true" : "false");

		ring->doorbell_index = (i == 0) ?
			(AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) /* get DWORD offset */
			: (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); /* get DWORD offset */

		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 :
				     AMDGPU_SDMA_IRQ_TRAP1);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v4_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;
	}

	return 0;
}

static int sdma_v4_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v4_0_init_golden_registers(adev);

	r = sdma_v4_0_start(adev);

	return r;
}

static int sdma_v4_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	sdma_v4_0_ctx_switch_enable(adev, false);
	sdma_v4_0_enable(adev, false);

	return 0;
}

static int sdma_v4_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v4_0_hw_fini(adev);
}

static int sdma_v4_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v4_0_hw_init(adev);
}

static bool sdma_v4_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		u32 tmp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));

		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
			return false;
	}

	return true;
}

static int sdma_v4_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 sdma0, sdma1;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		sdma0 = RREG32(sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
		sdma1 = RREG32(sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));

		if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int sdma_v4_0_soft_reset(void *handle)
{
	/* todo */

	return 0;
}

static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ?
		sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
		sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);

	sdma_cntl = RREG32(reg_offset);
	sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
				  state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
	WREG32(reg_offset, sdma_cntl);

	return 0;
}

static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: SDMA trap\n");
	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_SDMA0:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	case SOC15_IH_CLIENTID_SDMA1:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}


static void sdma_v4_0_update_medium_grain_clock_gating(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t data, def;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		/* enable sdma0 clock gating */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
		data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
			  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
		if (def != data)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);

		if (adev->sdma.num_instances > 1) {
			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
			data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), data);
		}
	} else {
		/* disable sdma0 clock gating */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
		data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
			 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);

		if (def != data)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), data);

		if (adev->sdma.num_instances > 1) {
			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL));
			data |= (SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				 SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), data);
		}
	}
}


static void sdma_v4_0_update_medium_grain_light_sleep(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t data, def;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
		/* 1-not override: enable sdma0 mem light sleep */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
		data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
		if (def != data)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);

		/* 1-not override: enable sdma1 mem light sleep */
		if (adev->sdma.num_instances > 1) {
			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
			data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), data);
		}
	} else {
		/* 0-override: disable sdma0 mem light sleep */
		def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
		data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
		if (def != data)
			WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);

		/* 0-override: disable sdma1 mem light sleep */
		if (adev->sdma.num_instances > 1) {
			def = data = RREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL));
			data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), data);
		}
	}
}

static int sdma_v4_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		sdma_v4_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		sdma_v4_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v4_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		sdma_v4_1_update_power_gating(adev,
				state == AMD_PG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static void sdma_v4_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
	if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
	.name = "sdma_v4_0",
	.early_init = sdma_v4_0_early_init,
	.late_init = NULL,
	.sw_init = sdma_v4_0_sw_init,
	.sw_fini = sdma_v4_0_sw_fini,
	.hw_init = sdma_v4_0_hw_init,
	.hw_fini = sdma_v4_0_hw_fini,
	.suspend = sdma_v4_0_suspend,
	.resume = sdma_v4_0_resume,
	.is_idle = sdma_v4_0_is_idle,
	.wait_for_idle = sdma_v4_0_wait_for_idle,
	.soft_reset = sdma_v4_0_soft_reset,
	.set_clockgating_state = sdma_v4_0_set_clockgating_state,
	.set_powergating_state = sdma_v4_0_set_powergating_state,
	.get_clockgating_state = sdma_v4_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_MMHUB,
	.get_rptr = sdma_v4_0_ring_get_rptr,
	.get_wptr = sdma_v4_0_ring_get_wptr,
	.set_wptr = sdma_v4_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* sdma_v4_0_ring_emit_hdp_flush */
		3 + /* hdp invalidate */
		6 + /* sdma_v4_0_ring_emit_pipeline_sync */
		/* sdma_v4_0_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
		10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
	.emit_ib = sdma_v4_0_ring_emit_ib,
	.emit_fence = sdma_v4_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
	.test_ring = sdma_v4_0_ring_test_ring,
	.test_ib = sdma_v4_0_ring_test_ib,
	.insert_nop = sdma_v4_0_ring_insert_nop,
	.pad_ib = sdma_v4_0_ring_pad_ib,
	.emit_wreg = sdma_v4_0_ring_emit_wreg,
	.emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = {
	.set = sdma_v4_0_set_trap_irq_state,
	.process = sdma_v4_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = {
	.process = sdma_v4_0_process_illegal_inst_irq,
};

static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs;
}

/**
 * sdma_v4_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (VEGA10/12).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v4_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VEGA10/12).
 */
static void sdma_v4_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count - 1;
}

static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
	.copy_max_bytes = 0x400000,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v4_0_emit_copy_buffer,

	.fill_max_bytes = 0x400000,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v4_0_emit_fill_buffer,
};

static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v4_0_vm_copy_pte,

	.write_pte = sdma_v4_0_vm_write_pte,
	.set_pte_pde = sdma_v4_0_vm_set_pte_pde,
};

static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++)
			adev->vm_manager.vm_pte_rings[i] =
				&adev->sdma.instance[i].ring;

		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &sdma_v4_0_ip_funcs,
};