/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"

#include "gc/gc_10_3_0_offset.h"
#include "gc/gc_10_3_0_sh_mask.h"
#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
#include "ivsrcid/sdma2/irqsrcs_sdma2_5_0.h"
#include "ivsrcid/sdma3/irqsrcs_sdma3_5_0.h"

#include "soc15_common.h"
#include "soc15.h"
#include "navi10_sdma_pkt_open.h"
#include "nbio_v2_3.h"
#include "sdma_common.h"
#include "sdma_v5_2.h"

MODULE_FIRMWARE("amdgpu/sienna_cichlid_sdma.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_sdma.bin");
MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_sdma.bin");
MODULE_FIRMWARE("amdgpu/beige_goby_sdma.bin");

MODULE_FIRMWARE("amdgpu/vangogh_sdma.bin");
MODULE_FIRMWARE("amdgpu/yellow_carp_sdma.bin");
MODULE_FIRMWARE("amdgpu/sdma_5_2_6.bin");
MODULE_FIRMWARE("amdgpu/sdma_5_2_7.bin");

#define SDMA1_REG_OFFSET 0x600
#define SDMA3_REG_OFFSET 0x400
#define SDMA0_HYP_DEC_REG_START 0x5880
#define SDMA0_HYP_DEC_REG_END 0x5893
#define SDMA1_HYP_DEC_REG_OFFSET 0x20

static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev);

static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
{
	u32 base;

	if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
	    internal_offset <= SDMA0_HYP_DEC_REG_END) {
		base = adev->reg_offset[GC_HWIP][0][1];
		if (instance != 0)
			internal_offset += SDMA1_HYP_DEC_REG_OFFSET * instance;
	} else {
		if (instance < 2) {
			base = adev->reg_offset[GC_HWIP][0][0];
			if (instance == 1)
				internal_offset += SDMA1_REG_OFFSET;
		} else {
			base = adev->reg_offset[GC_HWIP][0][2];
			if (instance == 3)
				internal_offset += SDMA3_REG_OFFSET;
		}
	}

	return base + internal_offset;
}
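
/*
 * Illustrative note, not from the original source: with the bases above,
 * a non-HYP register such as mmSDMA0_GFX_RB_CNTL resolves to
 *
 *	instance 0: reg_offset[GC_HWIP][0][0] + mmSDMA0_GFX_RB_CNTL
 *	instance 1: reg_offset[GC_HWIP][0][0] + mmSDMA0_GFX_RB_CNTL + SDMA1_REG_OFFSET
 *	instance 3: reg_offset[GC_HWIP][0][2] + mmSDMA0_GFX_RB_CNTL + SDMA3_REG_OFFSET
 *
 * while HYP_DEC registers (0x5880..0x5893) share a single base and step
 * by SDMA1_HYP_DEC_REG_OFFSET (0x20) per instance.
 */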

static unsigned sdma_v5_2_ring_init_cond_exec(struct amdgpu_ring *ring,
					      uint64_t addr)
{
	unsigned ret;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, 1);
	/* this is the offset we need to patch later */
	ret = ring->wptr & ring->buf_mask;
	/* insert a dummy dword here and patch it later */
	amdgpu_ring_write(ring, 0);

	return ret;
}

/**
 * sdma_v5_2_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (NAVI10+).
 */
static uint64_t sdma_v5_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	u64 *rptr;

	/* XXX check if swapping is necessary on BE */
	rptr = (u64 *)ring->rptr_cpu_addr;

	DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
	return ((*rptr) >> 2);
}

/**
 * sdma_v5_2_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (NAVI10+).
 */
static uint64_t sdma_v5_2_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 wptr;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
	} else {
		wptr = RREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
		wptr = wptr << 32;
		wptr |= RREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
		DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr);
	}

	return wptr >> 2;
}

/**
 * sdma_v5_2_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (NAVI10+).
 */
static void sdma_v5_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	DRM_DEBUG("Setting write pointer\n");
	if (ring->use_doorbell) {
		DRM_DEBUG("Using doorbell -- "
			  "wptr_offs == 0x%08x "
			  "lower_32_bits(ring->wptr << 2) == 0x%08x "
			  "upper_32_bits(ring->wptr << 2) == 0x%08x\n",
			  ring->wptr_offs,
			  lower_32_bits(ring->wptr << 2),
			  upper_32_bits(ring->wptr << 2));
		/* XXX check if swapping is necessary on BE */
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
			     ring->wptr << 2);
		DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
			  ring->doorbell_index, ring->wptr << 2);
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(5, 2, 1)) {
			/* SDMA seems to miss doorbells sometimes when powergating kicks in.
			 * Updating the wptr directly will wake it. This is only safe because
			 * we disallow gfxoff in begin_use() and then allow it again in end_use().
			 */
			WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
			       lower_32_bits(ring->wptr << 2));
			WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
			       upper_32_bits(ring->wptr << 2));
		}
	} else {
		DRM_DEBUG("Not using doorbell -- "
			  "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
			  "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
			  ring->me,
			  lower_32_bits(ring->wptr << 2),
			  ring->me,
			  upper_32_bits(ring->wptr << 2));
		WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
		       lower_32_bits(ring->wptr << 2));
		WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
		       upper_32_bits(ring->wptr << 2));
	}
}

static void sdma_v5_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
					  SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}
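
/*
 * Illustrative note, not from the original source: with burst_nop
 * firmware, insert_nop(ring, 4) emits one header dword carrying
 * SDMA_PKT_NOP_HEADER_COUNT(3) followed by three plain NOP dwords,
 * which the engine consumes as a single burst packet rather than four
 * separate NOPs.
 */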

/**
 * sdma_v5_2_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring.
 */
static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);

	/* An IB packet must end on an 8 DW boundary--the next dword
	 * must be on an 8-dword boundary. Our IB packet below is 6
	 * dwords long, so we add x NOPs such that, in modular
	 * arithmetic,
	 * wptr + 6 + x = 8k, k >= 0, which in C is,
	 * (wptr + 6 + x) % 8 == 0.
	 * The expression below is one solution for x.
	 */
	sdma_v5_2_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
	amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
}
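
/*
 * Worked example, not from the original source: if
 * lower_32_bits(ring->wptr) == 5, the padding count is (2 - 5) & 7 == 5,
 * and (5 + 5 + 6) % 8 == 0, so the 6-dword INDIRECT packet emitted in
 * sdma_v5_2_ring_emit_ib() ends exactly on the next 8-dword boundary.
 */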

/**
 * sdma_v5_2_ring_emit_mem_sync - flush the IB by graphics cache rinse
 *
 * @ring: amdgpu ring pointer
 *
 * Flush the IB by graphics cache rinse.
 */
static void sdma_v5_2_ring_emit_mem_sync(struct amdgpu_ring *ring)
{
	uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB |
			    SDMA_GCR_GLM_INV | SDMA_GCR_GL1_INV |
			    SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
			    SDMA_GCR_GLI_INV(1);

	/* flush entire cache L0/L1/L2, this can be optimized by performance requirement */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
	amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
	amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
			  SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
	amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
			  SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
	amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
			  SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
}

/**
 * sdma_v5_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask = 0;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

	if (ring->me > 1) {
		amdgpu_asic_flush_hdp(adev, ring);
	} else {
		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;

		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
				  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
				  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
		amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
		amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
		amdgpu_ring_write(ring, ref_and_mask); /* reference */
		amdgpu_ring_write(ring, ref_and_mask); /* mask */
		amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
				  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
	}
}

/**
 * sdma_v5_2_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed.
 */
static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
			  SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Uncached (UC) */
	/* zero in first two bits */
	BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
				  SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
		/* zero in first two bits */
		BUG_ON(addr & 0x3);
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	if ((flags & AMDGPU_FENCE_FLAG_INT)) {
		uint32_t ctx = ring->is_mes_queue ?
			(ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
		/* generate an interrupt */
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
		amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
	}
}


/**
 * sdma_v5_2_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers.
 */
static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
{
	u32 rb_cntl, ib_cntl;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
		ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
	}
}

/**
 * sdma_v5_2_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues.
 */
static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * sdma_v5_2_ctx_switch_enable - enable/disable the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch.
 */
static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl, phase_quantum = 0;
	int i;

	if (amdgpu_sdma_phase_quantum) {
		unsigned value = amdgpu_sdma_phase_quantum;
		unsigned unit = 0;

		while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
			value = (value + 1) >> 1;
			unit++;
		}
		if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
			    SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
			value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
			unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
				SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
			WARN_ONCE(1,
				  "clamping sdma_phase_quantum to %uK clock cycles\n",
				  value << unit);
		}
		phase_quantum =
			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
			unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (enable && amdgpu_sdma_phase_quantum) {
			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
					phase_quantum);
			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
					phase_quantum);
			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
					phase_quantum);
		}

		if (!amdgpu_sriov_vf(adev)) {
			f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
						 AUTO_CTXSW_ENABLE, enable ? 1 : 0);
			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
		}
	}
}
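
/*
 * Worked example, not from the original source: the clamp loop above
 * encodes the quantum as value * 2^unit. Assuming VALUE is an 8-bit
 * field, a request of 300 exceeds 255, so value becomes (300 + 1) >> 1
 * == 150 with unit == 1; 150 * 2^1 == 300 units are then programmed
 * with only a small rounding loss.
 */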

/**
 * sdma_v5_2_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines.
 */
static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v5_2_gfx_stop(adev);
		sdma_v5_2_rlc_stop(adev);
	}

	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
		}
	}
}

/**
 * sdma_v5_2_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them.
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 doorbell;
	u32 doorbell_offset;
	u32 temp;
	u32 wptr_poll_cntl;
	u64 wptr_gpu_addr;
	int i, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		if (!amdgpu_sriov_vf(adev))
			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);

		/* setup the wptr shadow polling */
		wptr_gpu_addr = ring->wptr_gpu_addr;
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
				lower_32_bits(wptr_gpu_addr));
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
				upper_32_bits(wptr_gpu_addr));
		wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i,
							 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
		wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
					       SDMA0_GFX_RB_WPTR_POLL_CNTL,
					       F32_POLL_ENABLE, 1);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
				wptr_poll_cntl);

		/* set the wb address whether it's enabled or not */
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
				upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
				lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);

		ring->wptr = 0;

		/* before programming wptr to a smaller value, minor_ptr_update must be set first */
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);

		if (!amdgpu_sriov_vf(adev)) { /* only bare-metal uses register writes for wptr */
			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
		}

		doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
		doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));

		if (ring->use_doorbell) {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
			doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
							OFFSET, ring->doorbell_index);
		} else {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
		}
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);

		adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
						      ring->doorbell_index,
						      adev->doorbell_index.sdma_doorbell_range);

		if (amdgpu_sriov_vf(adev))
			sdma_v5_2_ring_set_wptr(ring);

		/* set minor_ptr_update to 0 after wptr programmed */
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);

		/* SRIOV VF has no control of any of registers below */
		if (!amdgpu_sriov_vf(adev)) {
			/* set utc l1 enable flag always to 1 */
			temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
			temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);

			/* enable MCBP */
			temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);

			/* Set up RESP_MODE to non-copy addresses */
			temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
			temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
			temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);

			/* program default cache read and write policy */
			temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
			/* clean read policy and write policy bits */
			temp &= 0xFF0FFF;
			temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
				 (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
				 SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);

			/* unhalt engine */
			temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
			temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
		}

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);

		ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);

		if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
			sdma_v5_2_ctx_switch_enable(adev, true);
			sdma_v5_2_enable(adev, true);
		}

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}

/**
 * sdma_v5_2_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them.
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_2_rlc_resume(struct amdgpu_device *adev)
{
	return 0;
}

/**
 * sdma_v5_2_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1/2/3 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	sdma_v5_2_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;

		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data +
			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);

		for (j = 0; j < fw_size; j++) {
			if (amdgpu_emu_mode == 1 && j % 500 == 0)
				msleep(1);
			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
		}

		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
	}

	return 0;
}

static int sdma_v5_2_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 grbm_soft_reset;
	u32 tmp;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		grbm_soft_reset = REG_SET_FIELD(0,
						GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
						1);
		grbm_soft_reset <<= i;

		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

		udelay(50);
	}

	return 0;
}

/**
 * sdma_v5_2_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them.
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_2_start(struct amdgpu_device *adev)
{
	int r = 0;

	if (amdgpu_sriov_vf(adev)) {
		sdma_v5_2_ctx_switch_enable(adev, false);
		sdma_v5_2_enable(adev, false);

		/* set RB registers */
		r = sdma_v5_2_gfx_resume(adev);
		return r;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		r = sdma_v5_2_load_microcode(adev);
		if (r)
			return r;

		/* The value of mmSDMA_F32_CNTL is invalid the moment after loading fw */
		if (amdgpu_emu_mode == 1)
			msleep(1000);
	}

	sdma_v5_2_soft_reset(adev);
	/* unhalt the MEs */
	sdma_v5_2_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v5_2_ctx_switch_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v5_2_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v5_2_rlc_resume(adev);

	return r;
}

static int sdma_v5_2_mqd_init(struct amdgpu_device *adev, void *mqd,
			      struct amdgpu_mqd_prop *prop)
{
	struct v10_sdma_mqd *m = mqd;
	uint64_t wb_gpu_addr;

	m->sdmax_rlcx_rb_cntl =
		order_base_2(prop->queue_size / 4) << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
		1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
		6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
		1 << SDMA0_RLC0_RB_CNTL__RB_PRIV__SHIFT;

	m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
	m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);

	m->sdmax_rlcx_rb_wptr_poll_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, 0,
							mmSDMA0_GFX_RB_WPTR_POLL_CNTL));

	wb_gpu_addr = prop->wptr_gpu_addr;
	m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits(wb_gpu_addr);
	m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr);

	wb_gpu_addr = prop->rptr_gpu_addr;
	m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits(wb_gpu_addr);
	m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits(wb_gpu_addr);

	m->sdmax_rlcx_ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, 0,
							mmSDMA0_GFX_IB_CNTL));

	m->sdmax_rlcx_doorbell_offset =
		prop->doorbell_index << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;

	m->sdmax_rlcx_doorbell = REG_SET_FIELD(0, SDMA0_RLC0_DOORBELL, ENABLE, 1);

	return 0;
}

static void sdma_v5_2_set_mqd_funcs(struct amdgpu_device *adev)
{
	adev->mqds[AMDGPU_HW_IP_DMA].mqd_size = sizeof(struct v10_sdma_mqd);
	adev->mqds[AMDGPU_HW_IP_DMA].init_mqd = sdma_v5_2_mqd_init;
}

/**
 * sdma_v5_2_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory.
 * Returns 0 for success, error for failure.
 */
static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;
	volatile uint32_t *cpu_ptr = NULL;

	tmp = 0xCAFEDEAD;

	if (ring->is_mes_queue) {
		uint32_t offset = 0;
		offset = amdgpu_mes_ctx_get_offs(ring,
						 AMDGPU_MES_CTX_PADDING_OFFS);
		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
		*cpu_ptr = tmp;
	} else {
		r = amdgpu_device_wb_get(adev, &index);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
			return r;
		}

		gpu_addr = adev->wb.gpu_addr + (index * 4);
		adev->wb.wb[index] = cpu_to_le32(tmp);
	}

	r = amdgpu_ring_alloc(ring, 20);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		if (!ring->is_mes_queue)
			amdgpu_device_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->is_mes_queue)
			tmp = le32_to_cpu(*cpu_ptr);
		else
			tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	if (!ring->is_mes_queue)
		amdgpu_device_wb_free(adev, index);

	return r;
}

/**
 * sdma_v5_2_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring.
 * Returns 0 on success, error on failure.
 */
static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	long r;
	u32 tmp = 0;
	u64 gpu_addr;
	volatile uint32_t *cpu_ptr = NULL;

	tmp = 0xCAFEDEAD;
	memset(&ib, 0, sizeof(ib));

	if (ring->is_mes_queue) {
		uint32_t offset = 0;
		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);

		offset = amdgpu_mes_ctx_get_offs(ring,
						 AMDGPU_MES_CTX_PADDING_OFFS);
		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
		*cpu_ptr = tmp;
	} else {
		r = amdgpu_device_wb_get(adev, &index);
		if (r) {
			dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
			return r;
		}

		gpu_addr = adev->wb.gpu_addr + (index * 4);
		adev->wb.wb[index] = cpu_to_le32(tmp);

		r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
		if (r) {
			DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
			goto err0;
		}
	}

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}

	if (ring->is_mes_queue)
		tmp = le32_to_cpu(*cpu_ptr);
	else
		tmp = le32_to_cpu(adev->wb.wb[index]);

	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	if (!ring->is_mes_queue)
		amdgpu_device_wb_free(adev, index);
	return r;
}


/**
 * sdma_v5_2_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA.
 */
static void sdma_v5_2_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * sdma_v5_2_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA.
 */
static void sdma_v5_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw - 1;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * sdma_v5_2_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA.
 */
static void sdma_v5_2_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
}

/**
 * sdma_v5_2_ring_pad_ib - pad the IB
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords.
 */
static void sdma_v5_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	u32 pad_count;
	int i;

	pad_count = (-ib->length_dw) & 0x7;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
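
/*
 * Worked example, not from the original source: an IB with
 * length_dw == 13 needs (-13) & 0x7 == 3 padding dwords to reach 16;
 * with burst_nop firmware the first of those carries
 * SDMA_PKT_NOP_HEADER_COUNT(2), so the engine skips all three as one
 * packet.
 */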


/**
 * sdma_v5_2_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed.
 */
static void sdma_v5_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}


/**
 * sdma_v5_2_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: page directory address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA.
 */
static void sdma_v5_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}

static void sdma_v5_2_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}

static void sdma_v5_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val); /* reference */
	amdgpu_ring_write(ring, mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}

static void sdma_v5_2_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						   uint32_t reg0, uint32_t reg1,
						   uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	/* wait for a cycle to reset vm_inv_eng*_ack */
	amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}

static int sdma_v5_2_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_sdma_init_microcode(adev, 0, true);
	if (r)
		return r;

	sdma_v5_2_set_ring_funcs(adev);
	sdma_v5_2_set_buffer_funcs(adev);
	sdma_v5_2_set_vm_pte_funcs(adev);
	sdma_v5_2_set_irq_funcs(adev);
	sdma_v5_2_set_mqd_funcs(adev);

	return 0;
}

static unsigned sdma_v5_2_seq_to_irq_id(int seq_num)
{
	switch (seq_num) {
	case 0:
		return SOC15_IH_CLIENTID_SDMA0;
	case 1:
		return SOC15_IH_CLIENTID_SDMA1;
	case 2:
		return SOC15_IH_CLIENTID_SDMA2;
	case 3:
		return SOC15_IH_CLIENTID_SDMA3_Sienna_Cichlid;
	default:
		break;
	}
	return -EINVAL;
}

static unsigned sdma_v5_2_seq_to_trap_id(int seq_num)
{
	switch (seq_num) {
	case 0:
		return SDMA0_5_0__SRCID__SDMA_TRAP;
	case 1:
		return SDMA1_5_0__SRCID__SDMA_TRAP;
	case 2:
		return SDMA2_5_0__SRCID__SDMA_TRAP;
	case 3:
		return SDMA3_5_0__SRCID__SDMA_TRAP;
	default:
		break;
	}
	return -EINVAL;
}

static int sdma_v5_2_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	for (i = 0; i < adev->sdma.num_instances; i++) {
		r = amdgpu_irq_add_id(adev, sdma_v5_2_seq_to_irq_id(i),
				      sdma_v5_2_seq_to_trap_id(i),
				      &adev->sdma.trap_irq);
		if (r)
			return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = true;
		ring->me = i;

		DRM_INFO("use_doorbell being set to: [%s]\n",
			 ring->use_doorbell ? "true" : "false");

		ring->doorbell_index =
			(adev->doorbell_index.sdma_engine[i] << 1); /* get DWORD offset */

		ring->vm_hub = AMDGPU_GFXHUB(0);
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
				     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v5_2_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	amdgpu_sdma_destroy_inst_ctx(adev, true);

	return 0;
}

static int sdma_v5_2_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v5_2_start(adev);
}

static int sdma_v5_2_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	sdma_v5_2_ctx_switch_enable(adev, false);
	sdma_v5_2_enable(adev, false);

	return 0;
}

static int sdma_v5_2_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v5_2_hw_fini(adev);
}

static int sdma_v5_2_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v5_2_hw_init(adev);
}

static bool sdma_v5_2_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		u32 tmp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));

		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
			return false;
	}

	return true;
}

static int sdma_v5_2_wait_for_idle(void *handle)
{
	unsigned i;
	u32 sdma0, sdma1, sdma2, sdma3;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		sdma0 = RREG32(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
		sdma1 = RREG32(sdma_v5_2_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
		sdma2 = RREG32(sdma_v5_2_get_reg_offset(adev, 2, mmSDMA0_STATUS_REG));
		sdma3 = RREG32(sdma_v5_2_get_reg_offset(adev, 3, mmSDMA0_STATUS_REG));

		if (sdma0 & sdma1 & sdma2 & sdma3 & SDMA0_STATUS_REG__IDLE_MASK)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
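
/*
 * Note, not from the original source: ANDing the four status words
 * before applying the mask works because IDLE is a single status bit;
 * the expression is non-zero only when every instance reports idle in
 * the same polling iteration.
 */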

static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring)
{
	int i, r = 0;
	struct amdgpu_device *adev = ring->adev;
	u32 index = 0;
	u64 sdma_gfx_preempt;

	amdgpu_sdma_get_index_from_ring(ring, &index);
	sdma_gfx_preempt =
		sdma_v5_2_get_reg_offset(adev, index, mmSDMA0_GFX_PREEMPT);

	/* assert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* emit the trailing fence */
	ring->trail_seq += 1;
	amdgpu_ring_alloc(ring, 10);
	sdma_v5_2_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
				  ring->trail_seq, 0);
	amdgpu_ring_commit(ring);

	/* assert IB preemption */
	WREG32(sdma_gfx_preempt, 1);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
	}

	/* deassert IB preemption */
	WREG32(sdma_gfx_preempt, 0);

	/* deassert the preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);
	return r;
}

static int sdma_v5_2_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;
	u32 reg_offset = sdma_v5_2_get_reg_offset(adev, type, mmSDMA0_CNTL);

	if (!amdgpu_sriov_vf(adev)) {
		sdma_cntl = RREG32(reg_offset);
		sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
					  state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		WREG32(reg_offset, sdma_cntl);
	}

	return 0;
}

static int sdma_v5_2_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t mes_queue_id = entry->src_data[0];

	DRM_DEBUG("IH: SDMA trap\n");

	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
		struct amdgpu_mes_queue *queue;

		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;

		spin_lock(&adev->mes.queue_id_lock);
		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
		if (queue) {
			DRM_DEBUG("process sdma queue id = %d\n", mes_queue_id);
			amdgpu_fence_process(queue->ring);
		}
		spin_unlock(&adev->mes.queue_id_lock);
		return 0;
	}

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_SDMA0:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	case SOC15_IH_CLIENTID_SDMA1:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	case SOC15_IH_CLIENTID_SDMA2:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[2].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	case SOC15_IH_CLIENTID_SDMA3_Sienna_Cichlid:
		switch (entry->ring_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[3].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		case 3:
			/* XXX page queue */
			break;
		}
		break;
	}
	return 0;
}

static int sdma_v5_2_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	return 0;
}

static bool sdma_v5_2_firmware_mgcg_support(struct amdgpu_device *adev,
					    int i)
{
	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
	case IP_VERSION(5, 2, 1):
		if (adev->sdma.instance[i].fw_version < 70)
			return false;
		break;
	case IP_VERSION(5, 2, 3):
		if (adev->sdma.instance[i].fw_version < 47)
			return false;
		break;
	case IP_VERSION(5, 2, 7):
		if (adev->sdma.instance[i].fw_version < 9)
			return false;
		break;
	default:
		return true;
	}

	return true;
}

static void sdma_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t data, def;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {

		if (!sdma_v5_2_firmware_mgcg_support(adev, i))
			adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_MGCG;

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
			/* Enable sdma clock gating */
			def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
			data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDER_REG_MASK);
			if (def != data)
				WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
		} else {
			/* Disable sdma clock gating */
			def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
			data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK |
				 SDMA0_CLK_CTRL__SOFT_OVERRIDER_REG_MASK);
			if (def != data)
				WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
		}
	}
}

static void sdma_v5_2_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t data, def;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (adev->sdma.instance[i].fw_version < 70 &&
		    amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
		    IP_VERSION(5, 2, 1))
			adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_LS;

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
			/* Enable sdma mem light sleep */
			def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
			data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
		} else {
			/* Disable sdma mem light sleep */
			def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
			data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
		}
	}
}

static int sdma_v5_2_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 1):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 6):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 7):
		sdma_v5_2_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		sdma_v5_2_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static int sdma_v5_2_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void sdma_v5_2_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_CLK_CTRL));
	if (!(data & SDMA0_CLK_CTRL__CGCG_EN_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32_KIQ(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL));
	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* SDMA 5.2.3 (RMB) FW doesn't seem to properly
	 * disallow GFXOFF in some cases, leading to
	 * hangs in SDMA. Disallow GFXOFF while SDMA is active.
	 * We can probably just limit this to 5.2.3,
	 * but it shouldn't hurt for other parts, since
	 * GFXOFF will be disallowed anyway while SDMA is
	 * active; this just makes it explicit.
	 * sdma_v5_2_ring_set_wptr() takes advantage of this
	 * to update the wptr because sometimes SDMA seems to miss
	 * doorbells when entering PG. If you remove this, update
	 * sdma_v5_2_ring_set_wptr() as well!
	 */
	amdgpu_gfx_off_ctrl(adev, false);
}

static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* SDMA 5.2.3 (RMB) FW doesn't seem to properly
	 * disallow GFXOFF in some cases, leading to
	 * hangs in SDMA. Allow GFXOFF when SDMA is complete.
	 */
	amdgpu_gfx_off_ctrl(adev, true);
}

const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
	.name = "sdma_v5_2",
	.early_init = sdma_v5_2_early_init,
	.late_init = NULL,
	.sw_init = sdma_v5_2_sw_init,
	.sw_fini = sdma_v5_2_sw_fini,
	.hw_init = sdma_v5_2_hw_init,
	.hw_fini = sdma_v5_2_hw_fini,
	.suspend = sdma_v5_2_suspend,
	.resume = sdma_v5_2_resume,
	.is_idle = sdma_v5_2_is_idle,
	.wait_for_idle = sdma_v5_2_wait_for_idle,
	.soft_reset = sdma_v5_2_soft_reset,
	.set_clockgating_state = sdma_v5_2_set_clockgating_state,
	.set_powergating_state = sdma_v5_2_set_powergating_state,
	.get_clockgating_state = sdma_v5_2_get_clockgating_state,
};

static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.secure_submission_supported = true,
	.get_rptr = sdma_v5_2_ring_get_rptr,
	.get_wptr = sdma_v5_2_ring_get_wptr,
	.set_wptr = sdma_v5_2_ring_set_wptr,
	.emit_frame_size =
		5 + /* sdma_v5_2_ring_init_cond_exec */
		6 + /* sdma_v5_2_ring_emit_hdp_flush */
		3 + /* hdp_invalidate */
		6 + /* sdma_v5_2_ring_emit_pipeline_sync */
		/* sdma_v5_2_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
		10 + 10 + 10, /* sdma_v5_2_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v5_2_ring_emit_ib */
	.emit_ib = sdma_v5_2_ring_emit_ib,
	.emit_mem_sync = sdma_v5_2_ring_emit_mem_sync,
	.emit_fence = sdma_v5_2_ring_emit_fence,
	.emit_pipeline_sync = sdma_v5_2_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v5_2_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v5_2_ring_emit_hdp_flush,
	.test_ring = sdma_v5_2_ring_test_ring,
	.test_ib = sdma_v5_2_ring_test_ib,
	.insert_nop = sdma_v5_2_ring_insert_nop,
	.pad_ib = sdma_v5_2_ring_pad_ib,
	.begin_use = sdma_v5_2_ring_begin_use,
	.end_use = sdma_v5_2_ring_end_use,
	.emit_wreg = sdma_v5_2_ring_emit_wreg,
	.emit_reg_wait = sdma_v5_2_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait,
	.init_cond_exec = sdma_v5_2_ring_init_cond_exec,
	.preempt_ib = sdma_v5_2_ring_preempt_ib,
};

static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v5_2_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
	}
}

static const struct amdgpu_irq_src_funcs sdma_v5_2_trap_irq_funcs = {
	.set = sdma_v5_2_set_trap_irq_state,
	.process = sdma_v5_2_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v5_2_illegal_inst_irq_funcs = {
	.process = sdma_v5_2_process_illegal_inst_irq,
};

static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
					adev->sdma.num_instances;
	adev->sdma.trap_irq.funcs = &sdma_v5_2_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v5_2_illegal_inst_irq_funcs;
}

/**
 * sdma_v5_2_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @copy_flags: copy flags for the buffers
 *
 * Copy GPU buffers using the DMA engine.
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v5_2_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       uint32_t copy_flags)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
		SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
	ib->ptr[ib->length_dw++] = byte_count - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
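
/*
 * Illustrative layout, not from the original source: copying 0x1000
 * bytes emits exactly seven dwords -- the COPY/LINEAR header,
 * byte_count - 1 (0xfff), an endian-swap dword of 0, and the lo/hi
 * halves of the source and destination addresses -- which is where
 * copy_num_dw = 7 in sdma_v5_2_buffer_funcs below comes from.
 */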

/**
 * sdma_v5_2_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine.
 */
static void sdma_v5_2_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count - 1;
}

static const struct amdgpu_buffer_funcs sdma_v5_2_buffer_funcs = {
	.copy_max_bytes = 0x400000,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v5_2_emit_copy_buffer,

	.fill_max_bytes = 0x400000,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v5_2_emit_fill_buffer,
};

static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v5_2_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs sdma_v5_2_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v5_2_vm_copy_pte,
	.write_pte = sdma_v5_2_vm_write_pte,
	.set_pte_pde = sdma_v5_2_vm_set_pte_pde,
};

static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v5_2_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++) {
			adev->vm_manager.vm_pte_scheds[i] =
				&adev->sdma.instance[i].ring.sched;
		}
		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version sdma_v5_2_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 5,
	.minor = 2,
	.rev = 0,
	.funcs = &sdma_v5_2_ip_funcs,
};