Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/amdgpu: add initial support for sdma v6.0

Add the initial set of functions for SDMA v6.0: firmware loading, gfx ring
setup and start, IB/fence emission, buffer copy/fill, VM page-table updates,
and trap interrupt handling.

Signed-off-by: Stanley Yang <Stanley.Yang@amd.com>
Signed-off-by: Likun Gao <Likun.Gao@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Stanley Yang; committed by Alex Deucher
61a039d1 5e779b17

+1719 -1 total (3 files changed)
+2 -1
drivers/gpu/drm/amd/amdgpu/Makefile
@@ -142,7 +142,8 @@
 	sdma_v4_0.o \
 	sdma_v4_4.o \
 	sdma_v5_0.o \
-	sdma_v5_2.o
+	sdma_v5_2.o \
+	sdma_v6_0.o
 
 # add MES block
 amdgpu-y += \
+1687
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
··· 1 + /* 2 + * Copyright 2020 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + */ 23 + 24 + #include <linux/delay.h> 25 + #include <linux/firmware.h> 26 + #include <linux/module.h> 27 + #include <linux/pci.h> 28 + 29 + #include "amdgpu.h" 30 + #include "amdgpu_ucode.h" 31 + #include "amdgpu_trace.h" 32 + 33 + #include "gc/gc_11_0_0_offset.h" 34 + #include "gc/gc_11_0_0_sh_mask.h" 35 + #include "gc/gc_11_0_0_default.h" 36 + #include "hdp/hdp_6_0_0_offset.h" 37 + #include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h" 38 + 39 + #include "soc15_common.h" 40 + #include "soc15.h" 41 + #include "sdma_v6_0_0_pkt_open.h" 42 + #include "nbio_v4_3.h" 43 + #include "sdma_common.h" 44 + #include "sdma_v6_0.h" 45 + #include "v11_structs.h" 46 + 47 + MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin"); 48 + 49 + #define SDMA1_REG_OFFSET 0x600 50 + #define SDMA0_HYP_DEC_REG_START 0x5880 51 + #define SDMA0_HYP_DEC_REG_END 0x589a 52 + #define SDMA1_HYP_DEC_REG_OFFSET 0x20 53 + 54 + static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev); 55 + static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev); 56 + static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev); 57 + static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev); 58 + 59 + static u32 sdma_v6_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset) 60 + { 61 + u32 base; 62 + 63 + if (internal_offset >= SDMA0_HYP_DEC_REG_START && 64 + internal_offset <= SDMA0_HYP_DEC_REG_END) { 65 + base = adev->reg_offset[GC_HWIP][0][1]; 66 + if (instance != 0) 67 + internal_offset += SDMA1_HYP_DEC_REG_OFFSET * instance; 68 + } else { 69 + base = adev->reg_offset[GC_HWIP][0][0]; 70 + if (instance == 1) 71 + internal_offset += SDMA1_REG_OFFSET; 72 + } 73 + 74 + return base + internal_offset; 75 + } 76 + 77 + static int sdma_v6_0_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst) 78 + { 79 + int err = 0; 80 + const struct sdma_firmware_header_v2_0 *hdr; 81 + 82 + err = amdgpu_ucode_validate(sdma_inst->fw); 83 + if (err) 84 + return err; 85 + 86 + hdr = (const struct sdma_firmware_header_v2_0 *)sdma_inst->fw->data; 87 + sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version); 88 + sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version); 89 + 90 + if (sdma_inst->feature_version >= 20) 91 + sdma_inst->burst_nop = true; 92 + 93 + return 0; 94 + } 95 + 96 + static void 
sdma_v6_0_destroy_inst_ctx(struct amdgpu_device *adev) 97 + { 98 + release_firmware(adev->sdma.instance[0].fw); 99 + 100 + memset((void*)adev->sdma.instance, 0, 101 + sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES); 102 + } 103 + 104 + /** 105 + * sdma_v6_0_init_microcode - load ucode images from disk 106 + * 107 + * @adev: amdgpu_device pointer 108 + * 109 + * Use the firmware interface to load the ucode images into 110 + * the driver (not loaded into hw). 111 + * Returns 0 on success, error on failure. 112 + */ 113 + 114 + // emulation only, won't work on real chip 115 + // sdma 6.0.0 real chip need to use PSP to load firmware 116 + static int sdma_v6_0_init_microcode(struct amdgpu_device *adev) 117 + { 118 + char fw_name[30]; 119 + char ucode_prefix[30]; 120 + int err = 0, i; 121 + struct amdgpu_firmware_info *info = NULL; 122 + const struct sdma_firmware_header_v2_0 *sdma_hdr; 123 + 124 + DRM_DEBUG("\n"); 125 + 126 + amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix)); 127 + 128 + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); 129 + 130 + err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev); 131 + if (err) 132 + goto out; 133 + 134 + err = sdma_v6_0_init_inst_ctx(&adev->sdma.instance[0]); 135 + if (err) 136 + goto out; 137 + 138 + for (i = 1; i < adev->sdma.num_instances; i++) { 139 + memcpy((void*)&adev->sdma.instance[i], 140 + (void*)&adev->sdma.instance[0], 141 + sizeof(struct amdgpu_sdma_instance)); 142 + } 143 + 144 + DRM_DEBUG("psp_load == '%s'\n", 145 + adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false"); 146 + 147 + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 148 + sdma_hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data; 149 + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH0]; 150 + info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH0; 151 + info->fw = adev->sdma.instance[0].fw; 152 + adev->firmware.fw_size += 153 + ALIGN(le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes), PAGE_SIZE); 154 + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH1]; 155 + info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH1; 156 + info->fw = adev->sdma.instance[0].fw; 157 + adev->firmware.fw_size += 158 + ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE); 159 + } 160 + 161 + out: 162 + if (err) { 163 + DRM_ERROR("sdma_v6_0: Failed to load firmware \"%s\"\n", fw_name); 164 + sdma_v6_0_destroy_inst_ctx(adev); 165 + } 166 + return err; 167 + } 168 + 169 + static unsigned sdma_v6_0_ring_init_cond_exec(struct amdgpu_ring *ring) 170 + { 171 + unsigned ret; 172 + 173 + amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COND_EXE)); 174 + amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr)); 175 + amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr)); 176 + amdgpu_ring_write(ring, 1); 177 + ret = ring->wptr & ring->buf_mask;/* this is the offset we need patch later */ 178 + amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */ 179 + 180 + return ret; 181 + } 182 + 183 + static void sdma_v6_0_ring_patch_cond_exec(struct amdgpu_ring *ring, 184 + unsigned offset) 185 + { 186 + unsigned cur; 187 + 188 + BUG_ON(offset > ring->buf_mask); 189 + BUG_ON(ring->ring[offset] != 0x55aa55aa); 190 + 191 + cur = (ring->wptr - 1) & ring->buf_mask; 192 + if (cur > offset) 193 + ring->ring[offset] = cur - offset; 194 + else 195 + ring->ring[offset] = (ring->buf_mask + 1) - offset + cur; 196 + } 197 + 198 + /** 199 + * 
sdma_v6_0_ring_get_rptr - get the current read pointer 200 + * 201 + * @ring: amdgpu ring pointer 202 + * 203 + * Get the current rptr from the hardware. 204 + */ 205 + static uint64_t sdma_v6_0_ring_get_rptr(struct amdgpu_ring *ring) 206 + { 207 + u64 *rptr; 208 + 209 + /* XXX check if swapping is necessary on BE */ 210 + rptr = (u64 *)ring->rptr_cpu_addr; 211 + 212 + DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr); 213 + return ((*rptr) >> 2); 214 + } 215 + 216 + /** 217 + * sdma_v6_0_ring_get_wptr - get the current write pointer 218 + * 219 + * @ring: amdgpu ring pointer 220 + * 221 + * Get the current wptr from the hardware. 222 + */ 223 + static uint64_t sdma_v6_0_ring_get_wptr(struct amdgpu_ring *ring) 224 + { 225 + u64 wptr = 0; 226 + 227 + if (ring->use_doorbell) { 228 + /* XXX check if swapping is necessary on BE */ 229 + wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr)); 230 + DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); 231 + } 232 + 233 + return wptr >> 2; 234 + } 235 + 236 + /** 237 + * sdma_v6_0_ring_set_wptr - commit the write pointer 238 + * 239 + * @ring: amdgpu ring pointer 240 + * 241 + * Write the wptr back to the hardware. 242 + */ 243 + static void sdma_v6_0_ring_set_wptr(struct amdgpu_ring *ring) 244 + { 245 + struct amdgpu_device *adev = ring->adev; 246 + 247 + DRM_DEBUG("Setting write pointer\n"); 248 + if (ring->use_doorbell) { 249 + DRM_DEBUG("Using doorbell -- " 250 + "wptr_offs == 0x%08x " 251 + "lower_32_bits(ring->wptr) << 2 == 0x%08x " 252 + "upper_32_bits(ring->wptr) << 2 == 0x%08x\n", 253 + ring->wptr_offs, 254 + lower_32_bits(ring->wptr << 2), 255 + upper_32_bits(ring->wptr << 2)); 256 + /* XXX check if swapping is necessary on BE */ 257 + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 258 + ring->wptr << 2); 259 + DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", 260 + ring->doorbell_index, ring->wptr << 2); 261 + WDOORBELL64(ring->doorbell_index, ring->wptr << 2); 262 + } else { 263 + DRM_DEBUG("Not using doorbell -- " 264 + "regSDMA%i_GFX_RB_WPTR == 0x%08x " 265 + "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n", 266 + ring->me, 267 + lower_32_bits(ring->wptr << 2), 268 + ring->me, 269 + upper_32_bits(ring->wptr << 2)); 270 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, ring->me, regSDMA0_QUEUE0_RB_WPTR), 271 + lower_32_bits(ring->wptr << 2)); 272 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, ring->me, regSDMA0_QUEUE0_RB_WPTR_HI), 273 + upper_32_bits(ring->wptr << 2)); 274 + } 275 + } 276 + 277 + static void sdma_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) 278 + { 279 + struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); 280 + int i; 281 + 282 + for (i = 0; i < count; i++) 283 + if (sdma && sdma->burst_nop && (i == 0)) 284 + amdgpu_ring_write(ring, ring->funcs->nop | 285 + SDMA_PKT_NOP_HEADER_COUNT(count - 1)); 286 + else 287 + amdgpu_ring_write(ring, ring->funcs->nop); 288 + } 289 + 290 + /** 291 + * sdma_v6_0_ring_emit_ib - Schedule an IB on the DMA engine 292 + * 293 + * @ring: amdgpu ring pointer 294 + * @ib: IB object to schedule 295 + * 296 + * Schedule an IB in the DMA ring. 297 + */ 298 + static void sdma_v6_0_ring_emit_ib(struct amdgpu_ring *ring, 299 + struct amdgpu_job *job, 300 + struct amdgpu_ib *ib, 301 + uint32_t flags) 302 + { 303 + unsigned vmid = AMDGPU_JOB_GET_VMID(job); 304 + uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid); 305 + 306 + /* An IB packet must end on a 8 DW boundary--the next dword 307 + * must be on a 8-dword boundary. 
Our IB packet below is 6 308 + * dwords long, thus add x number of NOPs, such that, in 309 + * modular arithmetic, 310 + * wptr + 6 + x = 8k, k >= 0, which in C is, 311 + * (wptr + 6 + x) % 8 = 0. 312 + * The expression below, is a solution of x. 313 + */ 314 + sdma_v6_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); 315 + 316 + amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_INDIRECT) | 317 + SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); 318 + /* base must be 32 byte aligned */ 319 + amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); 320 + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 321 + amdgpu_ring_write(ring, ib->length_dw); 322 + amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr)); 323 + amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr)); 324 + } 325 + 326 + /** 327 + * sdma_v6_0_ring_emit_mem_sync - flush the IB by graphics cache rinse 328 + * 329 + * @ring: amdgpu ring pointer 330 + * @job: job to retrieve vmid from 331 + * @ib: IB object to schedule 332 + * 333 + * flush the IB by graphics cache rinse. 334 + */ 335 + static void sdma_v6_0_ring_emit_mem_sync(struct amdgpu_ring *ring) 336 + { 337 + uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV | 338 + SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV | 339 + SDMA_GCR_GLI_INV(1); 340 + 341 + /* flush entire cache L0/L1/L2, this can be optimized by performance requirement */ 342 + amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_GCR_REQ)); 343 + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0)); 344 + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) | 345 + SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0)); 346 + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) | 347 + SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16)); 348 + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) | 349 + SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0)); 350 + } 351 + 352 + 353 + /** 354 + * sdma_v6_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring 355 + * 356 + * @ring: amdgpu ring pointer 357 + * 358 + * Emit an hdp flush packet on the requested DMA ring. 359 + */ 360 + static void sdma_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 361 + { 362 + struct amdgpu_device *adev = ring->adev; 363 + u32 ref_and_mask = 0; 364 + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 365 + 366 + ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; 367 + 368 + amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) | 369 + SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | 370 + SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ 371 + amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2); 372 + amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2); 373 + amdgpu_ring_write(ring, ref_and_mask); /* reference */ 374 + amdgpu_ring_write(ring, ref_and_mask); /* mask */ 375 + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | 376 + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ 377 + } 378 + 379 + /** 380 + * sdma_v6_0_ring_emit_fence - emit a fence on the DMA ring 381 + * 382 + * @ring: amdgpu ring pointer 383 + * @fence: amdgpu fence object 384 + * 385 + * Add a DMA fence packet to the ring to write 386 + * the fence seq number and DMA trap packet to generate 387 + * an interrupt if needed. 
388 + */ 389 + static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, 390 + unsigned flags) 391 + { 392 + bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 393 + /* write the fence */ 394 + amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_FENCE) | 395 + SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */ 396 + /* zero in first two bits */ 397 + BUG_ON(addr & 0x3); 398 + amdgpu_ring_write(ring, lower_32_bits(addr)); 399 + amdgpu_ring_write(ring, upper_32_bits(addr)); 400 + amdgpu_ring_write(ring, lower_32_bits(seq)); 401 + 402 + /* optionally write high bits as well */ 403 + if (write64bit) { 404 + addr += 4; 405 + amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_FENCE) | 406 + SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); 407 + /* zero in first two bits */ 408 + BUG_ON(addr & 0x3); 409 + amdgpu_ring_write(ring, lower_32_bits(addr)); 410 + amdgpu_ring_write(ring, upper_32_bits(addr)); 411 + amdgpu_ring_write(ring, upper_32_bits(seq)); 412 + } 413 + 414 + if (flags & AMDGPU_FENCE_FLAG_INT) { 415 + uint32_t ctx = ring->is_mes_queue ? 416 + (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0; 417 + /* generate an interrupt */ 418 + amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP)); 419 + amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx)); 420 + } 421 + } 422 + 423 + /** 424 + * sdma_v6_0_gfx_stop - stop the gfx async dma engines 425 + * 426 + * @adev: amdgpu_device pointer 427 + * 428 + * Stop the gfx async dma ring buffers. 429 + */ 430 + static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev) 431 + { 432 + struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring; 433 + struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring; 434 + u32 rb_cntl, ib_cntl; 435 + int i; 436 + 437 + if ((adev->mman.buffer_funcs_ring == sdma0) || 438 + (adev->mman.buffer_funcs_ring == sdma1)) 439 + amdgpu_ttm_set_buffer_funcs_status(adev, false); 440 + 441 + for (i = 0; i < adev->sdma.num_instances; i++) { 442 + rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL)); 443 + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 0); 444 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); 445 + ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL)); 446 + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0); 447 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl); 448 + } 449 + 450 + sdma0->sched.ready = false; 451 + sdma1->sched.ready = false; 452 + } 453 + 454 + /** 455 + * sdma_v6_0_rlc_stop - stop the compute async dma engines 456 + * 457 + * @adev: amdgpu_device pointer 458 + * 459 + * Stop the compute async dma queues. 460 + */ 461 + static void sdma_v6_0_rlc_stop(struct amdgpu_device *adev) 462 + { 463 + /* XXX todo */ 464 + } 465 + 466 + /** 467 + * sdma_v6_0_ctx_switch_enable - stop the async dma engines context switch 468 + * 469 + * @adev: amdgpu_device pointer 470 + * @enable: enable/disable the DMA MEs context switch. 471 + * 472 + * Halt or unhalt the async dma engines context switch. 473 + */ 474 + static void sdma_v6_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) 475 + { 476 + } 477 + 478 + /** 479 + * sdma_v6_0_enable - stop the async dma engines 480 + * 481 + * @adev: amdgpu_device pointer 482 + * @enable: enable/disable the DMA MEs. 483 + * 484 + * Halt or unhalt the async dma engines. 
485 + */ 486 + static void sdma_v6_0_enable(struct amdgpu_device *adev, bool enable) 487 + { 488 + u32 f32_cntl; 489 + int i; 490 + 491 + if (!enable) { 492 + sdma_v6_0_gfx_stop(adev); 493 + sdma_v6_0_rlc_stop(adev); 494 + } 495 + 496 + for (i = 0; i < adev->sdma.num_instances; i++) { 497 + f32_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL)); 498 + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1); 499 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), f32_cntl); 500 + } 501 + } 502 + 503 + /** 504 + * sdma_v6_0_gfx_resume - setup and start the async dma engines 505 + * 506 + * @adev: amdgpu_device pointer 507 + * 508 + * Set up the gfx DMA ring buffers and enable them. 509 + * Returns 0 for success, error for failure. 510 + */ 511 + static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) 512 + { 513 + struct amdgpu_ring *ring; 514 + u32 rb_cntl, ib_cntl; 515 + u32 rb_bufsz; 516 + u32 doorbell; 517 + u32 doorbell_offset; 518 + u32 temp; 519 + u64 wptr_gpu_addr; 520 + int i, r; 521 + 522 + for (i = 0; i < adev->sdma.num_instances; i++) { 523 + ring = &adev->sdma.instance[i].ring; 524 + 525 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); 526 + 527 + /* Set ring buffer size in dwords */ 528 + rb_bufsz = order_base_2(ring->ring_size / 4); 529 + rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL)); 530 + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz); 531 + #ifdef __BIG_ENDIAN 532 + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1); 533 + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, 534 + RPTR_WRITEBACK_SWAP_ENABLE, 1); 535 + #endif 536 + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1); 537 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); 538 + 539 + /* Initialize the ring buffer's read and write pointers */ 540 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), 0); 541 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), 0); 542 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), 0); 543 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), 0); 544 + 545 + /* setup the wptr shadow polling */ 546 + wptr_gpu_addr = ring->wptr_gpu_addr; 547 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO), 548 + lower_32_bits(wptr_gpu_addr)); 549 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI), 550 + upper_32_bits(wptr_gpu_addr)); 551 + 552 + /* set the wb address whether it's enabled or not */ 553 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI), 554 + upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF); 555 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO), 556 + lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC); 557 + 558 + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); 559 + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1); 560 + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1); 561 + 562 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8); 563 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, 
i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40); 564 + 565 + ring->wptr = 0; 566 + 567 + /* before programing wptr to a less value, need set minor_ptr_update first */ 568 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1); 569 + 570 + if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ 571 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2); 572 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2); 573 + } 574 + 575 + doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL)); 576 + doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET)); 577 + 578 + if (ring->use_doorbell) { 579 + doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1); 580 + doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET, 581 + OFFSET, ring->doorbell_index); 582 + } else { 583 + doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0); 584 + } 585 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell); 586 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset); 587 + 588 + if (i == 0) 589 + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, 590 + ring->doorbell_index, 591 + adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances); 592 + 593 + if (amdgpu_sriov_vf(adev)) 594 + sdma_v6_0_ring_set_wptr(ring); 595 + 596 + /* set minor_ptr_update to 0 after wptr programed */ 597 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0); 598 + 599 + /* Set up RESP_MODE to non-copy addresses */ 600 + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL)); 601 + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); 602 + temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); 603 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp); 604 + 605 + /* program default cache read and write policy */ 606 + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE)); 607 + /* clean read policy and write policy bits */ 608 + temp &= 0xFF0FFF; 609 + temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | 610 + (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | 611 + SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); 612 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp); 613 + 614 + if (!amdgpu_sriov_vf(adev)) { 615 + /* unhalt engine */ 616 + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL)); 617 + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); 618 + temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0); 619 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp); 620 + } 621 + 622 + /* enable DMA RB */ 623 + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1); 624 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl); 625 + 626 + ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL)); 627 + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1); 628 + #ifdef __BIG_ENDIAN 629 + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1); 630 + #endif 631 + /* enable DMA IBs */ 
632 + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl); 633 + 634 + ring->sched.ready = true; 635 + 636 + if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ 637 + sdma_v6_0_ctx_switch_enable(adev, true); 638 + sdma_v6_0_enable(adev, true); 639 + } 640 + 641 + r = amdgpu_ring_test_helper(ring); 642 + if (r) { 643 + ring->sched.ready = false; 644 + return r; 645 + } 646 + 647 + if (adev->mman.buffer_funcs_ring == ring) 648 + amdgpu_ttm_set_buffer_funcs_status(adev, true); 649 + } 650 + 651 + return 0; 652 + } 653 + 654 + /** 655 + * sdma_v6_0_rlc_resume - setup and start the async dma engines 656 + * 657 + * @adev: amdgpu_device pointer 658 + * 659 + * Set up the compute DMA queues and enable them. 660 + * Returns 0 for success, error for failure. 661 + */ 662 + static int sdma_v6_0_rlc_resume(struct amdgpu_device *adev) 663 + { 664 + return 0; 665 + } 666 + 667 + /** 668 + * sdma_v6_0_load_microcode - load the sDMA ME ucode 669 + * 670 + * @adev: amdgpu_device pointer 671 + * 672 + * Loads the sDMA0/1 ucode. 673 + * Returns 0 for success, -EINVAL if the ucode is not available. 674 + */ 675 + static int sdma_v6_0_load_microcode(struct amdgpu_device *adev) 676 + { 677 + const struct sdma_firmware_header_v2_0 *hdr; 678 + const __le32 *fw_data; 679 + u32 fw_size; 680 + int i, j; 681 + bool use_broadcast; 682 + 683 + /* halt the MEs */ 684 + sdma_v6_0_enable(adev, false); 685 + 686 + if (!adev->sdma.instance[0].fw) 687 + return -EINVAL; 688 + 689 + /* use broadcast mode to load SDMA microcode by default */ 690 + use_broadcast = true; 691 + 692 + if (use_broadcast) { 693 + dev_info(adev->dev, "Use broadcast method to load SDMA firmware\n"); 694 + /* load Control Thread microcode */ 695 + hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data; 696 + amdgpu_ucode_print_sdma_hdr(&hdr->header); 697 + fw_size = le32_to_cpu(hdr->ctx_jt_offset + hdr->ctx_jt_size) / 4; 698 + 699 + fw_data = (const __le32 *) 700 + (adev->sdma.instance[0].fw->data + 701 + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 702 + 703 + WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_ADDR), 0); 704 + 705 + for (j = 0; j < fw_size; j++) { 706 + if (amdgpu_emu_mode == 1 && j % 500 == 0) 707 + msleep(1); 708 + WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_DATA), le32_to_cpup(fw_data++)); 709 + } 710 + 711 + /* load Context Switch microcode */ 712 + fw_size = le32_to_cpu(hdr->ctl_jt_offset + hdr->ctl_jt_size) / 4; 713 + 714 + fw_data = (const __le32 *) 715 + (adev->sdma.instance[0].fw->data + 716 + le32_to_cpu(hdr->ctl_ucode_offset)); 717 + 718 + WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_ADDR), 0x8000); 719 + 720 + for (j = 0; j < fw_size; j++) { 721 + if (amdgpu_emu_mode == 1 && j % 500 == 0) 722 + msleep(1); 723 + WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_DATA), le32_to_cpup(fw_data++)); 724 + } 725 + } else { 726 + dev_info(adev->dev, "Use legacy method to load SDMA firmware\n"); 727 + for (i = 0; i < adev->sdma.num_instances; i++) { 728 + /* load Control Thread microcode */ 729 + hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data; 730 + amdgpu_ucode_print_sdma_hdr(&hdr->header); 731 + fw_size = le32_to_cpu(hdr->ctx_jt_offset + hdr->ctx_jt_size) / 4; 732 + 733 + fw_data = (const __le32 *) 734 + (adev->sdma.instance[0].fw->data + 735 + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 736 + 737 + 
WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), 0); 738 + 739 + for (j = 0; j < fw_size; j++) { 740 + if (amdgpu_emu_mode == 1 && j % 500 == 0) 741 + msleep(1); 742 + WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_DATA), le32_to_cpup(fw_data++)); 743 + } 744 + 745 + WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), adev->sdma.instance[0].fw_version); 746 + 747 + /* load Context Switch microcode */ 748 + fw_size = le32_to_cpu(hdr->ctl_jt_offset + hdr->ctl_jt_size) / 4; 749 + 750 + fw_data = (const __le32 *) 751 + (adev->sdma.instance[0].fw->data + 752 + le32_to_cpu(hdr->ctl_ucode_offset)); 753 + 754 + WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), 0x8000); 755 + 756 + for (j = 0; j < fw_size; j++) { 757 + if (amdgpu_emu_mode == 1 && j % 500 == 0) 758 + msleep(1); 759 + WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_DATA), le32_to_cpup(fw_data++)); 760 + } 761 + 762 + WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), adev->sdma.instance[0].fw_version); 763 + } 764 + } 765 + 766 + return 0; 767 + } 768 + 769 + static int sdma_v6_0_soft_reset(void *handle) 770 + { 771 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 772 + u32 grbm_soft_reset; 773 + u32 tmp; 774 + int i; 775 + 776 + for (i = 0; i < adev->sdma.num_instances; i++) { 777 + grbm_soft_reset = REG_SET_FIELD(0, 778 + GRBM_SOFT_RESET, SOFT_RESET_SDMA0, 779 + 1); 780 + grbm_soft_reset <<= i; 781 + 782 + tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); 783 + tmp |= grbm_soft_reset; 784 + DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp); 785 + WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp); 786 + tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); 787 + 788 + udelay(50); 789 + 790 + tmp &= ~grbm_soft_reset; 791 + WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp); 792 + tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET); 793 + 794 + udelay(50); 795 + } 796 + 797 + return 0; 798 + } 799 + 800 + /** 801 + * sdma_v6_0_start - setup and start the async dma engines 802 + * 803 + * @adev: amdgpu_device pointer 804 + * 805 + * Set up the DMA engines and enable them. 806 + * Returns 0 for success, error for failure. 
807 + */ 808 + static int sdma_v6_0_start(struct amdgpu_device *adev) 809 + { 810 + int r = 0; 811 + 812 + if (amdgpu_sriov_vf(adev)) { 813 + sdma_v6_0_ctx_switch_enable(adev, false); 814 + sdma_v6_0_enable(adev, false); 815 + 816 + /* set RB registers */ 817 + r = sdma_v6_0_gfx_resume(adev); 818 + return r; 819 + } 820 + 821 + if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 822 + r = sdma_v6_0_load_microcode(adev); 823 + if (r) 824 + return r; 825 + 826 + /* The value of regSDMA_F32_CNTL is invalid the moment after loading fw */ 827 + if (amdgpu_emu_mode == 1) 828 + msleep(1000); 829 + } 830 + 831 + sdma_v6_0_soft_reset(adev); 832 + /* unhalt the MEs */ 833 + sdma_v6_0_enable(adev, true); 834 + /* enable sdma ring preemption */ 835 + sdma_v6_0_ctx_switch_enable(adev, true); 836 + 837 + /* start the gfx rings and rlc compute queues */ 838 + r = sdma_v6_0_gfx_resume(adev); 839 + if (r) 840 + return r; 841 + r = sdma_v6_0_rlc_resume(adev); 842 + 843 + return r; 844 + } 845 + 846 + static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd, 847 + struct amdgpu_mqd_prop *prop) 848 + { 849 + struct v11_sdma_mqd *m = mqd; 850 + uint64_t wb_gpu_addr; 851 + 852 + m->sdmax_rlcx_rb_cntl = 853 + order_base_2(prop->queue_size / 4) << SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT | 854 + 1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT | 855 + 4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT; 856 + 857 + m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8); 858 + m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8); 859 + 860 + wb_gpu_addr = prop->wptr_gpu_addr; 861 + m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits(wb_gpu_addr); 862 + m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr); 863 + 864 + wb_gpu_addr = prop->rptr_gpu_addr; 865 + m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits(wb_gpu_addr); 866 + m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits(wb_gpu_addr); 867 + 868 + m->sdmax_rlcx_ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, 0, 869 + regSDMA0_QUEUE0_IB_CNTL)); 870 + 871 + m->sdmax_rlcx_doorbell_offset = 872 + prop->doorbell_index << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT; 873 + 874 + m->sdmax_rlcx_doorbell = REG_SET_FIELD(0, SDMA0_QUEUE0_DOORBELL, ENABLE, 1); 875 + 876 + m->sdmax_rlcx_skip_cntl = 0; 877 + m->sdmax_rlcx_context_status = 0; 878 + m->sdmax_rlcx_doorbell_log = 0; 879 + 880 + m->sdmax_rlcx_rb_aql_cntl = regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT; 881 + m->sdmax_rlcx_dummy_reg = regSDMA0_QUEUE0_DUMMY_REG_DEFAULT; 882 + 883 + return 0; 884 + } 885 + 886 + static void sdma_v6_0_set_mqd_funcs(struct amdgpu_device *adev) 887 + { 888 + adev->mqds[AMDGPU_HW_IP_DMA].mqd_size = sizeof(struct v11_sdma_mqd); 889 + adev->mqds[AMDGPU_HW_IP_DMA].init_mqd = sdma_v6_0_mqd_init; 890 + } 891 + 892 + /** 893 + * sdma_v6_0_ring_test_ring - simple async dma engine test 894 + * 895 + * @ring: amdgpu_ring structure holding ring information 896 + * 897 + * Test the DMA engine by writing using it to write an 898 + * value to memory. 899 + * Returns 0 for success, error for failure. 
900 + */ 901 + static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring) 902 + { 903 + struct amdgpu_device *adev = ring->adev; 904 + unsigned i; 905 + unsigned index; 906 + int r; 907 + u32 tmp; 908 + u64 gpu_addr; 909 + volatile uint32_t *cpu_ptr = NULL; 910 + 911 + tmp = 0xCAFEDEAD; 912 + 913 + if (ring->is_mes_queue) { 914 + uint32_t offset = 0; 915 + offset = amdgpu_mes_ctx_get_offs(ring, 916 + AMDGPU_MES_CTX_PADDING_OFFS); 917 + gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 918 + cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); 919 + *cpu_ptr = tmp; 920 + } else { 921 + r = amdgpu_device_wb_get(adev, &index); 922 + if (r) { 923 + dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); 924 + return r; 925 + } 926 + 927 + gpu_addr = adev->wb.gpu_addr + (index * 4); 928 + adev->wb.wb[index] = cpu_to_le32(tmp); 929 + } 930 + 931 + r = amdgpu_ring_alloc(ring, 5); 932 + if (r) { 933 + DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); 934 + amdgpu_device_wb_free(adev, index); 935 + return r; 936 + } 937 + 938 + amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) | 939 + SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); 940 + amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); 941 + amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); 942 + amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0)); 943 + amdgpu_ring_write(ring, 0xDEADBEEF); 944 + amdgpu_ring_commit(ring); 945 + 946 + for (i = 0; i < adev->usec_timeout; i++) { 947 + if (ring->is_mes_queue) 948 + tmp = le32_to_cpu(*cpu_ptr); 949 + else 950 + tmp = le32_to_cpu(adev->wb.wb[index]); 951 + if (tmp == 0xDEADBEEF) 952 + break; 953 + if (amdgpu_emu_mode == 1) 954 + msleep(1); 955 + else 956 + udelay(1); 957 + } 958 + 959 + if (i >= adev->usec_timeout) 960 + r = -ETIMEDOUT; 961 + 962 + if (!ring->is_mes_queue) 963 + amdgpu_device_wb_free(adev, index); 964 + 965 + return r; 966 + } 967 + 968 + /** 969 + * sdma_v6_0_ring_test_ib - test an IB on the DMA engine 970 + * 971 + * @ring: amdgpu_ring structure holding ring information 972 + * 973 + * Test a simple IB in the DMA ring. 974 + * Returns 0 on success, error on failure. 
975 + */ 976 + static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) 977 + { 978 + struct amdgpu_device *adev = ring->adev; 979 + struct amdgpu_ib ib; 980 + struct dma_fence *f = NULL; 981 + unsigned index; 982 + long r; 983 + u32 tmp = 0; 984 + u64 gpu_addr; 985 + volatile uint32_t *cpu_ptr = NULL; 986 + 987 + tmp = 0xCAFEDEAD; 988 + memset(&ib, 0, sizeof(ib)); 989 + 990 + if (ring->is_mes_queue) { 991 + uint32_t offset = 0; 992 + offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS); 993 + ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 994 + ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); 995 + 996 + offset = amdgpu_mes_ctx_get_offs(ring, 997 + AMDGPU_MES_CTX_PADDING_OFFS); 998 + gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset); 999 + cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset); 1000 + *cpu_ptr = tmp; 1001 + } else { 1002 + r = amdgpu_device_wb_get(adev, &index); 1003 + if (r) { 1004 + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); 1005 + return r; 1006 + } 1007 + 1008 + gpu_addr = adev->wb.gpu_addr + (index * 4); 1009 + adev->wb.wb[index] = cpu_to_le32(tmp); 1010 + 1011 + r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); 1012 + if (r) { 1013 + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); 1014 + goto err0; 1015 + } 1016 + } 1017 + 1018 + ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) | 1019 + SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); 1020 + ib.ptr[1] = lower_32_bits(gpu_addr); 1021 + ib.ptr[2] = upper_32_bits(gpu_addr); 1022 + ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0); 1023 + ib.ptr[4] = 0xDEADBEEF; 1024 + ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); 1025 + ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); 1026 + ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); 1027 + ib.length_dw = 8; 1028 + 1029 + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); 1030 + if (r) 1031 + goto err1; 1032 + 1033 + r = dma_fence_wait_timeout(f, false, timeout); 1034 + if (r == 0) { 1035 + DRM_ERROR("amdgpu: IB test timed out\n"); 1036 + r = -ETIMEDOUT; 1037 + goto err1; 1038 + } else if (r < 0) { 1039 + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); 1040 + goto err1; 1041 + } 1042 + 1043 + if (ring->is_mes_queue) 1044 + tmp = le32_to_cpu(*cpu_ptr); 1045 + else 1046 + tmp = le32_to_cpu(adev->wb.wb[index]); 1047 + 1048 + if (tmp == 0xDEADBEEF) 1049 + r = 0; 1050 + else 1051 + r = -EINVAL; 1052 + 1053 + err1: 1054 + amdgpu_ib_free(adev, &ib, NULL); 1055 + dma_fence_put(f); 1056 + err0: 1057 + if (!ring->is_mes_queue) 1058 + amdgpu_device_wb_free(adev, index); 1059 + return r; 1060 + } 1061 + 1062 + 1063 + /** 1064 + * sdma_v6_0_vm_copy_pte - update PTEs by copying them from the GART 1065 + * 1066 + * @ib: indirect buffer to fill with commands 1067 + * @pe: addr of the page entry 1068 + * @src: src addr to copy from 1069 + * @count: number of page entries to update 1070 + * 1071 + * Update PTEs by copying them from the GART using sDMA. 
1072 + */ 1073 + static void sdma_v6_0_vm_copy_pte(struct amdgpu_ib *ib, 1074 + uint64_t pe, uint64_t src, 1075 + unsigned count) 1076 + { 1077 + unsigned bytes = count * 8; 1078 + 1079 + ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) | 1080 + SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); 1081 + ib->ptr[ib->length_dw++] = bytes - 1; 1082 + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ 1083 + ib->ptr[ib->length_dw++] = lower_32_bits(src); 1084 + ib->ptr[ib->length_dw++] = upper_32_bits(src); 1085 + ib->ptr[ib->length_dw++] = lower_32_bits(pe); 1086 + ib->ptr[ib->length_dw++] = upper_32_bits(pe); 1087 + 1088 + } 1089 + 1090 + /** 1091 + * sdma_v6_0_vm_write_pte - update PTEs by writing them manually 1092 + * 1093 + * @ib: indirect buffer to fill with commands 1094 + * @pe: addr of the page entry 1095 + * @addr: dst addr to write into pe 1096 + * @count: number of page entries to update 1097 + * @incr: increase next addr by incr bytes 1098 + * @flags: access flags 1099 + * 1100 + * Update PTEs by writing them manually using sDMA. 1101 + */ 1102 + static void sdma_v6_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, 1103 + uint64_t value, unsigned count, 1104 + uint32_t incr) 1105 + { 1106 + unsigned ndw = count * 2; 1107 + 1108 + ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) | 1109 + SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); 1110 + ib->ptr[ib->length_dw++] = lower_32_bits(pe); 1111 + ib->ptr[ib->length_dw++] = upper_32_bits(pe); 1112 + ib->ptr[ib->length_dw++] = ndw - 1; 1113 + for (; ndw > 0; ndw -= 2) { 1114 + ib->ptr[ib->length_dw++] = lower_32_bits(value); 1115 + ib->ptr[ib->length_dw++] = upper_32_bits(value); 1116 + value += incr; 1117 + } 1118 + } 1119 + 1120 + /** 1121 + * sdma_v6_0_vm_set_pte_pde - update the page tables using sDMA 1122 + * 1123 + * @ib: indirect buffer to fill with commands 1124 + * @pe: addr of the page entry 1125 + * @addr: dst addr to write into pe 1126 + * @count: number of page entries to update 1127 + * @incr: increase next addr by incr bytes 1128 + * @flags: access flags 1129 + * 1130 + * Update the page tables using sDMA. 1131 + */ 1132 + static void sdma_v6_0_vm_set_pte_pde(struct amdgpu_ib *ib, 1133 + uint64_t pe, 1134 + uint64_t addr, unsigned count, 1135 + uint32_t incr, uint64_t flags) 1136 + { 1137 + /* for physically contiguous pages (vram) */ 1138 + ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_PTEPDE); 1139 + ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ 1140 + ib->ptr[ib->length_dw++] = upper_32_bits(pe); 1141 + ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */ 1142 + ib->ptr[ib->length_dw++] = upper_32_bits(flags); 1143 + ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */ 1144 + ib->ptr[ib->length_dw++] = upper_32_bits(addr); 1145 + ib->ptr[ib->length_dw++] = incr; /* increment size */ 1146 + ib->ptr[ib->length_dw++] = 0; 1147 + ib->ptr[ib->length_dw++] = count - 1; /* number of entries */ 1148 + } 1149 + 1150 + /** 1151 + * sdma_v6_0_ring_pad_ib - pad the IB 1152 + * @ib: indirect buffer to fill with padding 1153 + * 1154 + * Pad the IB with NOPs to a boundary multiple of 8. 
1155 + */ 1156 + static void sdma_v6_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) 1157 + { 1158 + struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); 1159 + u32 pad_count; 1160 + int i; 1161 + 1162 + pad_count = (-ib->length_dw) & 0x7; 1163 + for (i = 0; i < pad_count; i++) 1164 + if (sdma && sdma->burst_nop && (i == 0)) 1165 + ib->ptr[ib->length_dw++] = 1166 + SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_NOP) | 1167 + SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1); 1168 + else 1169 + ib->ptr[ib->length_dw++] = 1170 + SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_NOP); 1171 + } 1172 + 1173 + /** 1174 + * sdma_v6_0_ring_emit_pipeline_sync - sync the pipeline 1175 + * 1176 + * @ring: amdgpu_ring pointer 1177 + * 1178 + * Make sure all previous operations are completed (CIK). 1179 + */ 1180 + static void sdma_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 1181 + { 1182 + uint32_t seq = ring->fence_drv.sync_seq; 1183 + uint64_t addr = ring->fence_drv.gpu_addr; 1184 + 1185 + /* wait for idle */ 1186 + amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) | 1187 + SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) | 1188 + SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */ 1189 + SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1)); 1190 + amdgpu_ring_write(ring, addr & 0xfffffffc); 1191 + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); 1192 + amdgpu_ring_write(ring, seq); /* reference */ 1193 + amdgpu_ring_write(ring, 0xffffffff); /* mask */ 1194 + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | 1195 + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */ 1196 + } 1197 + 1198 + /** 1199 + * sdma_v6_0_ring_emit_vm_flush - vm flush using sDMA 1200 + * 1201 + * @ring: amdgpu_ring pointer 1202 + * @vm: amdgpu_vm pointer 1203 + * 1204 + * Update the page table base and flush the VM TLB 1205 + * using sDMA. 
1206 + */ 1207 + static void sdma_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 1208 + unsigned vmid, uint64_t pd_addr) 1209 + { 1210 + amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 1211 + } 1212 + 1213 + static void sdma_v6_0_ring_emit_wreg(struct amdgpu_ring *ring, 1214 + uint32_t reg, uint32_t val) 1215 + { 1216 + amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_SRBM_WRITE) | 1217 + SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); 1218 + amdgpu_ring_write(ring, reg); 1219 + amdgpu_ring_write(ring, val); 1220 + } 1221 + 1222 + static void sdma_v6_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 1223 + uint32_t val, uint32_t mask) 1224 + { 1225 + amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) | 1226 + SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) | 1227 + SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */ 1228 + amdgpu_ring_write(ring, reg << 2); 1229 + amdgpu_ring_write(ring, 0); 1230 + amdgpu_ring_write(ring, val); /* reference */ 1231 + amdgpu_ring_write(ring, mask); /* mask */ 1232 + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | 1233 + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); 1234 + } 1235 + 1236 + static void sdma_v6_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, 1237 + uint32_t reg0, uint32_t reg1, 1238 + uint32_t ref, uint32_t mask) 1239 + { 1240 + amdgpu_ring_emit_wreg(ring, reg0, ref); 1241 + /* wait for a cycle to reset vm_inv_eng*_ack */ 1242 + amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0); 1243 + amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); 1244 + } 1245 + 1246 + static int sdma_v6_0_early_init(void *handle) 1247 + { 1248 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1249 + 1250 + sdma_v6_0_set_ring_funcs(adev); 1251 + sdma_v6_0_set_buffer_funcs(adev); 1252 + sdma_v6_0_set_vm_pte_funcs(adev); 1253 + sdma_v6_0_set_irq_funcs(adev); 1254 + sdma_v6_0_set_mqd_funcs(adev); 1255 + 1256 + return 0; 1257 + } 1258 + 1259 + static int sdma_v6_0_sw_init(void *handle) 1260 + { 1261 + struct amdgpu_ring *ring; 1262 + int r, i; 1263 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1264 + 1265 + /* SDMA trap event */ 1266 + r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX, 1267 + GFX_11_0_0__SRCID__SDMA_TRAP, 1268 + &adev->sdma.trap_irq); 1269 + if (r) 1270 + return r; 1271 + 1272 + r = sdma_v6_0_init_microcode(adev); 1273 + if (r) { 1274 + DRM_ERROR("Failed to load sdma firmware!\n"); 1275 + return r; 1276 + } 1277 + 1278 + for (i = 0; i < adev->sdma.num_instances; i++) { 1279 + ring = &adev->sdma.instance[i].ring; 1280 + ring->ring_obj = NULL; 1281 + ring->use_doorbell = true; 1282 + ring->me = i; 1283 + 1284 + DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i, 1285 + ring->use_doorbell?"true":"false"); 1286 + 1287 + ring->doorbell_index = 1288 + (adev->doorbell_index.sdma_engine[i] << 1); // get DWORD offset 1289 + 1290 + sprintf(ring->name, "sdma%d", i); 1291 + r = amdgpu_ring_init(adev, ring, 1024, 1292 + &adev->sdma.trap_irq, 1293 + AMDGPU_SDMA_IRQ_INSTANCE0 + i, 1294 + AMDGPU_RING_PRIO_DEFAULT, NULL); 1295 + if (r) 1296 + return r; 1297 + } 1298 + 1299 + return r; 1300 + } 1301 + 1302 + static int sdma_v6_0_sw_fini(void *handle) 1303 + { 1304 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1305 + int i; 1306 + 1307 + for (i = 0; i < adev->sdma.num_instances; i++) 1308 + amdgpu_ring_fini(&adev->sdma.instance[i].ring); 1309 + 1310 + sdma_v6_0_destroy_inst_ctx(adev); 1311 + 1312 + return 0; 1313 + } 1314 + 1315 + static int sdma_v6_0_hw_init(void 
*handle) 1316 + { 1317 + int r; 1318 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1319 + 1320 + r = sdma_v6_0_start(adev); 1321 + 1322 + return r; 1323 + } 1324 + 1325 + static int sdma_v6_0_hw_fini(void *handle) 1326 + { 1327 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1328 + 1329 + if (amdgpu_sriov_vf(adev)) 1330 + return 0; 1331 + 1332 + sdma_v6_0_ctx_switch_enable(adev, false); 1333 + sdma_v6_0_enable(adev, false); 1334 + 1335 + return 0; 1336 + } 1337 + 1338 + static int sdma_v6_0_suspend(void *handle) 1339 + { 1340 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1341 + 1342 + return sdma_v6_0_hw_fini(adev); 1343 + } 1344 + 1345 + static int sdma_v6_0_resume(void *handle) 1346 + { 1347 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1348 + 1349 + return sdma_v6_0_hw_init(adev); 1350 + } 1351 + 1352 + static bool sdma_v6_0_is_idle(void *handle) 1353 + { 1354 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1355 + u32 i; 1356 + 1357 + for (i = 0; i < adev->sdma.num_instances; i++) { 1358 + u32 tmp = RREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_STATUS_REG)); 1359 + 1360 + if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) 1361 + return false; 1362 + } 1363 + 1364 + return true; 1365 + } 1366 + 1367 + static int sdma_v6_0_wait_for_idle(void *handle) 1368 + { 1369 + unsigned i; 1370 + u32 sdma0, sdma1; 1371 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1372 + 1373 + for (i = 0; i < adev->usec_timeout; i++) { 1374 + sdma0 = RREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG)); 1375 + sdma1 = RREG32(sdma_v6_0_get_reg_offset(adev, 1, regSDMA0_STATUS_REG)); 1376 + 1377 + if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK) 1378 + return 0; 1379 + udelay(1); 1380 + } 1381 + return -ETIMEDOUT; 1382 + } 1383 + 1384 + static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring) 1385 + { 1386 + int i, r = 0; 1387 + struct amdgpu_device *adev = ring->adev; 1388 + u32 index = 0; 1389 + u64 sdma_gfx_preempt; 1390 + 1391 + amdgpu_sdma_get_index_from_ring(ring, &index); 1392 + sdma_gfx_preempt = 1393 + sdma_v6_0_get_reg_offset(adev, index, regSDMA0_QUEUE0_PREEMPT); 1394 + 1395 + /* assert preemption condition */ 1396 + amdgpu_ring_set_preempt_cond_exec(ring, false); 1397 + 1398 + /* emit the trailing fence */ 1399 + ring->trail_seq += 1; 1400 + amdgpu_ring_alloc(ring, 10); 1401 + sdma_v6_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr, 1402 + ring->trail_seq, 0); 1403 + amdgpu_ring_commit(ring); 1404 + 1405 + /* assert IB preemption */ 1406 + WREG32(sdma_gfx_preempt, 1); 1407 + 1408 + /* poll the trailing fence */ 1409 + for (i = 0; i < adev->usec_timeout; i++) { 1410 + if (ring->trail_seq == 1411 + le32_to_cpu(*(ring->trail_fence_cpu_addr))) 1412 + break; 1413 + udelay(1); 1414 + } 1415 + 1416 + if (i >= adev->usec_timeout) { 1417 + r = -EINVAL; 1418 + DRM_ERROR("ring %d failed to be preempted\n", ring->idx); 1419 + } 1420 + 1421 + /* deassert IB preemption */ 1422 + WREG32(sdma_gfx_preempt, 0); 1423 + 1424 + /* deassert the preemption condition */ 1425 + amdgpu_ring_set_preempt_cond_exec(ring, true); 1426 + return r; 1427 + } 1428 + 1429 + static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev, 1430 + struct amdgpu_irq_src *source, 1431 + unsigned type, 1432 + enum amdgpu_interrupt_state state) 1433 + { 1434 + u32 sdma_cntl; 1435 + 1436 + u32 reg_offset = sdma_v6_0_get_reg_offset(adev, type, regSDMA0_CNTL); 1437 + 1438 + sdma_cntl = RREG32(reg_offset); 1439 + sdma_cntl = 
REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1440 + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 1441 + WREG32(reg_offset, sdma_cntl); 1442 + 1443 + return 0; 1444 + } 1445 + 1446 + static int sdma_v6_0_process_trap_irq(struct amdgpu_device *adev, 1447 + struct amdgpu_irq_src *source, 1448 + struct amdgpu_iv_entry *entry) 1449 + { 1450 + int instances, queue; 1451 + uint32_t mes_queue_id = entry->src_data[0]; 1452 + 1453 + DRM_DEBUG("IH: SDMA trap\n"); 1454 + 1455 + if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) { 1456 + struct amdgpu_mes_queue *queue; 1457 + 1458 + mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK; 1459 + 1460 + spin_lock(&adev->mes.queue_id_lock); 1461 + queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id); 1462 + if (queue) { 1463 + DRM_DEBUG("process smda queue id = %d\n", mes_queue_id); 1464 + amdgpu_fence_process(queue->ring); 1465 + } 1466 + spin_unlock(&adev->mes.queue_id_lock); 1467 + return 0; 1468 + } 1469 + 1470 + queue = entry->ring_id & 0xf; 1471 + instances = (entry->ring_id & 0xf0) >> 4; 1472 + if (instances > 1) { 1473 + DRM_ERROR("IH: wrong ring_ID detected, as wrong sdma instance\n"); 1474 + return -EINVAL; 1475 + } 1476 + 1477 + switch (entry->client_id) { 1478 + case SOC21_IH_CLIENTID_GFX: 1479 + switch (queue) { 1480 + case 0: 1481 + amdgpu_fence_process(&adev->sdma.instance[instances].ring); 1482 + break; 1483 + default: 1484 + break; 1485 + } 1486 + break; 1487 + } 1488 + return 0; 1489 + } 1490 + 1491 + static int sdma_v6_0_process_illegal_inst_irq(struct amdgpu_device *adev, 1492 + struct amdgpu_irq_src *source, 1493 + struct amdgpu_iv_entry *entry) 1494 + { 1495 + return 0; 1496 + } 1497 + 1498 + static int sdma_v6_0_set_clockgating_state(void *handle, 1499 + enum amd_clockgating_state state) 1500 + { 1501 + return 0; 1502 + } 1503 + 1504 + static int sdma_v6_0_set_powergating_state(void *handle, 1505 + enum amd_powergating_state state) 1506 + { 1507 + return 0; 1508 + } 1509 + 1510 + static void sdma_v6_0_get_clockgating_state(void *handle, u64 *flags) 1511 + { 1512 + } 1513 + 1514 + const struct amd_ip_funcs sdma_v6_0_ip_funcs = { 1515 + .name = "sdma_v6_0", 1516 + .early_init = sdma_v6_0_early_init, 1517 + .late_init = NULL, 1518 + .sw_init = sdma_v6_0_sw_init, 1519 + .sw_fini = sdma_v6_0_sw_fini, 1520 + .hw_init = sdma_v6_0_hw_init, 1521 + .hw_fini = sdma_v6_0_hw_fini, 1522 + .suspend = sdma_v6_0_suspend, 1523 + .resume = sdma_v6_0_resume, 1524 + .is_idle = sdma_v6_0_is_idle, 1525 + .wait_for_idle = sdma_v6_0_wait_for_idle, 1526 + .soft_reset = sdma_v6_0_soft_reset, 1527 + .set_clockgating_state = sdma_v6_0_set_clockgating_state, 1528 + .set_powergating_state = sdma_v6_0_set_powergating_state, 1529 + .get_clockgating_state = sdma_v6_0_get_clockgating_state, 1530 + }; 1531 + 1532 + static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = { 1533 + .type = AMDGPU_RING_TYPE_SDMA, 1534 + .align_mask = 0xf, 1535 + .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 1536 + .support_64bit_ptrs = true, 1537 + .vmhub = AMDGPU_GFXHUB_0, 1538 + .get_rptr = sdma_v6_0_ring_get_rptr, 1539 + .get_wptr = sdma_v6_0_ring_get_wptr, 1540 + .set_wptr = sdma_v6_0_ring_set_wptr, 1541 + .emit_frame_size = 1542 + 5 + /* sdma_v6_0_ring_init_cond_exec */ 1543 + 6 + /* sdma_v6_0_ring_emit_hdp_flush */ 1544 + 6 + /* sdma_v6_0_ring_emit_pipeline_sync */ 1545 + /* sdma_v6_0_ring_emit_vm_flush */ 1546 + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + 1547 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + 1548 + 10 + 10 + 10, /* sdma_v6_0_ring_emit_fence x3 for user fence, vm 
fence */ 1549 + .emit_ib_size = 5 + 7 + 6, /* sdma_v6_0_ring_emit_ib */ 1550 + .emit_ib = sdma_v6_0_ring_emit_ib, 1551 + .emit_mem_sync = sdma_v6_0_ring_emit_mem_sync, 1552 + .emit_fence = sdma_v6_0_ring_emit_fence, 1553 + .emit_pipeline_sync = sdma_v6_0_ring_emit_pipeline_sync, 1554 + .emit_vm_flush = sdma_v6_0_ring_emit_vm_flush, 1555 + .emit_hdp_flush = sdma_v6_0_ring_emit_hdp_flush, 1556 + .test_ring = sdma_v6_0_ring_test_ring, 1557 + .test_ib = sdma_v6_0_ring_test_ib, 1558 + .insert_nop = sdma_v6_0_ring_insert_nop, 1559 + .pad_ib = sdma_v6_0_ring_pad_ib, 1560 + .emit_wreg = sdma_v6_0_ring_emit_wreg, 1561 + .emit_reg_wait = sdma_v6_0_ring_emit_reg_wait, 1562 + .emit_reg_write_reg_wait = sdma_v6_0_ring_emit_reg_write_reg_wait, 1563 + .init_cond_exec = sdma_v6_0_ring_init_cond_exec, 1564 + .patch_cond_exec = sdma_v6_0_ring_patch_cond_exec, 1565 + .preempt_ib = sdma_v6_0_ring_preempt_ib, 1566 + }; 1567 + 1568 + static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev) 1569 + { 1570 + int i; 1571 + 1572 + for (i = 0; i < adev->sdma.num_instances; i++) { 1573 + adev->sdma.instance[i].ring.funcs = &sdma_v6_0_ring_funcs; 1574 + adev->sdma.instance[i].ring.me = i; 1575 + } 1576 + } 1577 + 1578 + static const struct amdgpu_irq_src_funcs sdma_v6_0_trap_irq_funcs = { 1579 + .set = sdma_v6_0_set_trap_irq_state, 1580 + .process = sdma_v6_0_process_trap_irq, 1581 + }; 1582 + 1583 + static const struct amdgpu_irq_src_funcs sdma_v6_0_illegal_inst_irq_funcs = { 1584 + .process = sdma_v6_0_process_illegal_inst_irq, 1585 + }; 1586 + 1587 + static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev) 1588 + { 1589 + adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 + 1590 + adev->sdma.num_instances; 1591 + adev->sdma.trap_irq.funcs = &sdma_v6_0_trap_irq_funcs; 1592 + adev->sdma.illegal_inst_irq.funcs = &sdma_v6_0_illegal_inst_irq_funcs; 1593 + } 1594 + 1595 + /** 1596 + * sdma_v6_0_emit_copy_buffer - copy buffer using the sDMA engine 1597 + * 1598 + * @ring: amdgpu_ring structure holding ring information 1599 + * @src_offset: src GPU address 1600 + * @dst_offset: dst GPU address 1601 + * @byte_count: number of bytes to xfer 1602 + * 1603 + * Copy GPU buffers using the DMA engine. 1604 + * Used by the amdgpu ttm implementation to move pages if 1605 + * registered as the asic copy callback. 1606 + */ 1607 + static void sdma_v6_0_emit_copy_buffer(struct amdgpu_ib *ib, 1608 + uint64_t src_offset, 1609 + uint64_t dst_offset, 1610 + uint32_t byte_count, 1611 + bool tmz) 1612 + { 1613 + ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) | 1614 + SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) | 1615 + SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0); 1616 + ib->ptr[ib->length_dw++] = byte_count - 1; 1617 + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ 1618 + ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); 1619 + ib->ptr[ib->length_dw++] = upper_32_bits(src_offset); 1620 + ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); 1621 + ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); 1622 + } 1623 + 1624 + /** 1625 + * sdma_v6_0_emit_fill_buffer - fill buffer using the sDMA engine 1626 + * 1627 + * @ring: amdgpu_ring structure holding ring information 1628 + * @src_data: value to write to buffer 1629 + * @dst_offset: dst GPU address 1630 + * @byte_count: number of bytes to xfer 1631 + * 1632 + * Fill GPU buffers using the DMA engine. 
1633 + */ 1634 + static void sdma_v6_0_emit_fill_buffer(struct amdgpu_ib *ib, 1635 + uint32_t src_data, 1636 + uint64_t dst_offset, 1637 + uint32_t byte_count) 1638 + { 1639 + ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL); 1640 + ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); 1641 + ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset); 1642 + ib->ptr[ib->length_dw++] = src_data; 1643 + ib->ptr[ib->length_dw++] = byte_count - 1; 1644 + } 1645 + 1646 + static const struct amdgpu_buffer_funcs sdma_v6_0_buffer_funcs = { 1647 + .copy_max_bytes = 0x400000, 1648 + .copy_num_dw = 7, 1649 + .emit_copy_buffer = sdma_v6_0_emit_copy_buffer, 1650 + 1651 + .fill_max_bytes = 0x400000, 1652 + .fill_num_dw = 5, 1653 + .emit_fill_buffer = sdma_v6_0_emit_fill_buffer, 1654 + }; 1655 + 1656 + static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev) 1657 + { 1658 + adev->mman.buffer_funcs = &sdma_v6_0_buffer_funcs; 1659 + adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; 1660 + } 1661 + 1662 + static const struct amdgpu_vm_pte_funcs sdma_v6_0_vm_pte_funcs = { 1663 + .copy_pte_num_dw = 7, 1664 + .copy_pte = sdma_v6_0_vm_copy_pte, 1665 + .write_pte = sdma_v6_0_vm_write_pte, 1666 + .set_pte_pde = sdma_v6_0_vm_set_pte_pde, 1667 + }; 1668 + 1669 + static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev) 1670 + { 1671 + unsigned i; 1672 + 1673 + adev->vm_manager.vm_pte_funcs = &sdma_v6_0_vm_pte_funcs; 1674 + for (i = 0; i < adev->sdma.num_instances; i++) { 1675 + adev->vm_manager.vm_pte_scheds[i] = 1676 + &adev->sdma.instance[i].ring.sched; 1677 + } 1678 + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; 1679 + } 1680 + 1681 + const struct amdgpu_ip_block_version sdma_v6_0_ip_block = { 1682 + .type = AMD_IP_BLOCK_TYPE_SDMA, 1683 + .major = 6, 1684 + .minor = 0, 1685 + .rev = 0, 1686 + .funcs = &sdma_v6_0_ip_funcs, 1687 + };
+30
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.h
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __SDMA_V6_0_H__
#define __SDMA_V6_0_H__

extern const struct amd_ip_funcs sdma_v6_0_ip_funcs;
extern const struct amdgpu_ip_block_version sdma_v6_0_ip_block;

#endif /* __SDMA_V6_0_H__ */
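This patch only adds the implementation and exports sdma_v6_0_ip_block; it does not wire the block into any SoC's IP list (that normally happens in the IP-discovery / SoC init code, in a separate change). A hypothetical sketch, assuming the existing amdgpu_device_ip_block_add() helper, of what such a hookup would roughly look like:

/* Illustration only -- not part of this patch. */
#include "amdgpu.h"
#include "sdma_v6_0.h"

static int example_add_sdma_v6_0_block(struct amdgpu_device *adev)
{
	/* Appends the SDMA v6.0 IP block so its amd_ip_funcs callbacks
	 * (sw_init, hw_init, suspend/resume, ...) run during device
	 * init/fini and reset sequences. */
	return amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
}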