
drm/amdgpu: Add gfx v12_0 ip block support (v6)

Initial support for GFX 12.

v1: Add gfx v12_0 ip block support. (Likun)
v2: Switch to gfx.kiq array.
Move the vmhub from ring callback to ring. (Hawking)
v3: Update various callback function impl. (Hawking)
v4: Warning fixes (Alex)
v5: squash in imu fix, csb, rlc autoload implementations (Alex)
v6: Rebase (Alex)

Signed-off-by: Likun Gao <Likun.Gao@amd.com>
Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Likun Gao, committed by Alex Deucher
52cb80c1 4632bec9

+4633 -1
drivers/gpu/drm/amd/amdgpu/Makefile (+2 -1)
@@ -167,7 +167,8 @@
 	imu_v11_0.o \
 	gfx_v11_0.o \
 	gfx_v11_0_3.o \
-	imu_v11_0_3.o
+	imu_v11_0_3.o \
+	gfx_v12_0.o
 
 # add async DMA block
 amdgpu-y += \
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c (+4602, new file)
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "amdgpu_atomfirmware.h"
#include "soc24.h"
#include "nvd.h"

#include "gc/gc_12_0_0_offset.h"
#include "gc/gc_12_0_0_sh_mask.h"
#include "smuio/smuio_14_0_2_offset.h"
#include "smuio/smuio_14_0_2_sh_mask.h"
#include "soc24_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "soc15.h"
#include "soc15d.h"
#include "clearstate_gfx12.h"
#include "v12_structs.h"
#include "gfx_v12_0.h"
#include "nbif_v6_3_1.h"
#include "mes_v12_0.h"

#define GFX12_NUM_GFX_RINGS	1
#define GFX12_MEC_HPD_SIZE	2048

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L

MODULE_FIRMWARE("amdgpu/gc_12_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_toc.bin");

#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))

static void gfx_v12_0_disable_gpa_mode(struct amdgpu_device *adev);
static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v12_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v12_0_set_rlc_funcs(struct amdgpu_device *adev);
static void gfx_v12_0_set_mqd_funcs(struct amdgpu_device *adev);
static void gfx_v12_0_set_imu_funcs(struct amdgpu_device *adev);
static int gfx_v12_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v12_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void
gfx_v12_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
		       u32 sh_num, u32 instance, int xcc_id);
static u32 gfx_v12_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx_v12_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val);
static int gfx_v12_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v12_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel);
static void gfx_v12_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v12_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v12_0_update_perf_clk(struct amdgpu_device *adev,
				      bool enable);

static void gfx_v12_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
					uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);
}

static void gfx_v12_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				     struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t me = 0, eng_sel = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_COMPUTE:
		me = 1;
		eng_sel = 0;
		break;
	case AMDGPU_RING_TYPE_GFX:
		me = 0;
		eng_sel = 4;
		break;
	case AMDGPU_RING_TYPE_MES:
		me = 2;
		eng_sel = 5;
		break;
	default:
		WARN_ON(1);
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1 */
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((me)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

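/*
 * Unmap a queue through the KIQ. When MES is enabled but the KIQ ring is
 * not schedulable yet, the unmap is routed through the MES legacy queue
 * path instead of emitting a PM4 UNMAP_QUEUES packet.
 */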
static void gfx_v12_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				       struct amdgpu_ring *ring,
				       enum amdgpu_unmap_queues_action action,
				       u64 gpu_addr, u64 seq)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v12_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
				       struct amdgpu_ring *ring,
				       u64 addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v12_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
					  uint16_t pasid,
					  uint32_t flush_type,
					  bool all_hub)
{
	gfx_v12_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}

static const struct kiq_pm4_funcs gfx_v12_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v12_0_kiq_set_resources,
	.kiq_map_queues = gfx_v12_0_kiq_map_queues,
	.kiq_unmap_queues = gfx_v12_0_kiq_unmap_queues,
	.kiq_query_status = gfx_v12_0_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v12_0_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v12_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq[0].pmf = &gfx_v12_0_kiq_pm4_funcs;
}

static void gfx_v12_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref,
				   uint32_t mask, uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) | /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

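/*
 * Basic ring sanity check: write a token into SCRATCH_REG0 through the
 * ring and poll the register until the CP has consumed the packet.
 */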
static int gfx_v12_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		dev_err(adev->dev,
			"amdgpu: cp failed to lock ring %d (%d).\n",
			ring->idx, r);
		return r;
	}

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
		gfx_v12_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
	} else {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
		amdgpu_ring_write(ring, scratch -
				  PACKET3_SET_UCONFIG_REG_START);
		amdgpu_ring_write(ring, 0xDEADBEEF);
	}
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_ptr;
	long r;

	/* MES KIQ fw hasn't indirect buffer support for now */
	if (adev->enable_mes_kiq &&
	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		return 0;

	memset(&ib, 0, sizeof(ib));

	if (ring->is_mes_queue) {
		uint32_t padding, offset;

		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
		padding = amdgpu_mes_ctx_get_offs(ring,
						  AMDGPU_MES_CTX_PADDING_OFFS);

		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);

		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
		*cpu_ptr = cpu_to_le32(0xCAFEDEAD);
	} else {
		r = amdgpu_device_wb_get(adev, &index);
		if (r)
			return r;

		gpu_addr = adev->wb.gpu_addr + (index * 4);
		adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
		cpu_ptr = &adev->wb.wb[index];

		r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
		if (r) {
			dev_err(adev->dev, "amdgpu: failed to get ib (%ld).\n", r);
			goto err1;
		}
	}

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	if (!ring->is_mes_queue)
		amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	if (!ring->is_mes_queue)
		amdgpu_device_wb_free(adev, index);
	return r;
}

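/* Release the PFP/ME/RLC/MEC firmware images and the RLC register list. */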
static void gfx_v12_0_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v12_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
{
	const struct psp_firmware_header_v1_0 *toc_hdr;
	int err = 0;
	char fw_name[40];

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
	if (err)
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
		le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	amdgpu_ucode_release(&adev->psp.toc_fw);
	return err;
}

static int gfx_v12_0_init_microcode(struct amdgpu_device *adev)
{
	char fw_name[40];
	char ucode_prefix[30];
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);

	if (!amdgpu_sriov_vf(adev)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
		if (err)
			goto out;
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
		if (err)
			goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		err = gfx_v12_0_init_toc_microcode(adev, ucode_prefix);

	/* only one MEC for gfx 12 */
	adev->gfx.mec2_fw = NULL;

out:
	if (err) {
		amdgpu_ucode_release(&adev->gfx.pfp_fw);
		amdgpu_ucode_release(&adev->gfx.me_fw);
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	}

	return err;
}

static u32 gfx_v12_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	count += 1;

	for (sect = gfx12_cs_data; sect->section != NULL; ++sect) {
		if (sect->id == SECT_CONTEXT) {
			for (ext = sect->section; ext->extent != NULL; ++ext)
				count += 2 + ext->reg_count;
		} else
			return 0;
	}

	return count;
}

static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, clustercount = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	count += 1;

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		if (sect->id == SECT_CONTEXT) {
			for (ext = sect->section; ext->extent != NULL; ++ext) {
				clustercount++;
				buffer[count++] = ext->reg_count;
				buffer[count++] = ext->reg_index;

				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			}
		} else
			return;
	}

	buffer[0] = clustercount;
}

static void gfx_v12_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}

static void gfx_v12_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v12_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx12_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);

	return 0;
}

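/* Free the MEC EOP buffer and the MEC firmware/firmware-data BOs. */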
static void gfx_v12_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}

static void gfx_v12_0_me_init(struct amdgpu_device *adev)
{
	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);
}

static int gfx_v12_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX12_MEC_HPD_SIZE;

	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
			gfx_v12_0_mec_fini(adev);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (regno << SQ_IND_INDEX__INDEX__SHIFT) |
		     (thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		     (SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void gfx_v12_0_read_wave_data(struct amdgpu_device *adev,
				     uint32_t xcc_id,
				     uint32_t simd, uint32_t wave,
				     uint32_t *dst, int *no_fields)
{
	/* in gfx12 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here */
	WARN_ON(simd != 0);

	/* type 3 wave data */
	dst[(*no_fields)++] = 3;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
}

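/* Read back a range of wave SGPRs through the SQ indirect index/data pair. */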
static void gfx_v12_0_read_wave_sgprs(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v12_0_read_wave_vgprs(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v12_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc24_grbm_select(adev, me, pipe, q, vm);
}

static const struct amdgpu_gfx_funcs gfx_v12_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v12_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v12_0_select_se_sh,
	.read_wave_data = &gfx_v12_0_read_wave_data,
	.read_wave_sgprs = &gfx_v12_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v12_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v12_0_select_me_pipe_q,
	.update_perfmon_mgcg = &gfx_v12_0_update_perf_clk,
};

static int gfx_v12_0_gpu_early_init(struct amdgpu_device *adev)
{

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	default:
		BUG();
		break;
	}

	return 0;
}

static int gfx_v12_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	int r;
	struct amdgpu_ring *ring;
	unsigned int irq_type;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;
	return 0;
}

static int gfx_v12_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
					int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
			     + (ring_id * GFX12_MEC_HPD_SIZE);
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		   + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		   + ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;
}

static struct {
	SOC24_FIRMWARE_ID	id;
	unsigned int		offset;
	unsigned int		size;
	unsigned int		size_x16;
} rlc_autoload_info[SOC24_FIRMWARE_ID_MAX];

#define RLC_TOC_OFFSET_DWUNIT	8
#define RLC_SIZE_MULTIPLE	1024
#define RLC_TOC_UMF_SIZE_inM	23ULL
#define RLC_TOC_FORMAT_API	165ULL

static void gfx_v12_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
{
	RLC_TABLE_OF_CONTENT_V2 *ucode = rlc_toc;

	while (ucode && (ucode->id > SOC24_FIRMWARE_ID_INVALID)) {
		rlc_autoload_info[ucode->id].id = ucode->id;
		rlc_autoload_info[ucode->id].offset =
			ucode->offset * RLC_TOC_OFFSET_DWUNIT * 4;
		rlc_autoload_info[ucode->id].size =
			ucode->size_x16 ? ucode->size * RLC_SIZE_MULTIPLE * 4 :
					  ucode->size * 4;
		ucode++;
	}
}

static uint32_t gfx_v12_0_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	SOC24_FIRMWARE_ID id;

	gfx_v12_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);

	for (id = SOC24_FIRMWARE_ID_RLC_G_UCODE; id < SOC24_FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* In case the offset in rlc toc ucode is aligned */
	if (total_size < rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset)
		total_size = rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].offset +
			     rlc_autoload_info[SOC24_FIRMWARE_ID_MAX-1].size;
	if (total_size < (RLC_TOC_UMF_SIZE_inM << 20))
		total_size = RLC_TOC_UMF_SIZE_inM << 20;

	return total_size;
}

static int gfx_v12_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v12_0_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);

	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}

static void gfx_v12_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
						       SOC24_FIRMWARE_ID id,
						       const void *fw_data,
						       uint32_t fw_size)
{
	uint32_t toc_offset;
	uint32_t toc_fw_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;

	if (id <= SOC24_FIRMWARE_ID_INVALID || id >= SOC24_FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;

	if (fw_size == 0)
		fw_size = toc_fw_size;

	if (fw_size > toc_fw_size)
		fw_size = toc_fw_size;

	memcpy(ptr + toc_offset, fw_data, fw_size);

	if (fw_size < toc_fw_size)
		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);
}

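/*
 * Stage the TOC itself into the autoload buffer; the second-to-last dword
 * of the copy is patched with the TOC format/API version first.
 */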
static void
gfx_v12_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev)
{
	void *data;
	uint32_t size;
	uint32_t *toc_ptr;

	data = adev->psp.toc.start_addr;
	size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_TOC].size;

	toc_ptr = (uint32_t *)data + size / 4 - 2;
	*toc_ptr = (RLC_TOC_FORMAT_API << 24) | 0x1;

	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_TOC,
						   data, size);
}

static void
gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
	uint16_t version_major, version_minor;

	/* pfp ucode */
	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.pfp_fw->data;
	/* instruction */
	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
	fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_PFP,
						   fw_data, fw_size);
	/* data */
	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
		le32_to_cpu(cpv2_hdr->data_offset_bytes));
	fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_PFP_P0_STACK,
						   fw_data, fw_size);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_PFP_P1_STACK,
						   fw_data, fw_size);
	/* me ucode */
	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.me_fw->data;
	/* instruction */
	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
	fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_ME,
						   fw_data, fw_size);
	/* data */
	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
		le32_to_cpu(cpv2_hdr->data_offset_bytes));
	fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_ME_P0_STACK,
						   fw_data, fw_size);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_ME_P1_STACK,
						   fw_data, fw_size);
	/* mec ucode */
	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.mec_fw->data;
	/* instruction */
	fw_data = (const __le32 *)(adev->gfx.mec_fw->data +
		le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
	fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC,
						   fw_data, fw_size);
	/* data */
	fw_data = (const __le32 *)(adev->gfx.mec_fw->data +
		le32_to_cpu(cpv2_hdr->data_offset_bytes));
	fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P0_STACK,
						   fw_data, fw_size);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P1_STACK,
						   fw_data, fw_size);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P2_STACK,
						   fw_data, fw_size);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RS64_MEC_P3_STACK,
						   fw_data, fw_size);

	/* rlc ucode */
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
		adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
		le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLC_G_UCODE,
						   fw_data, fw_size);

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2) {
		if (version_minor >= 2) {
			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
				le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
			gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_UCODE,
								   fw_data, fw_size);

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
				le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
			gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_RLX6_DRAM_BOOT,
								   fw_data, fw_size);
		}
	}
}

static void
gfx_v12_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v3_0 *sdma_hdr;

	sdma_hdr = (const struct sdma_firmware_header_v3_0 *)
		adev->sdma.instance[0].fw->data;
	fw_data = (const __le32 *)(adev->sdma.instance[0].fw->data +
		le32_to_cpu(sdma_hdr->ucode_offset_bytes));
	fw_size = le32_to_cpu(sdma_hdr->ucode_size_bytes);

	gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, SOC24_FIRMWARE_ID_SDMA_UCODE_TH0,
						   fw_data, fw_size);
}

static void
gfx_v12_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	unsigned fw_size;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	int pipe, ucode_id, data_id;

	for (pipe = 0; pipe < 2; pipe++) {
		if (pipe == 0) {
			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P0;
			data_id = SOC24_FIRMWARE_ID_RS64_MES_P0_STACK;
		} else {
			ucode_id = SOC24_FIRMWARE_ID_RS64_MES_P1;
			data_id = SOC24_FIRMWARE_ID_RS64_MES_P1_STACK;
		}

		mes_hdr = (const struct mes_firmware_header_v1_0 *)
			adev->mes.fw[pipe]->data;

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
			le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

		gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, ucode_id, fw_data, fw_size);

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
			le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

		gfx_v12_0_rlc_backdoor_autoload_copy_ucode(adev, data_id, fw_data, fw_size);
	}
}

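/*
 * RLC backdoor autoload: stage the SDMA/GFX/MES firmware and the TOC in
 * the autoload buffer, program the bootloader with the location of the
 * RLC_G image, then kick off the load through the IMU if present or by
 * unhalting the RLC directly otherwise.
 */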
static int gfx_v12_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
{
	uint32_t rlc_g_offset, rlc_g_size;
	uint64_t gpu_addr;
	uint32_t data;

	/* RLC autoload sequence 2: copy ucode */
	gfx_v12_0_rlc_backdoor_autoload_copy_sdma_ucode(adev);
	gfx_v12_0_rlc_backdoor_autoload_copy_gfx_ucode(adev);
	gfx_v12_0_rlc_backdoor_autoload_copy_mes_ucode(adev);
	gfx_v12_0_rlc_backdoor_autoload_copy_toc_ucode(adev);

	rlc_g_offset = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[SOC24_FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset - adev->gmc.vram_start;

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);

	if (adev->gfx.imu.funcs) {
		/* RLC autoload sequence 3: load IMU fw */
		if (adev->gfx.imu.funcs->load_microcode)
			adev->gfx.imu.funcs->load_microcode(adev);
		/* RLC autoload sequence 4 init IMU fw */
		if (adev->gfx.imu.funcs->setup_imu)
			adev->gfx.imu.funcs->setup_imu(adev);
		if (adev->gfx.imu.funcs->start_imu)
			adev->gfx.imu.funcs->start_imu(adev);

		/* RLC autoload sequence 5 disable gpa mode */
		gfx_v12_0_disable_gpa_mode(adev);
	} else {
		/* unhalt rlc to start autoload without imu */
		data = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE);
		data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD0_ENABLE, 1);
		data = REG_SET_FIELD(data, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
		WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, data);
		WREG32_SOC15(GC, 0, regRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);
	}

	return 0;
}

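/*
 * sw_init: pick the ME/MEC topology for this IP version, hook up the EOP
 * and privileged-fault interrupts, initialize microcode and the RLC/MEC
 * BOs, and create the gfx, compute and (if MES KIQ is not used) KIQ rings.
 */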
static int gfx_v12_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id = 0;
	int xcc_id = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 2;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	default:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	}

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	if (adev->gfx.imu.funcs) {
		if (adev->gfx.imu.funcs->init_microcode) {
			r = adev->gfx.imu.funcs->init_microcode(adev);
			if (r)
				dev_err(adev->dev, "Failed to load imu firmware!\n");
		}
	}

	gfx_v12_0_me_init(adev);

	r = gfx_v12_0_rlc_init(adev);
	if (r) {
		dev_err(adev->dev, "Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v12_0_mec_init(adev);
	if (r) {
		dev_err(adev->dev, "Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v12_0_gfx_ring_init(adev, ring_id,
							    i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}

	ring_id = 0;
	/* set up the compute queues - allocate horizontally across pipes */
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev,
								     0, i, k, j))
					continue;

				r = gfx_v12_0_compute_ring_init(adev, ring_id,
								i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	if (!adev->enable_mes_kiq) {
		r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, 0);
		if (r) {
			dev_err(adev->dev, "Failed to init KIQ BOs!\n");
			return r;
		}

		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
		if (r)
			return r;
	}

	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v12_compute_mqd), 0);
	if (r)
		return r;

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v12_0_rlc_autoload_buffer_init(adev);
		if (r)
			return r;
	}

	r = gfx_v12_0_gpu_early_init(adev);
	if (r)
		return r;

	return 0;
}

static void gfx_v12_0_pfp_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
			      &adev->gfx.pfp.pfp_fw_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_ptr);

	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj,
			      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
}

static void gfx_v12_0_me_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
			      &adev->gfx.me.me_fw_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_ptr);

	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj,
			      &adev->gfx.me.me_fw_data_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_data_ptr);
}

static void gfx_v12_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
			      &adev->gfx.rlc.rlc_autoload_gpu_addr,
			      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
}

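/* sw_fini: tear down the rings, MQDs, KIQ and firmware BOs set up in sw_init. */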
static int gfx_v12_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_mqd_sw_fini(adev, 0);

	if (!adev->enable_mes_kiq) {
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
		amdgpu_gfx_kiq_fini(adev, 0);
	}

	gfx_v12_0_pfp_fini(adev);
	gfx_v12_0_me_fini(adev);
	gfx_v12_0_rlc_fini(adev);
	gfx_v12_0_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v12_0_rlc_autoload_buffer_fini(adev);

	gfx_v12_0_free_microcode(adev);

	return 0;
}

static void gfx_v12_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
}

static u32 gfx_v12_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
	data |= RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}

static void gfx_v12_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
				     adev->gfx.config.max_sh_per_se;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v12_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			data = gfx_v12_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v12_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);
}

#define LDS_APP_BASE		0x1
#define SCRATCH_APP_BASE	0x2

static void gfx_v12_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
		       SCRATCH_APP_BASE;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc24_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v12_0_tcp_harvest(struct amdgpu_device *adev)
{
	/* TODO: harvest feature to be added later. */
}

static void gfx_v12_0_get_tcc_info(struct amdgpu_device *adev)
{
}

static void gfx_v12_0_constants_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	if (!amdgpu_sriov_vf(adev))
		WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v12_0_setup_rb(adev);
	gfx_v12_0_get_cu_info(adev, &adev->gfx.cu_info);
	gfx_v12_0_get_tcc_info(adev);
	adev->gfx.config.pa_sc_tile_steering_override = 0;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc24_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		if (i != 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
					    (adev->gmc.private_aperture_start >> 48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
					    (adev->gmc.shared_aperture_start >> 48));
			WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp);
		}
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v12_0_init_compute_vmid(adev);
}

static void gfx_v12_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	u32 tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
			    enable ? 1 : 0);

	WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
}

static int gfx_v12_0_init_csb(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);

	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI,
		     adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO,
		     adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);

	return 0;
}

static void gfx_v12_0_rlc_stop(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);

	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
	WREG32_SOC15(GC, 0, regRLC_CNTL, tmp);
}

static void gfx_v12_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v12_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t rlc_pg_cntl;

	rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);

	if (!enable) {
		/* RLC_PG_CNTL[23] = 0 (default)
		 * RLC will wait for handshake acks with SMU
		 * GFXOFF will be enabled
		 * RLC_PG_CNTL[23] = 1
		 * RLC will not issue any message to SMU
		 * hence no handshake between SMU & RLC
		 * GFXOFF will be disabled
		 */
		rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	} else
		rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl);
}

static void gfx_v12_0_rlc_start(struct amdgpu_device *adev)
{
	/* TODO: enable rlc & smu handshake until smu
	 * and gfxoff feature works as expected */
	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
		gfx_v12_0_rlc_smu_handshake_cntl(adev, false);

	WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
	udelay(50);
}

static void gfx_v12_0_rlc_enable_srm(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* enable Save Restore Machine */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL));
	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
	WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp);
}

static void gfx_v12_0_load_rlcg_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
		     RLCG_UCODE_LOADING_START_ADDRESS);

	for (i = 0; i < fw_size; i++)
		WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA,
			     le32_to_cpup(fw_data++));

	WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
}

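/* Load the RLC IRAM and DRAM (LX6) images carried by v2.2 RLC firmware headers. */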
static void gfx_v12_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_2 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	u32 tmp;

	hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
	fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0);

	for (i = 0; i < fw_size; i++) {
		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
			msleep(1);
		WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA,
			     le32_to_cpup(fw_data++));
	}

	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
	fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;

	WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0);
	for (i = 0; i < fw_size; i++) {
		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
			msleep(1);
		WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA,
			     le32_to_cpup(fw_data++));
	}

	WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);

	tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL);
	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
	tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
	WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp);
}

static int gfx_v12_0_rlc_load_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	uint16_t version_major;
	uint16_t version_minor;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);

	if (version_major == 2) {
		gfx_v12_0_load_rlcg_microcode(adev);
		if (amdgpu_dpm == 1) {
			if (version_minor >= 2)
				gfx_v12_0_load_rlc_iram_dram_microcode(adev);
		}

		return 0;
	}

	return -EINVAL;
}

static int gfx_v12_0_rlc_resume(struct amdgpu_device *adev)
{
	int r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		gfx_v12_0_init_csb(adev);

		if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
			gfx_v12_0_rlc_enable_srm(adev);
	} else {
		if (amdgpu_sriov_vf(adev)) {
			gfx_v12_0_init_csb(adev);
			return 0;
		}

		adev->gfx.rlc.funcs->stop(adev);

		/* disable CG */
		WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);

		/* disable PG */
		WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			/* legacy rlc firmware loading */
			r = gfx_v12_0_rlc_load_microcode(adev);
			if (r)
				return r;
		}

		gfx_v12_0_init_csb(adev);

		adev->gfx.rlc.funcs->start(adev);
	}

	return 0;
}

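/*
 * Program the RS64 ucode start addresses for all PFP/ME/MEC pipes and
 * toggle the pipe resets so the new start addresses take effect.
 */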
static void gfx_v12_0_config_gfx_rs64(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v2_0 *pfp_hdr;
	const struct gfx_firmware_header_v2_0 *me_hdr;
	const struct gfx_firmware_header_v2_0 *mec_hdr;
	uint32_t pipe_id, tmp;

	mec_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.mec_fw->data;
	me_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.me_fw->data;
	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.pfp_fw->data;

	/* config pfp program start addr */
	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
		soc24_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
			     (pfp_hdr->ucode_start_addr_hi << 30) |
			     (pfp_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
			     pfp_hdr->ucode_start_addr_hi >> 2);
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);

	/* reset pfp pipe */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	/* clear pfp pipe reset */
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	/* config me program start addr */
	for (pipe_id = 0; pipe_id < 2; pipe_id++) {
		soc24_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
			     (me_hdr->ucode_start_addr_hi << 30) |
			     (me_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
			     me_hdr->ucode_start_addr_hi >> 2);
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);

	/* reset me pipe */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	/* clear me pipe reset */
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	/* config mec program start addr */
	for (pipe_id = 0; pipe_id < 4; pipe_id++) {
		soc24_grbm_select(adev, 1, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
			     mec_hdr->ucode_start_addr_lo >> 2 |
			     mec_hdr->ucode_start_addr_hi << 30);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
			     mec_hdr->ucode_start_addr_hi >> 2);
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);

	/* reset mec pipe */
	tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);

	/* clear mec pipe reset */
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
}
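Note: per-pipe registers in this file are always programmed under the select/restore pattern seen above: the GRBM select routes subsequent register accesses to one me/pipe/queue instance, and broadcast mode is restored afterwards. The canonical shape, with srbm_mutex held where callers can race (illustration of the file's own convention, not new code):

	mutex_lock(&adev->srbm_mutex);
	soc24_grbm_select(adev, me, pipe, queue, 0);	/* route following writes */
	/* ... per-pipe register writes ... */
	soc24_grbm_select(adev, 0, 0, 0, 0);		/* restore broadcast view */
	mutex_unlock(&adev->srbm_mutex);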
static int gfx_v12_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
{
	uint32_t cp_status;
	uint32_t bootload_status;
	int i;

	for (i = 0; i < adev->usec_timeout; i++) {
		cp_status = RREG32_SOC15(GC, 0, regCP_STAT);
		bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS);

		if ((cp_status == 0) &&
		    (REG_GET_FIELD(bootload_status,
				   RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
			break;
		}
		udelay(1);
		if (amdgpu_emu_mode)
			msleep(10);
	}

	if (i >= adev->usec_timeout) {
		dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int gfx_v12_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;
	u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);

	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
	tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
	WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_SOC15(GC, 0, regCP_STAT) == 0)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");

	return 0;
}
static int gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v2_0 *pfp_hdr;
	const __le32 *fw_ucode, *fw_data;
	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000; /* wait for 50ms */

	pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.pfp_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);

	/* instruction */
	fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data +
				    le32_to_cpu(pfp_hdr->ucode_offset_bytes));
	fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes);
	/* data */
	fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
				   le32_to_cpu(pfp_hdr->data_offset_bytes));
	fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes);

	/* 64kb align */
	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.pfp.pfp_fw_obj,
				      &adev->gfx.pfp.pfp_fw_gpu_addr,
				      (void **)&adev->gfx.pfp.pfp_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r);
		gfx_v12_0_pfp_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev, fw_data_size,
				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.pfp.pfp_fw_data_obj,
				      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
				      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r);
		gfx_v12_0_pfp_fini(adev);
		return r;
	}

	memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size);
	memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size);

	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);

	if (amdgpu_emu_mode == 1)
		adev->hdp.funcs->flush_hdp(adev, NULL);

	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
		     lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
		     upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));

	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);

	/*
	 * Programming any of the CP_PFP_IC_BASE registers
	 * forces an invalidation of the PFP L1 I$. Wait for the
	 * invalidation to complete.
	 */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Prime the L1 instruction cache */
	tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);

	/* Wait for the cache to be primed */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
				  ICACHE_PRIMED) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to prime instruction cache\n");
		return -EINVAL;
	}

	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
		soc24_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
			     (pfp_hdr->ucode_start_addr_hi << 30) |
			     (pfp_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
			     pfp_hdr->ucode_start_addr_hi >> 2);

		/*
		 * Program CP_ME_CNTL to reset the given pipe so that
		 * CP_PFP_PRGRM_CNTR_START takes effect.
		 */
		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE0_RESET, 1);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE1_RESET, 1);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
		/* Clear the pipe reset bit again. */
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE0_RESET, 0);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    PFP_PIPE1_RESET, 0);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
			     lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
			     upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);

	/* Invalidate the data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
				  INVALIDATE_DCACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	return 0;
}
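Note: the PFP path above and the ME path below repeat the same invalidate-and-wait idiom on their respective IC/DC op-control registers. Since the completion check only tests a single bit, a shared helper taking a raw mask could fold the duplication; a purely illustrative sketch, not part of this patch:

	/* hypothetical: poll a raw completion bit on an op-control register,
	 * where reg is an absolute offset from SOC15_REG_OFFSET() */
	static int gfx_v12_0_poll_bit(struct amdgpu_device *adev, u32 reg,
				      u32 done_mask, u32 usec_timeout)
	{
		u32 i;

		for (i = 0; i < usec_timeout; i++) {
			if (RREG32(reg) & done_mask)
				return 0;
			udelay(1);
		}
		return -ETIMEDOUT;
	}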
static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
{
	int r;
	const struct gfx_firmware_header_v2_0 *me_hdr;
	const __le32 *fw_ucode, *fw_data;
	unsigned i, pipe_id, fw_ucode_size, fw_data_size;
	uint32_t tmp;
	uint32_t usec_timeout = 50000; /* wait for 50ms */

	me_hdr = (const struct gfx_firmware_header_v2_0 *)
		adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);

	/* instruction */
	fw_ucode = (const __le32 *)(adev->gfx.me_fw->data +
				    le32_to_cpu(me_hdr->ucode_offset_bytes));
	fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes);
	/* data */
	fw_data = (const __le32 *)(adev->gfx.me_fw->data +
				   le32_to_cpu(me_hdr->data_offset_bytes));
	fw_data_size = le32_to_cpu(me_hdr->data_size_bytes);

	/* 64kb align */
	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.me.me_fw_obj,
				      &adev->gfx.me.me_fw_gpu_addr,
				      (void **)&adev->gfx.me.me_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r);
		gfx_v12_0_me_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev, fw_data_size,
				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.me.me_fw_data_obj,
				      &adev->gfx.me.me_fw_data_gpu_addr,
				      (void **)&adev->gfx.me.me_fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
		gfx_v12_0_me_fini(adev);
		return r;
	}

	memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size);
	memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size);

	amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);

	if (amdgpu_emu_mode == 1)
		adev->hdp.funcs->flush_hdp(adev, NULL);

	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
		     lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
		     upper_32_bits(adev->gfx.me.me_fw_gpu_addr));

	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
	WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);

	/*
	 * Programming any of the CP_ME_IC_BASE registers
	 * forces an invalidation of the ME L1 I$. Wait for the
	 * invalidation to complete.
	 */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	/* Prime the instruction cache */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);

	/* Wait for the instruction cache to be primed */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				  ICACHE_PRIMED) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to prime instruction cache\n");
		return -EINVAL;
	}

	mutex_lock(&adev->srbm_mutex);
	for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
		soc24_grbm_select(adev, 0, pipe_id, 0, 0);
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
			     (me_hdr->ucode_start_addr_hi << 30) |
			     (me_hdr->ucode_start_addr_lo >> 2));
		WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
			     me_hdr->ucode_start_addr_hi >> 2);

		/*
		 * Program CP_ME_CNTL to reset the given pipe so that
		 * CP_ME_PRGRM_CNTR_START takes effect.
		 */
		tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE0_RESET, 1);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE1_RESET, 1);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
		/* Clear the pipe reset bit again. */
		if (pipe_id == 0)
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE0_RESET, 0);
		else
			tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
					    ME_PIPE1_RESET, 0);
		WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);

		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
			     lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
			     upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);

	/* Invalidate the data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);

	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
				  INVALIDATE_DCACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
		return -EINVAL;
	}

	return 0;
}

static int gfx_v12_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw)
		return -EINVAL;

	gfx_v12_0_cp_gfx_enable(adev, false);

	r = gfx_v12_0_cp_gfx_load_pfp_microcode_rs64(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
		return r;
	}

	r = gfx_v12_0_cp_gfx_load_me_microcode_rs64(adev);
	if (r) {
		dev_err(adev->dev, "(%d) failed to load me fw\n", r);
		return r;
	}

	return 0;
}

static int gfx_v12_0_cp_gfx_start(struct amdgpu_device *adev)
{
	/* init the CP */
	WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT,
		     adev->gfx.config.max_hw_contexts - 1);
	WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1);

	if (!amdgpu_async_gfx_ring)
		gfx_v12_0_cp_gfx_enable(adev, true);

	return 0;
}

static void gfx_v12_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
					 CP_PIPE_ID pipe)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
	tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);

	WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
}
static void gfx_v12_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
					  struct amdgpu_ring *ring)
{
	u32 tmp;

	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp);

	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
			    DOORBELL_RANGE_LOWER, ring->doorbell_index);
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp);

	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
		     CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
}

static int gfx_v12_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr, wptr_gpu_addr;
	u32 i;

	/* Set the write pointer delay */
	WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32_SOC15(GC, 0, regCP_RB_VMID, 0);

	/* Init gfx ring 0 for pipe 0 */
	mutex_lock(&adev->srbm_mutex);
	gfx_v12_0_cp_gfx_switch_pipe(adev, PIPE_ID0);

	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's write pointers */
	ring->wptr = 0;
	WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
	rptr_addr = ring->rptr_gpu_addr;
	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
		     CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);

	wptr_gpu_addr = ring->wptr_gpu_addr;
	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
		     lower_32_bits(wptr_gpu_addr));
	WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
		     upper_32_bits(wptr_gpu_addr));

	mdelay(1);
	WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr);
	WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1);

	gfx_v12_0_cp_gfx_set_doorbell(adev, ring);
	mutex_unlock(&adev->srbm_mutex);

	/* Switch to pipe 0 */
	mutex_lock(&adev->srbm_mutex);
	gfx_v12_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
	mutex_unlock(&adev->srbm_mutex);

	/* start the ring */
	gfx_v12_0_cp_gfx_start(adev);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->sched.ready = true;
	}

	return 0;
}
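Note: the rb_bufsz value computed above appears to be a log2 encoding; CP_RB0_CNTL.RB_BUFSZ holds log2 of the ring size in 8-byte units, so a 64 KiB ring encodes as 13. Illustration only (helper name is hypothetical):

	static inline u32 rb_bufsz_encode(u32 ring_size_bytes)
	{
		return order_base_2(ring_size_bytes / 8);	/* 64 KiB -> 13 */
	}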
static void gfx_v12_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	u32 data;

	data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
			     enable ? 0 : 1);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
			     enable ? 0 : 1);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
			     enable ? 0 : 1);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
			     enable ? 0 : 1);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
			     enable ? 0 : 1);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
			     enable ? 1 : 0);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
			     enable ? 1 : 0);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
			     enable ? 1 : 0);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
			     enable ? 1 : 0);
	data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
			     enable ? 0 : 1);
	WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data);

	adev->gfx.kiq[0].ring.sched.ready = enable;

	udelay(50);
}

static int gfx_v12_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v2_0 *mec_hdr;
	const __le32 *fw_ucode, *fw_data;
	u32 tmp, fw_ucode_size, fw_data_size;
	u32 i, usec_timeout = 50000; /* Wait for 50 ms */
	u32 *fw_ucode_ptr, *fw_data_ptr;
	int r;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v12_0_cp_compute_enable(adev, false);

	mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_ucode = (const __le32 *)(adev->gfx.mec_fw->data +
				    le32_to_cpu(mec_hdr->ucode_offset_bytes));
	fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);

	fw_data = (const __le32 *)(adev->gfx.mec_fw->data +
				   le32_to_cpu(mec_hdr->data_offset_bytes));
	fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);

	r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw_ucode_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
		gfx_v12_0_mec_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev, fw_data_size,
				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.mec.mec_fw_data_obj,
				      &adev->gfx.mec.mec_fw_data_gpu_addr,
				      (void **)&fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
		gfx_v12_0_mec_fini(adev);
		return r;
	}

	memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
	memcpy(fw_data_ptr, fw_data, fw_data_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);

	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);

	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
		soc24_grbm_select(adev, 1, i, 0, 0);

		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO,
			     lower_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
			     upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));

		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
			     mec_hdr->ucode_start_addr_lo >> 2 |
			     mec_hdr->ucode_start_addr_hi << 30);
		WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
			     mec_hdr->ucode_start_addr_hi >> 2);
		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO,
			     lower_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
		WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
			     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
	}
	soc24_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Trigger an invalidation of the MEC data cache */
	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);

	/* Wait for the invalidation to complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
				  INVALIDATE_DCACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate MEC data cache\n");
		return -EINVAL;
	}

	/* Trigger an invalidation of the L1 instruction cache */
	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for the invalidation to complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE) == 1)
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	return 0;
}

static void gfx_v12_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which queue is the KIQ */
	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
}

static void gfx_v12_0_cp_set_doorbell_range(struct amdgpu_device *adev)
{
	/* set graphics engine doorbell range */
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER,
		     (adev->doorbell_index.gfx_ring0 * 2) << 2);
	WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
		     (adev->doorbell_index.gfx_userqueue_end * 2) << 2);

	/* set compute engine doorbell range */
	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
		     (adev->doorbell_index.kiq * 2) << 2);
	WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
		     (adev->doorbell_index.userqueue_end * 2) << 2);
}
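Note: the "(index * 2) << 2" expressions above convert a doorbell index, which counts 64-bit doorbell slots, into the byte offset the range registers expect (equivalently, index * 8). A hypothetical helper, for illustration only:

	static inline u32 doorbell_byte_offset(u32 index)
	{
		return (index * 2) << 2;	/* slots -> dwords -> bytes */
	}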
static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
				  struct amdgpu_mqd_prop *prop)
{
	struct v12_gfx_mqd *mqd = m;
	uint64_t hqd_gpu_addr, wb_gpu_addr;
	uint32_t tmp;
	uint32_t rb_bufsz;

	/* set up gfx hqd wptr */
	mqd->cp_gfx_hqd_wptr = 0;
	mqd->cp_gfx_hqd_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);

	/* set up mqd control */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
	mqd->cp_gfx_mqd_control = tmp;

	/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
	mqd->cp_gfx_hqd_vmid = 0;

	/* set up default queue priority level
	 * 0x0 = low priority, 0x1 = high priority
	 */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
	mqd->cp_gfx_hqd_queue_priority = tmp;

	/* set up time quantum */
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
	mqd->cp_gfx_hqd_quantum = tmp;

	/* set up gfx hqd base, similar to CP_RB_BASE */
	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
	mqd->cp_gfx_hqd_base = hqd_gpu_addr;
	mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */
	wb_gpu_addr = prop->rptr_gpu_addr;
	mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
	mqd->cp_gfx_hqd_rptr_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up rb_wptr_poll addr */
	wb_gpu_addr = prop->wptr_gpu_addr;
	mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up the gfx_hqd_control, similar to CP_RB0_CNTL */
	rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
	tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
	mqd->cp_gfx_hqd_cntl = tmp;

	/* set up cp_doorbell_control */
	tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
	if (prop->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, prop->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}
	mqd->cp_rb_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);

	/* activate the queue */
	mqd->cp_gfx_hqd_active = 1;

	return 0;
}
static int gfx_v12_0_gfx_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v12_gfx_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.gfx_ring[0];

	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
		memset((void *)mqd, 0, sizeof(*mqd));
		mutex_lock(&adev->srbm_mutex);
		soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		amdgpu_ring_init_mqd(ring);
		soc24_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
		if (adev->gfx.me.mqd_backup[mqd_idx])
			memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
	} else if (amdgpu_in_reset(adev)) {
		/* reset mqd with the backup copy */
		if (adev->gfx.me.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
		/* reset the ring */
		ring->wptr = 0;
		*ring->wptr_cpu_addr = 0;
		amdgpu_ring_clear_ring(ring);
	} else {
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}

static int gfx_v12_0_kiq_enable_kgq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
	int r, i;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
		return -EINVAL;

	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
			      adev->gfx.num_gfx_rings);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);

	return amdgpu_ring_test_helper(kiq_ring);
}

static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;

		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v12_0_gfx_init_queue(ring);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}

	r = gfx_v12_0_kiq_enable_kgq(adev);
	if (r)
		goto done;

	r = gfx_v12_0_cp_gfx_start(adev);
	if (r)
		goto done;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->sched.ready = true;
	}
done:
	return r;
}
static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
				      struct amdgpu_mqd_prop *prop)
{
	struct v12_compute_mqd *mqd = m;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000007;

	eop_base_addr = prop->eop_gpu_addr >> 8;
	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			    (order_base_2(GFX12_MEC_HPD_SIZE / 4) - 1));

	mqd->cp_hqd_eop_control = tmp;

	/* enable doorbell? */
	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);

	if (prop->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, prop->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* disable the queue if it's active */
	mqd->cp_hqd_dequeue_request = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, similar to CP_RB0_BASE/_HI */
	hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up the HQD, similar to CP_RB0_CNTL */
	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(prop->queue_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			    (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = prop->rptr_gpu_addr;
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = prop->wptr_gpu_addr;
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	tmp = 0;
	/* enable the doorbell if requested */
	if (prop->use_doorbell) {
		tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, prop->doorbell_index);

		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;

	tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
	mqd->cp_hqd_persistent_state = tmp;

	/* set MIN_IB_AVAIL_SIZE */
	tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
	mqd->cp_hqd_ib_control = tmp;

	/* set static priority for a compute queue/ring */
	mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
	mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;

	mqd->cp_hqd_active = prop->hqd_active;

	return 0;
}
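Note: as a worked example of the EOP_SIZE encoding in gfx_v12_0_compute_mqd_init() above: GFX12_MEC_HPD_SIZE is 2048 bytes, i.e. 512 dwords; order_base_2(512) - 1 = 8, and the hardware decodes the field as 2^(8+1) = 512 dwords, matching the buffer. In helper form (illustration only):

	static inline u32 eop_size_field(u32 eop_bytes)
	{
		return order_base_2(eop_bytes / 4) - 1;
	}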
static int gfx_v12_0_kiq_init_register(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v12_compute_mqd *mqd = ring->mqd_ptr;
	int j;

	/* deactivate the queue */
	if (amdgpu_sriov_vf(adev))
		WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0);

	/* disable wptr polling */
	WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);

	/* write the EOP addr */
	WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR,
		     mqd->cp_hqd_eop_base_addr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI,
		     mqd->cp_hqd_eop_base_addr_hi);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL,
		     mqd->cp_hqd_eop_control);

	/* enable doorbell? */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
		     mqd->cp_hqd_pq_doorbell_control);

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST,
			     mqd->cp_hqd_dequeue_request);
		WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR,
			     mqd->cp_hqd_pq_rptr);
		WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
			     mqd->cp_hqd_pq_wptr_lo);
		WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
			     mqd->cp_hqd_pq_wptr_hi);
	}

	/* set the pointer to the MQD */
	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR,
		     mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI,
		     mqd->cp_mqd_base_addr_hi);

	/* set MQD vmid to 0 */
	WREG32_SOC15(GC, 0, regCP_MQD_CONTROL,
		     mqd->cp_mqd_control);

	/* set the pointer to the HQD, similar to CP_RB0_BASE/_HI */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE,
		     mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI,
		     mqd->cp_hqd_pq_base_hi);

	/* set up the HQD, similar to CP_RB0_CNTL */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL,
		     mqd->cp_hqd_pq_control);

	/* set the wb address whether it's enabled or not */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
		     mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		     mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
		     mqd->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
		     mqd->cp_hqd_pq_wptr_poll_addr_hi);
	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
			     (adev->doorbell_index.kiq * 2) << 2);
		WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
			     (adev->doorbell_index.userqueue_end * 2) << 2);
	}

	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
		     mqd->cp_hqd_pq_doorbell_control);

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
		     mqd->cp_hqd_pq_wptr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
		     mqd->cp_hqd_pq_wptr_hi);

	/* set the vmid for the queue */
	WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid);

	WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE,
		     mqd->cp_hqd_persistent_state);

	/* activate the queue */
	WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE,
		     mqd->cp_hqd_active);

	if (ring->use_doorbell)
		WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);

	return 0;
}

static int gfx_v12_0_kiq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v12_compute_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;

	gfx_v12_0_kiq_setting(ring);

	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);

		mutex_lock(&adev->srbm_mutex);
		soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v12_0_kiq_init_register(ring);
		soc24_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else {
		memset((void *)mqd, 0, sizeof(*mqd));
		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
			amdgpu_ring_clear_ring(ring);
		mutex_lock(&adev->srbm_mutex);
		soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		amdgpu_ring_init_mqd(ring);
		gfx_v12_0_kiq_init_register(ring);
		soc24_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
	}

	return 0;
}
static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v12_compute_mqd *mqd = ring->mqd_ptr;
	int mqd_idx = ring - &adev->gfx.compute_ring[0];

	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
		memset((void *)mqd, 0, sizeof(*mqd));
		mutex_lock(&adev->srbm_mutex);
		soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		amdgpu_ring_init_mqd(ring);
		soc24_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
	} else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));

		/* reset ring buffer */
		ring->wptr = 0;
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
		amdgpu_ring_clear_ring(ring);
	} else {
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}

static int gfx_v12_0_kiq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	ring = &adev->gfx.kiq[0].ring;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (unlikely(r != 0)) {
		amdgpu_bo_unreserve(ring->mqd_obj);
		return r;
	}

	gfx_v12_0_kiq_init_queue(ring);
	amdgpu_bo_kunmap(ring->mqd_obj);
	ring->mqd_ptr = NULL;
	amdgpu_bo_unreserve(ring->mqd_obj);
	ring->sched.ready = true;
	return 0;
}

static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int r = 0, i;

	if (!amdgpu_async_gfx_ring)
		gfx_v12_0_cp_compute_enable(adev, true);

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;
		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v12_0_kcq_init_queue(ring);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}

	r = amdgpu_gfx_enable_kcq(adev, 0);
done:
	return r;
}

static int gfx_v12_0_cp_resume(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	if (!(adev->flags & AMD_IS_APU))
		gfx_v12_0_enable_gui_idle_interrupt(adev, false);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		/* legacy firmware loading */
		r = gfx_v12_0_cp_gfx_load_microcode(adev);
		if (r)
			return r;

		r = gfx_v12_0_cp_compute_load_microcode_rs64(adev);
		if (r)
			return r;
	}

	gfx_v12_0_cp_set_doorbell_range(adev);

	if (amdgpu_async_gfx_ring) {
		gfx_v12_0_cp_compute_enable(adev, true);
		gfx_v12_0_cp_gfx_enable(adev, true);
	}

	if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
		r = amdgpu_mes_kiq_hw_init(adev);
	else
		r = gfx_v12_0_kiq_resume(adev);
	if (r)
		return r;

	r = gfx_v12_0_kcq_resume(adev);
	if (r)
		return r;

	if (!amdgpu_async_gfx_ring) {
		r = gfx_v12_0_cp_gfx_resume(adev);
		if (r)
			return r;
	} else {
		r = gfx_v12_0_cp_async_gfx_ring_resume(adev);
		if (r)
			return r;
	}

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}

static void gfx_v12_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v12_0_cp_gfx_enable(adev, enable);
	gfx_v12_0_cp_compute_enable(adev, enable);
}
static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS);

	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);

	return 0;
}

static int get_gb_addr_config(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
	if (gb_addr_config == 0)
		return -EINVAL;

	adev->gfx.config.gb_addr_config_fields.num_pkrs =
		1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
		REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));

	return 0;
}

static void gfx_v12_0_disable_gpa_mode(struct amdgpu_device *adev)
{
	uint32_t data;

	data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG);
	data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
	WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data);

	data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG);
	data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
	WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
}
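Note: get_gb_addr_config() above relies on GB_ADDR_CONFIG packing its topology values as log2 fields, decoded with "1 << field"; PIPE_INTERLEAVE_SIZE additionally carries a bias of 8, so a field value of 0 means a 256-byte interleave. For illustration (helper name is hypothetical):

	static inline u32 pipe_interleave_bytes(u32 gb_addr_config)
	{
		return 1 << (8 + REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG,
					       PIPE_INTERLEAVE_SIZE));
	}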
static int gfx_v12_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		if (adev->gfx.imu.funcs) {
			/* RLC autoload sequence 1: Program rlc ram */
			if (adev->gfx.imu.funcs->program_rlc_ram)
				adev->gfx.imu.funcs->program_rlc_ram(adev);
		}
		/* rlc autoload firmware */
		r = gfx_v12_0_rlc_backdoor_autoload_enable(adev);
		if (r)
			return r;
	} else {
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
				if (adev->gfx.imu.funcs->load_microcode)
					adev->gfx.imu.funcs->load_microcode(adev);
				if (adev->gfx.imu.funcs->setup_imu)
					adev->gfx.imu.funcs->setup_imu(adev);
				if (adev->gfx.imu.funcs->start_imu)
					adev->gfx.imu.funcs->start_imu(adev);
			}

			/* disable gpa mode in backdoor loading */
			gfx_v12_0_disable_gpa_mode(adev);
		}
	}

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = gfx_v12_0_wait_for_rlc_autoload_complete(adev);
		if (r) {
			dev_err(adev->dev, "(%d) failed to wait for rlc autoload to complete\n", r);
			return r;
		}
	}

	adev->gfx.is_poweron = true;

	if (get_gb_addr_config(adev))
		DRM_WARN("Invalid gb_addr_config!\n");

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
		gfx_v12_0_config_gfx_rs64(adev);

	r = gfx_v12_0_gfxhub_enable(adev);
	if (r)
		return r;

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT ||
	     adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) &&
	    (amdgpu_dpm == 1)) {
		/*
		 * For gfx 12, RLC firmware loading relies on the SMU firmware
		 * being loaded first, so with the direct load types the SMC
		 * ucode has to be loaded here before the RLC.
		 */
		if (!(adev->flags & AMD_IS_APU)) {
			r = amdgpu_pm_load_smu_firmware(adev, NULL);
			if (r)
				return r;
		}
	}

	gfx_v12_0_constants_init(adev);

	if (adev->nbio.funcs->gc_doorbell_init)
		adev->nbio.funcs->gc_doorbell_init(adev);

	r = gfx_v12_0_rlc_resume(adev);
	if (r)
		return r;

	/*
	 * init golden registers and rlc resume may override some registers,
	 * reconfigure them here
	 */
	gfx_v12_0_tcp_harvest(adev);

	r = gfx_v12_0_cp_resume(adev);
	if (r)
		return r;

	return r;
}

static int gfx_v12_0_kiq_disable_kgq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i, r = 0;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
			      adev->gfx.num_gfx_rings))
		return -ENOMEM;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
					   PREEMPT_QUEUES, 0, 0);

	if (adev->gfx.kiq[0].ring.sched.ready)
		r = amdgpu_ring_test_helper(kiq_ring);

	return r;
}

static int gfx_v12_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;
	uint32_t tmp;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);

	if (!adev->no_hw_access) {
		if (amdgpu_async_gfx_ring) {
			r = gfx_v12_0_kiq_disable_kgq(adev);
			if (r)
				DRM_ERROR("KGQ disable failed\n");
		}

		if (amdgpu_gfx_disable_kcq(adev, 0))
			DRM_ERROR("KCQ disable failed\n");

		amdgpu_mes_kiq_hw_fini(adev);
	}

	if (amdgpu_sriov_vf(adev)) {
		gfx_v12_0_cp_gfx_enable(adev, false);
		/* Clear the KIQ position in RLC_CP_SCHEDULERS during destroy */
		tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
		tmp &= 0xffffff00;
		WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);

		return 0;
	}
	gfx_v12_0_cp_enable(adev, false);
	gfx_v12_0_enable_gui_idle_interrupt(adev, false);

	adev->gfxhub.funcs->gart_disable(adev);

	adev->gfx.is_poweron = false;

	return 0;
}

static int gfx_v12_0_suspend(void *handle)
{
	return gfx_v12_0_hw_fini(handle);
}

static int gfx_v12_0_resume(void *handle)
{
	return gfx_v12_0_hw_init(handle);
}
3315 + 3316 + static bool gfx_v12_0_is_idle(void *handle) 3317 + { 3318 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3319 + 3320 + if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS), 3321 + GRBM_STATUS, GUI_ACTIVE)) 3322 + return false; 3323 + else 3324 + return true; 3325 + } 3326 + 3327 + static int gfx_v12_0_wait_for_idle(void *handle) 3328 + { 3329 + unsigned i; 3330 + u32 tmp; 3331 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3332 + 3333 + for (i = 0; i < adev->usec_timeout; i++) { 3334 + /* read GRBM_STATUS */ 3335 + tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) & 3336 + GRBM_STATUS__GUI_ACTIVE_MASK; 3337 + 3338 + if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) 3339 + return 0; 3340 + udelay(1); 3341 + } 3342 + return -ETIMEDOUT; 3343 + } 3344 + 3345 + static uint64_t gfx_v12_0_get_gpu_clock_counter(struct amdgpu_device *adev) 3346 + { 3347 + uint64_t clock; 3348 + 3349 + amdgpu_gfx_off_ctrl(adev, false); 3350 + mutex_lock(&adev->gfx.gpu_clock_mutex); 3351 + clock = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER) | 3352 + ((uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER) << 32ULL); 3353 + mutex_unlock(&adev->gfx.gpu_clock_mutex); 3354 + amdgpu_gfx_off_ctrl(adev, true); 3355 + return clock; 3356 + } 3357 + 3358 + static int gfx_v12_0_early_init(void *handle) 3359 + { 3360 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3361 + 3362 + adev->gfx.funcs = &gfx_v12_0_gfx_funcs; 3363 + 3364 + adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS; 3365 + adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), 3366 + AMDGPU_MAX_COMPUTE_RINGS); 3367 + 3368 + gfx_v12_0_set_kiq_pm4_funcs(adev); 3369 + gfx_v12_0_set_ring_funcs(adev); 3370 + gfx_v12_0_set_irq_funcs(adev); 3371 + gfx_v12_0_set_rlc_funcs(adev); 3372 + gfx_v12_0_set_mqd_funcs(adev); 3373 + gfx_v12_0_set_imu_funcs(adev); 3374 + 3375 + gfx_v12_0_init_rlcg_reg_access_ctrl(adev); 3376 + 3377 + return gfx_v12_0_init_microcode(adev); 3378 + } 3379 + 3380 + static int gfx_v12_0_late_init(void *handle) 3381 + { 3382 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3383 + int r; 3384 + 3385 + r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 3386 + if (r) 3387 + return r; 3388 + 3389 + r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 3390 + if (r) 3391 + return r; 3392 + 3393 + return 0; 3394 + } 3395 + 3396 + static bool gfx_v12_0_is_rlc_enabled(struct amdgpu_device *adev) { 3397 + 3398 + uint32_t rlc_cntl; 3399 + 3400 + /* check whether the RLC F32 core is enabled */ 3401 + rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL); 3402 + return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ?
true : false; 3403 + } 3404 + 3405 + static void gfx_v12_0_set_safe_mode(struct amdgpu_device *adev, 3406 + int xcc_id) 3407 + { 3408 + uint32_t data; 3409 + unsigned i; 3410 + 3411 + data = RLC_SAFE_MODE__CMD_MASK; 3412 + data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); 3413 + 3414 + WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data); 3415 + 3416 + /* wait for RLC_SAFE_MODE */ 3417 + for (i = 0; i < adev->usec_timeout; i++) { 3418 + if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE), 3419 + RLC_SAFE_MODE, CMD)) 3420 + break; 3421 + udelay(1); 3422 + } 3423 + } 3424 + 3425 + static void gfx_v12_0_unset_safe_mode(struct amdgpu_device *adev, 3426 + int xcc_id) 3427 + { 3428 + WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK); 3429 + } 3430 + 3431 + static void gfx_v12_0_update_perf_clk(struct amdgpu_device *adev, 3432 + bool enable) 3433 + { 3434 + /* TODO */ 3435 + } 3436 + 3437 + static void gfx_v12_0_update_spm_vmid(struct amdgpu_device *adev, 3438 + struct amdgpu_ring *ring, 3439 + unsigned vmid) 3440 + { 3441 + u32 reg, data; 3442 + 3443 + reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); 3444 + if (amdgpu_sriov_is_pp_one_vf(adev)) 3445 + data = RREG32_NO_KIQ(reg); 3446 + else 3447 + data = RREG32(reg); 3448 + 3449 + data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; 3450 + data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; 3451 + 3452 + if (amdgpu_sriov_is_pp_one_vf(adev)) 3453 + WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data); 3454 + else 3455 + WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data); 3456 + 3457 + if (ring 3458 + && amdgpu_sriov_is_pp_one_vf(adev) 3459 + && ((ring->funcs->type == AMDGPU_RING_TYPE_GFX) 3460 + || (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE))) { 3461 + uint32_t reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL); 3462 + amdgpu_ring_emit_wreg(ring, reg, data); 3463 + } 3464 + } 3465 + 3466 + static const struct amdgpu_rlc_funcs gfx_v12_0_rlc_funcs = { 3467 + .is_rlc_enabled = gfx_v12_0_is_rlc_enabled, 3468 + .set_safe_mode = gfx_v12_0_set_safe_mode, 3469 + .unset_safe_mode = gfx_v12_0_unset_safe_mode, 3470 + .init = gfx_v12_0_rlc_init, 3471 + .get_csb_size = gfx_v12_0_get_csb_size, 3472 + .get_csb_buffer = gfx_v12_0_get_csb_buffer, 3473 + .resume = gfx_v12_0_rlc_resume, 3474 + .stop = gfx_v12_0_rlc_stop, 3475 + .reset = gfx_v12_0_rlc_reset, 3476 + .start = gfx_v12_0_rlc_start, 3477 + .update_spm_vmid = gfx_v12_0_update_spm_vmid, 3478 + }; 3479 + 3480 + #if 0 3481 + static void gfx_v12_cntl_power_gating(struct amdgpu_device *adev, bool enable) 3482 + { 3483 + /* TODO */ 3484 + } 3485 + 3486 + static void gfx_v12_cntl_pg(struct amdgpu_device *adev, bool enable) 3487 + { 3488 + /* TODO */ 3489 + } 3490 + #endif 3491 + 3492 + static int gfx_v12_0_set_powergating_state(void *handle, 3493 + enum amd_powergating_state state) 3494 + { 3495 + /* TODO */ 3496 + 3497 + return 0; 3498 + } 3499 + 3500 + static int gfx_v12_0_set_clockgating_state(void *handle, 3501 + enum amd_clockgating_state state) 3502 + { 3503 + /* TODO */ 3504 + 3505 + return 0; 3506 + } 3507 + 3508 + static void gfx_v12_0_get_clockgating_state(void *handle, u64 *flags) 3509 + { 3510 + /* TODO */ 3511 + } 3512 + 3513 + static u64 gfx_v12_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) 3514 + { 3515 + /* gfx12 is 32bit rptr*/ 3516 + return *(uint32_t *)ring->rptr_cpu_addr; 3517 + } 3518 + 3519 + static u64 gfx_v12_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) 3520 + { 3521 + struct amdgpu_device *adev = ring->adev; 3522 + u64 wptr; 3523 + 3524 + /* XXX check 
if swapping is necessary on BE */ 3525 + if (ring->use_doorbell) { 3526 + wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 3527 + } else { 3528 + wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR); 3529 + wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32; 3530 + } 3531 + 3532 + return wptr; 3533 + } 3534 + 3535 + static void gfx_v12_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) 3536 + { 3537 + struct amdgpu_device *adev = ring->adev; 3538 + uint32_t *wptr_saved; 3539 + uint32_t *is_queue_unmap; 3540 + uint64_t aggregated_db_index; 3541 + uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size; 3542 + uint64_t wptr_tmp; 3543 + 3544 + if (ring->is_mes_queue) { 3545 + wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size); 3546 + is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size + 3547 + sizeof(uint32_t)); 3548 + aggregated_db_index = 3549 + amdgpu_mes_get_aggregated_doorbell_index(adev, 3550 + ring->hw_prio); 3551 + 3552 + wptr_tmp = ring->wptr & ring->buf_mask; 3553 + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp); 3554 + *wptr_saved = wptr_tmp; 3555 + /* assume doorbell always being used by mes mapped queue */ 3556 + if (*is_queue_unmap) { 3557 + WDOORBELL64(aggregated_db_index, wptr_tmp); 3558 + WDOORBELL64(ring->doorbell_index, wptr_tmp); 3559 + } else { 3560 + WDOORBELL64(ring->doorbell_index, wptr_tmp); 3561 + 3562 + if (*is_queue_unmap) 3563 + WDOORBELL64(aggregated_db_index, wptr_tmp); 3564 + } 3565 + } else { 3566 + if (ring->use_doorbell) { 3567 + /* XXX check if swapping is necessary on BE */ 3568 + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 3569 + ring->wptr); 3570 + WDOORBELL64(ring->doorbell_index, ring->wptr); 3571 + } else { 3572 + WREG32_SOC15(GC, 0, regCP_RB0_WPTR, 3573 + lower_32_bits(ring->wptr)); 3574 + WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, 3575 + upper_32_bits(ring->wptr)); 3576 + } 3577 + } 3578 + } 3579 + 3580 + static u64 gfx_v12_0_ring_get_rptr_compute(struct amdgpu_ring *ring) 3581 + { 3582 + /* gfx12 hardware is 32bit rptr */ 3583 + return *(uint32_t *)ring->rptr_cpu_addr; 3584 + } 3585 + 3586 + static u64 gfx_v12_0_ring_get_wptr_compute(struct amdgpu_ring *ring) 3587 + { 3588 + u64 wptr; 3589 + 3590 + /* XXX check if swapping is necessary on BE */ 3591 + if (ring->use_doorbell) 3592 + wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr); 3593 + else 3594 + BUG(); 3595 + return wptr; 3596 + } 3597 + 3598 + static void gfx_v12_0_ring_set_wptr_compute(struct amdgpu_ring *ring) 3599 + { 3600 + struct amdgpu_device *adev = ring->adev; 3601 + uint32_t *wptr_saved; 3602 + uint32_t *is_queue_unmap; 3603 + uint64_t aggregated_db_index; 3604 + uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size; 3605 + uint64_t wptr_tmp; 3606 + 3607 + if (ring->is_mes_queue) { 3608 + wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size); 3609 + is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size + 3610 + sizeof(uint32_t)); 3611 + aggregated_db_index = 3612 + amdgpu_mes_get_aggregated_doorbell_index(adev, 3613 + ring->hw_prio); 3614 + 3615 + wptr_tmp = ring->wptr & ring->buf_mask; 3616 + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp); 3617 + *wptr_saved = wptr_tmp; 3618 + /* assume doorbell always used by mes mapped queue */ 3619 + if (*is_queue_unmap) { 3620 + WDOORBELL64(aggregated_db_index, wptr_tmp); 3621 + WDOORBELL64(ring->doorbell_index, wptr_tmp); 3622 + } else { 3623 + WDOORBELL64(ring->doorbell_index, wptr_tmp); 3624 + 3625 + if (*is_queue_unmap) 3626 + WDOORBELL64(aggregated_db_index, wptr_tmp); 3627 + } 3628 + } else { 3629 + 
/* XXX check if swapping is necessary on BE */ 3630 + if (ring->use_doorbell) { 3631 + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 3632 + ring->wptr); 3633 + WDOORBELL64(ring->doorbell_index, ring->wptr); 3634 + } else { 3635 + BUG(); /* only DOORBELL method supported on gfx12 now */ 3636 + } 3637 + } 3638 + } 3639 + 3640 + static void gfx_v12_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 3641 + { 3642 + struct amdgpu_device *adev = ring->adev; 3643 + u32 ref_and_mask, reg_mem_engine; 3644 + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; 3645 + 3646 + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 3647 + switch (ring->me) { 3648 + case 1: 3649 + ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; 3650 + break; 3651 + case 2: 3652 + ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; 3653 + break; 3654 + default: 3655 + return; 3656 + } 3657 + reg_mem_engine = 0; 3658 + } else { 3659 + ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; 3660 + reg_mem_engine = 1; /* pfp */ 3661 + } 3662 + 3663 + gfx_v12_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, 3664 + adev->nbio.funcs->get_hdp_flush_req_offset(adev), 3665 + adev->nbio.funcs->get_hdp_flush_done_offset(adev), 3666 + ref_and_mask, ref_and_mask, 0x20); 3667 + } 3668 + 3669 + static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 3670 + struct amdgpu_job *job, 3671 + struct amdgpu_ib *ib, 3672 + uint32_t flags) 3673 + { 3674 + unsigned vmid = AMDGPU_JOB_GET_VMID(job); 3675 + u32 header, control = 0; 3676 + 3677 + BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); 3678 + 3679 + header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 3680 + 3681 + control |= ib->length_dw | (vmid << 24); 3682 + 3683 + if (ring->is_mes_queue) 3684 + /* inherit vmid from mqd */ 3685 + control |= 0x400000; 3686 + 3687 + amdgpu_ring_write(ring, header); 3688 + BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 3689 + amdgpu_ring_write(ring, 3690 + #ifdef __BIG_ENDIAN 3691 + (2 << 0) | 3692 + #endif 3693 + lower_32_bits(ib->gpu_addr)); 3694 + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 3695 + amdgpu_ring_write(ring, control); 3696 + } 3697 + 3698 + static void gfx_v12_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 3699 + struct amdgpu_job *job, 3700 + struct amdgpu_ib *ib, 3701 + uint32_t flags) 3702 + { 3703 + unsigned vmid = AMDGPU_JOB_GET_VMID(job); 3704 + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 3705 + 3706 + if (ring->is_mes_queue) 3707 + /* inherit vmid from mqd */ 3708 + control |= 0x40000000; 3709 + 3710 + /* Currently, there is a high possibility to get wave ID mismatch 3711 + * between ME and GDS, leading to a hw deadlock, because ME generates 3712 + * different wave IDs than the GDS expects. This situation happens 3713 + * randomly when at least 5 compute pipes use GDS ordered append. 3714 + * The wave IDs generated by ME are also wrong after suspend/resume. 3715 + * Those are probably bugs somewhere else in the kernel driver. 3716 + * 3717 + * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and 3718 + * GDS to 0 for this ring (me/pipe). 
3719 + */ 3720 + if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { 3721 + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 3722 + amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID); 3723 + } 3724 + 3725 + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 3726 + BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 3727 + amdgpu_ring_write(ring, 3728 + #ifdef __BIG_ENDIAN 3729 + (2 << 0) | 3730 + #endif 3731 + lower_32_bits(ib->gpu_addr)); 3732 + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 3733 + amdgpu_ring_write(ring, control); 3734 + } 3735 + 3736 + static void gfx_v12_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 3737 + u64 seq, unsigned flags) 3738 + { 3739 + bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 3740 + bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 3741 + 3742 + /* RELEASE_MEM - flush caches, send int */ 3743 + amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); 3744 + amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ | 3745 + PACKET3_RELEASE_MEM_GCR_GL2_WB | 3746 + PACKET3_RELEASE_MEM_GCR_GL2_INV | 3747 + PACKET3_RELEASE_MEM_GCR_GL2_US | 3748 + PACKET3_RELEASE_MEM_GCR_GL1_INV | 3749 + PACKET3_RELEASE_MEM_GCR_GLV_INV | 3750 + PACKET3_RELEASE_MEM_GCR_GLM_INV | 3751 + PACKET3_RELEASE_MEM_GCR_GLM_WB | 3752 + PACKET3_RELEASE_MEM_CACHE_POLICY(3) | 3753 + PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 3754 + PACKET3_RELEASE_MEM_EVENT_INDEX(5))); 3755 + amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) | 3756 + PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0))); 3757 + 3758 + /* 3759 + * the address should be Qword aligned for a 64bit write, Dword 3760 + * aligned if only the low 32 bits are sent (data high discarded) 3761 + */ 3762 + if (write64bit) 3763 + BUG_ON(addr & 0x7); 3764 + else 3765 + BUG_ON(addr & 0x3); 3766 + amdgpu_ring_write(ring, lower_32_bits(addr)); 3767 + amdgpu_ring_write(ring, upper_32_bits(addr)); 3768 + amdgpu_ring_write(ring, lower_32_bits(seq)); 3769 + amdgpu_ring_write(ring, upper_32_bits(seq)); 3770 + amdgpu_ring_write(ring, ring->is_mes_queue ?
3771 + (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0); 3772 + } 3773 + 3774 + static void gfx_v12_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 3775 + { 3776 + int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 3777 + uint32_t seq = ring->fence_drv.sync_seq; 3778 + uint64_t addr = ring->fence_drv.gpu_addr; 3779 + 3780 + gfx_v12_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr), 3781 + upper_32_bits(addr), seq, 0xffffffff, 4); 3782 + } 3783 + 3784 + static void gfx_v12_0_ring_invalidate_tlbs(struct amdgpu_ring *ring, 3785 + uint16_t pasid, uint32_t flush_type, 3786 + bool all_hub, uint8_t dst_sel) 3787 + { 3788 + amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); 3789 + amdgpu_ring_write(ring, 3790 + PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) | 3791 + PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) | 3792 + PACKET3_INVALIDATE_TLBS_PASID(pasid) | 3793 + PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type)); 3794 + } 3795 + 3796 + static void gfx_v12_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 3797 + unsigned vmid, uint64_t pd_addr) 3798 + { 3799 + if (ring->is_mes_queue) 3800 + gfx_v12_0_ring_invalidate_tlbs(ring, 0, 0, false, 0); 3801 + else 3802 + amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 3803 + 3804 + /* compute doesn't have PFP */ 3805 + if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) { 3806 + /* sync PFP to ME, otherwise we might get invalid PFP reads */ 3807 + amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 3808 + amdgpu_ring_write(ring, 0x0); 3809 + } 3810 + } 3811 + 3812 + static void gfx_v12_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, 3813 + u64 seq, unsigned int flags) 3814 + { 3815 + struct amdgpu_device *adev = ring->adev; 3816 + 3817 + /* we only allocate 32bit for each seq wb address */ 3818 + BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 3819 + 3820 + /* write fence seq to the "addr" */ 3821 + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3822 + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 3823 + WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); 3824 + amdgpu_ring_write(ring, lower_32_bits(addr)); 3825 + amdgpu_ring_write(ring, upper_32_bits(addr)); 3826 + amdgpu_ring_write(ring, lower_32_bits(seq)); 3827 + 3828 + if (flags & AMDGPU_FENCE_FLAG_INT) { 3829 + /* set register to trigger INT */ 3830 + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3831 + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 3832 + WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); 3833 + amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS)); 3834 + amdgpu_ring_write(ring, 0); 3835 + amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ 3836 + } 3837 + } 3838 + 3839 + static void gfx_v12_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, 3840 + uint32_t flags) 3841 + { 3842 + uint32_t dw2 = 0; 3843 + 3844 + dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ 3845 + if (flags & AMDGPU_HAVE_CTX_SWITCH) { 3846 + /* set load_global_config & load_global_uconfig */ 3847 + dw2 |= 0x8001; 3848 + /* set load_cs_sh_regs */ 3849 + dw2 |= 0x01000000; 3850 + /* set load_per_context_state & load_gfx_sh_regs for GFX */ 3851 + dw2 |= 0x10002; 3852 + } 3853 + 3854 + amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 3855 + amdgpu_ring_write(ring, dw2); 3856 + amdgpu_ring_write(ring, 0); 3857 + } 3858 + 3859 + static unsigned gfx_v12_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring, 3860 + uint64_t addr) 3861 + { 3862 + unsigned ret; 3863 + 3864 + amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3)); 3865 + 
amdgpu_ring_write(ring, lower_32_bits(addr)); 3866 + amdgpu_ring_write(ring, upper_32_bits(addr)); 3867 + /* discard following DWs if *cond_exec_gpu_addr==0 */ 3868 + amdgpu_ring_write(ring, 0); 3869 + ret = ring->wptr & ring->buf_mask; 3870 + /* patch dummy value later */ 3871 + amdgpu_ring_write(ring, 0); 3872 + 3873 + return ret; 3874 + } 3875 + 3876 + static int gfx_v12_0_ring_preempt_ib(struct amdgpu_ring *ring) 3877 + { 3878 + int i, r = 0; 3879 + struct amdgpu_device *adev = ring->adev; 3880 + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; 3881 + struct amdgpu_ring *kiq_ring = &kiq->ring; 3882 + unsigned long flags; 3883 + 3884 + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 3885 + return -EINVAL; 3886 + 3887 + spin_lock_irqsave(&kiq->ring_lock, flags); 3888 + 3889 + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { 3890 + spin_unlock_irqrestore(&kiq->ring_lock, flags); 3891 + return -ENOMEM; 3892 + } 3893 + 3894 + /* assert preemption condition */ 3895 + amdgpu_ring_set_preempt_cond_exec(ring, false); 3896 + 3897 + /* assert IB preemption, emit the trailing fence */ 3898 + kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP, 3899 + ring->trail_fence_gpu_addr, 3900 + ++ring->trail_seq); 3901 + amdgpu_ring_commit(kiq_ring); 3902 + 3903 + spin_unlock_irqrestore(&kiq->ring_lock, flags); 3904 + 3905 + /* poll the trailing fence */ 3906 + for (i = 0; i < adev->usec_timeout; i++) { 3907 + if (ring->trail_seq == 3908 + le32_to_cpu(*(ring->trail_fence_cpu_addr))) 3909 + break; 3910 + udelay(1); 3911 + } 3912 + 3913 + if (i >= adev->usec_timeout) { 3914 + r = -EINVAL; 3915 + DRM_ERROR("ring %d failed to preempt ib\n", ring->idx); 3916 + } 3917 + 3918 + /* deassert preemption condition */ 3919 + amdgpu_ring_set_preempt_cond_exec(ring, true); 3920 + return r; 3921 + } 3922 + 3923 + static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, 3924 + bool start, 3925 + bool secure) 3926 + { 3927 + uint32_t v = secure ? FRAME_TMZ : 0; 3928 + 3929 + amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); 3930 + amdgpu_ring_write(ring, v | FRAME_CMD(start ? 
0 : 1)); 3931 + } 3932 + 3933 + static void gfx_v12_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, 3934 + uint32_t reg_val_offs) 3935 + { 3936 + struct amdgpu_device *adev = ring->adev; 3937 + 3938 + amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 3939 + amdgpu_ring_write(ring, 0 | /* src: register*/ 3940 + (5 << 8) | /* dst: memory */ 3941 + (1 << 20)); /* write confirm */ 3942 + amdgpu_ring_write(ring, reg); 3943 + amdgpu_ring_write(ring, 0); 3944 + amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 3945 + reg_val_offs * 4)); 3946 + amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 3947 + reg_val_offs * 4)); 3948 + } 3949 + 3950 + static void gfx_v12_0_ring_emit_wreg(struct amdgpu_ring *ring, 3951 + uint32_t reg, 3952 + uint32_t val) 3953 + { 3954 + uint32_t cmd = 0; 3955 + 3956 + switch (ring->funcs->type) { 3957 + case AMDGPU_RING_TYPE_GFX: 3958 + cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; 3959 + break; 3960 + case AMDGPU_RING_TYPE_KIQ: 3961 + cmd = (1 << 16); /* no inc addr */ 3962 + break; 3963 + default: 3964 + cmd = WR_CONFIRM; 3965 + break; 3966 + } 3967 + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3968 + amdgpu_ring_write(ring, cmd); 3969 + amdgpu_ring_write(ring, reg); 3970 + amdgpu_ring_write(ring, 0); 3971 + amdgpu_ring_write(ring, val); 3972 + } 3973 + 3974 + static void gfx_v12_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 3975 + uint32_t val, uint32_t mask) 3976 + { 3977 + gfx_v12_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 3978 + } 3979 + 3980 + static void gfx_v12_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, 3981 + uint32_t reg0, uint32_t reg1, 3982 + uint32_t ref, uint32_t mask) 3983 + { 3984 + int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 3985 + 3986 + gfx_v12_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, 3987 + ref, mask, 0x20); 3988 + } 3989 + 3990 + static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring, 3991 + unsigned vmid) 3992 + { 3993 + struct amdgpu_device *adev = ring->adev; 3994 + uint32_t value = 0; 3995 + 3996 + value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); 3997 + value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); 3998 + value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); 3999 + value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); 4000 + WREG32_SOC15(GC, 0, regSQ_CMD, value); 4001 + } 4002 + 4003 + static void 4004 + gfx_v12_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 4005 + uint32_t me, uint32_t pipe, 4006 + enum amdgpu_interrupt_state state) 4007 + { 4008 + uint32_t cp_int_cntl, cp_int_cntl_reg; 4009 + 4010 + if (!me) { 4011 + switch (pipe) { 4012 + case 0: 4013 + cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0); 4014 + break; 4015 + default: 4016 + DRM_DEBUG("invalid pipe %d\n", pipe); 4017 + return; 4018 + } 4019 + } else { 4020 + DRM_DEBUG("invalid me %d\n", me); 4021 + return; 4022 + } 4023 + 4024 + switch (state) { 4025 + case AMDGPU_IRQ_STATE_DISABLE: 4026 + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 4027 + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4028 + TIME_STAMP_INT_ENABLE, 0); 4029 + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4030 + GENERIC0_INT_ENABLE, 0); 4031 + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 4032 + break; 4033 + case AMDGPU_IRQ_STATE_ENABLE: 4034 + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); 4035 + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4036 + TIME_STAMP_INT_ENABLE, 1); 4037 + cp_int_cntl = 
REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4038 + GENERIC0_INT_ENABLE, 1); 4039 + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); 4040 + break; 4041 + default: 4042 + break; 4043 + } 4044 + } 4045 + 4046 + static void gfx_v12_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, 4047 + int me, int pipe, 4048 + enum amdgpu_interrupt_state state) 4049 + { 4050 + u32 mec_int_cntl, mec_int_cntl_reg; 4051 + 4052 + /* 4053 + * amdgpu controls only the first MEC. That's why this function only 4054 + * handles the setting of interrupts for this specific MEC. All other 4055 + * pipes' interrupts are set by amdkfd. 4056 + */ 4057 + 4058 + if (me == 1) { 4059 + switch (pipe) { 4060 + case 0: 4061 + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL); 4062 + break; 4063 + case 1: 4064 + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL); 4065 + break; 4066 + default: 4067 + DRM_DEBUG("invalid pipe %d\n", pipe); 4068 + return; 4069 + } 4070 + } else { 4071 + DRM_DEBUG("invalid me %d\n", me); 4072 + return; 4073 + } 4074 + 4075 + switch (state) { 4076 + case AMDGPU_IRQ_STATE_DISABLE: 4077 + mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 4078 + mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4079 + TIME_STAMP_INT_ENABLE, 0); 4080 + mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4081 + GENERIC0_INT_ENABLE, 0); 4082 + WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 4083 + break; 4084 + case AMDGPU_IRQ_STATE_ENABLE: 4085 + mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); 4086 + mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4087 + TIME_STAMP_INT_ENABLE, 1); 4088 + mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4089 + GENERIC0_INT_ENABLE, 1); 4090 + WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); 4091 + break; 4092 + default: 4093 + break; 4094 + } 4095 + } 4096 + 4097 + static int gfx_v12_0_set_eop_interrupt_state(struct amdgpu_device *adev, 4098 + struct amdgpu_irq_src *src, 4099 + unsigned type, 4100 + enum amdgpu_interrupt_state state) 4101 + { 4102 + switch (type) { 4103 + case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP: 4104 + gfx_v12_0_set_gfx_eop_interrupt_state(adev, 0, 0, state); 4105 + break; 4106 + case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP: 4107 + gfx_v12_0_set_gfx_eop_interrupt_state(adev, 0, 1, state); 4108 + break; 4109 + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 4110 + gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 0, state); 4111 + break; 4112 + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 4113 + gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 1, state); 4114 + break; 4115 + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: 4116 + gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 2, state); 4117 + break; 4118 + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: 4119 + gfx_v12_0_set_compute_eop_interrupt_state(adev, 1, 3, state); 4120 + break; 4121 + default: 4122 + break; 4123 + } 4124 + return 0; 4125 + } 4126 + 4127 + static int gfx_v12_0_eop_irq(struct amdgpu_device *adev, 4128 + struct amdgpu_irq_src *source, 4129 + struct amdgpu_iv_entry *entry) 4130 + { 4131 + int i; 4132 + u8 me_id, pipe_id, queue_id; 4133 + struct amdgpu_ring *ring; 4134 + uint32_t mes_queue_id = entry->src_data[0]; 4135 + 4136 + DRM_DEBUG("IH: CP EOP\n"); 4137 + 4138 + if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) { 4139 + struct amdgpu_mes_queue *queue; 4140 + 4141 + mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK; 4142 + 4143 + spin_lock(&adev->mes.queue_id_lock); 4144 + 
queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id); 4145 + if (queue) { 4146 + DRM_DEBUG("process mes queue id = %d\n", mes_queue_id); 4147 + amdgpu_fence_process(queue->ring); 4148 + } 4149 + spin_unlock(&adev->mes.queue_id_lock); 4150 + } else { 4151 + me_id = (entry->ring_id & 0x0c) >> 2; 4152 + pipe_id = (entry->ring_id & 0x03) >> 0; 4153 + queue_id = (entry->ring_id & 0x70) >> 4; 4154 + 4155 + switch (me_id) { 4156 + case 0: 4157 + if (pipe_id == 0) 4158 + amdgpu_fence_process(&adev->gfx.gfx_ring[0]); 4159 + else 4160 + amdgpu_fence_process(&adev->gfx.gfx_ring[1]); 4161 + break; 4162 + case 1: 4163 + case 2: 4164 + for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4165 + ring = &adev->gfx.compute_ring[i]; 4166 + /* Per-queue interrupt is supported for MEC starting from VI. 4167 + * The interrupt can only be enabled/disabled per pipe instead 4168 + * of per queue. 4169 + */ 4170 + if ((ring->me == me_id) && 4171 + (ring->pipe == pipe_id) && 4172 + (ring->queue == queue_id)) 4173 + amdgpu_fence_process(ring); 4174 + } 4175 + break; 4176 + } 4177 + } 4178 + 4179 + return 0; 4180 + } 4181 + 4182 + static int gfx_v12_0_set_priv_reg_fault_state(struct amdgpu_device *adev, 4183 + struct amdgpu_irq_src *source, 4184 + unsigned type, 4185 + enum amdgpu_interrupt_state state) 4186 + { 4187 + switch (state) { 4188 + case AMDGPU_IRQ_STATE_DISABLE: 4189 + case AMDGPU_IRQ_STATE_ENABLE: 4190 + WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, 4191 + PRIV_REG_INT_ENABLE, 4192 + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4193 + break; 4194 + default: 4195 + break; 4196 + } 4197 + 4198 + return 0; 4199 + } 4200 + 4201 + static int gfx_v12_0_set_priv_inst_fault_state(struct amdgpu_device *adev, 4202 + struct amdgpu_irq_src *source, 4203 + unsigned type, 4204 + enum amdgpu_interrupt_state state) 4205 + { 4206 + switch (state) { 4207 + case AMDGPU_IRQ_STATE_DISABLE: 4208 + case AMDGPU_IRQ_STATE_ENABLE: 4209 + WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, 4210 + PRIV_INSTR_INT_ENABLE, 4211 + state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); 4212 + break; 4213 + default: 4214 + break; 4215 + } 4216 + 4217 + return 0; 4218 + } 4219 + 4220 + static void gfx_v12_0_handle_priv_fault(struct amdgpu_device *adev, 4221 + struct amdgpu_iv_entry *entry) 4222 + { 4223 + u8 me_id, pipe_id, queue_id; 4224 + struct amdgpu_ring *ring; 4225 + int i; 4226 + 4227 + me_id = (entry->ring_id & 0x0c) >> 2; 4228 + pipe_id = (entry->ring_id & 0x03) >> 0; 4229 + queue_id = (entry->ring_id & 0x70) >> 4; 4230 + 4231 + switch (me_id) { 4232 + case 0: 4233 + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4234 + ring = &adev->gfx.gfx_ring[i]; 4235 + /* we only enabled 1 gfx queue per pipe for now */ 4236 + if (ring->me == me_id && ring->pipe == pipe_id) 4237 + drm_sched_fault(&ring->sched); 4238 + } 4239 + break; 4240 + case 1: 4241 + case 2: 4242 + for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4243 + ring = &adev->gfx.compute_ring[i]; 4244 + if (ring->me == me_id && ring->pipe == pipe_id && 4245 + ring->queue == queue_id) 4246 + drm_sched_fault(&ring->sched); 4247 + } 4248 + break; 4249 + default: 4250 + BUG(); 4251 + break; 4252 + } 4253 + } 4254 + 4255 + static int gfx_v12_0_priv_reg_irq(struct amdgpu_device *adev, 4256 + struct amdgpu_irq_src *source, 4257 + struct amdgpu_iv_entry *entry) 4258 + { 4259 + DRM_ERROR("Illegal register access in command stream\n"); 4260 + gfx_v12_0_handle_priv_fault(adev, entry); 4261 + return 0; 4262 + } 4263 + 4264 + static int gfx_v12_0_priv_inst_irq(struct amdgpu_device *adev, 4265 + struct amdgpu_irq_src *source, 4266 + struct amdgpu_iv_entry *entry) 4267 + { 4268 + DRM_ERROR("Illegal instruction in command stream\n"); 4269 + gfx_v12_0_handle_priv_fault(adev, entry); 4270 + return 0; 4271 + } 4272 + 4273 + static void gfx_v12_0_emit_mem_sync(struct amdgpu_ring *ring) 4274 + { 4275 + const unsigned int gcr_cntl = 4276 + PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) | 4277 + PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) | 4278 + PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) | 4279 + PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) | 4280 + PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) | 4281 + PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) | 4282 + PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) | 4283 + PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1); 4284 + 4285 + /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */ 4286 + amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6)); 4287 + amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */ 4288 + amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ 4289 + amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */ 4290 + amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ 4291 + amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ 4292 + amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ 4293 + amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */ 4294 + } 4295 + 4296 + static const struct amd_ip_funcs gfx_v12_0_ip_funcs = { 4297 + .name = "gfx_v12_0", 4298 + .early_init = gfx_v12_0_early_init, 4299 + .late_init = gfx_v12_0_late_init, 4300 + .sw_init = gfx_v12_0_sw_init, 4301 + .sw_fini = gfx_v12_0_sw_fini, 4302 + .hw_init = gfx_v12_0_hw_init, 4303 + .hw_fini = gfx_v12_0_hw_fini, 4304 + .suspend = gfx_v12_0_suspend, 4305 + .resume = gfx_v12_0_resume, 4306 + .is_idle = gfx_v12_0_is_idle, 4307 + .wait_for_idle = gfx_v12_0_wait_for_idle, 4308 + .set_clockgating_state = gfx_v12_0_set_clockgating_state, 4309 + .set_powergating_state = gfx_v12_0_set_powergating_state, 4310 + .get_clockgating_state = gfx_v12_0_get_clockgating_state, 4311 + }; 4312 + 4313 + static const struct 
amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = { 4314 + .type = AMDGPU_RING_TYPE_GFX, 4315 + .align_mask = 0xff, 4316 + .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4317 + .support_64bit_ptrs = true, 4318 + .secure_submission_supported = true, 4319 + .get_rptr = gfx_v12_0_ring_get_rptr_gfx, 4320 + .get_wptr = gfx_v12_0_ring_get_wptr_gfx, 4321 + .set_wptr = gfx_v12_0_ring_set_wptr_gfx, 4322 + .emit_frame_size = /* totally 242 maximum if 16 IBs */ 4323 + 5 + /* COND_EXEC */ 4324 + 7 + /* PIPELINE_SYNC */ 4325 + SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4326 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4327 + 2 + /* VM_FLUSH */ 4328 + 8 + /* FENCE for VM_FLUSH */ 4329 + 20 + /* GDS switch */ 4330 + 5 + /* COND_EXEC */ 4331 + 7 + /* HDP_flush */ 4332 + 4 + /* VGT_flush */ 4333 + 31 + /* DE_META */ 4334 + 3 + /* CNTX_CTRL */ 4335 + 5 + /* HDP_INVL */ 4336 + 8 + 8 + /* FENCE x2 */ 4337 + 8, /* gfx_v12_0_emit_mem_sync */ 4338 + .emit_ib_size = 4, /* gfx_v12_0_ring_emit_ib_gfx */ 4339 + .emit_ib = gfx_v12_0_ring_emit_ib_gfx, 4340 + .emit_fence = gfx_v12_0_ring_emit_fence, 4341 + .emit_pipeline_sync = gfx_v12_0_ring_emit_pipeline_sync, 4342 + .emit_vm_flush = gfx_v12_0_ring_emit_vm_flush, 4343 + .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush, 4344 + .test_ring = gfx_v12_0_ring_test_ring, 4345 + .test_ib = gfx_v12_0_ring_test_ib, 4346 + .insert_nop = amdgpu_ring_insert_nop, 4347 + .pad_ib = amdgpu_ring_generic_pad_ib, 4348 + .emit_cntxcntl = gfx_v12_0_ring_emit_cntxcntl, 4349 + .init_cond_exec = gfx_v12_0_ring_emit_init_cond_exec, 4350 + .preempt_ib = gfx_v12_0_ring_preempt_ib, 4351 + .emit_frame_cntl = gfx_v12_0_ring_emit_frame_cntl, 4352 + .emit_wreg = gfx_v12_0_ring_emit_wreg, 4353 + .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, 4354 + .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, 4355 + .soft_recovery = gfx_v12_0_ring_soft_recovery, 4356 + .emit_mem_sync = gfx_v12_0_emit_mem_sync, 4357 + }; 4358 + 4359 + static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { 4360 + .type = AMDGPU_RING_TYPE_COMPUTE, 4361 + .align_mask = 0xff, 4362 + .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4363 + .support_64bit_ptrs = true, 4364 + .get_rptr = gfx_v12_0_ring_get_rptr_compute, 4365 + .get_wptr = gfx_v12_0_ring_get_wptr_compute, 4366 + .set_wptr = gfx_v12_0_ring_set_wptr_compute, 4367 + .emit_frame_size = 4368 + 7 + /* gfx_v12_0_ring_emit_hdp_flush */ 4369 + 5 + /* hdp invalidate */ 4370 + 7 + /* gfx_v12_0_ring_emit_pipeline_sync */ 4371 + SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4372 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4373 + 2 + /* gfx_v12_0_ring_emit_vm_flush */ 4374 + 8 + 8 + 8 + /* gfx_v12_0_ring_emit_fence x3 for user fence, vm fence */ 4375 + 8, /* gfx_v12_0_emit_mem_sync */ 4376 + .emit_ib_size = 7, /* gfx_v12_0_ring_emit_ib_compute */ 4377 + .emit_ib = gfx_v12_0_ring_emit_ib_compute, 4378 + .emit_fence = gfx_v12_0_ring_emit_fence, 4379 + .emit_pipeline_sync = gfx_v12_0_ring_emit_pipeline_sync, 4380 + .emit_vm_flush = gfx_v12_0_ring_emit_vm_flush, 4381 + .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush, 4382 + .test_ring = gfx_v12_0_ring_test_ring, 4383 + .test_ib = gfx_v12_0_ring_test_ib, 4384 + .insert_nop = amdgpu_ring_insert_nop, 4385 + .pad_ib = amdgpu_ring_generic_pad_ib, 4386 + .emit_wreg = gfx_v12_0_ring_emit_wreg, 4387 + .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, 4388 + .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, 4389 + .emit_mem_sync = gfx_v12_0_emit_mem_sync, 4390 + }; 4391 + 4392 + static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = { 4393 + 
.type = AMDGPU_RING_TYPE_KIQ, 4394 + .align_mask = 0xff, 4395 + .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4396 + .support_64bit_ptrs = true, 4397 + .get_rptr = gfx_v12_0_ring_get_rptr_compute, 4398 + .get_wptr = gfx_v12_0_ring_get_wptr_compute, 4399 + .set_wptr = gfx_v12_0_ring_set_wptr_compute, 4400 + .emit_frame_size = 4401 + 7 + /* gfx_v12_0_ring_emit_hdp_flush */ 4402 + 5 + /*hdp invalidate */ 4403 + 7 + /* gfx_v12_0_ring_emit_pipeline_sync */ 4404 + SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4405 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4406 + 2 + /* gfx_v12_0_ring_emit_vm_flush */ 4407 + 8 + 8 + 8, /* gfx_v12_0_ring_emit_fence_kiq x3 for user fence, vm fence */ 4408 + .emit_ib_size = 7, /* gfx_v12_0_ring_emit_ib_compute */ 4409 + .emit_ib = gfx_v12_0_ring_emit_ib_compute, 4410 + .emit_fence = gfx_v12_0_ring_emit_fence_kiq, 4411 + .test_ring = gfx_v12_0_ring_test_ring, 4412 + .test_ib = gfx_v12_0_ring_test_ib, 4413 + .insert_nop = amdgpu_ring_insert_nop, 4414 + .pad_ib = amdgpu_ring_generic_pad_ib, 4415 + .emit_rreg = gfx_v12_0_ring_emit_rreg, 4416 + .emit_wreg = gfx_v12_0_ring_emit_wreg, 4417 + .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, 4418 + .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, 4419 + }; 4420 + 4421 + static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev) 4422 + { 4423 + int i; 4424 + 4425 + adev->gfx.kiq[0].ring.funcs = &gfx_v12_0_ring_funcs_kiq; 4426 + 4427 + for (i = 0; i < adev->gfx.num_gfx_rings; i++) 4428 + adev->gfx.gfx_ring[i].funcs = &gfx_v12_0_ring_funcs_gfx; 4429 + 4430 + for (i = 0; i < adev->gfx.num_compute_rings; i++) 4431 + adev->gfx.compute_ring[i].funcs = &gfx_v12_0_ring_funcs_compute; 4432 + } 4433 + 4434 + static const struct amdgpu_irq_src_funcs gfx_v12_0_eop_irq_funcs = { 4435 + .set = gfx_v12_0_set_eop_interrupt_state, 4436 + .process = gfx_v12_0_eop_irq, 4437 + }; 4438 + 4439 + static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_reg_irq_funcs = { 4440 + .set = gfx_v12_0_set_priv_reg_fault_state, 4441 + .process = gfx_v12_0_priv_reg_irq, 4442 + }; 4443 + 4444 + static const struct amdgpu_irq_src_funcs gfx_v12_0_priv_inst_irq_funcs = { 4445 + .set = gfx_v12_0_set_priv_inst_fault_state, 4446 + .process = gfx_v12_0_priv_inst_irq, 4447 + }; 4448 + 4449 + static void gfx_v12_0_set_irq_funcs(struct amdgpu_device *adev) 4450 + { 4451 + adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; 4452 + adev->gfx.eop_irq.funcs = &gfx_v12_0_eop_irq_funcs; 4453 + 4454 + adev->gfx.priv_reg_irq.num_types = 1; 4455 + adev->gfx.priv_reg_irq.funcs = &gfx_v12_0_priv_reg_irq_funcs; 4456 + 4457 + adev->gfx.priv_inst_irq.num_types = 1; 4458 + adev->gfx.priv_inst_irq.funcs = &gfx_v12_0_priv_inst_irq_funcs; 4459 + } 4460 + 4461 + static void gfx_v12_0_set_imu_funcs(struct amdgpu_device *adev) 4462 + { 4463 + if (adev->flags & AMD_IS_APU) 4464 + adev->gfx.imu.mode = MISSION_MODE; 4465 + else 4466 + adev->gfx.imu.mode = DEBUG_MODE; 4467 + 4468 + /* TODO */ 4469 + //adev->gfx.imu.funcs = &gfx_v12_0_imu_funcs; 4470 + } 4471 + 4472 + static void gfx_v12_0_set_rlc_funcs(struct amdgpu_device *adev) 4473 + { 4474 + adev->gfx.rlc.funcs = &gfx_v12_0_rlc_funcs; 4475 + } 4476 + 4477 + static void gfx_v12_0_set_mqd_funcs(struct amdgpu_device *adev) 4478 + { 4479 + /* set gfx eng mqd */ 4480 + adev->mqds[AMDGPU_HW_IP_GFX].mqd_size = 4481 + sizeof(struct v12_gfx_mqd); 4482 + adev->mqds[AMDGPU_HW_IP_GFX].init_mqd = 4483 + gfx_v12_0_gfx_mqd_init; 4484 + /* set compute eng mqd */ 4485 + adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size = 4486 + sizeof(struct v12_compute_mqd); 4487 + 
adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd = 4488 + gfx_v12_0_compute_mqd_init; 4489 + } 4490 + 4491 + static void gfx_v12_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev, 4492 + u32 bitmap) 4493 + { 4494 + u32 data; 4495 + 4496 + if (!bitmap) 4497 + return; 4498 + 4499 + data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; 4500 + data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; 4501 + 4502 + WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data); 4503 + } 4504 + 4505 + static u32 gfx_v12_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev) 4506 + { 4507 + u32 data, wgp_bitmask; 4508 + data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG); 4509 + data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG); 4510 + 4511 + data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK; 4512 + data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT; 4513 + 4514 + wgp_bitmask = 4515 + amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1); 4516 + 4517 + return (~data) & wgp_bitmask; 4518 + } 4519 + 4520 + static u32 gfx_v12_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev) 4521 + { 4522 + u32 wgp_idx, wgp_active_bitmap; 4523 + u32 cu_bitmap_per_wgp, cu_active_bitmap; 4524 + 4525 + wgp_active_bitmap = gfx_v12_0_get_wgp_active_bitmap_per_sh(adev); 4526 + cu_active_bitmap = 0; 4527 + 4528 + for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) { 4529 + /* each enabled WGP enables a pair of CUs */ 4530 + cu_bitmap_per_wgp = 3 << (2 * wgp_idx); 4531 + if (wgp_active_bitmap & (1 << wgp_idx)) 4532 + cu_active_bitmap |= cu_bitmap_per_wgp; 4533 + } 4534 + 4535 + return cu_active_bitmap; 4536 + } 4537 + 4538 + static int gfx_v12_0_get_cu_info(struct amdgpu_device *adev, 4539 + struct amdgpu_cu_info *cu_info) 4540 + { 4541 + int i, j, k, counter, active_cu_number = 0; 4542 + u32 mask, bitmap; 4543 + unsigned disable_masks[8 * 2]; 4544 + 4545 + if (!adev || !cu_info) 4546 + return -EINVAL; 4547 + 4548 + amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2); 4549 + 4550 + mutex_lock(&adev->grbm_idx_mutex); 4551 + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 4552 + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 4553 + mask = 1; 4554 + counter = 0; 4555 + gfx_v12_0_select_se_sh(adev, i, j, 0xffffffff, 0); 4556 + if (i < 8 && j < 2) 4557 + gfx_v12_0_set_user_wgp_inactive_bitmap_per_sh( 4558 + adev, disable_masks[i * 2 + j]); 4559 + bitmap = gfx_v12_0_get_cu_active_bitmap_per_sh(adev); 4560 + 4561 + /* 4562 + * GFX12 can support more than 4 SEs, while the bitmap 4563 + * in the cu_info struct is 4x4 and the ioctl interface struct 4564 + * drm_amdgpu_info_device must stay stable.
4565 + * So we use last two columns of bitmap to store cu mask for 4566 + * SEs 4 to 7, the layout of the bitmap is as below: 4567 + * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]} 4568 + * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]} 4569 + * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]} 4570 + * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]} 4571 + * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]} 4572 + * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]} 4573 + * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]} 4574 + * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]} 4575 + */ 4576 + cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap; 4577 + 4578 + for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { 4579 + if (bitmap & mask) 4580 + counter++; 4581 + 4582 + mask <<= 1; 4583 + } 4584 + active_cu_number += counter; 4585 + } 4586 + } 4587 + gfx_v12_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 4588 + mutex_unlock(&adev->grbm_idx_mutex); 4589 + 4590 + cu_info->number = active_cu_number; 4591 + cu_info->simd_per_cu = NUM_SIMD_PER_CU; 4592 + 4593 + return 0; 4594 + } 4595 + 4596 + const struct amdgpu_ip_block_version gfx_v12_0_ip_block = { 4597 + .type = AMD_IP_BLOCK_TYPE_GFX, 4598 + .major = 12, 4599 + .minor = 0, 4600 + .rev = 0, 4601 + .funcs = &gfx_v12_0_ip_funcs, 4602 + };
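Two pieces of arithmetic in the CU-info code above are worth pinning down: an active WGP always expands into a pair of CUs, and up to eight shader engines are folded into the fixed 4x4 cu_info bitmap so the drm_amdgpu_info_device ioctl layout stays stable. Below is a small standalone model of both calculations; the 16-WGP bound and the 3 << (2 * wgp_idx) expansion mirror the driver, while the helper name and the sample bitmap are illustrative only.

#include <stdio.h>
#include <stdint.h>

/* WGP bit n expands to CU bits 2n and 2n+1, as in the driver's loop */
static uint32_t cu_bitmap_from_wgp(uint32_t wgp_active_bitmap)
{
        uint32_t cu_active_bitmap = 0;
        int wgp_idx;

        for (wgp_idx = 0; wgp_idx < 16; wgp_idx++)
                if (wgp_active_bitmap & (1u << wgp_idx))
                        cu_active_bitmap |= 3u << (2 * wgp_idx);

        return cu_active_bitmap;
}

int main(void)
{
        int se, sh;

        /* WGPs 0 and 2 active -> CUs 0,1 and 4,5 -> bitmap 0x33 */
        printf("cu bitmap: 0x%x\n", cu_bitmap_from_wgp(0x5));

        /* SE0..SE3 fill columns 0-1; SE4..SE7 are folded into columns 2-3 */
        for (se = 0; se < 8; se++)
                for (sh = 0; sh < 2; sh++)
                        printf("SE%d SH%d -> bitmap[0][%d][%d]\n",
                               se, sh, se % 4, sh + (se / 4) * 2);
        return 0;
}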
+29
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h
··· 1 + /* 2 + * Copyright 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + */ 23 + 24 + #ifndef __GFX_V12_0_H__ 25 + #define __GFX_V12_0_H__ 26 + 27 + extern const struct amdgpu_ip_block_version gfx_v12_0_ip_block; 28 + 29 + #endif
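The header's only export is the gfx_v12_0_ip_block version descriptor. In amdgpu, descriptors like this are added to the device's IP-block list (amdgpu_device_ip_block_add() during IP discovery), and the core then walks every block's amd_ip_funcs hooks stage by stage, running all early_init hooks before any hw_init. A standalone sketch of that dispatch pattern follows, using deliberately simplified stand-in types rather than the real amdgpu structs.

#include <stdio.h>

/* simplified stand-ins for amd_ip_funcs / amdgpu_ip_block_version */
struct ip_funcs {
        const char *name;
        int (*early_init)(void *handle);
        int (*hw_init)(void *handle);
};

struct ip_block_version {
        int major, minor, rev;
        const struct ip_funcs *funcs;
};

static int gfx_early_init(void *handle) { (void)handle; puts("gfx_v12_0 early_init"); return 0; }
static int gfx_hw_init(void *handle)    { (void)handle; puts("gfx_v12_0 hw_init");    return 0; }

static const struct ip_funcs gfx_funcs = {
        .name = "gfx_v12_0",
        .early_init = gfx_early_init,
        .hw_init = gfx_hw_init,
};

static const struct ip_block_version gfx_block = {
        .major = 12, .minor = 0, .rev = 0, .funcs = &gfx_funcs,
};

int main(void)
{
        const struct ip_block_version *blocks[] = { &gfx_block };
        unsigned int i, n = sizeof(blocks) / sizeof(blocks[0]);

        /* every early_init runs before any hw_init, as in the driver core */
        for (i = 0; i < n; i++)
                if (blocks[i]->funcs->early_init(NULL))
                        return 1;
        for (i = 0; i < n; i++)
                if (blocks[i]->funcs->hw_init(NULL))
                        return 1;
        return 0;
}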