
drm/amdgpu: add gfx v10 implementation (v10)

GFX is the graphics and compute block on the GPU.

v1: add initial gfx v10 implementation (Ray)
v2: convert to new get_vm_pde function in emit_vm_flush (Hawking)
v3: switch to new emit ib interfaces (Hawking)
v4: squash in updates (Alex)
v5: remove unused variables (Alex)
v6: some golden regs moved to vbios (Alex)
v7: squash in some cleanups (Alex)
v8: squash in golden settings update (Alex)
v9: squash in whitespace fixes (Ernst Sjöstrand, Alex)
v10: squash in GDS backup size fix and GDS/GWS/OA removal rebase fixes (Hawking)

Signed-off-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Hawking Zhang and committed by Alex Deucher
a644d85a 886f82aa

+5204 -3
+2 -1
drivers/gpu/drm/amd/amdgpu/Makefile
@@ -110,7 +110,8 @@
 	amdgpu_gfx.o \
 	amdgpu_rlc.o \
 	gfx_v8_0.o \
-	gfx_v9_0.o
+	gfx_v9_0.o \
+	gfx_v10_0.o
 
 # add async DMA block
 amdgpu-y += \
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -29,8 +29,8 @@
 #include <drm/drm_print.h>
 
 /* max number of rings */
-#define AMDGPU_MAX_RINGS		23
-#define AMDGPU_MAX_GFX_RINGS		1
+#define AMDGPU_MAX_RINGS		24
+#define AMDGPU_MAX_GFX_RINGS		2
 #define AMDGPU_MAX_COMPUTE_RINGS	8
 #define AMDGPU_MAX_VCE_RINGS		3
 #define AMDGPU_MAX_UVD_ENC_RINGS	2
+5171
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "nv.h"
#include "nvd.h"

#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "navi10_enum.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"

#include "soc15.h"
#include "soc15_common.h"
#include "clearstate_gfx10.h"
#include "v10_structs.h"
#include "gfx_v10_0.h"
#include "nbio_v2_3.h"

/**
 * Navi10 has two gfx rings sharing each gfx pipe:
 * 1. Primary ring
 * 2. Async ring
 *
 * During the bring-up phase only the primary ring was used, so the gfx
 * ring count was initially set to 1.
 */
#define GFX10_NUM_GFX_RINGS	2
#define GFX10_MEC_HPD_SIZE	2048

#define F32_CE_PROGRAM_RAM_SIZE		65536
#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L

MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
MODULE_FIRMWARE("amdgpu/navi10_mec.bin");
MODULE_FIRMWARE("amdgpu/navi10_mec2.bin");
MODULE_FIRMWARE("amdgpu/navi10_rlc.bin");

static const struct soc15_reg_golden golden_settings_gc_10_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xfeff8fff, 0xfeff8100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000002, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x000007ff, 0x000005ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0x20000000, 0x20000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07800000, 0x04800000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x10321032),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x02310231),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffff9fff, 0x00001188),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CGTT_CLK_CTRL, 0x40000ff0, 0x40000100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00000000)
};

static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
{
	/* Pending on emulation bring up */
};

static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance);
static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev);
static void gfx_v10_0_rlc_backdoor_autoload_buffer_fini(struct amdgpu_device *adev);
static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev);
static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);
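/*
 * KIQ (kernel interface queue) helpers. The KIQ is a privileged compute
 * queue through which the driver asks the CP to map, unmap and query the
 * other gfx/compute queues instead of programming them directly. Each
 * helper below emits a single PM4 packet; its dword count has to match
 * the corresponding *_size field in gfx_v10_0_kiq_pm4_funcs.
 */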
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

static void gfx10_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx10_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx10_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx10_kiq_set_resources,
	.kiq_map_queues = gfx10_kiq_map_queues,
	.kiq_unmap_queues = gfx10_kiq_unmap_queues,
	.kiq_query_status = gfx10_kiq_query_status,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
};

static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
}

static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_10_0_nv10,
						(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
		break;
	default:
		break;
	}
}

static void gfx_v10_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
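/*
 * Thin wrappers over the PM4 WRITE_DATA and WAIT_REG_MEM packets:
 * WRITE_DATA stores an immediate value to a register (dst_sel 0),
 * optionally with write confirmation, and WAIT_REG_MEM polls a register
 * or memory location until (value & mask) matches ref. The ring/IB tests
 * and the later emit helpers are built on top of these.
 */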
static void gfx_v10_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v10_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}

	WREG32(scratch, 0xCAFEDEAD);

	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		if (amdgpu_emu_mode == 1)
			DRM_INFO("ring test on %d succeeded in %d msecs\n",
				 ring->idx, i);
		else
			DRM_INFO("ring test on %d succeeded in %d usecs\n",
				 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);

	return r;
}

static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	uint32_t scratch;
	uint32_t tmp = 0;
	long r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
		return r;
	}

	WREG32(scratch, 0xCAFEDEAD);

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}

	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}

	tmp = RREG32(scratch);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);

	return r;
}

static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
		le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}

static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		if ((adev->gfx.rlc_fw_version < 85) ||
		    (adev->pm.fw_version < 0x002A0C00))
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		break;
	default:
		break;
	}
}
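/*
 * Microcode handling: each image (PFP, ME, CE, RLC, MEC, MEC2) is
 * fetched with request_firmware(), validated, and its version/feature
 * fields cached; MEC2 is optional and simply skipped if the blob is
 * missing. With PSP front-door loading the images are additionally
 * registered in adev->firmware.ucode[] further down.
 */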
static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2 && version_minor == 1)
		adev->gfx.rlc.is_rlc_v2_1 = true;

	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
		le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
		le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
		le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
		le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
		le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
		le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
		le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
		le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
			adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			       le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			       le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v10_0_init_rlc_ext_microcode(adev);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}
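	/*
	 * Register every image for PSP front-door loading. fw_size is
	 * accumulated PAGE_SIZE-aligned, and the MEC jump tables are
	 * registered as separate MEC1_JT/MEC2_JT entries, split off from
	 * the main MEC text by jt_size dwords.
	 */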
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		if (adev->gfx.rlc.is_rlc_v2_1 &&
		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) -
			      le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) -
				      le32_to_cpu(cp_hdr->jt_size) * 4,
				      PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
				      PAGE_SIZE);
		}
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx10: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}

	gfx_v10_0_check_gfxoff_flag(adev);

	return err;
}

static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx10_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}
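/*
 * Clear-state buffer (CSB): a PM4 stream the RLC replays to put the
 * context registers into a known state: BEGIN_CLEAR_STATE, a
 * CONTEXT_CONTROL packet, one SET_CONTEXT_REG burst per extent of
 * gfx10_cs_data, the PA_SC_TILE_STEERING_OVERRIDE value, then
 * END_CLEAR_STATE and CLEAR_STATE. gfx_v10_0_get_csb_size() above counts
 * exactly the layout that gfx_v10_0_get_csb_buffer() below writes, so
 * the two must be kept in sync.
 */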
static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int ctx_reg_offset;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}

static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx10_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	return 0;
}
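/*
 * MEC setup: every compute ring gets a GFX10_MEC_HPD_SIZE slice of a
 * single GTT buffer for its EOP area, and with direct (non-PSP) loading
 * the MEC microcode is copied into its own GPU buffer as well.
 */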
static void gfx_v10_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

static int gfx_v10_0_me_init(struct amdgpu_device *adev)
{
	int r;

	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);

	r = gfx_v10_0_init_microcode(adev);
	if (r)
		DRM_ERROR("Failed to load gfx firmware!\n");

	return r;
}

static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data = NULL;
	unsigned fw_size;
	u32 *fw = NULL;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr = NULL;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX10_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
		gfx_v10_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

		r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.mec_fw_obj,
					      &adev->gfx.mec.mec_fw_gpu_addr,
					      (void **)&fw);
		if (r) {
			dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
			gfx_v10_0_mec_fini(adev);
			return r;
		}

		memcpy(fw, fw_data, fw_size);

		amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
	}

	return 0;
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		     (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		     (regno << SQ_IND_INDEX__INDEX__SHIFT) |
		     (thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		     (SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
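/*
 * Wave debug state is read through the SQ_IND_INDEX/SQ_IND_DATA
 * indirect register pair; AUTO_INCR lets wave_read_regs() stream whole
 * SGPR/VGPR ranges. On gfx10 the SIMD is selected through the GRBM
 * instance index rather than a separate simd argument, hence the
 * WARN_ON(simd != 0) checks below.
 */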
static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* in gfx10 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here */
	WARN_ON(simd != 0);

	/* type 2 wave data */
	dst[(*no_fields)++] = 2;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
}

static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v10_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}


static const struct amdgpu_gfx_funcs gfx_v10_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v10_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v10_0_select_se_sh,
	.read_wave_data = &gfx_v10_0_read_wave_data,
	.read_wave_sgprs = &gfx_v10_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v10_0_read_wave_vgprs,
};

static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v10_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(adev->gfx.config.gb_addr_config,
				      GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
}

static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	int r;
	struct amdgpu_ring *ring;
	unsigned int irq_type;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;
	return 0;
}

static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX10_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}
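/*
 * sw_init builds the software state of the block: IRQ sources, the
 * microcode and RLC/MEC buffer objects, one amdgpu_ring per enabled
 * gfx and compute queue, the KIQ with its MQD backing store and, for
 * RLC back-door loading, the autoload buffer.
 */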
static int gfx_v10_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id = 0;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 2;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	default:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	}

	/* KIQ event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_10_1__SRCID__CP_IB2_INTERRUPT_PKT,
			      &adev->gfx.kiq.irq);
	if (r)
		return r;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
			      GFX_10_1__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_10_1__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v10_0_scratch_init(adev);

	r = gfx_v10_0_me_init(adev);
	if (r)
		return r;

	r = gfx_v10_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v10_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v10_0_gfx_ring_init(adev, ring_id,
							    i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}

	ring_id = 0;
	/* set up the compute queues - allocate horizontally across pipes */
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k,
								     j))
					continue;

				r = gfx_v10_0_compute_ring_init(adev, ring_id,
								i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE);
	if (r) {
		DRM_ERROR("Failed to init KIQ BOs!\n");
		return r;
	}

	kiq = &adev->gfx.kiq;
	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
	if (r)
		return r;

	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd));
	if (r)
		return r;

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v10_0_rlc_backdoor_autoload_buffer_init(adev);
		if (r)
			return r;
	}

	adev->gfx.ce_ram_size = F32_CE_PROGRAM_RAM_SIZE;

	gfx_v10_0_gpu_early_init(adev);

	return 0;
}

static void gfx_v10_0_pfp_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
			      &adev->gfx.pfp.pfp_fw_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_ptr);
}

static void gfx_v10_0_ce_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.ce.ce_fw_obj,
			      &adev->gfx.ce.ce_fw_gpu_addr,
			      (void **)&adev->gfx.ce.ce_fw_ptr);
}

static void gfx_v10_0_me_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
			      &adev->gfx.me.me_fw_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_ptr);
}

static int gfx_v10_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_mqd_sw_fini(adev);
	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
	amdgpu_gfx_kiq_fini(adev);

	gfx_v10_0_pfp_fini(adev);
	gfx_v10_0_ce_fini(adev);
	gfx_v10_0_me_fini(adev);
	gfx_v10_0_rlc_fini(adev);
	gfx_v10_0_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v10_0_rlc_backdoor_autoload_buffer_fini(adev);

	gfx_v10_0_free_microcode(adev);

	return 0;
}


static void gfx_v10_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	/* TODO */
}

static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}

static u32 gfx_v10_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}

static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
				     adev->gfx.config.max_sh_per_se;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v10_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);
}

static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *adev)
{
	uint32_t num_sc;
	uint32_t enabled_rb_per_sh;
	uint32_t active_rb_bitmap;
	uint32_t num_rb_per_sc;
	uint32_t num_packer_per_sc;
	uint32_t pa_sc_tile_steering_override;

	/* init num_sc */
	num_sc = adev->gfx.config.max_shader_engines * adev->gfx.config.max_sh_per_se *
		adev->gfx.config.num_sc_per_sh;
	/* init num_rb_per_sc */
	active_rb_bitmap = gfx_v10_0_get_rb_active_bitmap(adev);
	enabled_rb_per_sh = hweight32(active_rb_bitmap);
	num_rb_per_sc = enabled_rb_per_sh / adev->gfx.config.num_sc_per_sh;
	/* init num_packer_per_sc */
	num_packer_per_sc = adev->gfx.config.num_packer_per_sc;

	pa_sc_tile_steering_override = 0;
	pa_sc_tile_steering_override |=
		(order_base_2(num_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_SC_MASK;
	pa_sc_tile_steering_override |=
		(order_base_2(num_rb_per_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SC_MASK;
	pa_sc_tile_steering_override |=
		(order_base_2(num_packer_per_sc) << PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC__SHIFT) &
		PA_SC_TILE_STEERING_OVERRIDE__NUM_PACKER_PER_SC_MASK;

	return pa_sc_tile_steering_override;
}
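/*
 * VMIDs 8..15 (FIRST_COMPUTE_VMID up to LAST_COMPUTE_VMID) are reserved
 * for compute; gfx_v10_0_init_compute_vmid() points their SH_MEM
 * apertures at the fixed layout described in the comment inside the
 * function so user-mode compute queues see consistent addressing.
 */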
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)

static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		nv_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
	}
	nv_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
{
	int i, j, k;
	int max_wgp_per_sh = adev->gfx.config.max_cu_per_sh >> 1;
	u32 tmp, wgp_active_bitmap = 0;
	u32 gcrd_targets_disable_tcp = 0;
	u32 utcl_invreq_disable = 0;
	/*
	 * GCRD_TARGETS_DISABLE field contains
	 * for Navi10: GL1C=[18:15], SQC=[14:10], TCP=[9:0]
	 */
	u32 gcrd_targets_disable_mask = amdgpu_gfx_create_bitmask(
		2 * max_wgp_per_sh + /* TCP */
		max_wgp_per_sh + /* SQC */
		4); /* GL1C */
	/*
	 * UTCL1_UTCL0_INVREQ_DISABLE field contains
	 * for Navi10: SQG=[24], RMI=[23:20], SQC=[19:10], TCP=[9:0]
	 */
	u32 utcl_invreq_disable_mask = amdgpu_gfx_create_bitmask(
		2 * max_wgp_per_sh + /* TCP */
		2 * max_wgp_per_sh + /* SQC */
		4 + /* RMI */
		1); /* SQG */

	if (adev->asic_type == CHIP_NAVI10) {
		mutex_lock(&adev->grbm_idx_mutex);
		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
				gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
				wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
				/*
				 * Set corresponding TCP bits for the inactive WGPs in
				 * GCRD_SA_TARGETS_DISABLE
				 */
				gcrd_targets_disable_tcp = 0;
				/* Set TCP & SQC bits in UTCL1_UTCL0_INVREQ_DISABLE */
				utcl_invreq_disable = 0;

				for (k = 0; k < max_wgp_per_sh; k++) {
					if (!(wgp_active_bitmap & (1 << k))) {
						gcrd_targets_disable_tcp |= 3 << (2 * k);
						utcl_invreq_disable |= (3 << (2 * k)) |
							(3 << (2 * (max_wgp_per_sh + k)));
					}
				}

				gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
				tmp = RREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE);
				/* only override TCP & SQC bits */
				tmp &= 0xffffffff << (4 * max_wgp_per_sh);
				tmp |= (utcl_invreq_disable & utcl_invreq_disable_mask);
				WREG32_SOC15(GC, 0, mmUTCL1_UTCL0_INVREQ_DISABLE, tmp);

				tmp = RREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE);
				/* only override TCP bits */
				tmp &= 0xffffffff << (2 * max_wgp_per_sh);
				tmp |= (gcrd_targets_disable_tcp & gcrd_targets_disable_mask);
				WREG32_SOC15(GC, 0, mmGCRD_SA_TARGETS_DISABLE, tmp);
			}
		}

		gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}
}

static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v10_0_tiling_mode_table_init(adev);

	gfx_v10_0_setup_rb(adev);
	gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
	adev->gfx.config.pa_sc_tile_steering_override =
		gfx_v10_0_init_pa_sc_tile_steering_override(adev);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
		nv_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		if (i == 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_MODE, 0);
			WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
			WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
		} else {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_MODE, 0);
			WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
					    (adev->gmc.private_aperture_start >> 48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
					    (adev->gmc.shared_aperture_start >> 48));
			WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
		}
	}
	nv_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v10_0_init_compute_vmid(adev);

	mutex_lock(&adev->grbm_idx_mutex);
	/*
	 * making sure that the following register writes will be broadcasted
	 * to all the shaders
	 */
	gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
		     (adev->gfx.config.sc_prim_fifo_size_frontend <<
		      PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
		     (adev->gfx.config.sc_prim_fifo_size_backend <<
		      PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
		     (adev->gfx.config.sc_hiz_tile_fifo_size <<
		      PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
		     (adev->gfx.config.sc_earlyz_tile_fifo_size <<
		      PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
	mutex_unlock(&adev->grbm_idx_mutex);
}
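/*
 * RLC control: gfx_v10_0_rlc_stop()/start() toggle RLC_CNTL.RLC_ENABLE_F32,
 * rlc_reset() pulses GRBM_SOFT_RESET, and rlc_smu_handshake_cntl() gates
 * the RLC<->SMU handshake (and with it GFXOFF) via RLC_PG_CNTL. With
 * direct loading, rlc_load_microcode() streams the RLC text through
 * RLC_GPM_UCODE_ADDR/DATA starting at RLCG_UCODE_LOADING_START_ADDRESS.
 */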
1 : 0);
1544 +
1545 + WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
1546 + }
1547 +
1548 + static void gfx_v10_0_init_csb(struct amdgpu_device *adev)
1549 + {
1550 + /* csib */
1551 + WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
1552 + adev->gfx.rlc.clear_state_gpu_addr >> 32);
1553 + WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
1554 + adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1555 + WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
1556 + }
1557 +
1558 + static void gfx_v10_0_init_pg(struct amdgpu_device *adev)
1559 + {
1560 + gfx_v10_0_init_csb(adev);
1561 +
1562 + amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
1563 +
1564 + /* TODO: init power gating */
1565 + return;
1566 + }
1567 +
1568 + void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
1569 + {
1570 + u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
1571 +
1572 + tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
1573 + WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
1574 +
1575 + gfx_v10_0_enable_gui_idle_interrupt(adev, false);
1576 + }
1577 +
1578 + static void gfx_v10_0_rlc_reset(struct amdgpu_device *adev)
1579 + {
1580 + WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
1581 + udelay(50);
1582 + WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
1583 + udelay(50);
1584 + }
1585 +
1586 + static void gfx_v10_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
1587 + bool enable)
1588 + {
1589 + uint32_t rlc_pg_cntl;
1590 +
1591 + rlc_pg_cntl = RREG32_SOC15(GC, 0, mmRLC_PG_CNTL);
1592 +
1593 + if (!enable) {
1594 + /* RLC_PG_CNTL[23] = 0 (default)
1595 + * RLC will wait for handshake acks with SMU
1596 + * GFXOFF will be enabled
1597 + * RLC_PG_CNTL[23] = 1
1598 + * RLC will not issue any message to SMU
1599 + * hence no handshake between SMU & RLC
1600 + * GFXOFF will be disabled
1601 + */
1602 + rlc_pg_cntl |= 0x800000;
1603 + } else
1604 + rlc_pg_cntl &= ~0x800000;
1605 + WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, rlc_pg_cntl);
1606 + }
1607 +
1608 + static void gfx_v10_0_rlc_start(struct amdgpu_device *adev)
1609 + {
1610 + /* TODO: re-enable the rlc & smu handshake once the smu
1611 + * and gfxoff features work as expected */
1612 + if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
1613 + gfx_v10_0_rlc_smu_handshake_cntl(adev, false);
1614 +
1615 + WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
1616 + udelay(50);
1617 + }
1618 +
1619 + static void gfx_v10_0_rlc_enable_srm(struct amdgpu_device *adev)
1620 + {
1621 + uint32_t tmp;
1622 +
1623 + /* enable Save Restore Machine */
1624 + tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
1625 + tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1626 + tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1627 + WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
1628 + }
1629 +
1630 + static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev)
1631 + {
1632 + const struct rlc_firmware_header_v2_0 *hdr;
1633 + const __le32 *fw_data;
1634 + unsigned i, fw_size;
1635 +
1636 + if (!adev->gfx.rlc_fw)
1637 + return -EINVAL;
1638 +
1639 + hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1640 + amdgpu_ucode_print_rlc_hdr(&hdr->header);
1641 +
1642 + fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1643 + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1644 + fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1645 +
1646 + WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
1647 + RLCG_UCODE_LOADING_START_ADDRESS);
1648 +
1649 + for (i = 0; i < fw_size; i++)
1650 + WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA,
1651 + le32_to_cpup(fw_data++));
1652 +
1653 + WREG32_SOC15(GC, 0,
mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
1654 +
1655 + return 0;
1656 + }
1657 +
1658 + static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
1659 + {
1660 + int r;
1661 +
1662 + if (amdgpu_sriov_vf(adev))
1663 + return 0;
1664 +
1665 + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1666 + r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
1667 + if (r)
1668 + return r;
1669 + gfx_v10_0_init_pg(adev);
1670 +
1671 + /* enable RLC SRM */
1672 + gfx_v10_0_rlc_enable_srm(adev);
1673 +
1674 + } else {
1675 + adev->gfx.rlc.funcs->stop(adev);
1676 +
1677 + /* disable CG */
1678 + WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
1679 +
1680 + /* disable PG */
1681 + WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
1682 +
1683 + if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1684 + /* legacy rlc firmware loading */
1685 + r = gfx_v10_0_rlc_load_microcode(adev);
1686 + if (r)
1687 + return r;
1688 + } else if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1689 + /* rlc backdoor autoload firmware */
1690 + r = gfx_v10_0_rlc_backdoor_autoload_enable(adev);
1691 + if (r)
1692 + return r;
1693 + }
1694 +
1695 + gfx_v10_0_init_pg(adev);
1696 + adev->gfx.rlc.funcs->start(adev);
1697 +
1698 + if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1699 + r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
1700 + if (r)
1701 + return r;
1702 + }
1703 + }
1704 + return 0;
1705 + }
1706 +
1707 + static struct {
1708 + FIRMWARE_ID id;
1709 + unsigned int offset;
1710 + unsigned int size;
1711 + } rlc_autoload_info[FIRMWARE_ID_MAX];
1712 +
1713 + static int gfx_v10_0_parse_rlc_toc(struct amdgpu_device *adev)
1714 + {
1715 + int ret;
1716 + RLC_TABLE_OF_CONTENT *rlc_toc;
1717 +
1718 + ret = amdgpu_bo_create_reserved(adev, adev->psp.toc_bin_size, PAGE_SIZE,
1719 + AMDGPU_GEM_DOMAIN_GTT,
1720 + &adev->gfx.rlc.rlc_toc_bo,
1721 + &adev->gfx.rlc.rlc_toc_gpu_addr,
1722 + (void **)&adev->gfx.rlc.rlc_toc_buf);
1723 + if (ret) {
1724 + dev_err(adev->dev, "(%d) failed to create rlc toc bo\n", ret);
1725 + return ret;
1726 + }
1727 +
1728 + /* Copy toc from psp sos fw to rlc toc buffer */
1729 + memcpy(adev->gfx.rlc.rlc_toc_buf, adev->psp.toc_start_addr, adev->psp.toc_bin_size);
1730 +
1731 + rlc_toc = (RLC_TABLE_OF_CONTENT *)adev->gfx.rlc.rlc_toc_buf;
1732 + while (rlc_toc && (rlc_toc->id > FIRMWARE_ID_INVALID) &&
1733 + (rlc_toc->id < FIRMWARE_ID_MAX)) {
1734 + if ((rlc_toc->id >= FIRMWARE_ID_CP_CE) &&
1735 + (rlc_toc->id <= FIRMWARE_ID_CP_MES)) {
1736 + /* Offset needs 4KB alignment */
1737 + rlc_toc->offset = ALIGN(rlc_toc->offset * 4, PAGE_SIZE);
1738 + }
1739 +
1740 + rlc_autoload_info[rlc_toc->id].id = rlc_toc->id;
1741 + rlc_autoload_info[rlc_toc->id].offset = rlc_toc->offset * 4;
1742 + rlc_autoload_info[rlc_toc->id].size = rlc_toc->size * 4;
1743 +
1744 + rlc_toc++;
1745 + }
1746 +
1747 + return 0;
1748 + }
1749 +
1750 + static uint32_t gfx_v10_0_calc_toc_total_size(struct amdgpu_device *adev)
1751 + {
1752 + uint32_t total_size = 0;
1753 + FIRMWARE_ID id;
1754 + int ret;
1755 +
1756 + ret = gfx_v10_0_parse_rlc_toc(adev);
1757 + if (ret) {
1758 + dev_err(adev->dev, "failed to parse rlc toc\n");
1759 + return 0;
1760 + }
1761 +
1762 + for (id = FIRMWARE_ID_RLC_G_UCODE; id < FIRMWARE_ID_MAX; id++)
1763 + total_size += rlc_autoload_info[id].size;
1764 +
1765 + /* In case the offsets in the rlc toc were aligned up, cover the last entry */
1766 + if (total_size < rlc_autoload_info[FIRMWARE_ID_MAX-1].offset)
1767 + total_size = rlc_autoload_info[FIRMWARE_ID_MAX-1].offset +
1768 +
rlc_autoload_info[FIRMWARE_ID_MAX-1].size; 1769 + 1770 + return total_size; 1771 + } 1772 + 1773 + static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev) 1774 + { 1775 + int r; 1776 + uint32_t total_size; 1777 + 1778 + total_size = gfx_v10_0_calc_toc_total_size(adev); 1779 + 1780 + r = amdgpu_bo_create_reserved(adev, total_size, PAGE_SIZE, 1781 + AMDGPU_GEM_DOMAIN_GTT, 1782 + &adev->gfx.rlc.rlc_autoload_bo, 1783 + &adev->gfx.rlc.rlc_autoload_gpu_addr, 1784 + (void **)&adev->gfx.rlc.rlc_autoload_ptr); 1785 + if (r) { 1786 + dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r); 1787 + return r; 1788 + } 1789 + 1790 + return 0; 1791 + } 1792 + 1793 + static void gfx_v10_0_rlc_backdoor_autoload_buffer_fini(struct amdgpu_device *adev) 1794 + { 1795 + amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_toc_bo, 1796 + &adev->gfx.rlc.rlc_toc_gpu_addr, 1797 + (void **)&adev->gfx.rlc.rlc_toc_buf); 1798 + amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo, 1799 + &adev->gfx.rlc.rlc_autoload_gpu_addr, 1800 + (void **)&adev->gfx.rlc.rlc_autoload_ptr); 1801 + } 1802 + 1803 + static void gfx_v10_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev, 1804 + FIRMWARE_ID id, 1805 + const void *fw_data, 1806 + uint32_t fw_size) 1807 + { 1808 + uint32_t toc_offset; 1809 + uint32_t toc_fw_size; 1810 + char *ptr = adev->gfx.rlc.rlc_autoload_ptr; 1811 + 1812 + if (id <= FIRMWARE_ID_INVALID || id >= FIRMWARE_ID_MAX) 1813 + return; 1814 + 1815 + toc_offset = rlc_autoload_info[id].offset; 1816 + toc_fw_size = rlc_autoload_info[id].size; 1817 + 1818 + if (fw_size == 0) 1819 + fw_size = toc_fw_size; 1820 + 1821 + if (fw_size > toc_fw_size) 1822 + fw_size = toc_fw_size; 1823 + 1824 + memcpy(ptr + toc_offset, fw_data, fw_size); 1825 + 1826 + if (fw_size < toc_fw_size) 1827 + memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size); 1828 + } 1829 + 1830 + static void gfx_v10_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev) 1831 + { 1832 + void *data; 1833 + uint32_t size; 1834 + 1835 + data = adev->gfx.rlc.rlc_toc_buf; 1836 + size = rlc_autoload_info[FIRMWARE_ID_RLC_TOC].size; 1837 + 1838 + gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev, 1839 + FIRMWARE_ID_RLC_TOC, 1840 + data, size); 1841 + } 1842 + 1843 + static void gfx_v10_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev) 1844 + { 1845 + const __le32 *fw_data; 1846 + uint32_t fw_size; 1847 + const struct gfx_firmware_header_v1_0 *cp_hdr; 1848 + const struct rlc_firmware_header_v2_0 *rlc_hdr; 1849 + 1850 + /* pfp ucode */ 1851 + cp_hdr = (const struct gfx_firmware_header_v1_0 *) 1852 + adev->gfx.pfp_fw->data; 1853 + fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 1854 + le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes)); 1855 + fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes); 1856 + gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev, 1857 + FIRMWARE_ID_CP_PFP, 1858 + fw_data, fw_size); 1859 + 1860 + /* ce ucode */ 1861 + cp_hdr = (const struct gfx_firmware_header_v1_0 *) 1862 + adev->gfx.ce_fw->data; 1863 + fw_data = (const __le32 *)(adev->gfx.ce_fw->data + 1864 + le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes)); 1865 + fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes); 1866 + gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev, 1867 + FIRMWARE_ID_CP_CE, 1868 + fw_data, fw_size); 1869 + 1870 + /* me ucode */ 1871 + cp_hdr = (const struct gfx_firmware_header_v1_0 *) 1872 + adev->gfx.me_fw->data; 1873 + fw_data = (const __le32 *)(adev->gfx.me_fw->data + 1874 + 
le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes)); 1875 + fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes); 1876 + gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev, 1877 + FIRMWARE_ID_CP_ME, 1878 + fw_data, fw_size); 1879 + 1880 + /* rlc ucode */ 1881 + rlc_hdr = (const struct rlc_firmware_header_v2_0 *) 1882 + adev->gfx.rlc_fw->data; 1883 + fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 1884 + le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes)); 1885 + fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes); 1886 + gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev, 1887 + FIRMWARE_ID_RLC_G_UCODE, 1888 + fw_data, fw_size); 1889 + 1890 + /* mec1 ucode */ 1891 + cp_hdr = (const struct gfx_firmware_header_v1_0 *) 1892 + adev->gfx.mec_fw->data; 1893 + fw_data = (const __le32 *) (adev->gfx.mec_fw->data + 1894 + le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes)); 1895 + fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) - 1896 + cp_hdr->jt_size * 4; 1897 + gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev, 1898 + FIRMWARE_ID_CP_MEC, 1899 + fw_data, fw_size); 1900 + /* mec2 ucode is not necessary if mec2 ucode is same as mec1 */ 1901 + } 1902 + 1903 + /* Temporarily put sdma part here */ 1904 + static void gfx_v10_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev) 1905 + { 1906 + const __le32 *fw_data; 1907 + uint32_t fw_size; 1908 + const struct sdma_firmware_header_v1_0 *sdma_hdr; 1909 + int i; 1910 + 1911 + for (i = 0; i < adev->sdma.num_instances; i++) { 1912 + sdma_hdr = (const struct sdma_firmware_header_v1_0 *) 1913 + adev->sdma.instance[i].fw->data; 1914 + fw_data = (const __le32 *) (adev->sdma.instance[i].fw->data + 1915 + le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes)); 1916 + fw_size = le32_to_cpu(sdma_hdr->header.ucode_size_bytes); 1917 + 1918 + if (i == 0) { 1919 + gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev, 1920 + FIRMWARE_ID_SDMA0_UCODE, fw_data, fw_size); 1921 + gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev, 1922 + FIRMWARE_ID_SDMA0_JT, 1923 + (uint32_t *)fw_data + 1924 + sdma_hdr->jt_offset, 1925 + sdma_hdr->jt_size * 4); 1926 + } else if (i == 1) { 1927 + gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev, 1928 + FIRMWARE_ID_SDMA1_UCODE, fw_data, fw_size); 1929 + gfx_v10_0_rlc_backdoor_autoload_copy_ucode(adev, 1930 + FIRMWARE_ID_SDMA1_JT, 1931 + (uint32_t *)fw_data + 1932 + sdma_hdr->jt_offset, 1933 + sdma_hdr->jt_size * 4); 1934 + } 1935 + } 1936 + } 1937 + 1938 + static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev) 1939 + { 1940 + uint32_t rlc_g_offset, rlc_g_size, tmp; 1941 + uint64_t gpu_addr; 1942 + 1943 + gfx_v10_0_rlc_backdoor_autoload_copy_toc_ucode(adev); 1944 + gfx_v10_0_rlc_backdoor_autoload_copy_sdma_ucode(adev); 1945 + gfx_v10_0_rlc_backdoor_autoload_copy_gfx_ucode(adev); 1946 + 1947 + rlc_g_offset = rlc_autoload_info[FIRMWARE_ID_RLC_G_UCODE].offset; 1948 + rlc_g_size = rlc_autoload_info[FIRMWARE_ID_RLC_G_UCODE].size; 1949 + gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset; 1950 + 1951 + WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_ADDR_HI, upper_32_bits(gpu_addr)); 1952 + WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_ADDR_LO, lower_32_bits(gpu_addr)); 1953 + WREG32_SOC15(GC, 0, mmRLC_HYP_BOOTLOAD_SIZE, rlc_g_size); 1954 + 1955 + tmp = RREG32_SOC15(GC, 0, mmRLC_HYP_RESET_VECTOR); 1956 + if (!(tmp & (RLC_HYP_RESET_VECTOR__COLD_BOOT_EXIT_MASK | 1957 + RLC_HYP_RESET_VECTOR__VDDGFX_EXIT_MASK))) { 1958 + DRM_ERROR("Neither COLD_BOOT_EXIT nor VDDGFX_EXIT is set\n"); 1959 + return -EINVAL; 1960 + } 
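/*
 * [Illustrative aside, not part of the patch] The four
 * gfx_v10_0_rlc_backdoor_autoload_config_*_cache() helpers that follow
 * all repeat one sequence: trigger an L1 instruction-cache
 * invalidation, poll INVALIDATE_CACHE_COMPLETE with a 50 ms budget,
 * then program the 4 KB-aligned ucode address.  The polling step, as a
 * generic sketch (hypothetical callback type, not an amdgpu API):
 *
 *	typedef bool (*ic_done_fn)(void *ctx);
 *
 *	static int poll_ic_invalidate(ic_done_fn done, void *ctx,
 *				      uint32_t usec_timeout)
 *	{
 *		uint32_t i;
 *
 *		for (i = 0; i < usec_timeout; i++) {
 *			if (done(ctx))	// field read back as 1
 *				return 0;
 *			udelay(1);
 *		}
 *		return -EINVAL;		// as the handlers below report
 *	}
 */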
1961 +
1962 + tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
1963 + if (tmp & RLC_CNTL__RLC_ENABLE_F32_MASK) {
1964 + DRM_ERROR("RLC ROM should halt itself\n");
1965 + return -EINVAL;
1966 + }
1967 +
1968 + return 0;
1969 + }
1970 +
1971 + static int gfx_v10_0_rlc_backdoor_autoload_config_me_cache(struct amdgpu_device *adev)
1972 + {
1973 + uint32_t usec_timeout = 50000; /* wait for 50ms */
1974 + uint32_t tmp;
1975 + int i;
1976 + uint64_t addr;
1977 +
1978 + /* Trigger an invalidation of the L1 instruction caches */
1979 + tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
1980 + tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
1981 + WREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL, tmp);
1982 +
1983 + /* Wait for invalidation complete */
1984 + for (i = 0; i < usec_timeout; i++) {
1985 + tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL);
1986 + if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
1987 + INVALIDATE_CACHE_COMPLETE))
1988 + break;
1989 + udelay(1);
1990 + }
1991 +
1992 + if (i >= usec_timeout) {
1993 + dev_err(adev->dev, "failed to invalidate instruction cache\n");
1994 + return -EINVAL;
1995 + }
1996 +
1997 + /* Program me ucode address into instruction cache address register */
1998 + addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
1999 + rlc_autoload_info[FIRMWARE_ID_CP_ME].offset;
2000 + WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_LO,
2001 + lower_32_bits(addr) & 0xFFFFF000);
2002 + WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI,
2003 + upper_32_bits(addr));
2004 +
2005 + return 0;
2006 + }
2007 +
2008 + static int gfx_v10_0_rlc_backdoor_autoload_config_ce_cache(struct amdgpu_device *adev)
2009 + {
2010 + uint32_t usec_timeout = 50000; /* wait for 50ms */
2011 + uint32_t tmp;
2012 + int i;
2013 + uint64_t addr;
2014 +
2015 + /* Trigger an invalidation of the L1 instruction caches */
2016 + tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2017 + tmp = REG_SET_FIELD(tmp, CP_CE_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2018 + WREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL, tmp);
2019 +
2020 + /* Wait for invalidation complete */
2021 + for (i = 0; i < usec_timeout; i++) {
2022 + tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL);
2023 + if (1 == REG_GET_FIELD(tmp, CP_CE_IC_OP_CNTL,
2024 + INVALIDATE_CACHE_COMPLETE))
2025 + break;
2026 + udelay(1);
2027 + }
2028 +
2029 + if (i >= usec_timeout) {
2030 + dev_err(adev->dev, "failed to invalidate instruction cache\n");
2031 + return -EINVAL;
2032 + }
2033 +
2034 + /* Program ce ucode address into instruction cache address register */
2035 + addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2036 + rlc_autoload_info[FIRMWARE_ID_CP_CE].offset;
2037 + WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_LO,
2038 + lower_32_bits(addr) & 0xFFFFF000);
2039 + WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI,
2040 + upper_32_bits(addr));
2041 +
2042 + return 0;
2043 + }
2044 +
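/*
 * [Illustrative aside, not part of the patch] Every IC_BASE_LO/_HI
 * pair in these helpers splits one 64-bit, 4 KB-aligned GPU address:
 * the low register takes bits [31:12] (hence the 0xFFFFF000 mask) and
 * the high register takes bits [63:32].  Standalone C equivalent of
 * the lower_32_bits()/upper_32_bits() usage, with a hypothetical
 * address:
 */
#include <stdint.h>

static inline uint32_t ic_base_lo(uint64_t addr)
{
	return (uint32_t)addr & 0xFFFFF000;	/* bits [31:12] */
}

static inline uint32_t ic_base_hi(uint64_t addr)
{
	return (uint32_t)(addr >> 32);		/* bits [63:32] */
}
/* e.g. addr = 0x8000123000 -> LO 0x00123000, HI 0x00000080 */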
2045 + static int gfx_v10_0_rlc_backdoor_autoload_config_pfp_cache(struct amdgpu_device *adev)
2046 + {
2047 + uint32_t usec_timeout = 50000; /* wait for 50ms */
2048 + uint32_t tmp;
2049 + int i;
2050 + uint64_t addr;
2051 +
2052 + /* Trigger an invalidation of the L1 instruction caches */
2053 + tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2054 + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2055 + WREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL, tmp);
2056 +
2057 + /* Wait for invalidation complete */
2058 + for (i = 0; i < usec_timeout; i++) {
2059 + tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL);
2060 + if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2061 + INVALIDATE_CACHE_COMPLETE))
2062 + break;
2063 + udelay(1);
2064 + }
2065 +
2066 + if (i >= usec_timeout) {
2067 + dev_err(adev->dev, "failed to invalidate instruction cache\n");
2068 + return -EINVAL;
2069 + }
2070 +
2071 + /* Program pfp ucode address into instruction cache address register */
2072 + addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2073 + rlc_autoload_info[FIRMWARE_ID_CP_PFP].offset;
2074 + WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_LO,
2075 + lower_32_bits(addr) & 0xFFFFF000);
2076 + WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI,
2077 + upper_32_bits(addr));
2078 +
2079 + return 0;
2080 + }
2081 +
2082 + static int gfx_v10_0_rlc_backdoor_autoload_config_mec_cache(struct amdgpu_device *adev)
2083 + {
2084 + uint32_t usec_timeout = 50000; /* wait for 50ms */
2085 + uint32_t tmp;
2086 + int i;
2087 + uint64_t addr;
2088 +
2089 + /* Trigger an invalidation of the L1 instruction caches */
2090 + tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2091 + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2092 + WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp);
2093 +
2094 + /* Wait for invalidation complete */
2095 + for (i = 0; i < usec_timeout; i++) {
2096 + tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL);
2097 + if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2098 + INVALIDATE_CACHE_COMPLETE))
2099 + break;
2100 + udelay(1);
2101 + }
2102 +
2103 + if (i >= usec_timeout) {
2104 + dev_err(adev->dev, "failed to invalidate instruction cache\n");
2105 + return -EINVAL;
2106 + }
2107 +
2108 + /* Program mec1 ucode address into instruction cache address register */
2109 + addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2110 + rlc_autoload_info[FIRMWARE_ID_CP_MEC].offset;
2111 + WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2112 + lower_32_bits(addr) & 0xFFFFF000);
2113 + WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2114 + upper_32_bits(addr));
2115 +
2116 + return 0;
2117 + }
2118 +
2119 + static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
2120 + {
2121 + uint32_t cp_status;
2122 + uint32_t bootload_status;
2123 + int i, r;
2124 +
2125 + for (i = 0; i < adev->usec_timeout; i++) {
2126 + cp_status = RREG32_SOC15(GC, 0, mmCP_STAT);
2127 + bootload_status = RREG32_SOC15(GC, 0, mmRLC_RLCS_BOOTLOAD_STATUS);
2128 + if ((cp_status == 0) &&
2129 + (REG_GET_FIELD(bootload_status,
2130 + RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
2131 + break;
2132 + }
2133 + udelay(1);
2134 + }
2135 +
2136 + if (i >= adev->usec_timeout) {
2137 + dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
2138 + return -ETIMEDOUT;
2139 + }
2140 +
2141 + if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2142 + r = gfx_v10_0_rlc_backdoor_autoload_config_me_cache(adev);
2143 + if (r)
2144 + return r;
2145 +
2146 + r = gfx_v10_0_rlc_backdoor_autoload_config_ce_cache(adev);
2147 + if (r)
2148 + return r;
2149 +
2150 + r = gfx_v10_0_rlc_backdoor_autoload_config_pfp_cache(adev);
2151 + if (r)
2152 + return r;
2153 +
2154 + r = gfx_v10_0_rlc_backdoor_autoload_config_mec_cache(adev);
2155 + if (r)
2156 + return r;
2157 + }
2158 +
2159 + return 0;
2160 + }
2161 +
2162 + static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2163 + {
2164 + int i;
2165 + u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2166 +
2167 + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2168 + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2169 + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ?
0 : 1); 2170 + if (!enable) { 2171 + for (i = 0; i < adev->gfx.num_gfx_rings; i++) 2172 + adev->gfx.gfx_ring[i].sched.ready = false; 2173 + } 2174 + WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); 2175 + udelay(50); 2176 + } 2177 + 2178 + static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) 2179 + { 2180 + int r; 2181 + const struct gfx_firmware_header_v1_0 *pfp_hdr; 2182 + const __le32 *fw_data; 2183 + unsigned i, fw_size; 2184 + uint32_t tmp; 2185 + uint32_t usec_timeout = 50000; /* wait for 50ms */ 2186 + 2187 + pfp_hdr = (const struct gfx_firmware_header_v1_0 *) 2188 + adev->gfx.pfp_fw->data; 2189 + 2190 + amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2191 + 2192 + fw_data = (const __le32 *)(adev->gfx.pfp_fw->data + 2193 + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); 2194 + fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes); 2195 + 2196 + r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes, 2197 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 2198 + &adev->gfx.pfp.pfp_fw_obj, 2199 + &adev->gfx.pfp.pfp_fw_gpu_addr, 2200 + (void **)&adev->gfx.pfp.pfp_fw_ptr); 2201 + if (r) { 2202 + dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r); 2203 + gfx_v10_0_pfp_fini(adev); 2204 + return r; 2205 + } 2206 + 2207 + memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size); 2208 + 2209 + amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj); 2210 + amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj); 2211 + 2212 + /* Trigger an invalidation of the L1 instruction caches */ 2213 + tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL); 2214 + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2215 + WREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL, tmp); 2216 + 2217 + /* Wait for invalidation complete */ 2218 + for (i = 0; i < usec_timeout; i++) { 2219 + tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_OP_CNTL); 2220 + if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, 2221 + INVALIDATE_CACHE_COMPLETE)) 2222 + break; 2223 + udelay(1); 2224 + } 2225 + 2226 + if (i >= usec_timeout) { 2227 + dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2228 + return -EINVAL; 2229 + } 2230 + 2231 + if (amdgpu_emu_mode == 1) 2232 + adev->nbio_funcs->hdp_flush(adev, NULL); 2233 + 2234 + tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL); 2235 + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); 2236 + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0); 2237 + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0); 2238 + tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2239 + WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL, tmp); 2240 + WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_LO, 2241 + adev->gfx.pfp.pfp_fw_gpu_addr & 0xFFFFF000); 2242 + WREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_HI, 2243 + upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr)); 2244 + 2245 + return 0; 2246 + } 2247 + 2248 + static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev) 2249 + { 2250 + int r; 2251 + const struct gfx_firmware_header_v1_0 *ce_hdr; 2252 + const __le32 *fw_data; 2253 + unsigned i, fw_size; 2254 + uint32_t tmp; 2255 + uint32_t usec_timeout = 50000; /* wait for 50ms */ 2256 + 2257 + ce_hdr = (const struct gfx_firmware_header_v1_0 *) 2258 + adev->gfx.ce_fw->data; 2259 + 2260 + amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); 2261 + 2262 + fw_data = (const __le32 *)(adev->gfx.ce_fw->data + 2263 + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); 2264 + fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes); 2265 + 2266 + r = amdgpu_bo_create_reserved(adev, 
ce_hdr->header.ucode_size_bytes, 2267 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 2268 + &adev->gfx.ce.ce_fw_obj, 2269 + &adev->gfx.ce.ce_fw_gpu_addr, 2270 + (void **)&adev->gfx.ce.ce_fw_ptr); 2271 + if (r) { 2272 + dev_err(adev->dev, "(%d) failed to create ce fw bo\n", r); 2273 + gfx_v10_0_ce_fini(adev); 2274 + return r; 2275 + } 2276 + 2277 + memcpy(adev->gfx.ce.ce_fw_ptr, fw_data, fw_size); 2278 + 2279 + amdgpu_bo_kunmap(adev->gfx.ce.ce_fw_obj); 2280 + amdgpu_bo_unreserve(adev->gfx.ce.ce_fw_obj); 2281 + 2282 + /* Trigger an invalidation of the L1 instruction caches */ 2283 + tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL); 2284 + tmp = REG_SET_FIELD(tmp, CP_CE_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2285 + WREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL, tmp); 2286 + 2287 + /* Wait for invalidation complete */ 2288 + for (i = 0; i < usec_timeout; i++) { 2289 + tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_OP_CNTL); 2290 + if (1 == REG_GET_FIELD(tmp, CP_CE_IC_OP_CNTL, 2291 + INVALIDATE_CACHE_COMPLETE)) 2292 + break; 2293 + udelay(1); 2294 + } 2295 + 2296 + if (i >= usec_timeout) { 2297 + dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2298 + return -EINVAL; 2299 + } 2300 + 2301 + if (amdgpu_emu_mode == 1) 2302 + adev->nbio_funcs->hdp_flush(adev, NULL); 2303 + 2304 + tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL); 2305 + tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0); 2306 + tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, CACHE_POLICY, 0); 2307 + tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, EXE_DISABLE, 0); 2308 + tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2309 + WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_LO, 2310 + adev->gfx.ce.ce_fw_gpu_addr & 0xFFFFF000); 2311 + WREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_HI, 2312 + upper_32_bits(adev->gfx.ce.ce_fw_gpu_addr)); 2313 + 2314 + return 0; 2315 + } 2316 + 2317 + static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) 2318 + { 2319 + int r; 2320 + const struct gfx_firmware_header_v1_0 *me_hdr; 2321 + const __le32 *fw_data; 2322 + unsigned i, fw_size; 2323 + uint32_t tmp; 2324 + uint32_t usec_timeout = 50000; /* wait for 50ms */ 2325 + 2326 + me_hdr = (const struct gfx_firmware_header_v1_0 *) 2327 + adev->gfx.me_fw->data; 2328 + 2329 + amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 2330 + 2331 + fw_data = (const __le32 *)(adev->gfx.me_fw->data + 2332 + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); 2333 + fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes); 2334 + 2335 + r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes, 2336 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, 2337 + &adev->gfx.me.me_fw_obj, 2338 + &adev->gfx.me.me_fw_gpu_addr, 2339 + (void **)&adev->gfx.me.me_fw_ptr); 2340 + if (r) { 2341 + dev_err(adev->dev, "(%d) failed to create me fw bo\n", r); 2342 + gfx_v10_0_me_fini(adev); 2343 + return r; 2344 + } 2345 + 2346 + memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size); 2347 + 2348 + amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj); 2349 + amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj); 2350 + 2351 + /* Trigger an invalidation of the L1 instruction caches */ 2352 + tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL); 2353 + tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2354 + WREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL, tmp); 2355 + 2356 + /* Wait for invalidation complete */ 2357 + for (i = 0; i < usec_timeout; i++) { 2358 + tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_OP_CNTL); 2359 + if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, 2360 + INVALIDATE_CACHE_COMPLETE)) 2361 + break; 2362 + udelay(1); 2363 + } 2364 + 
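/*
 * [Illustrative aside, not part of the patch] Each CP loader above
 * locates its payload through two little-endian header fields.  A
 * simplified model of that lookup (hypothetical struct layout; the
 * real one is gfx_firmware_header_v1_0):
 *
 *	struct fw_header_model {
 *		uint32_t ucode_size_bytes;          // payload length
 *		uint32_t ucode_array_offset_bytes;  // payload offset
 *	};
 *
 *	static const uint32_t *fw_payload(const uint8_t *blob,
 *					  uint32_t *size)
 *	{
 *		const struct fw_header_model *hdr = (const void *)blob;
 *
 *		*size = hdr->ucode_size_bytes;      // le32_to_cpu() upstream
 *		return (const uint32_t *)(blob +
 *				hdr->ucode_array_offset_bytes);
 *	}
 */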
2365 + if (i >= usec_timeout) { 2366 + dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2367 + return -EINVAL; 2368 + } 2369 + 2370 + if (amdgpu_emu_mode == 1) 2371 + adev->nbio_funcs->hdp_flush(adev, NULL); 2372 + 2373 + tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL); 2374 + tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); 2375 + tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0); 2376 + tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0); 2377 + tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2378 + WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_LO, 2379 + adev->gfx.me.me_fw_gpu_addr & 0xFFFFF000); 2380 + WREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_HI, 2381 + upper_32_bits(adev->gfx.me.me_fw_gpu_addr)); 2382 + 2383 + return 0; 2384 + } 2385 + 2386 + static int gfx_v10_0_cp_gfx_load_microcode(struct amdgpu_device *adev) 2387 + { 2388 + int r; 2389 + 2390 + if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw) 2391 + return -EINVAL; 2392 + 2393 + gfx_v10_0_cp_gfx_enable(adev, false); 2394 + 2395 + r = gfx_v10_0_cp_gfx_load_pfp_microcode(adev); 2396 + if (r) { 2397 + dev_err(adev->dev, "(%d) failed to load pfp fw\n", r); 2398 + return r; 2399 + } 2400 + 2401 + r = gfx_v10_0_cp_gfx_load_ce_microcode(adev); 2402 + if (r) { 2403 + dev_err(adev->dev, "(%d) failed to load ce fw\n", r); 2404 + return r; 2405 + } 2406 + 2407 + r = gfx_v10_0_cp_gfx_load_me_microcode(adev); 2408 + if (r) { 2409 + dev_err(adev->dev, "(%d) failed to load me fw\n", r); 2410 + return r; 2411 + } 2412 + 2413 + return 0; 2414 + } 2415 + 2416 + static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev) 2417 + { 2418 + struct amdgpu_ring *ring; 2419 + const struct cs_section_def *sect = NULL; 2420 + const struct cs_extent_def *ext = NULL; 2421 + int r, i; 2422 + int ctx_reg_offset; 2423 + 2424 + /* init the CP */ 2425 + WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, 2426 + adev->gfx.config.max_hw_contexts - 1); 2427 + WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1); 2428 + 2429 + gfx_v10_0_cp_gfx_enable(adev, true); 2430 + 2431 + ring = &adev->gfx.gfx_ring[0]; 2432 + r = amdgpu_ring_alloc(ring, gfx_v10_0_get_csb_size(adev) + 4); 2433 + if (r) { 2434 + DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 2435 + return r; 2436 + } 2437 + 2438 + amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 2439 + amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 2440 + 2441 + amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 2442 + amdgpu_ring_write(ring, 0x80000000); 2443 + amdgpu_ring_write(ring, 0x80000000); 2444 + 2445 + for (sect = gfx10_cs_data; sect->section != NULL; ++sect) { 2446 + for (ext = sect->section; ext->extent != NULL; ++ext) { 2447 + if (sect->id == SECT_CONTEXT) { 2448 + amdgpu_ring_write(ring, 2449 + PACKET3(PACKET3_SET_CONTEXT_REG, 2450 + ext->reg_count)); 2451 + amdgpu_ring_write(ring, ext->reg_index - 2452 + PACKET3_SET_CONTEXT_REG_START); 2453 + for (i = 0; i < ext->reg_count; i++) 2454 + amdgpu_ring_write(ring, ext->extent[i]); 2455 + } 2456 + } 2457 + } 2458 + 2459 + ctx_reg_offset = 2460 + SOC15_REG_OFFSET(GC, 0, mmPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START; 2461 + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); 2462 + amdgpu_ring_write(ring, ctx_reg_offset); 2463 + amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override); 2464 + 2465 + amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 2466 + amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); 2467 + 2468 + amdgpu_ring_write(ring, 
PACKET3(PACKET3_CLEAR_STATE, 0));
2469 + amdgpu_ring_write(ring, 0);
2470 +
2471 + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2472 + amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2473 + amdgpu_ring_write(ring, 0x8000);
2474 + amdgpu_ring_write(ring, 0x8000);
2475 +
2476 + amdgpu_ring_commit(ring);
2477 +
2478 + /* submit cs packet to copy state 0 to next available state */
2479 + ring = &adev->gfx.gfx_ring[1];
2480 + r = amdgpu_ring_alloc(ring, 2);
2481 + if (r) {
2482 + DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2483 + return r;
2484 + }
2485 +
2486 + amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2487 + amdgpu_ring_write(ring, 0);
2488 +
2489 + amdgpu_ring_commit(ring);
2490 +
2491 + return 0;
2492 + }
2493 +
2494 + static void gfx_v10_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
2495 + CP_PIPE_ID pipe)
2496 + {
2497 + u32 tmp;
2498 +
2499 + tmp = RREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL);
2500 + tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);
2501 +
2502 + WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, tmp);
2503 + }
2504 +
2505 + static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
2506 + struct amdgpu_ring *ring)
2507 + {
2508 + u32 tmp;
2509 +
2510 + tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2511 + if (ring->use_doorbell) {
2512 + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2513 + DOORBELL_OFFSET, ring->doorbell_index);
2514 + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2515 + DOORBELL_EN, 1);
2516 + } else {
2517 + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2518 + DOORBELL_EN, 0);
2519 + }
2520 + WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2521 + tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2522 + DOORBELL_RANGE_LOWER, ring->doorbell_index);
2523 + WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2524 +
2525 + WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2526 + CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2527 + }
2528 +
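/*
 * [Illustrative aside, not part of the patch] cp_gfx_resume() below
 * encodes the ring size as a log2: order_base_2() is a round-up log2,
 * so RB_BUFSZ = order_base_2(ring_size / 8) and RB_BLKSZ trails it by
 * two.  Standalone model with a hypothetical 64 KB ring:
 */
#include <stdio.h>

static unsigned int order_base_2_model(unsigned long n)
{
	unsigned int order = 0;

	while ((1ul << order) < n)	/* round up to next power of two */
		order++;
	return order;
}

int main(void)
{
	unsigned long ring_size = 64 * 1024;	/* hypothetical */
	unsigned int rb_bufsz = order_base_2_model(ring_size / 8);

	/* prints RB_BUFSZ=13 RB_BLKSZ=11 */
	printf("RB_BUFSZ=%u RB_BLKSZ=%u\n", rb_bufsz, rb_bufsz - 2);
	return 0;
}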
2529 + static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
2530 + {
2531 + struct amdgpu_ring *ring;
2532 + u32 tmp;
2533 + u32 rb_bufsz;
2534 + u64 rb_addr, rptr_addr, wptr_gpu_addr;
2535 + u32 i;
2536 +
2537 + /* Set the write pointer delay */
2538 + WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2539 +
2540 + /* set the RB to use vmid 0 */
2541 + WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2542 +
2543 + /* Init gfx ring 0 for pipe 0 */
2544 + mutex_lock(&adev->srbm_mutex);
2545 + gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
2546 + mutex_unlock(&adev->srbm_mutex);
2547 + /* Set ring buffer size */
2548 + ring = &adev->gfx.gfx_ring[0];
2549 + rb_bufsz = order_base_2(ring->ring_size / 8);
2550 + tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2551 + tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2552 + #ifdef __BIG_ENDIAN
2553 + tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2554 + #endif
2555 + WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2556 +
2557 + /* Initialize the ring buffer's write pointers */
2558 + ring->wptr = 0;
2559 + WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2560 + WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2561 +
2562 + /* set the wb address whether it's enabled or not */
2563 + rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2564 + WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2565 + WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
2566 + CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2567 +
2568 + wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2569 + WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
2570 + lower_32_bits(wptr_gpu_addr));
2571 + WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
2572 + upper_32_bits(wptr_gpu_addr));
2573 +
2574 + mdelay(1);
2575 + WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2576 +
2577 + rb_addr = ring->gpu_addr >> 8;
2578 + WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2579 + WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2580 +
2581 + WREG32_SOC15(GC, 0, mmCP_RB_ACTIVE, 1);
2582 +
2583 + gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
2584 +
2585 + /* Init gfx ring 1 for pipe 1 */
2586 + mutex_lock(&adev->srbm_mutex);
2587 + gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
2588 + mutex_unlock(&adev->srbm_mutex);
2589 + ring = &adev->gfx.gfx_ring[1];
2590 + rb_bufsz = order_base_2(ring->ring_size / 8);
2591 + tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
2592 + tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
2593 + #ifdef __BIG_ENDIAN
2594 + tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, BUF_SWAP, 1);
2595 + #endif
2596 + WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
2597 + /* Initialize the ring buffer's write pointers */
2598 + ring->wptr = 0;
2599 + WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
2600 + WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
2601 + /* Set the wb address whether it's enabled or not */
2602 + rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2603 + WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
2604 + WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
2605 + CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2606 + wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2607 + WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
2608 + lower_32_bits(wptr_gpu_addr));
2609 + WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
2610 + upper_32_bits(wptr_gpu_addr));
2611 +
2612 + mdelay(1);
2613 + WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
2614 +
2615 + rb_addr = ring->gpu_addr >> 8;
2616 + WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
2617 + WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
2618 + WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
2619 +
2620 + gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
2621 +
2622 + /* Switch to pipe 0 */
2623 + mutex_lock(&adev->srbm_mutex);
2624 + gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
2625 + mutex_unlock(&adev->srbm_mutex);
2626 +
2627 + /* start the ring */
2628 + gfx_v10_0_cp_gfx_start(adev);
2629 +
2630 + for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2631 + ring = &adev->gfx.gfx_ring[i];
2632 + ring->sched.ready = true;
2633 + }
2634 +
2635 + return 0;
2636 + }
2637 +
2638 + static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2639 + {
2640 + int i;
2641 +
2642 + if (enable) {
2643 + WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2644 + } else {
2645 + WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2646 + (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
2647 + CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2648 + for (i = 0; i < adev->gfx.num_compute_rings; i++)
2649 + adev->gfx.compute_ring[i].sched.ready = false;
2650 + adev->gfx.kiq.ring.sched.ready = false;
2651 + }
2652 + udelay(50);
2653 + }
2654 +
2655 + static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2656 + {
2657 + const struct gfx_firmware_header_v1_0 *mec_hdr;
2658 + const __le32 *fw_data;
2659 + unsigned i;
2660 + u32 tmp;
2661 + u32 usec_timeout = 50000; /* Wait for 50 ms */
2662 +
2663 + if (!adev->gfx.mec_fw)
2664 + return
-EINVAL; 2665 + 2666 + gfx_v10_0_cp_compute_enable(adev, false); 2667 + 2668 + mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 2669 + amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 2670 + 2671 + fw_data = (const __le32 *) 2672 + (adev->gfx.mec_fw->data + 2673 + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 2674 + 2675 + /* Trigger an invalidation of the L1 instruction caches */ 2676 + tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL); 2677 + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1); 2678 + WREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL, tmp); 2679 + 2680 + /* Wait for invalidation complete */ 2681 + for (i = 0; i < usec_timeout; i++) { 2682 + tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_OP_CNTL); 2683 + if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL, 2684 + INVALIDATE_CACHE_COMPLETE)) 2685 + break; 2686 + udelay(1); 2687 + } 2688 + 2689 + if (i >= usec_timeout) { 2690 + dev_err(adev->dev, "failed to invalidate instruction cache\n"); 2691 + return -EINVAL; 2692 + } 2693 + 2694 + if (amdgpu_emu_mode == 1) 2695 + adev->nbio_funcs->hdp_flush(adev, NULL); 2696 + 2697 + tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL); 2698 + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); 2699 + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0); 2700 + tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1); 2701 + WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp); 2702 + 2703 + WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr & 2704 + 0xFFFFF000); 2705 + WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI, 2706 + upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); 2707 + 2708 + /* MEC1 */ 2709 + WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, 0); 2710 + 2711 + for (i = 0; i < mec_hdr->jt_size; i++) 2712 + WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA, 2713 + le32_to_cpup(fw_data + mec_hdr->jt_offset + i)); 2714 + 2715 + WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version); 2716 + 2717 + /* 2718 + * TODO: Loading MEC2 firmware is only necessary if MEC2 should run 2719 + * different microcode than MEC1. 
2720 + */
2721 +
2722 + return 0;
2723 + }
2724 +
2725 + static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
2726 + {
2727 + uint32_t tmp;
2728 + struct amdgpu_device *adev = ring->adev;
2729 +
2730 + /* tell RLC which is KIQ queue */
2731 + tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2732 + tmp &= 0xffffff00;
2733 + tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2734 + WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2735 + tmp |= 0x80;
2736 + WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2737 + }
2738 +
2739 + static int gfx_v10_0_gfx_mqd_init(struct amdgpu_ring *ring)
2740 + {
2741 + struct amdgpu_device *adev = ring->adev;
2742 + struct v10_gfx_mqd *mqd = ring->mqd_ptr;
2743 + uint64_t hqd_gpu_addr, wb_gpu_addr;
2744 + uint32_t tmp;
2745 + uint32_t rb_bufsz;
2746 +
2747 + /* set up gfx hqd wptr */
2748 + mqd->cp_gfx_hqd_wptr = 0;
2749 + mqd->cp_gfx_hqd_wptr_hi = 0;
2750 +
2751 + /* set the pointer to the MQD */
2752 + mqd->cp_mqd_base_addr = ring->mqd_gpu_addr & 0xfffffffc;
2753 + mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2754 +
2755 + /* set up mqd control */
2756 + tmp = RREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL);
2757 + tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
2758 + tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
2759 + tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
2760 + mqd->cp_gfx_mqd_control = tmp;
2761 +
2762 + /* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
2763 + tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID);
2764 + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
2765 + mqd->cp_gfx_hqd_vmid = 0;
2766 +
2767 + /* set up default queue priority level
2768 + * 0x0 = low priority, 0x1 = high priority */
2769 + tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY);
2770 + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
2771 + mqd->cp_gfx_hqd_queue_priority = tmp;
2772 +
2773 + /* set up time quantum */
2774 + tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM);
2775 + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
2776 + mqd->cp_gfx_hqd_quantum = tmp;
2777 +
2778 + /* set up gfx hqd base. this is similar to CP_RB_BASE */
2779 + hqd_gpu_addr = ring->gpu_addr >> 8;
2780 + mqd->cp_gfx_hqd_base = hqd_gpu_addr;
2781 + mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);
2782 +
2783 + /* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */
2784 + wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2785 + mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
2786 + mqd->cp_gfx_hqd_rptr_addr_hi =
2787 + upper_32_bits(wb_gpu_addr) & 0xffff;
2788 +
2789 + /* set up rb_wptr_poll addr */
2790 + wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2791 + mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2792 + mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2793 +
2794 + /* set up the gfx_hqd_control, similar to CP_RB0_CNTL */
2795 + rb_bufsz = order_base_2(ring->ring_size / 4) - 1;
2796 + tmp = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL);
2797 + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
2798 + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
2799 + #ifdef __BIG_ENDIAN
2800 + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
2801 + #endif
2802 + mqd->cp_gfx_hqd_cntl = tmp;
2803 +
2804 + /* set up cp_doorbell_control */
2805 + tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2806 + if (ring->use_doorbell) {
2807 + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2808 + DOORBELL_OFFSET, ring->doorbell_index);
2809 + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2810 + DOORBELL_EN, 1);
2811 + } else
2812 + tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2813 + DOORBELL_EN, 0);
2814 + mqd->cp_rb_doorbell_control = tmp;
2815 +
2816 + /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2817 + ring->wptr = 0;
2818 + mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR);
2819 +
2820 + /* activate the queue */
2821 + mqd->cp_gfx_hqd_active = 1;
2822 +
2823 + return 0;
2824 + }
2825 +
2826 + #ifdef BRING_UP_DEBUG
2827 + static int gfx_v10_0_gfx_queue_init_register(struct amdgpu_ring *ring)
2828 + {
2829 + struct amdgpu_device *adev = ring->adev;
2830 + struct v10_gfx_mqd *mqd = ring->mqd_ptr;
2831 +
2832 + /* set mmCP_GFX_HQD_WPTR/_HI to 0 */
2833 + WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr);
2834 + WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi);
2835 +
2836 + /* set GFX_MQD_BASE */
2837 + WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr);
2838 + WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
2839 +
2840 + /* set GFX_MQD_CONTROL */
2841 + WREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control);
2842 +
2843 + /* set GFX_HQD_VMID to 0 */
2844 + WREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid);
2845 +
2846 + WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY,
2847 + mqd->cp_gfx_hqd_queue_priority);
2848 + WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum);
2849 +
2850 + /* set GFX_HQD_BASE, similar to CP_RB_BASE */
2851 + WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base);
2852 + WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi);
2853 +
2854 + /* set GFX_HQD_RPTR_ADDR, similar to CP_RB_RPTR */
2855 + WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr);
2856 + WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi);
2857 +
2858 + /* set GFX_HQD_CNTL, similar to CP_RB_CNTL */
2859 + WREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl);
2860 +
2861 + /* set RB_WPTR_POLL_ADDR */
2862 + WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo);
2863 + WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi);
2864 +
2865 + /* set RB_DOORBELL_CONTROL */
2866 + WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control);
2867 +
2868 + /* activate the queue */
2869 + WREG32_SOC15(GC, 0, mmCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active);
2870 +
2871 + return 0;
2872 + }
2873 + #endif
2874 +
2875 + static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
2876 + {
2877 + struct amdgpu_device *adev = ring->adev;
2878 + struct v10_gfx_mqd *mqd = ring->mqd_ptr;
2879 +
2880 + if (adev->in_gpu_reset) {
2881 + /* reset mqd with the backup copy */
2882 + if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
2883 + memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], sizeof(*mqd));
2884 + /* reset the ring */
2885 + ring->wptr = 0;
2886 + amdgpu_ring_clear_ring(ring);
2887 + #ifdef BRING_UP_DEBUG
2888 + mutex_lock(&adev->srbm_mutex);
2889 + nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2890 + gfx_v10_0_gfx_queue_init_register(ring);
2891 + nv_grbm_select(adev, 0, 0, 0, 0);
2892 + mutex_unlock(&adev->srbm_mutex);
2893 + #endif
2894 + } else {
2895 + memset((void *)mqd, 0, sizeof(*mqd));
2896 + mutex_lock(&adev->srbm_mutex);
2897 + nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2898 + gfx_v10_0_gfx_mqd_init(ring);
2899 + #ifdef BRING_UP_DEBUG
2900 + gfx_v10_0_gfx_queue_init_register(ring);
2901 + #endif
2902 + nv_grbm_select(adev, 0, 0, 0, 0);
2903 + mutex_unlock(&adev->srbm_mutex);
2904 + if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
2905 + memcpy(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], mqd, sizeof(*mqd));
2906 + }
2907 +
2908 + return 0;
2909 + }
2910 +
2911 + #ifndef BRING_UP_DEBUG
2912 + static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
2913 + {
2914 + struct amdgpu_kiq *kiq = &adev->gfx.kiq;
2915 + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
2916 + int r, i;
2917 +
2918 + if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
2919 + return -EINVAL;
2920 +
2921 + r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
2922 + adev->gfx.num_gfx_rings);
2923 + if (r) {
2924 + DRM_ERROR("Failed to lock KIQ (%d).\n", r);
2925 + return r;
2926 + }
2927 +
2928 + for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2929 + kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
2930 +
2931 + r = amdgpu_ring_test_ring(kiq_ring);
2932 + if (r) {
2933 + DRM_ERROR("kgq enable failed\n");
2934 + kiq_ring->sched.ready = false;
2935 + }
2936 + return r;
2937 + }
2938 + #endif
2939 +
2940 + static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
2941 + {
2942 + int r, i;
2943 + struct amdgpu_ring *ring;
2944 +
2945 + for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2946 + ring = &adev->gfx.gfx_ring[i];
2947 +
2948 + r = amdgpu_bo_reserve(ring->mqd_obj, false);
2949 + if (unlikely(r != 0))
2950 + goto done;
2951 +
2952 + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2953 + if (!r) {
2954 + r = gfx_v10_0_gfx_init_queue(ring);
2955 + amdgpu_bo_kunmap(ring->mqd_obj);
2956 + ring->mqd_ptr = NULL;
2957 + }
2958 + amdgpu_bo_unreserve(ring->mqd_obj);
2959 + if (r)
2960 + goto done;
2961 + }
2962 + #ifndef BRING_UP_DEBUG
2963 + r = gfx_v10_0_kiq_enable_kgq(adev);
2964 + if (r)
2965 + goto done;
2966 + #endif
2967 + r = gfx_v10_0_cp_gfx_start(adev);
2968 + if (r)
2969 + goto done;
2970 +
2971 + for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2972 + ring = &adev->gfx.gfx_ring[i];
2973 + ring->sched.ready = true;
2974 + }
2975 + done:
2976 + return r;
2977 + }
2978 +
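/*
 * [Illustrative aside, not part of the patch] The MQD setup below
 * leans on REG_SET_FIELD(), a read-modify-write of one named bitfield.
 * Standalone equivalent with made-up shift/mask values standing in for
 * the generated CP_HQD_PQ_DOORBELL_CONTROL definitions:
 */
#include <stdint.h>
#include <stdio.h>

#define DB_OFFSET_SHIFT	2		/* hypothetical field layout */
#define DB_OFFSET_MASK	0x0ffffffcu
#define DB_EN_SHIFT	30
#define DB_EN_MASK	0x40000000u

static uint32_t set_field(uint32_t reg, uint32_t mask,
			  uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t tmp = 0;

	tmp = set_field(tmp, DB_OFFSET_MASK, DB_OFFSET_SHIFT, 0x100);
	tmp = set_field(tmp, DB_EN_MASK, DB_EN_SHIFT, 1);
	printf("doorbell control = 0x%08x\n", tmp);	/* 0x40000400 */
	return 0;
}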
2979 + static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
2980 + {
2981 + struct amdgpu_device *adev = ring->adev;
2982 + struct v10_compute_mqd *mqd = ring->mqd_ptr;
2983 + uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2984 + uint32_t tmp;
2985 +
2986 + mqd->header = 0xC0310800;
2987 + mqd->compute_pipelinestat_enable = 0x00000001;
2988 + mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2989 + mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2990 + mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2991 + mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2992 + mqd->compute_misc_reserved = 0x00000003;
2993 +
2994 + eop_base_addr = ring->eop_gpu_addr >> 8;
2995 + mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2996 + mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2997 +
2998 + /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2999 + tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3000 + tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3001 + (order_base_2(GFX10_MEC_HPD_SIZE / 4) - 1));
3002 +
3003 + mqd->cp_hqd_eop_control = tmp;
3004 +
3005 + /* enable doorbell? */
3006 + tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3007 +
3008 + if (ring->use_doorbell) {
3009 + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3010 + DOORBELL_OFFSET, ring->doorbell_index);
3011 + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3012 + DOORBELL_EN, 1);
3013 + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3014 + DOORBELL_SOURCE, 0);
3015 + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3016 + DOORBELL_HIT, 0);
3017 + } else {
3018 + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3019 + DOORBELL_EN, 0);
3020 + }
3021 +
3022 + mqd->cp_hqd_pq_doorbell_control = tmp;
3023 +
3024 + /* disable the queue if it's active */
3025 + ring->wptr = 0;
3026 + mqd->cp_hqd_dequeue_request = 0;
3027 + mqd->cp_hqd_pq_rptr = 0;
3028 + mqd->cp_hqd_pq_wptr_lo = 0;
3029 + mqd->cp_hqd_pq_wptr_hi = 0;
3030 +
3031 + /* set the pointer to the MQD */
3032 + mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3033 + mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3034 +
3035 + /* set MQD vmid to 0 */
3036 + tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3037 + tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3038 + mqd->cp_mqd_control = tmp;
3039 +
3040 + /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3041 + hqd_gpu_addr = ring->gpu_addr >> 8;
3042 + mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3043 + mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3044 +
3045 + /* set up the HQD, this is similar to CP_RB0_CNTL */
3046 + tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3047 + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3048 + (order_base_2(ring->ring_size / 4) - 1));
3049 + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3050 + ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
3051 + #ifdef __BIG_ENDIAN
3052 + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3053 + #endif
3054 + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3055 + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
3056 + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3057 + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3058 + mqd->cp_hqd_pq_control = tmp;
3059 +
3060 + /* set the wb address whether it's enabled or not */
3061 + wb_gpu_addr = adev->wb.gpu_addr +
(ring->rptr_offs * 4); 3062 + mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 3063 + mqd->cp_hqd_pq_rptr_report_addr_hi = 3064 + upper_32_bits(wb_gpu_addr) & 0xffff; 3065 + 3066 + /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 3067 + wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); 3068 + mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 3069 + mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 3070 + 3071 + tmp = 0; 3072 + /* enable the doorbell if requested */ 3073 + if (ring->use_doorbell) { 3074 + tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL); 3075 + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3076 + DOORBELL_OFFSET, ring->doorbell_index); 3077 + 3078 + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3079 + DOORBELL_EN, 1); 3080 + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3081 + DOORBELL_SOURCE, 0); 3082 + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3083 + DOORBELL_HIT, 0); 3084 + } 3085 + 3086 + mqd->cp_hqd_pq_doorbell_control = tmp; 3087 + 3088 + /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 3089 + ring->wptr = 0; 3090 + mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR); 3091 + 3092 + /* set the vmid for the queue */ 3093 + mqd->cp_hqd_vmid = 0; 3094 + 3095 + tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE); 3096 + tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); 3097 + mqd->cp_hqd_persistent_state = tmp; 3098 + 3099 + /* set MIN_IB_AVAIL_SIZE */ 3100 + tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL); 3101 + tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); 3102 + mqd->cp_hqd_ib_control = tmp; 3103 + 3104 + /* activate the queue */ 3105 + mqd->cp_hqd_active = 1; 3106 + 3107 + return 0; 3108 + } 3109 + 3110 + static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring) 3111 + { 3112 + struct amdgpu_device *adev = ring->adev; 3113 + struct v10_compute_mqd *mqd = ring->mqd_ptr; 3114 + int j; 3115 + 3116 + /* disable wptr polling */ 3117 + WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); 3118 + 3119 + /* write the EOP addr */ 3120 + WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR, 3121 + mqd->cp_hqd_eop_base_addr_lo); 3122 + WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI, 3123 + mqd->cp_hqd_eop_base_addr_hi); 3124 + 3125 + /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 3126 + WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL, 3127 + mqd->cp_hqd_eop_control); 3128 + 3129 + /* enable doorbell? 
*/
3130 + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3131 + mqd->cp_hqd_pq_doorbell_control);
3132 +
3133 + /* disable the queue if it's active */
3134 + if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3135 + WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3136 + for (j = 0; j < adev->usec_timeout; j++) {
3137 + if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3138 + break;
3139 + udelay(1);
3140 + }
3141 + WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3142 + mqd->cp_hqd_dequeue_request);
3143 + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
3144 + mqd->cp_hqd_pq_rptr);
3145 + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3146 + mqd->cp_hqd_pq_wptr_lo);
3147 + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3148 + mqd->cp_hqd_pq_wptr_hi);
3149 + }
3150 +
3151 + /* set the pointer to the MQD */
3152 + WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
3153 + mqd->cp_mqd_base_addr_lo);
3154 + WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3155 + mqd->cp_mqd_base_addr_hi);
3156 +
3157 + /* set MQD vmid to 0 */
3158 + WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
3159 + mqd->cp_mqd_control);
3160 +
3161 + /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3162 + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
3163 + mqd->cp_hqd_pq_base_lo);
3164 + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
3165 + mqd->cp_hqd_pq_base_hi);
3166 +
3167 + /* set up the HQD, this is similar to CP_RB0_CNTL */
3168 + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
3169 + mqd->cp_hqd_pq_control);
3170 +
3171 + /* set the wb address whether it's enabled or not */
3172 + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3173 + mqd->cp_hqd_pq_rptr_report_addr_lo);
3174 + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3175 + mqd->cp_hqd_pq_rptr_report_addr_hi);
3176 +
3177 + /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3178 + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3179 + mqd->cp_hqd_pq_wptr_poll_addr_lo);
3180 + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3181 + mqd->cp_hqd_pq_wptr_poll_addr_hi);
3182 +
3183 + /* enable the doorbell if requested */
3184 + if (ring->use_doorbell) {
3185 + WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3186 + (adev->doorbell_index.kiq * 2) << 2);
3187 + WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3188 + (adev->doorbell_index.userqueue_end * 2) << 2);
3189 + }
3190 +
3191 + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3192 + mqd->cp_hqd_pq_doorbell_control);
3193 +
3194 + /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3195 + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3196 + mqd->cp_hqd_pq_wptr_lo);
3197 + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3198 + mqd->cp_hqd_pq_wptr_hi);
3199 +
3200 + /* set the vmid for the queue */
3201 + WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3202 +
3203 + WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3204 + mqd->cp_hqd_persistent_state);
3205 +
3206 + /* activate the queue */
3207 + WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
3208 + mqd->cp_hqd_active);
3209 +
3210 + if (ring->use_doorbell)
3211 + WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3212 +
3213 + return 0;
3214 + }
3215 +
3216 + static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
3217 + {
3218 + struct amdgpu_device *adev = ring->adev;
3219 + struct v10_compute_mqd *mqd = ring->mqd_ptr;
3220 + int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3221 +
3222 + gfx_v10_0_kiq_setting(ring);
3223 +
3224 + if (adev->in_gpu_reset) { /* for GPU_RESET case */
3225 + /* reset MQD to a clean status */
3226 + if (adev->gfx.mec.mqd_backup[mqd_idx])
3227 +
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 3228 + 3229 + /* reset ring buffer */ 3230 + ring->wptr = 0; 3231 + amdgpu_ring_clear_ring(ring); 3232 + 3233 + mutex_lock(&adev->srbm_mutex); 3234 + nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3235 + gfx_v10_0_kiq_init_register(ring); 3236 + nv_grbm_select(adev, 0, 0, 0, 0); 3237 + mutex_unlock(&adev->srbm_mutex); 3238 + } else { 3239 + memset((void *)mqd, 0, sizeof(*mqd)); 3240 + mutex_lock(&adev->srbm_mutex); 3241 + nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3242 + gfx_v10_0_compute_mqd_init(ring); 3243 + gfx_v10_0_kiq_init_register(ring); 3244 + nv_grbm_select(adev, 0, 0, 0, 0); 3245 + mutex_unlock(&adev->srbm_mutex); 3246 + 3247 + if (adev->gfx.mec.mqd_backup[mqd_idx]) 3248 + memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 3249 + } 3250 + 3251 + return 0; 3252 + } 3253 + 3254 + static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring) 3255 + { 3256 + struct amdgpu_device *adev = ring->adev; 3257 + struct v10_compute_mqd *mqd = ring->mqd_ptr; 3258 + int mqd_idx = ring - &adev->gfx.compute_ring[0]; 3259 + 3260 + if (!adev->in_gpu_reset && !adev->in_suspend) { 3261 + memset((void *)mqd, 0, sizeof(*mqd)); 3262 + mutex_lock(&adev->srbm_mutex); 3263 + nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 3264 + gfx_v10_0_compute_mqd_init(ring); 3265 + nv_grbm_select(adev, 0, 0, 0, 0); 3266 + mutex_unlock(&adev->srbm_mutex); 3267 + 3268 + if (adev->gfx.mec.mqd_backup[mqd_idx]) 3269 + memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); 3270 + } else if (adev->in_gpu_reset) { /* for GPU_RESET case */ 3271 + /* reset MQD to a clean status */ 3272 + if (adev->gfx.mec.mqd_backup[mqd_idx]) 3273 + memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); 3274 + 3275 + /* reset ring buffer */ 3276 + ring->wptr = 0; 3277 + amdgpu_ring_clear_ring(ring); 3278 + } else { 3279 + amdgpu_ring_clear_ring(ring); 3280 + } 3281 + 3282 + return 0; 3283 + } 3284 + 3285 + static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev) 3286 + { 3287 + struct amdgpu_ring *ring; 3288 + int r; 3289 + 3290 + ring = &adev->gfx.kiq.ring; 3291 + 3292 + r = amdgpu_bo_reserve(ring->mqd_obj, false); 3293 + if (unlikely(r != 0)) 3294 + return r; 3295 + 3296 + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3297 + if (unlikely(r != 0)) { /* don't leak the reservation on kmap failure */ amdgpu_bo_unreserve(ring->mqd_obj); 3298 + return r; } 3299 + 3300 + gfx_v10_0_kiq_init_queue(ring); 3301 + amdgpu_bo_kunmap(ring->mqd_obj); 3302 + ring->mqd_ptr = NULL; 3303 + amdgpu_bo_unreserve(ring->mqd_obj); 3304 + ring->sched.ready = true; 3305 + return 0; 3306 + } 3307 + 3308 + static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev) 3309 + { 3310 + struct amdgpu_ring *ring = NULL; 3311 + int r = 0, i; 3312 + 3313 + gfx_v10_0_cp_compute_enable(adev, true); 3314 + 3315 + for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3316 + ring = &adev->gfx.compute_ring[i]; 3317 + 3318 + r = amdgpu_bo_reserve(ring->mqd_obj, false); 3319 + if (unlikely(r != 0)) 3320 + goto done; 3321 + r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3322 + if (!r) { 3323 + r = gfx_v10_0_kcq_init_queue(ring); 3324 + amdgpu_bo_kunmap(ring->mqd_obj); 3325 + ring->mqd_ptr = NULL; 3326 + } 3327 + amdgpu_bo_unreserve(ring->mqd_obj); 3328 + if (r) 3329 + goto done; 3330 + } 3331 + 3332 + r = amdgpu_gfx_enable_kcq(adev); 3333 + done: 3334 + return r; 3335 + } 3336 + 3337 + static int gfx_v10_0_cp_resume(struct amdgpu_device *adev) 3338 + { 3339 + int r, i; 3340 + struct amdgpu_ring *ring; 3341 + 3342 + if
(!(adev->flags & AMD_IS_APU)) 3343 + gfx_v10_0_enable_gui_idle_interrupt(adev, false); 3344 + 3345 + if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 3346 + /* legacy firmware loading */ 3347 + r = gfx_v10_0_cp_gfx_load_microcode(adev); 3348 + if (r) 3349 + return r; 3350 + 3351 + r = gfx_v10_0_cp_compute_load_microcode(adev); 3352 + if (r) 3353 + return r; 3354 + } 3355 + 3356 + r = gfx_v10_0_kiq_resume(adev); 3357 + if (r) 3358 + return r; 3359 + 3360 + r = gfx_v10_0_kcq_resume(adev); 3361 + if (r) 3362 + return r; 3363 + 3364 + if (!amdgpu_async_gfx_ring) { 3365 + r = gfx_v10_0_cp_gfx_resume(adev); 3366 + if (r) 3367 + return r; 3368 + } else { 3369 + r = gfx_v10_0_cp_async_gfx_ring_resume(adev); 3370 + if (r) 3371 + return r; 3372 + } 3373 + 3374 + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 3375 + ring = &adev->gfx.gfx_ring[i]; 3376 + DRM_INFO("gfx %d ring me %d pipe %d q %d\n", 3377 + i, ring->me, ring->pipe, ring->queue); 3378 + r = amdgpu_ring_test_ring(ring); 3379 + if (r) { 3380 + ring->sched.ready = false; 3381 + return r; 3382 + } 3383 + } 3384 + 3385 + for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3386 + ring = &adev->gfx.compute_ring[i]; 3387 + ring->sched.ready = true; 3388 + DRM_INFO("compute ring %d mec %d pipe %d q %d\n", 3389 + i, ring->me, ring->pipe, ring->queue); 3390 + r = amdgpu_ring_test_ring(ring); 3391 + if (r) 3392 + ring->sched.ready = false; 3393 + } 3394 + 3395 + return 0; 3396 + } 3397 + 3398 + static void gfx_v10_0_cp_enable(struct amdgpu_device *adev, bool enable) 3399 + { 3400 + gfx_v10_0_cp_gfx_enable(adev, enable); 3401 + gfx_v10_0_cp_compute_enable(adev, enable); 3402 + } 3403 + 3404 + static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev) 3405 + { 3406 + uint32_t data, pattern = 0xDEADBEEF; 3407 + 3408 + /* check if mmVGT_ESGS_RING_SIZE_UMD 3409 + * has been remapped to mmVGT_ESGS_RING_SIZE */ 3410 + data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE); 3411 + 3412 + WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, 0); 3413 + 3414 + WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern); 3415 + 3416 + if (RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE) == pattern) { 3417 + WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, data); 3418 + return true; 3419 + } else { 3420 + WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE, data); 3421 + return false; 3422 + } 3423 + } 3424 + 3425 + static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev) 3426 + { 3427 + uint32_t data; 3428 + 3429 + /* initialize cam_index to 0 3430 + * index will auto-inc after each data writing */ 3431 + WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0); 3432 + 3433 + /* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */ 3434 + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) << 3435 + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | 3436 + (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE) << 3437 + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); 3438 + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); 3439 + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); 3440 + 3441 + /* mmVGT_TF_MEMORY_BASE_UMD -> mmVGT_TF_MEMORY_BASE */ 3442 + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_UMD) << 3443 + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | 3444 + (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE) << 3445 + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); 3446 + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); 3447 + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); 3448 + 3449 + /* mmVGT_TF_MEMORY_BASE_HI_UMD -> mmVGT_TF_MEMORY_BASE_HI */ 3450 + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI_UMD) << 3451 +
GRBM_CAM_DATA__CAM_ADDR__SHIFT) | 3452 + (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_MEMORY_BASE_HI) << 3453 + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); 3454 + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); 3455 + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); 3456 + 3457 + /* mmVGT_HS_OFFCHIP_PARAM_UMD -> mmVGT_HS_OFFCHIP_PARAM */ 3458 + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM_UMD) << 3459 + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | 3460 + (SOC15_REG_OFFSET(GC, 0, mmVGT_HS_OFFCHIP_PARAM) << 3461 + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); 3462 + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); 3463 + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); 3464 + 3465 + /* mmVGT_ESGS_RING_SIZE_UMD -> mmVGT_ESGS_RING_SIZE */ 3466 + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE_UMD) << 3467 + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | 3468 + (SOC15_REG_OFFSET(GC, 0, mmVGT_ESGS_RING_SIZE) << 3469 + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); 3470 + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); 3471 + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); 3472 + 3473 + /* mmVGT_GSVS_RING_SIZE_UMD -> mmVGT_GSVS_RING_SIZE */ 3474 + data = (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE_UMD) << 3475 + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | 3476 + (SOC15_REG_OFFSET(GC, 0, mmVGT_GSVS_RING_SIZE) << 3477 + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); 3478 + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); 3479 + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); 3480 + 3481 + /* mmSPI_CONFIG_CNTL_REMAP -> mmSPI_CONFIG_CNTL */ 3482 + data = (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_REMAP) << 3483 + GRBM_CAM_DATA__CAM_ADDR__SHIFT) | 3484 + (SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL) << 3485 + GRBM_CAM_DATA__CAM_REMAPADDR__SHIFT); 3486 + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA_UPPER, 0); 3487 + WREG32_SOC15(GC, 0, mmGRBM_CAM_DATA, data); 3488 + } 3489 + 3490 + static int gfx_v10_0_hw_init(void *handle) 3491 + { 3492 + int r; 3493 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3494 + 3495 + if (!amdgpu_emu_mode) 3496 + gfx_v10_0_init_golden_registers(adev); 3497 + 3498 + if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 3499 + /* 3500 + * For gfx 10, RLC firmware loading depends on the SMU firmware 3501 + * being loaded first, so with direct loading the SMC ucode has 3502 + * to be loaded here before the RLC.
3503 + */ 3504 + r = smu_load_microcode(&adev->smu); 3505 + if (r) 3506 + return r; 3507 + 3508 + r = smu_check_fw_status(&adev->smu); 3509 + if (r) { 3510 + pr_err("SMC firmware status is not correct\n"); 3511 + return r; 3512 + } 3513 + } 3514 + 3515 + /* if GRBM CAM not remapped, set up the remapping */ 3516 + if (!gfx_v10_0_check_grbm_cam_remapping(adev)) 3517 + gfx_v10_0_setup_grbm_cam_remapping(adev); 3518 + 3519 + gfx_v10_0_constants_init(adev); 3520 + 3521 + r = gfx_v10_0_rlc_resume(adev); 3522 + if (r) 3523 + return r; 3524 + 3525 + /* 3526 + * init golden registers and rlc resume may override some registers, 3527 + * reconfig them here 3528 + */ 3529 + gfx_v10_0_tcp_harvest(adev); 3530 + 3531 + r = gfx_v10_0_cp_resume(adev); 3532 + if (r) 3533 + return r; 3534 + 3535 + return r; 3536 + } 3537 + 3538 + #ifndef BRING_UP_DEBUG 3539 + static int gfx_v10_0_disable_kgq(struct amdgpu_device *adev) 3540 + { 3541 + struct amdgpu_kiq *kiq = &adev->gfx.kiq; 3542 + struct amdgpu_ring *kiq_ring = &kiq->ring; 3543 + int i; 3544 + 3545 + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 3546 + return -EINVAL; 3547 + 3548 + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size * 3549 + adev->gfx.num_gfx_rings)) 3550 + return -ENOMEM; 3551 + 3552 + for (i = 0; i < adev->gfx.num_gfx_rings; i++) 3553 + kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i], 3554 + RESET_QUEUES, 0, 0); 3555 + 3556 + return amdgpu_ring_test_ring(kiq_ring); 3557 + } 3558 + #endif 3559 + 3560 + static int gfx_v10_0_hw_fini(void *handle) 3561 + { 3562 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3563 + 3564 + amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); 3565 + amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); 3566 + #ifndef BRING_UP_DEBUG 3567 + if (gfx_v10_0_disable_kgq(adev)) 3568 + DRM_ERROR("KGQ disable failed\n"); 3569 + #endif 3570 + if (amdgpu_gfx_disable_kcq(adev)) 3571 + DRM_ERROR("KCQ disable failed\n"); 3572 + if (amdgpu_sriov_vf(adev)) { 3573 + pr_debug("For SRIOV client, shouldn't do anything.\n"); 3574 + return 0; 3575 + } 3576 + gfx_v10_0_cp_enable(adev, false); 3577 + gfx_v10_0_rlc_stop(adev); 3578 + 3579 + return 0; 3580 + } 3581 + 3582 + static int gfx_v10_0_suspend(void *handle) 3583 + { 3584 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3585 + 3586 + adev->in_suspend = true; 3587 + return gfx_v10_0_hw_fini(adev); 3588 + } 3589 + 3590 + static int gfx_v10_0_resume(void *handle) 3591 + { 3592 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3593 + int r; 3594 + 3595 + r = gfx_v10_0_hw_init(adev); 3596 + adev->in_suspend = false; 3597 + return r; 3598 + } 3599 + 3600 + static bool gfx_v10_0_is_idle(void *handle) 3601 + { 3602 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3603 + 3604 + if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS), 3605 + GRBM_STATUS, GUI_ACTIVE)) 3606 + return false; 3607 + else 3608 + return true; 3609 + } 3610 + 3611 + static int gfx_v10_0_wait_for_idle(void *handle) 3612 + { 3613 + unsigned i; 3614 + u32 tmp; 3615 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3616 + 3617 + for (i = 0; i < adev->usec_timeout; i++) { 3618 + /* read GRBM_STATUS */ 3619 + tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) & 3620 + GRBM_STATUS__GUI_ACTIVE_MASK; 3621 + 3622 + if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) 3623 + return 0; 3624 + udelay(1); 3625 + } 3626 + return -ETIMEDOUT; 3627 + } 3628 + 3629 + static int gfx_v10_0_soft_reset(void *handle) 3630 + { 3631 + u32 grbm_soft_reset = 0; 3632 + u32 tmp;
3633 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3634 + 3635 + /* GRBM_STATUS */ 3636 + tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS); 3637 + if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | 3638 + GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | 3639 + GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK | 3640 + GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK | 3641 + GRBM_STATUS__SPI_BUSY_MASK | 3642 + GRBM_STATUS__GE_BUSY_NO_DMA_MASK)) { 3643 + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 3644 + GRBM_SOFT_RESET, SOFT_RESET_CP, 3645 + 1); 3646 + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 3647 + GRBM_SOFT_RESET, SOFT_RESET_GFX, 3648 + 1); 3649 + } 3650 + 3651 + if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { 3652 + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 3653 + GRBM_SOFT_RESET, SOFT_RESET_CP, 3654 + 1); 3655 + } 3656 + 3657 + /* GRBM_STATUS2 */ 3658 + tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2); 3659 + if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY)) 3660 + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, 3661 + GRBM_SOFT_RESET, SOFT_RESET_RLC, 3662 + 1); 3663 + 3664 + if (grbm_soft_reset) { 3665 + /* stop the rlc */ 3666 + gfx_v10_0_rlc_stop(adev); 3667 + 3668 + /* Disable GFX parsing/prefetching */ 3669 + gfx_v10_0_cp_gfx_enable(adev, false); 3670 + 3671 + /* Disable MEC parsing/prefetching */ 3672 + gfx_v10_0_cp_compute_enable(adev, false); 3673 + 3674 + 3675 + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); 3676 + tmp |= grbm_soft_reset; 3677 + dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); 3678 + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); 3679 + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); 3680 + 3681 + udelay(50); 3682 + 3683 + tmp &= ~grbm_soft_reset; 3684 + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); 3685 + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); 3686 + 3687 + 3688 + /* Wait a little for things to settle down */ 3689 + udelay(50); 3690 + } 3691 + return 0; 3692 + } 3693 + 3694 + static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) 3695 + { 3696 + uint64_t clock; 3697 + 3698 + mutex_lock(&adev->gfx.gpu_clock_mutex); 3699 + WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); 3700 + clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | 3701 + ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); 3702 + mutex_unlock(&adev->gfx.gpu_clock_mutex); 3703 + return clock; 3704 + } 3705 + 3706 + static void gfx_v10_0_ring_emit_gds_switch(struct amdgpu_ring *ring, 3707 + uint32_t vmid, 3708 + uint32_t gds_base, uint32_t gds_size, 3709 + uint32_t gws_base, uint32_t gws_size, 3710 + uint32_t oa_base, uint32_t oa_size) 3711 + { 3712 + struct amdgpu_device *adev = ring->adev; 3713 + 3714 + /* GDS Base */ 3715 + gfx_v10_0_write_data_to_reg(ring, 0, false, 3716 + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid, 3717 + gds_base); 3718 + 3719 + /* GDS Size */ 3720 + gfx_v10_0_write_data_to_reg(ring, 0, false, 3721 + SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid, 3722 + gds_size); 3723 + 3724 + /* GWS */ 3725 + gfx_v10_0_write_data_to_reg(ring, 0, false, 3726 + SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid, 3727 + gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); 3728 + 3729 + /* OA */ 3730 + gfx_v10_0_write_data_to_reg(ring, 0, false, 3731 + SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid, 3732 + (1 << (oa_size + oa_base)) - (1 << oa_base)); 3733 + } 3734 + 3735 + static
int gfx_v10_0_early_init(void *handle) 3736 + { 3737 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3738 + 3739 + adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS; 3740 + adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS; 3741 + 3742 + gfx_v10_0_set_kiq_pm4_funcs(adev); 3743 + gfx_v10_0_set_ring_funcs(adev); 3744 + gfx_v10_0_set_irq_funcs(adev); 3745 + gfx_v10_0_set_gds_init(adev); 3746 + gfx_v10_0_set_rlc_funcs(adev); 3747 + 3748 + return 0; 3749 + } 3750 + 3751 + static int gfx_v10_0_late_init(void *handle) 3752 + { 3753 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3754 + int r; 3755 + 3756 + r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 3757 + if (r) 3758 + return r; 3759 + 3760 + r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 3761 + if (r) 3762 + return r; 3763 + 3764 + return 0; 3765 + } 3766 + 3767 + static bool gfx_v10_0_is_rlc_enabled(struct amdgpu_device *adev) 3768 + { 3769 + uint32_t rlc_cntl; 3770 + 3771 + /* if RLC is not enabled, do nothing */ 3772 + rlc_cntl = RREG32_SOC15(GC, 0, mmRLC_CNTL); 3773 + return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false; 3774 + } 3775 + 3776 + static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev) 3777 + { 3778 + uint32_t data; 3779 + unsigned i; 3780 + 3781 + data = RLC_SAFE_MODE__CMD_MASK; 3782 + data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); 3783 + WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); 3784 + 3785 + /* wait for RLC_SAFE_MODE */ 3786 + for (i = 0; i < adev->usec_timeout; i++) { 3787 + if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) 3788 + break; 3789 + udelay(1); 3790 + } 3791 + } 3792 + 3793 + static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev) 3794 + { 3795 + uint32_t data; 3796 + 3797 + data = RLC_SAFE_MODE__CMD_MASK; 3798 + WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); 3799 + } 3800 + 3801 + static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, 3802 + bool enable) 3803 + { 3804 + uint32_t data, def; 3805 + 3806 + /* It is disabled by HW by default */ 3807 + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { 3808 + /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 3809 + def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3810 + data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 3811 + RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | 3812 + RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); 3813 + 3814 + /* only for Vega10 & Raven1 */ 3815 + data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK; 3816 + 3817 + if (def != data) 3818 + WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3819 + 3820 + /* MGLS is a global flag to control all MGLS in GFX */ 3821 + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { 3822 + /* 2 - RLC memory Light sleep */ 3823 + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { 3824 + def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); 3825 + data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; 3826 + if (def != data) 3827 + WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data); 3828 + } 3829 + /* 3 - CP memory Light sleep */ 3830 + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { 3831 + def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); 3832 + data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 3833 + if (def != data) 3834 + WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); 3835 + } 3836 + } 3837 + } else { 3838 + /* 1 - MGCG_OVERRIDE */ 3839 + def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3840 + data |= 
(RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | 3841 + RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | 3842 + RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | 3843 + RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); 3844 + if (def != data) 3845 + WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3846 + 3847 + /* 2 - disable MGLS in RLC */ 3848 + data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); 3849 + if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { 3850 + data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; 3851 + WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data); 3852 + } 3853 + 3854 + /* 3 - disable MGLS in CP */ 3855 + data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); 3856 + if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { 3857 + data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 3858 + WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); 3859 + } 3860 + } 3861 + } 3862 + 3863 + static void gfx_v10_0_update_3d_clock_gating(struct amdgpu_device *adev, 3864 + bool enable) 3865 + { 3866 + uint32_t data, def; 3867 + 3868 + /* Enable 3D CGCG/CGLS */ 3869 + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) { 3870 + /* write cmd to clear cgcg/cgls ov */ 3871 + def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3872 + /* unset CGCG override */ 3873 + data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; 3874 + /* update CGCG and CGLS override bits */ 3875 + if (def != data) 3876 + WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3877 + /* enable 3Dcgcg FSM(0x0000363f) */ 3878 + def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); 3879 + data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 3880 + RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; 3881 + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) 3882 + data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 3883 + RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; 3884 + if (def != data) 3885 + WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); 3886 + 3887 + /* set IDLE_POLL_COUNT(0x00900100) */ 3888 + def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL); 3889 + data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 3890 + (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 3891 + if (def != data) 3892 + WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data); 3893 + } else { 3894 + /* Disable CGCG/CGLS */ 3895 + def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); 3896 + /* disable cgcg, cgls should be disabled */ 3897 + data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK | 3898 + RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK); 3899 + /* disable cgcg and cgls in FSM */ 3900 + if (def != data) 3901 + WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); 3902 + } 3903 + } 3904 + 3905 + static void gfx_v10_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, 3906 + bool enable) 3907 + { 3908 + uint32_t def, data; 3909 + 3910 + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { 3911 + def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 3912 + /* unset CGCG override */ 3913 + data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; 3914 + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 3915 + data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 3916 + else 3917 + data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; 3918 + /* update CGCG and CGLS override bits */ 3919 + if (def != data) 3920 + WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); 3921 + 3922 + /* enable cgcg FSM(0x0000363F) */ 3923 + def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); 3924 + data = (0x36 << 
RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 3925 + RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 3926 + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) 3927 + data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | 3928 + RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 3929 + if (def != data) 3930 + WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); 3931 + 3932 + /* set IDLE_POLL_COUNT(0x00900100) */ 3933 + def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL); 3934 + data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | 3935 + (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 3936 + if (def != data) 3937 + WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data); 3938 + } else { 3939 + def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); 3940 + /* reset CGCG/CGLS bits */ 3941 + data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); 3942 + /* disable cgcg and cgls in FSM */ 3943 + if (def != data) 3944 + WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); 3945 + } 3946 + } 3947 + 3948 + static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, 3949 + bool enable) 3950 + { 3951 + amdgpu_gfx_rlc_enter_safe_mode(adev); 3952 + 3953 + if (enable) { 3954 + /* CGCG/CGLS should be enabled after MGCG/MGLS 3955 + * === MGCG + MGLS === 3956 + */ 3957 + gfx_v10_0_update_medium_grain_clock_gating(adev, enable); 3958 + /* === CGCG/CGLS for GFX 3D Only === */ 3959 + gfx_v10_0_update_3d_clock_gating(adev, enable); 3960 + /* === CGCG + CGLS === */ 3961 + gfx_v10_0_update_coarse_grain_clock_gating(adev, enable); 3962 + } else { 3963 + /* CGCG/CGLS should be disabled before MGCG/MGLS 3964 + * === CGCG + CGLS === 3965 + */ 3966 + gfx_v10_0_update_coarse_grain_clock_gating(adev, enable); 3967 + /* === CGCG/CGLS for GFX 3D Only === */ 3968 + gfx_v10_0_update_3d_clock_gating(adev, enable); 3969 + /* === MGCG + MGLS === */ 3970 + gfx_v10_0_update_medium_grain_clock_gating(adev, enable); 3971 + } 3972 + 3973 + if (adev->cg_flags & 3974 + (AMD_CG_SUPPORT_GFX_MGCG | 3975 + AMD_CG_SUPPORT_GFX_CGLS | 3976 + AMD_CG_SUPPORT_GFX_CGCG | 3978 + AMD_CG_SUPPORT_GFX_3D_CGCG | 3979 + AMD_CG_SUPPORT_GFX_3D_CGLS)) 3980 + gfx_v10_0_enable_gui_idle_interrupt(adev, enable); 3981 + 3982 + amdgpu_gfx_rlc_exit_safe_mode(adev); 3983 + 3984 + return 0; 3985 + } 3986 + 3987 + static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = { 3988 + .is_rlc_enabled = gfx_v10_0_is_rlc_enabled, 3989 + .set_safe_mode = gfx_v10_0_set_safe_mode, 3990 + .unset_safe_mode = gfx_v10_0_unset_safe_mode, 3991 + .init = gfx_v10_0_rlc_init, 3992 + .get_csb_size = gfx_v10_0_get_csb_size, 3993 + .get_csb_buffer = gfx_v10_0_get_csb_buffer, 3994 + .resume = gfx_v10_0_rlc_resume, 3995 + .stop = gfx_v10_0_rlc_stop, 3996 + .reset = gfx_v10_0_rlc_reset, 3997 + .start = gfx_v10_0_rlc_start 3998 + }; 3999 + 4000 + static int gfx_v10_0_set_powergating_state(void *handle, 4001 + enum amd_powergating_state state) 4002 + { 4003 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4004 + bool enable = (state == AMD_PG_STATE_GATE) ?
true : false; 4005 + switch (adev->asic_type) { 4006 + case CHIP_NAVI10: 4007 + if (!enable) { 4008 + amdgpu_gfx_off_ctrl(adev, false); 4009 + cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); 4010 + } else 4011 + amdgpu_gfx_off_ctrl(adev, true); 4012 + break; 4013 + default: 4014 + break; 4015 + } 4016 + return 0; 4017 + } 4018 + 4019 + static int gfx_v10_0_set_clockgating_state(void *handle, 4020 + enum amd_clockgating_state state) 4021 + { 4022 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4023 + 4024 + switch (adev->asic_type) { 4025 + case CHIP_NAVI10: 4026 + gfx_v10_0_update_gfx_clock_gating(adev, 4027 + state == AMD_CG_STATE_GATE ? true : false); 4028 + break; 4029 + default: 4030 + break; 4031 + } 4032 + return 0; 4033 + } 4034 + 4035 + static void gfx_v10_0_get_clockgating_state(void *handle, u32 *flags) 4036 + { 4037 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4038 + int data; 4039 + 4040 + /* AMD_CG_SUPPORT_GFX_MGCG */ 4041 + data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 4042 + if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 4043 + *flags |= AMD_CG_SUPPORT_GFX_MGCG; 4044 + 4045 + /* AMD_CG_SUPPORT_GFX_CGCG */ 4046 + data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); 4047 + if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 4048 + *flags |= AMD_CG_SUPPORT_GFX_CGCG; 4049 + 4050 + /* AMD_CG_SUPPORT_GFX_CGLS */ 4051 + if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) 4052 + *flags |= AMD_CG_SUPPORT_GFX_CGLS; 4053 + 4054 + /* AMD_CG_SUPPORT_GFX_RLC_LS */ 4055 + data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); 4056 + if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) 4057 + *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; 4058 + 4059 + /* AMD_CG_SUPPORT_GFX_CP_LS */ 4060 + data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); 4061 + if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) 4062 + *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; 4063 + 4064 + /* AMD_CG_SUPPORT_GFX_3D_CGCG */ 4065 + data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); 4066 + if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) 4067 + *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; 4068 + 4069 + /* AMD_CG_SUPPORT_GFX_3D_CGLS */ 4070 + if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) 4071 + *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS; 4072 + } 4073 + 4074 + static u64 gfx_v10_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) 4075 + { 4076 + return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 is 32bit rptr*/ 4077 + } 4078 + 4079 + static u64 gfx_v10_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) 4080 + { 4081 + struct amdgpu_device *adev = ring->adev; 4082 + u64 wptr; 4083 + 4084 + /* XXX check if swapping is necessary on BE */ 4085 + if (ring->use_doorbell) { 4086 + wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]); 4087 + } else { 4088 + wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR); 4089 + wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32; 4090 + } 4091 + 4092 + return wptr; 4093 + } 4094 + 4095 + static void gfx_v10_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) 4096 + { 4097 + struct amdgpu_device *adev = ring->adev; 4098 + 4099 + if (ring->use_doorbell) { 4100 + /* XXX check if swapping is necessary on BE */ 4101 + atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr); 4102 + WDOORBELL64(ring->doorbell_index, ring->wptr); 4103 + } else { 4104 + WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); 4105 + WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); 4106 + } 4107 + } 4108 + 4109 + static u64 
gfx_v10_0_ring_get_rptr_compute(struct amdgpu_ring *ring) 4110 + { 4111 + return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 hardware is 32bit rptr */ 4112 + } 4113 + 4114 + static u64 gfx_v10_0_ring_get_wptr_compute(struct amdgpu_ring *ring) 4115 + { 4116 + u64 wptr; 4117 + 4118 + /* XXX check if swapping is necessary on BE */ 4119 + if (ring->use_doorbell) 4120 + wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]); 4121 + else 4122 + BUG(); 4123 + return wptr; 4124 + } 4125 + 4126 + static void gfx_v10_0_ring_set_wptr_compute(struct amdgpu_ring *ring) 4127 + { 4128 + struct amdgpu_device *adev = ring->adev; 4129 + 4130 + /* XXX check if swapping is necessary on BE */ 4131 + if (ring->use_doorbell) { 4132 + atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr); 4133 + WDOORBELL64(ring->doorbell_index, ring->wptr); 4134 + } else { 4135 + BUG(); /* only DOORBELL method supported on gfx10 now */ 4136 + } 4137 + } 4138 + 4139 + static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 4140 + { 4141 + struct amdgpu_device *adev = ring->adev; 4142 + u32 ref_and_mask, reg_mem_engine; 4143 + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; 4144 + 4145 + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 4146 + switch (ring->me) { 4147 + case 1: 4148 + ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; 4149 + break; 4150 + case 2: 4151 + ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; 4152 + break; 4153 + default: 4154 + return; 4155 + } 4156 + reg_mem_engine = 0; 4157 + } else { 4158 + ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; 4159 + reg_mem_engine = 1; /* pfp */ 4160 + } 4161 + 4162 + gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, 4163 + adev->nbio_funcs->get_hdp_flush_req_offset(adev), 4164 + adev->nbio_funcs->get_hdp_flush_done_offset(adev), 4165 + ref_and_mask, ref_and_mask, 0x20); 4166 + } 4167 + 4168 + static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 4169 + struct amdgpu_job *job, 4170 + struct amdgpu_ib *ib, 4171 + uint32_t flags) 4172 + { 4173 + unsigned vmid = AMDGPU_JOB_GET_VMID(job); 4174 + u32 header, control = 0; 4175 + 4176 + if (ib->flags & AMDGPU_IB_FLAG_CE) 4177 + header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2); 4178 + else 4179 + header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 4180 + 4181 + control |= ib->length_dw | (vmid << 24); 4182 + 4183 + if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { 4184 + control |= INDIRECT_BUFFER_PRE_ENB(1); 4185 + 4186 + if (flags & AMDGPU_IB_PREEMPTED) 4187 + control |= INDIRECT_BUFFER_PRE_RESUME(1); 4188 + 4189 + if (!(ib->flags & AMDGPU_IB_FLAG_CE)) 4190 + gfx_v10_0_ring_emit_de_meta(ring, 4191 + flags & AMDGPU_IB_PREEMPTED ? 
true : false); 4192 + } 4193 + 4194 + amdgpu_ring_write(ring, header); 4195 + BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 4196 + amdgpu_ring_write(ring, 4197 + #ifdef __BIG_ENDIAN 4198 + (2 << 0) | 4199 + #endif 4200 + lower_32_bits(ib->gpu_addr)); 4201 + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 4202 + amdgpu_ring_write(ring, control); 4203 + } 4204 + 4205 + static void gfx_v10_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 4206 + struct amdgpu_job *job, 4207 + struct amdgpu_ib *ib, 4208 + uint32_t flags) 4209 + { 4210 + unsigned vmid = AMDGPU_JOB_GET_VMID(job); 4211 + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 4212 + 4213 + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 4214 + BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ 4215 + amdgpu_ring_write(ring, 4216 + #ifdef __BIG_ENDIAN 4217 + (2 << 0) | 4218 + #endif 4219 + lower_32_bits(ib->gpu_addr)); 4220 + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); 4221 + amdgpu_ring_write(ring, control); 4222 + } 4223 + 4224 + static void gfx_v10_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, 4225 + u64 seq, unsigned flags) 4226 + { 4227 + struct amdgpu_device *adev = ring->adev; 4228 + bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 4229 + bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 4230 + 4231 + /* Interrupt not work fine on GFX10.1 model yet. Use fallback instead */ 4232 + if (adev->pdev->device == 0x50) 4233 + int_sel = false; 4234 + 4235 + /* RELEASE_MEM - flush caches, send int */ 4236 + amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); 4237 + amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ | 4238 + PACKET3_RELEASE_MEM_GCR_GL2_WB | 4239 + PACKET3_RELEASE_MEM_GCR_GL2_INV | 4240 + PACKET3_RELEASE_MEM_GCR_GL2_US | 4241 + PACKET3_RELEASE_MEM_GCR_GL1_INV | 4242 + PACKET3_RELEASE_MEM_GCR_GLV_INV | 4243 + PACKET3_RELEASE_MEM_GCR_GLM_INV | 4244 + PACKET3_RELEASE_MEM_GCR_GLM_WB | 4245 + PACKET3_RELEASE_MEM_CACHE_POLICY(3) | 4246 + PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 4247 + PACKET3_RELEASE_MEM_EVENT_INDEX(5))); 4248 + amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) | 4249 + PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 
2 : 0))); 4250 + 4251 + /* 4252 + * The address should be Qword aligned for a 64-bit write, or Dword 4253 + * aligned when only the low 32 bits are written (data high is discarded). 4254 + */ 4255 + if (write64bit) 4256 + BUG_ON(addr & 0x7); 4257 + else 4258 + BUG_ON(addr & 0x3); 4259 + amdgpu_ring_write(ring, lower_32_bits(addr)); 4260 + amdgpu_ring_write(ring, upper_32_bits(addr)); 4261 + amdgpu_ring_write(ring, lower_32_bits(seq)); 4262 + amdgpu_ring_write(ring, upper_32_bits(seq)); 4263 + amdgpu_ring_write(ring, 0); 4264 + } 4265 + 4266 + static void gfx_v10_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 4267 + { 4268 + int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 4269 + uint32_t seq = ring->fence_drv.sync_seq; 4270 + uint64_t addr = ring->fence_drv.gpu_addr; 4271 + 4272 + gfx_v10_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr), 4273 + upper_32_bits(addr), seq, 0xffffffff, 4); 4274 + } 4275 + 4276 + static void gfx_v10_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 4277 + unsigned vmid, uint64_t pd_addr) 4278 + { 4279 + amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 4280 + 4281 + /* compute doesn't have PFP */ 4282 + if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) { 4283 + /* sync PFP to ME, otherwise we might get invalid PFP reads */ 4284 + amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 4285 + amdgpu_ring_write(ring, 0x0); 4286 + } 4287 + } 4288 + 4289 + static void gfx_v10_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, 4290 + u64 seq, unsigned int flags) 4291 + { 4292 + struct amdgpu_device *adev = ring->adev; 4293 + 4294 + /* we only allocate 32bit for each seq wb address */ 4295 + BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); 4296 + 4297 + /* write fence seq to the "addr" */ 4298 + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4299 + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 4300 + WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); 4301 + amdgpu_ring_write(ring, lower_32_bits(addr)); 4302 + amdgpu_ring_write(ring, upper_32_bits(addr)); 4303 + amdgpu_ring_write(ring, lower_32_bits(seq)); 4304 + 4305 + if (flags & AMDGPU_FENCE_FLAG_INT) { 4306 + /* set register to trigger INT */ 4307 + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4308 + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 4309 + WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); 4310 + amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS)); 4311 + amdgpu_ring_write(ring, 0); 4312 + amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ 4313 + } 4314 + } 4315 + 4316 + static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring) 4317 + { 4318 + amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 4319 + amdgpu_ring_write(ring, 0); 4320 + } 4321 + 4322 + static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) 4323 + { 4324 + uint32_t dw2 = 0; 4325 + 4326 + if (amdgpu_mcbp) 4327 + gfx_v10_0_ring_emit_ce_meta(ring, 4328 + flags & AMDGPU_IB_PREEMPTED ?
true : false); 4329 + 4330 + gfx_v10_0_ring_emit_tmz(ring, true); 4331 + 4332 + dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */ 4333 + if (flags & AMDGPU_HAVE_CTX_SWITCH) { 4334 + /* set load_global_config & load_global_uconfig */ 4335 + dw2 |= 0x8001; 4336 + /* set load_cs_sh_regs */ 4337 + dw2 |= 0x01000000; 4338 + /* set load_per_context_state & load_gfx_sh_regs for GFX */ 4339 + dw2 |= 0x10002; 4340 + 4341 + /* set load_ce_ram if preamble is presented */ 4342 + if (AMDGPU_PREAMBLE_IB_PRESENT & flags) 4343 + dw2 |= 0x10000000; 4344 + } else { 4345 + /* still load_ce_ram if the preamble is presented for the first 4346 + * time, even though no context switch happens. 4347 + */ 4348 + if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags) 4349 + dw2 |= 0x10000000; 4350 + } 4351 + 4352 + amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 4353 + amdgpu_ring_write(ring, dw2); 4354 + amdgpu_ring_write(ring, 0); 4355 + } 4356 + 4357 + static unsigned gfx_v10_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring) 4358 + { 4359 + unsigned ret; 4360 + 4361 + amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3)); 4362 + amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr)); 4363 + amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr)); 4364 + amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */ 4365 + ret = ring->wptr & ring->buf_mask; 4366 + amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */ 4367 + 4368 + return ret; 4369 + } 4370 + 4371 + static void gfx_v10_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset) 4372 + { 4373 + unsigned cur; 4374 + BUG_ON(offset > ring->buf_mask); 4375 + BUG_ON(ring->ring[offset] != 0x55aa55aa); 4376 + 4377 + cur = (ring->wptr - 1) & ring->buf_mask; 4378 + if (likely(cur > offset)) 4379 + ring->ring[offset] = cur - offset; 4380 + else 4381 + ring->ring[offset] = (ring->buf_mask + 1) - offset + cur; 4382 + } 4383 + 4384 + static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring) 4385 + { 4386 + int i, r = 0; 4387 + struct amdgpu_device *adev = ring->adev; 4388 + struct amdgpu_kiq *kiq = &adev->gfx.kiq; 4389 + struct amdgpu_ring *kiq_ring = &kiq->ring; 4390 + 4391 + if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) 4392 + return -EINVAL; 4393 + 4394 + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) 4395 + return -ENOMEM; 4396 + 4397 + /* assert preemption condition */ 4398 + amdgpu_ring_set_preempt_cond_exec(ring, false); 4399 + 4400 + /* assert IB preemption, emit the trailing fence */ 4401 + kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP, 4402 + ring->trail_fence_gpu_addr, 4403 + ++ring->trail_seq); 4404 + amdgpu_ring_commit(kiq_ring); 4405 + 4406 + /* poll the trailing fence */ 4407 + for (i = 0; i < adev->usec_timeout; i++) { 4408 + if (ring->trail_seq == 4409 + le32_to_cpu(*(ring->trail_fence_cpu_addr))) 4410 + break; 4411 + DRM_UDELAY(1); 4412 + } 4413 + 4414 + if (i >= adev->usec_timeout) { 4415 + r = -EINVAL; 4416 + DRM_ERROR("ring %d failed to preempt ib\n", ring->idx); 4417 + } 4418 + 4419 + /* deassert preemption condition */ 4420 + amdgpu_ring_set_preempt_cond_exec(ring, true); 4421 + return r; 4422 + } 4423 + 4424 + static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume) 4425 + { 4426 + struct amdgpu_device *adev = ring->adev; 4427 + struct v10_ce_ib_state ce_payload = {0}; 4428 + uint64_t csa_addr; 4429 + int cnt; 4430 + 4431 + cnt = (sizeof(ce_payload) >> 2) + 4 - 2; 4432 + csa_addr =
amdgpu_csa_vaddr(ring->adev); 4433 + 4434 + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); 4435 + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | 4436 + WRITE_DATA_DST_SEL(8) | 4437 + WR_CONFIRM) | 4438 + WRITE_DATA_CACHE_POLICY(0)); 4439 + amdgpu_ring_write(ring, lower_32_bits(csa_addr + 4440 + offsetof(struct v10_gfx_meta_data, ce_payload))); 4441 + amdgpu_ring_write(ring, upper_32_bits(csa_addr + 4442 + offsetof(struct v10_gfx_meta_data, ce_payload))); 4443 + 4444 + if (resume) 4445 + amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr + 4446 + offsetof(struct v10_gfx_meta_data, 4447 + ce_payload), 4448 + sizeof(ce_payload) >> 2); 4449 + else 4450 + amdgpu_ring_write_multiple(ring, (void *)&ce_payload, 4451 + sizeof(ce_payload) >> 2); 4452 + } 4453 + 4454 + static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume) 4455 + { 4456 + struct amdgpu_device *adev = ring->adev; 4457 + struct v10_de_ib_state de_payload = {0}; 4458 + uint64_t csa_addr, gds_addr; 4459 + int cnt; 4460 + 4461 + csa_addr = amdgpu_csa_vaddr(ring->adev); 4462 + gds_addr = ALIGN(csa_addr + AMDGPU_CSA_SIZE - adev->gds.gds_size, 4463 + PAGE_SIZE); 4464 + de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); 4465 + de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); 4466 + 4467 + cnt = (sizeof(de_payload) >> 2) + 4 - 2; 4468 + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); 4469 + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | 4470 + WRITE_DATA_DST_SEL(8) | 4471 + WR_CONFIRM) | 4472 + WRITE_DATA_CACHE_POLICY(0)); 4473 + amdgpu_ring_write(ring, lower_32_bits(csa_addr + 4474 + offsetof(struct v10_gfx_meta_data, de_payload))); 4475 + amdgpu_ring_write(ring, upper_32_bits(csa_addr + 4476 + offsetof(struct v10_gfx_meta_data, de_payload))); 4477 + 4478 + if (resume) 4479 + amdgpu_ring_write_multiple(ring, adev->virt.csa_cpu_addr + 4480 + offsetof(struct v10_gfx_meta_data, 4481 + de_payload), 4482 + sizeof(de_payload) >> 2); 4483 + else 4484 + amdgpu_ring_write_multiple(ring, (void *)&de_payload, 4485 + sizeof(de_payload) >> 2); 4486 + } 4487 + 4488 + static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start) 4489 + { 4490 + amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); 4491 + amdgpu_ring_write(ring, FRAME_CMD(start ? 
0 : 1)); /* frame_end */ 4492 + } 4493 + 4494 + static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) 4495 + { 4496 + struct amdgpu_device *adev = ring->adev; 4497 + 4498 + amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 4499 + amdgpu_ring_write(ring, 0 | /* src: register */ 4500 + (5 << 8) | /* dst: memory */ 4501 + (1 << 20)); /* write confirm */ 4502 + amdgpu_ring_write(ring, reg); 4503 + amdgpu_ring_write(ring, 0); 4504 + amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 4505 + adev->virt.reg_val_offs * 4)); 4506 + amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 4507 + adev->virt.reg_val_offs * 4)); 4508 + } 4509 + 4510 + static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 4511 + uint32_t val) 4512 + { 4513 + uint32_t cmd = 0; 4514 + 4515 + switch (ring->funcs->type) { 4516 + case AMDGPU_RING_TYPE_GFX: 4517 + cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; 4518 + break; 4519 + case AMDGPU_RING_TYPE_KIQ: 4520 + cmd = (1 << 16); /* no inc addr */ 4521 + break; 4522 + default: 4523 + cmd = WR_CONFIRM; 4524 + break; 4525 + } 4526 + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4527 + amdgpu_ring_write(ring, cmd); 4528 + amdgpu_ring_write(ring, reg); 4529 + amdgpu_ring_write(ring, 0); 4530 + amdgpu_ring_write(ring, val); 4531 + } 4532 + 4533 + static void gfx_v10_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 4534 + uint32_t val, uint32_t mask) 4535 + { 4536 + gfx_v10_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); 4537 + } 4538 + 4539 + static void 4540 + gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 4541 + uint32_t me, uint32_t pipe, 4542 + enum amdgpu_interrupt_state state) 4543 + { 4544 + uint32_t cp_int_cntl, cp_int_cntl_reg; 4545 + 4546 + if (!me) { 4547 + switch (pipe) { 4548 + case 0: 4549 + cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0); 4550 + break; 4551 + case 1: 4552 + cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING1); 4553 + break; 4554 + default: 4555 + DRM_DEBUG("invalid pipe %d\n", pipe); 4556 + return; 4557 + } 4558 + } else { 4559 + DRM_DEBUG("invalid me %d\n", me); 4560 + return; 4561 + } 4562 + 4563 + switch (state) { 4564 + case AMDGPU_IRQ_STATE_DISABLE: 4565 + cp_int_cntl = RREG32(cp_int_cntl_reg); 4566 + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4567 + TIME_STAMP_INT_ENABLE, 0); 4568 + WREG32(cp_int_cntl_reg, cp_int_cntl); break; /* don't fall through to the enable case */ 4569 + case AMDGPU_IRQ_STATE_ENABLE: 4570 + cp_int_cntl = RREG32(cp_int_cntl_reg); 4571 + cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4572 + TIME_STAMP_INT_ENABLE, 1); 4573 + WREG32(cp_int_cntl_reg, cp_int_cntl); 4574 + break; 4575 + default: 4576 + break; 4577 + } 4578 + } 4579 + 4580 + static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, 4581 + int me, int pipe, 4582 + enum amdgpu_interrupt_state state) 4583 + { 4584 + u32 mec_int_cntl, mec_int_cntl_reg; 4585 + 4586 + /* 4587 + * amdgpu controls only the first MEC. That's why this function only 4588 + * handles the setting of interrupts for this specific MEC. All other 4589 + * pipes' interrupts are set by amdkfd.
4590 + */ 4591 + 4592 + if (me == 1) { 4593 + switch (pipe) { 4594 + case 0: 4595 + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL); 4596 + break; 4597 + case 1: 4598 + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL); 4599 + break; 4600 + case 2: 4601 + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL); 4602 + break; 4603 + case 3: 4604 + mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL); 4605 + break; 4606 + default: 4607 + DRM_DEBUG("invalid pipe %d\n", pipe); 4608 + return; 4609 + } 4610 + } else { 4611 + DRM_DEBUG("invalid me %d\n", me); 4612 + return; 4613 + } 4614 + 4615 + switch (state) { 4616 + case AMDGPU_IRQ_STATE_DISABLE: 4617 + mec_int_cntl = RREG32(mec_int_cntl_reg); 4618 + mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4619 + TIME_STAMP_INT_ENABLE, 0); 4620 + WREG32(mec_int_cntl_reg, mec_int_cntl); 4621 + break; 4622 + case AMDGPU_IRQ_STATE_ENABLE: 4623 + mec_int_cntl = RREG32(mec_int_cntl_reg); 4624 + mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, 4625 + TIME_STAMP_INT_ENABLE, 1); 4626 + WREG32(mec_int_cntl_reg, mec_int_cntl); 4627 + break; 4628 + default: 4629 + break; 4630 + } 4631 + } 4632 + 4633 + static int gfx_v10_0_set_eop_interrupt_state(struct amdgpu_device *adev, 4634 + struct amdgpu_irq_src *src, 4635 + unsigned type, 4636 + enum amdgpu_interrupt_state state) 4637 + { 4638 + switch (type) { 4639 + case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP: 4640 + gfx_v10_0_set_gfx_eop_interrupt_state(adev, 0, 0, state); 4641 + break; 4642 + case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP: 4643 + gfx_v10_0_set_gfx_eop_interrupt_state(adev, 0, 1, state); 4644 + break; 4645 + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 4646 + gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 0, state); 4647 + break; 4648 + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 4649 + gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 1, state); 4650 + break; 4651 + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: 4652 + gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 2, state); 4653 + break; 4654 + case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: 4655 + gfx_v10_0_set_compute_eop_interrupt_state(adev, 1, 3, state); 4656 + break; 4657 + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP: 4658 + gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 0, state); 4659 + break; 4660 + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP: 4661 + gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 1, state); 4662 + break; 4663 + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP: 4664 + gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 2, state); 4665 + break; 4666 + case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP: 4667 + gfx_v10_0_set_compute_eop_interrupt_state(adev, 2, 3, state); 4668 + break; 4669 + default: 4670 + break; 4671 + } 4672 + return 0; 4673 + } 4674 + 4675 + static int gfx_v10_0_eop_irq(struct amdgpu_device *adev, 4676 + struct amdgpu_irq_src *source, 4677 + struct amdgpu_iv_entry *entry) 4678 + { 4679 + int i; 4680 + u8 me_id, pipe_id, queue_id; 4681 + struct amdgpu_ring *ring; 4682 + 4683 + DRM_DEBUG("IH: CP EOP\n"); 4684 + me_id = (entry->ring_id & 0x0c) >> 2; 4685 + pipe_id = (entry->ring_id & 0x03) >> 0; 4686 + queue_id = (entry->ring_id & 0x70) >> 4; 4687 + 4688 + switch (me_id) { 4689 + case 0: 4690 + if (pipe_id == 0) 4691 + amdgpu_fence_process(&adev->gfx.gfx_ring[0]); 4692 + else 4693 + amdgpu_fence_process(&adev->gfx.gfx_ring[1]); 4694 + break; 4695 + case 1: 4696 + case 2: 4697 + for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4698 + ring = 
&adev->gfx.compute_ring[i]; 4699 + /* Per-queue interrupt is supported for MEC starting from VI. 4700 + * The interrupt can only be enabled/disabled per pipe instead of per queue. 4701 + */ 4702 + if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id)) 4703 + amdgpu_fence_process(ring); 4704 + } 4705 + break; 4706 + } 4707 + return 0; 4708 + } 4709 + 4710 + static int gfx_v10_0_set_priv_reg_fault_state(struct amdgpu_device *adev, 4711 + struct amdgpu_irq_src *source, 4712 + unsigned type, 4713 + enum amdgpu_interrupt_state state) 4714 + { 4715 + switch (state) { 4716 + case AMDGPU_IRQ_STATE_DISABLE: 4717 + case AMDGPU_IRQ_STATE_ENABLE: 4718 + WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, 4719 + PRIV_REG_INT_ENABLE, 4720 + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); 4721 + break; 4722 + default: 4723 + break; 4724 + } 4725 + 4726 + return 0; 4727 + } 4728 + 4729 + static int gfx_v10_0_set_priv_inst_fault_state(struct amdgpu_device *adev, 4730 + struct amdgpu_irq_src *source, 4731 + unsigned type, 4732 + enum amdgpu_interrupt_state state) 4733 + { 4734 + switch (state) { 4735 + case AMDGPU_IRQ_STATE_DISABLE: 4736 + case AMDGPU_IRQ_STATE_ENABLE: 4737 + WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, 4738 + PRIV_INSTR_INT_ENABLE, 4739 + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); break; 4740 + default: 4741 + break; 4742 + } 4743 + 4744 + return 0; 4745 + } 4746 + 4747 + static void gfx_v10_0_handle_priv_fault(struct amdgpu_device *adev, 4748 + struct amdgpu_iv_entry *entry) 4749 + { 4750 + u8 me_id, pipe_id, queue_id; 4751 + struct amdgpu_ring *ring; 4752 + int i; 4753 + 4754 + me_id = (entry->ring_id & 0x0c) >> 2; 4755 + pipe_id = (entry->ring_id & 0x03) >> 0; 4756 + queue_id = (entry->ring_id & 0x70) >> 4; 4757 + 4758 + switch (me_id) { 4759 + case 0: 4760 + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4761 + ring = &adev->gfx.gfx_ring[i]; 4762 + /* we only enabled 1 gfx queue per pipe for now */ 4763 + if (ring->me == me_id && ring->pipe == pipe_id) 4764 + drm_sched_fault(&ring->sched); 4765 + } 4766 + break; 4767 + case 1: 4768 + case 2: 4769 + for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4770 + ring = &adev->gfx.compute_ring[i]; 4771 + if (ring->me == me_id && ring->pipe == pipe_id && 4772 + ring->queue == queue_id) 4773 + drm_sched_fault(&ring->sched); 4774 + } 4775 + break; 4776 + default: 4777 + BUG(); 4778 + } 4779 + } 4780 + 4781 + static int gfx_v10_0_priv_reg_irq(struct amdgpu_device *adev, 4782 + struct amdgpu_irq_src *source, 4783 + struct amdgpu_iv_entry *entry) 4784 + { 4785 + DRM_ERROR("Illegal register access in command stream\n"); 4786 + gfx_v10_0_handle_priv_fault(adev, entry); 4787 + return 0; 4788 + } 4789 + 4790 + static int gfx_v10_0_priv_inst_irq(struct amdgpu_device *adev, 4791 + struct amdgpu_irq_src *source, 4792 + struct amdgpu_iv_entry *entry) 4793 + { 4794 + DRM_ERROR("Illegal instruction in command stream\n"); 4795 + gfx_v10_0_handle_priv_fault(adev, entry); 4796 + return 0; 4797 + } 4798 + 4799 + static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev, 4800 + struct amdgpu_irq_src *src, 4801 + unsigned int type, 4802 + enum amdgpu_interrupt_state state) 4803 + { 4804 + uint32_t tmp, target; 4805 + struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); 4806 + 4807 + if (ring->me == 1) 4808 + target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL); 4809 + else 4810 + target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL); 4811 + target += ring->pipe; 4812 + 4813 + switch (type) { 4814 + case AMDGPU_CP_KIQ_IRQ_DRIVER0: 4815 + if
(state == AMDGPU_IRQ_STATE_DISABLE) { 4816 + tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL); 4817 + tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 4818 + GENERIC2_INT_ENABLE, 0); 4819 + WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp); 4820 + 4821 + tmp = RREG32(target); 4822 + tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, 4823 + GENERIC2_INT_ENABLE, 0); 4824 + WREG32(target, tmp); 4825 + } else { 4826 + tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL); 4827 + tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, 4828 + GENERIC2_INT_ENABLE, 1); 4829 + WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp); 4830 + 4831 + tmp = RREG32(target); 4832 + tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, 4833 + GENERIC2_INT_ENABLE, 1); 4834 + WREG32(target, tmp); 4835 + } 4836 + break; 4837 + default: 4838 + BUG(); /* the KIQ only supports GENERIC2_INT now */ 4839 + break; 4840 + } 4841 + return 0; 4842 + } 4843 + 4844 + static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev, 4845 + struct amdgpu_irq_src *source, 4846 + struct amdgpu_iv_entry *entry) 4847 + { 4848 + u8 me_id, pipe_id, queue_id; 4849 + struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); 4850 + 4851 + me_id = (entry->ring_id & 0x0c) >> 2; 4852 + pipe_id = (entry->ring_id & 0x03) >> 0; 4853 + queue_id = (entry->ring_id & 0x70) >> 4; 4854 + DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n", 4855 + me_id, pipe_id, queue_id); 4856 + 4857 + amdgpu_fence_process(ring); 4858 + return 0; 4859 + } 4860 + 4861 + static const struct amd_ip_funcs gfx_v10_0_ip_funcs = { 4862 + .name = "gfx_v10_0", 4863 + .early_init = gfx_v10_0_early_init, 4864 + .late_init = gfx_v10_0_late_init, 4865 + .sw_init = gfx_v10_0_sw_init, 4866 + .sw_fini = gfx_v10_0_sw_fini, 4867 + .hw_init = gfx_v10_0_hw_init, 4868 + .hw_fini = gfx_v10_0_hw_fini, 4869 + .suspend = gfx_v10_0_suspend, 4870 + .resume = gfx_v10_0_resume, 4871 + .is_idle = gfx_v10_0_is_idle, 4872 + .wait_for_idle = gfx_v10_0_wait_for_idle, 4873 + .soft_reset = gfx_v10_0_soft_reset, 4874 + .set_clockgating_state = gfx_v10_0_set_clockgating_state, 4875 + .set_powergating_state = gfx_v10_0_set_powergating_state, 4876 + .get_clockgating_state = gfx_v10_0_get_clockgating_state, 4877 + }; 4878 + 4879 + static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { 4880 + .type = AMDGPU_RING_TYPE_GFX, 4881 + .align_mask = 0xff, 4882 + .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4883 + .support_64bit_ptrs = true, 4884 + .vmhub = AMDGPU_GFXHUB, 4885 + .get_rptr = gfx_v10_0_ring_get_rptr_gfx, 4886 + .get_wptr = gfx_v10_0_ring_get_wptr_gfx, 4887 + .set_wptr = gfx_v10_0_ring_set_wptr_gfx, 4888 + .emit_frame_size = /* 242 dw maximum in total, assuming 16 IBs */ 4889 + 5 + /* COND_EXEC */ 4890 + 7 + /* PIPELINE_SYNC */ 4891 + SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4892 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4893 + 2 + /* VM_FLUSH */ 4894 + 8 + /* FENCE for VM_FLUSH */ 4895 + 20 + /* GDS switch */ 4896 + 4 + /* double SWITCH_BUFFER, 4897 + * the first COND_EXEC jumps to the place 4898 + * just prior to this double SWITCH_BUFFER 4899 + */ 4900 + 5 + /* COND_EXEC */ 4901 + 7 + /* HDP_flush */ 4902 + 4 + /* VGT_flush */ 4903 + 14 + /* CE_META */ 4904 + 31 + /* DE_META */ 4905 + 3 + /* CNTX_CTRL */ 4906 + 5 + /* HDP_INVL */ 4907 + 8 + 8 + /* FENCE x2 */ 4908 + 2, /* SWITCH_BUFFER */ 4909 + .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */ 4910 + .emit_ib = gfx_v10_0_ring_emit_ib_gfx, 4911 + .emit_fence = gfx_v10_0_ring_emit_fence, 4912 + .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync, 4913 + .emit_vm_flush = gfx_v10_0_ring_emit_vm_flush, 4914 + .emit_gds_switch =
gfx_v10_0_ring_emit_gds_switch, 4915 + .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush, 4916 + .test_ring = gfx_v10_0_ring_test_ring, 4917 + .test_ib = gfx_v10_0_ring_test_ib, 4918 + .insert_nop = amdgpu_ring_insert_nop, 4919 + .pad_ib = amdgpu_ring_generic_pad_ib, 4920 + .emit_switch_buffer = gfx_v10_0_ring_emit_sb, 4921 + .emit_cntxcntl = gfx_v10_0_ring_emit_cntxcntl, 4922 + .init_cond_exec = gfx_v10_0_ring_emit_init_cond_exec, 4923 + .patch_cond_exec = gfx_v10_0_ring_emit_patch_cond_exec, 4924 + .preempt_ib = gfx_v10_0_ring_preempt_ib, 4925 + .emit_tmz = gfx_v10_0_ring_emit_tmz, 4926 + .emit_wreg = gfx_v10_0_ring_emit_wreg, 4927 + .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, 4928 + }; 4929 + 4930 + static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { 4931 + .type = AMDGPU_RING_TYPE_COMPUTE, 4932 + .align_mask = 0xff, 4933 + .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4934 + .support_64bit_ptrs = true, 4935 + .vmhub = AMDGPU_GFXHUB, 4936 + .get_rptr = gfx_v10_0_ring_get_rptr_compute, 4937 + .get_wptr = gfx_v10_0_ring_get_wptr_compute, 4938 + .set_wptr = gfx_v10_0_ring_set_wptr_compute, 4939 + .emit_frame_size = 4940 + 20 + /* gfx_v10_0_ring_emit_gds_switch */ 4941 + 7 + /* gfx_v10_0_ring_emit_hdp_flush */ 4942 + 5 + /* hdp invalidate */ 4943 + 7 + /* gfx_v10_0_ring_emit_pipeline_sync */ 4944 + SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4945 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4946 + 2 + /* gfx_v10_0_ring_emit_vm_flush */ 4947 + 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */ 4948 + .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_compute */ 4949 + .emit_ib = gfx_v10_0_ring_emit_ib_compute, 4950 + .emit_fence = gfx_v10_0_ring_emit_fence, 4951 + .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync, 4952 + .emit_vm_flush = gfx_v10_0_ring_emit_vm_flush, 4953 + .emit_gds_switch = gfx_v10_0_ring_emit_gds_switch, 4954 + .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush, 4955 + .test_ring = gfx_v10_0_ring_test_ring, 4956 + .test_ib = gfx_v10_0_ring_test_ib, 4957 + .insert_nop = amdgpu_ring_insert_nop, 4958 + .pad_ib = amdgpu_ring_generic_pad_ib, 4959 + .emit_wreg = gfx_v10_0_ring_emit_wreg, 4960 + .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, 4961 + }; 4962 + 4963 + static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { 4964 + .type = AMDGPU_RING_TYPE_KIQ, 4965 + .align_mask = 0xff, 4966 + .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4967 + .support_64bit_ptrs = true, 4968 + .vmhub = AMDGPU_GFXHUB, 4969 + .get_rptr = gfx_v10_0_ring_get_rptr_compute, 4970 + .get_wptr = gfx_v10_0_ring_get_wptr_compute, 4971 + .set_wptr = gfx_v10_0_ring_set_wptr_compute, 4972 + .emit_frame_size = 4973 + 20 + /* gfx_v10_0_ring_emit_gds_switch */ 4974 + 7 + /* gfx_v10_0_ring_emit_hdp_flush */ 4975 + 5 + /*hdp invalidate */ 4976 + 7 + /* gfx_v10_0_ring_emit_pipeline_sync */ 4977 + SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + 4978 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 4979 + 2 + /* gfx_v10_0_ring_emit_vm_flush */ 4980 + 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */ 4981 + .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_compute */ 4982 + .emit_ib = gfx_v10_0_ring_emit_ib_compute, 4983 + .emit_fence = gfx_v10_0_ring_emit_fence_kiq, 4984 + .test_ring = gfx_v10_0_ring_test_ring, 4985 + .test_ib = gfx_v10_0_ring_test_ib, 4986 + .insert_nop = amdgpu_ring_insert_nop, 4987 + .pad_ib = amdgpu_ring_generic_pad_ib, 4988 + .emit_rreg = gfx_v10_0_ring_emit_rreg, 4989 + .emit_wreg = gfx_v10_0_ring_emit_wreg, 4990 + .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, 4991 + }; 4992 + 
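/*
 * Editorial sketch, not part of this patch: the emit_frame_size and
 * emit_ib_size totals in the tables above are worst-case dword counts
 * per frame and per IB.  A submission path can use them to reserve ring
 * space before emitting anything, roughly as below.
 * "example_reserve_frame" and its surrounding logic are hypothetical;
 * amdgpu_ring_alloc()/amdgpu_ring_commit() are the real helpers.
 */
static int example_reserve_frame(struct amdgpu_ring *ring, unsigned int num_ibs)
{
	/* worst case: one full frame plus num_ibs indirect buffers */
	unsigned int ndw = ring->funcs->emit_frame_size +
			   num_ibs * ring->funcs->emit_ib_size;
	int r;

	r = amdgpu_ring_alloc(ring, ndw); /* fails if the ring lacks space */
	if (r)
		return r;

	/* ... emit pipeline sync, VM flush, IBs and fences here ... */

	amdgpu_ring_commit(ring);
	return 0;
}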
static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v10_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v10_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v10_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v10_0_eop_irq_funcs = {
	.set = gfx_v10_0_set_eop_interrupt_state,
	.process = gfx_v10_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_reg_irq_funcs = {
	.set = gfx_v10_0_set_priv_reg_fault_state,
	.process = gfx_v10_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v10_0_priv_inst_irq_funcs = {
	.set = gfx_v10_0_set_priv_inst_fault_state,
	.process = gfx_v10_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v10_0_kiq_irq_funcs = {
	.set = gfx_v10_0_kiq_set_interrupt_state,
	.process = gfx_v10_0_kiq_irq,
};

static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v10_0_eop_irq_funcs;

	adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
	adev->gfx.kiq.irq.funcs = &gfx_v10_0_kiq_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v10_0_priv_inst_irq_funcs;
}

static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
		break;
	default:
		break;
	}
}

static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->gds.gds_size = 0x10000;
		break;
	default:
		adev->gds.gds_size = 0x10000;
		break;
	}

	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}

static void gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
							  u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;

	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 data, wgp_bitmask;

	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;

	wgp_bitmask =
		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);

	return (~data) & wgp_bitmask;
}

static u32 gfx_v10_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 wgp_idx, wgp_active_bitmap;
	u32 cu_bitmap_per_wgp, cu_active_bitmap;

	wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
	cu_active_bitmap = 0;

	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
		/* if a WGP is enabled, its 2 CUs are enabled */
		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
		if (wgp_active_bitmap & (1 << wgp_idx))
			cu_active_bitmap |= cu_bitmap_per_wgp;
	}

	return cu_active_bitmap;
}

static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	unsigned disable_masks[4 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 4 && j < 2)
				gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v10_0_get_cu_active_bitmap_per_sh(adev);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < adev->gfx.config.max_cu_per_sh)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v10_0_ip_funcs,
};
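The WGP-to-CU expansion in gfx_v10_0_get_cu_active_bitmap_per_sh() above can be sanity-checked in isolation. A minimal stand-alone sketch (a hypothetical userspace helper, restating the same packing in which each active WGP contributes two adjacent CU bits):

#include <stdint.h>
#include <stdio.h>

/* WGP bit n maps to CU bits 2n and 2n+1, mirroring the driver loop */
static uint32_t wgp_to_cu_bitmap(uint32_t wgp_active_bitmap)
{
	uint32_t cu_active_bitmap = 0;
	unsigned int wgp_idx;

	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++)
		if (wgp_active_bitmap & (1u << wgp_idx))
			cu_active_bitmap |= 3u << (2 * wgp_idx);

	return cu_active_bitmap;
}

int main(void)
{
	/* WGPs 0 and 2 active -> CUs 0, 1, 4 and 5 active, i.e. 0x33 */
	printf("0x%x\n", wgp_to_cu_bitmap(0x5));
	return 0;
}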
+29
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.h
···
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __GFX_V10_0_H__
#define __GFX_V10_0_H__

extern const struct amdgpu_ip_block_version gfx_v10_0_ip_block;

#endif
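The gfx_v10_0_ip_block exported by this header is meant to be registered by the SoC code; that hookup is a separate change and not part of this diff. As a rough sketch of the usual pattern (the CHIP_NAVI10 switch case shown is assumed nv.c context, not code from this patch):

	case CHIP_NAVI10:
		/* ... common, GMC, IH, PSP, SMU blocks added first ... */
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		/* ... SDMA, VCN and display blocks follow ... */
		break;

amdgpu_device_ip_block_add() appends the amdgpu_ip_block_version to the device's IP-block list, after which the amd_ip_funcs callbacks defined above drive init/fini, suspend/resume and clock/powergating for the GFX block.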