
drm/amdgpu: add initial support for UMSCH

Add basic data structures, dummy ring functions
and IP functions for UMSCH.
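
Note: this patch only defines umsch_mm_v4_0_ip_block; hooking it into the
device init path lands separately. A minimal sketch of that registration,
assuming the usual amdgpu IP discovery flow (not part of this diff):

	/* sketch only: done by a follow-up patch from the IP discovery code */
	amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);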

Implement sw_init (ring_init and init_microcode) and
hw_init (load_microcode); UMSCH can boot up now.

Implement hw_init (ring_start) and hw_fini (ring_stop);
UMSCH is ready for command submission now.
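
Every UMSCH API frame carries a completion fence that the firmware writes
back. A minimal sketch of a submission, mirroring the pattern used by
set_hw_resources() below (the helper name is illustrative; the caller is
assumed to have patched api_status with the next fence_drv.sync_seq):

	static int umsch_mm_submit_and_poll(struct amdgpu_umsch_mm *umsch, void *api_frame)
	{
		int r;

		/* copy the frame into the ring and bump the write pointer */
		r = amdgpu_umsch_mm_submit_pkt(umsch, api_frame, API_FRAME_SIZE_IN_DWORDS);
		if (r)
			return r;

		/* poll for the fence value the FW writes on completion */
		return amdgpu_umsch_mm_query_fence(umsch);
	}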

Implement set_hw_resources and add/remove_queue;
UMSCH is ready for scheduling now.
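
A hedged sketch of what a client of the scheduler would do, using the
dispatch macros from amdgpu_umsch_mm.h (all field values here are
illustrative placeholders, not values this patch programs):

	static int umsch_mm_add_queue_example(struct amdgpu_umsch_mm *umsch,
					      uint64_t mqd_addr, uint32_t db_off)
	{
		struct umsch_mm_add_queue_input in = {};
		int r;

		in.process_id = 0;			/* placeholder PASID */
		in.doorbell_offset_0 = db_off;		/* queue doorbell to watch */
		in.mqd_addr = mqd_addr;			/* GPU VA of the queue MQD */
		in.engine_type = UMSCH_SWIP_ENGINE_TYPE_VCN0;
		in.affinity_type = UMSCH_SWIP_AFFINITY_TYPE_ANY;
		in.context_global_priority_level = CONTEXT_PRIORITY_LEVEL_NORMAL;

		amdgpu_umsch_mm_lock(umsch);		/* serialize ring access */
		r = umsch_mm_add_queue(umsch, &in);	/* -> umsch_mm_v4_0_add_queue */
		amdgpu_umsch_mm_unlock(umsch);

		return r;
	}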

An aggregated doorbell is used to notify the UMSCH FW that
an unmapped queue at the corresponding priority level
(e.g., AGDB[0] for the real-time band) is updating its job.
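
A sketch of the notification side under that scheme: after updating an
unmapped queue's MQD, the submitter rings the aggregated doorbell for its
priority band so the FW rescans its queues (agdb_index and WDOORBELL32 come
from this patch; the value written is illustrative):

	static void umsch_mm_notify_unmapped_queue(struct amdgpu_device *adev,
						   enum UMSCH_CONTEXT_PRIORITY_LEVEL prio)
	{
		/* AGDB[prio] was programmed into VCN_AGDB_CTRLn at ring start */
		WDOORBELL32(adev->umsch_mm.agdb_index[prio], 1);
	}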

v2: squash together initial patches to avoid breaking the
build (Alex)

Signed-off-by: Lang Yu <Lang.Yu@amd.com>
Reviewed-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Veerabadhran Gopalakrishnan <Veerabadhran.Gopalakrishnan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Lang Yu, committed by Alex Deucher (3488c79b 2da1b04a)

6 files changed, 996 insertions(+)

drivers/gpu/drm/amd/amdgpu/Makefile (+5)
···
 	amdgpu_vpe.o \
 	vpe_v6_1.o
 
+# add UMSCH block
+amdgpu-y += \
+	amdgpu_umsch_mm.o \
+	umsch_mm_v4_0.o
+
 #
 # add ATHUB block
 amdgpu-y += \
drivers/gpu/drm/amd/amdgpu/amdgpu.h (+5)
···
 #include "amdgpu_vcn.h"
 #include "amdgpu_jpeg.h"
 #include "amdgpu_vpe.h"
+#include "amdgpu_umsch_mm.h"
 #include "amdgpu_gmc.h"
 #include "amdgpu_gfx.h"
 #include "amdgpu_sdma.h"
···
 
 	/* vpe */
 	struct amdgpu_vpe vpe;
+
+	/* umsch */
+	struct amdgpu_umsch_mm umsch_mm;
+	bool enable_umsch_mm;
 
 	/* firmwares */
 	struct amdgpu_firmware firmware;
drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c (new file, +361)
// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_umsch_mm.h"
#include "umsch_mm_v4_0.h"

int amdgpu_umsch_mm_submit_pkt(struct amdgpu_umsch_mm *umsch, void *pkt, int ndws)
{
	struct amdgpu_ring *ring = &umsch->ring;

	if (amdgpu_ring_alloc(ring, ndws))
		return -ENOMEM;

	amdgpu_ring_write_multiple(ring, pkt, ndws);
	amdgpu_ring_commit(ring);

	return 0;
}

int amdgpu_umsch_mm_query_fence(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_ring *ring = &umsch->ring;
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, adev->usec_timeout);
	if (r < 1) {
		dev_err(adev->dev, "ring umsch timeout, emitted fence %u\n",
			ring->fence_drv.sync_seq);
		return -ETIMEDOUT;
	}

	return 0;
}

static void umsch_mm_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_umsch_mm *umsch = (struct amdgpu_umsch_mm *)ring;
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
	else
		WREG32(umsch->rb_wptr, ring->wptr << 2);
}

static u64 umsch_mm_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_umsch_mm *umsch = (struct amdgpu_umsch_mm *)ring;
	struct amdgpu_device *adev = ring->adev;

	return RREG32(umsch->rb_rptr);
}

static u64 umsch_mm_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_umsch_mm *umsch = (struct amdgpu_umsch_mm *)ring;
	struct amdgpu_device *adev = ring->adev;

	return RREG32(umsch->rb_wptr);
}

static const struct amdgpu_ring_funcs umsch_v4_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_UMSCH_MM,
	.align_mask = 0,
	.nop = 0,
	.support_64bit_ptrs = false,
	.get_rptr = umsch_mm_ring_get_rptr,
	.get_wptr = umsch_mm_ring_get_wptr,
	.set_wptr = umsch_mm_ring_set_wptr,
	.insert_nop = amdgpu_ring_insert_nop,
};

int amdgpu_umsch_mm_ring_init(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_device *adev = container_of(umsch, struct amdgpu_device, umsch_mm);
	struct amdgpu_ring *ring = &umsch->ring;

	ring->vm_hub = AMDGPU_MMHUB0(0);
	ring->use_doorbell = 0;
	ring->no_scheduler = true;
	ring->doorbell_index = (AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1) + 6;

	snprintf(ring->name, sizeof(ring->name), "umsch");

	return amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT, NULL);
}

int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
{
	const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr;
	struct amdgpu_device *adev = umsch->ring.adev;
	const char *fw_name = NULL;
	int r;

	switch (adev->ip_versions[VCN_HWIP][0]) {
	case IP_VERSION(4, 0, 5):
		fw_name = "amdgpu/umsch_mm_4_0_0.bin";
		break;
	default:
		break;
	}

	r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, fw_name);
	if (r) {
		release_firmware(adev->umsch_mm.fw);
		adev->umsch_mm.fw = NULL;
		return r;
	}

	umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *)adev->umsch_mm.fw->data;

	adev->umsch_mm.ucode_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes);
	adev->umsch_mm.data_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes);

	adev->umsch_mm.irq_start_addr =
		le32_to_cpu(umsch_mm_hdr->umsch_mm_irq_start_addr_lo) |
		((uint64_t)(le32_to_cpu(umsch_mm_hdr->umsch_mm_irq_start_addr_hi)) << 32);
	adev->umsch_mm.uc_start_addr =
		le32_to_cpu(umsch_mm_hdr->umsch_mm_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(umsch_mm_hdr->umsch_mm_uc_start_addr_hi)) << 32);
	adev->umsch_mm.data_start_addr =
		le32_to_cpu(umsch_mm_hdr->umsch_mm_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(umsch_mm_hdr->umsch_mm_data_start_addr_hi)) << 32);

	return 0;
}

int amdgpu_umsch_mm_allocate_ucode_buffer(struct amdgpu_umsch_mm *umsch)
{
	const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr;
	struct amdgpu_device *adev = umsch->ring.adev;
	const __le32 *fw_data;
	uint32_t fw_size;
	int r;

	umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *)
		       adev->umsch_mm.fw->data;

	fw_data = (const __le32 *)(adev->umsch_mm.fw->data +
		  le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_offset_bytes));
	fw_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, fw_size,
				      4 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->umsch_mm.ucode_fw_obj,
				      &adev->umsch_mm.ucode_fw_gpu_addr,
				      (void **)&adev->umsch_mm.ucode_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create umsch_mm fw ucode bo\n", r);
		return r;
	}

	memcpy(adev->umsch_mm.ucode_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->umsch_mm.ucode_fw_obj);
	amdgpu_bo_unreserve(adev->umsch_mm.ucode_fw_obj);
	return 0;
}

int amdgpu_umsch_mm_allocate_ucode_data_buffer(struct amdgpu_umsch_mm *umsch)
{
	const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr;
	struct amdgpu_device *adev = umsch->ring.adev;
	const __le32 *fw_data;
	uint32_t fw_size;
	int r;

	umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *)
		       adev->umsch_mm.fw->data;

	fw_data = (const __le32 *)(adev->umsch_mm.fw->data +
		  le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_offset_bytes));
	fw_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes);

	r = amdgpu_bo_create_reserved(adev, fw_size,
				      64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->umsch_mm.data_fw_obj,
				      &adev->umsch_mm.data_fw_gpu_addr,
				      (void **)&adev->umsch_mm.data_fw_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create umsch_mm fw data bo\n", r);
		return r;
	}

	memcpy(adev->umsch_mm.data_fw_ptr, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->umsch_mm.data_fw_obj);
	amdgpu_bo_unreserve(adev->umsch_mm.data_fw_obj);
	return 0;
}

static void umsch_mm_agdb_index_init(struct amdgpu_device *adev)
{
	uint32_t umsch_mm_agdb_start;
	int i;

	umsch_mm_agdb_start = adev->doorbell_index.max_assignment + 1;
	umsch_mm_agdb_start = roundup(umsch_mm_agdb_start, 1024);
	umsch_mm_agdb_start += (AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1);

	for (i = 0; i < CONTEXT_PRIORITY_NUM_LEVELS; i++)
		adev->umsch_mm.agdb_index[i] = umsch_mm_agdb_start + i;
}

static int umsch_mm_init(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_device_wb_get(adev, &adev->umsch_mm.wb_index);
	if (r) {
		dev_err(adev->dev, "failed to alloc wb for umsch: %d\n", r);
		return r;
	}

	adev->umsch_mm.sch_ctx_gpu_addr = adev->wb.gpu_addr +
					  (adev->umsch_mm.wb_index * 4);

	mutex_init(&adev->umsch_mm.mutex_hidden);

	umsch_mm_agdb_index_init(adev);

	return 0;
}

static int umsch_mm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->ip_versions[VCN_HWIP][0]) {
	case IP_VERSION(4, 0, 5):
		umsch_mm_v4_0_set_funcs(&adev->umsch_mm);
		break;
	default:
		return -EINVAL;
	}

	adev->umsch_mm.ring.funcs = &umsch_v4_0_ring_funcs;
	umsch_mm_set_regs(&adev->umsch_mm);

	return 0;
}

static int umsch_mm_late_init(void *handle)
{
	return 0;
}

static int umsch_mm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = umsch_mm_init(adev);
	if (r)
		return r;

	r = umsch_mm_ring_init(&adev->umsch_mm);
	if (r)
		return r;

	r = umsch_mm_init_microcode(&adev->umsch_mm);
	if (r)
		return r;

	return 0;
}

static int umsch_mm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	release_firmware(adev->umsch_mm.fw);
	adev->umsch_mm.fw = NULL;

	amdgpu_ring_fini(&adev->umsch_mm.ring);

	mutex_destroy(&adev->umsch_mm.mutex_hidden);
	amdgpu_device_wb_free(adev, adev->umsch_mm.wb_index);

	return 0;
}

static int umsch_mm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		r = umsch_mm_load_microcode(&adev->umsch_mm);
		if (r)
			return r;
	}

	umsch_mm_ring_start(&adev->umsch_mm);

	r = umsch_mm_set_hw_resources(&adev->umsch_mm);
	if (r)
		return r;

	return 0;
}

static int umsch_mm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	umsch_mm_ring_stop(&adev->umsch_mm);

	amdgpu_bo_free_kernel(&adev->umsch_mm.data_fw_obj,
			      &adev->umsch_mm.data_fw_gpu_addr,
			      (void **)&adev->umsch_mm.data_fw_ptr);

	amdgpu_bo_free_kernel(&adev->umsch_mm.ucode_fw_obj,
			      &adev->umsch_mm.ucode_fw_gpu_addr,
			      (void **)&adev->umsch_mm.ucode_fw_ptr);
	return 0;
}

static const struct amd_ip_funcs umsch_mm_v4_0_ip_funcs = {
	.name = "umsch_mm_v4_0",
	.early_init = umsch_mm_early_init,
	.late_init = umsch_mm_late_init,
	.sw_init = umsch_mm_sw_init,
	.sw_fini = umsch_mm_sw_fini,
	.hw_init = umsch_mm_hw_init,
	.hw_fini = umsch_mm_hw_fini,
};

const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UMSCH_MM,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &umsch_mm_v4_0_ip_funcs,
};
drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h (new file, +210)
/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_UMSCH_MM_H__
#define __AMDGPU_UMSCH_MM_H__

enum UMSCH_SWIP_ENGINE_TYPE {
	UMSCH_SWIP_ENGINE_TYPE_VCN0 = 0,
	UMSCH_SWIP_ENGINE_TYPE_VCN1 = 1,
	UMSCH_SWIP_ENGINE_TYPE_VCN = 2,
	UMSCH_SWIP_ENGINE_TYPE_VPE = 3,
	UMSCH_SWIP_ENGINE_TYPE_MAX
};

enum UMSCH_SWIP_AFFINITY_TYPE {
	UMSCH_SWIP_AFFINITY_TYPE_ANY = 0,
	UMSCH_SWIP_AFFINITY_TYPE_VCN0 = 1,
	UMSCH_SWIP_AFFINITY_TYPE_VCN1 = 2,
	UMSCH_SWIP_AFFINITY_TYPE_MAX
};

enum UMSCH_CONTEXT_PRIORITY_LEVEL {
	CONTEXT_PRIORITY_LEVEL_IDLE = 0,
	CONTEXT_PRIORITY_LEVEL_NORMAL = 1,
	CONTEXT_PRIORITY_LEVEL_FOCUS = 2,
	CONTEXT_PRIORITY_LEVEL_REALTIME = 3,
	CONTEXT_PRIORITY_NUM_LEVELS
};

struct umsch_mm_set_resource_input {
	uint32_t vmid_mask_mm_vcn;
	uint32_t vmid_mask_mm_vpe;
	uint32_t logging_vmid;
	uint32_t engine_mask;
	union {
		struct {
			uint32_t disable_reset : 1;
			uint32_t disable_umsch_mm_log : 1;
			uint32_t reserved : 30;
		};
		uint32_t uint32_all;
	};
};

struct umsch_mm_add_queue_input {
	uint32_t process_id;
	uint64_t page_table_base_addr;
	uint64_t process_va_start;
	uint64_t process_va_end;
	uint64_t process_quantum;
	uint64_t process_csa_addr;
	uint64_t context_quantum;
	uint64_t context_csa_addr;
	uint32_t inprocess_context_priority;
	enum UMSCH_CONTEXT_PRIORITY_LEVEL context_global_priority_level;
	uint32_t doorbell_offset_0;
	uint32_t doorbell_offset_1;
	enum UMSCH_SWIP_ENGINE_TYPE engine_type;
	uint32_t affinity;
	enum UMSCH_SWIP_AFFINITY_TYPE affinity_type;
	uint64_t mqd_addr;
	uint64_t h_context;
	uint64_t h_queue;
	uint32_t vm_context_cntl;

	struct {
		uint32_t is_context_suspended : 1;
		uint32_t reserved : 31;
	};
};

struct umsch_mm_remove_queue_input {
	uint32_t doorbell_offset_0;
	uint32_t doorbell_offset_1;
	uint64_t context_csa_addr;
};

struct MQD_INFO {
	uint32_t rb_base_hi;
	uint32_t rb_base_lo;
	uint32_t rb_size;
	uint32_t wptr_val;
	uint32_t rptr_val;
	uint32_t unmapped;
};

struct amdgpu_umsch_mm;

struct umsch_mm_funcs {
	int (*set_hw_resources)(struct amdgpu_umsch_mm *umsch);
	int (*add_queue)(struct amdgpu_umsch_mm *umsch,
			 struct umsch_mm_add_queue_input *input);
	int (*remove_queue)(struct amdgpu_umsch_mm *umsch,
			    struct umsch_mm_remove_queue_input *input);
	int (*set_regs)(struct amdgpu_umsch_mm *umsch);
	int (*init_microcode)(struct amdgpu_umsch_mm *umsch);
	int (*load_microcode)(struct amdgpu_umsch_mm *umsch);
	int (*ring_init)(struct amdgpu_umsch_mm *umsch);
	int (*ring_start)(struct amdgpu_umsch_mm *umsch);
	int (*ring_stop)(struct amdgpu_umsch_mm *umsch);
	int (*ring_fini)(struct amdgpu_umsch_mm *umsch);
};

struct amdgpu_umsch_mm {
	struct amdgpu_ring ring;

	uint32_t rb_wptr;
	uint32_t rb_rptr;

	const struct umsch_mm_funcs *funcs;

	const struct firmware *fw;
	uint32_t fw_version;
	uint32_t feature_version;

	struct amdgpu_bo *ucode_fw_obj;
	uint64_t ucode_fw_gpu_addr;
	uint32_t *ucode_fw_ptr;
	uint64_t irq_start_addr;
	uint64_t uc_start_addr;
	uint32_t ucode_size;

	struct amdgpu_bo *data_fw_obj;
	uint64_t data_fw_gpu_addr;
	uint32_t *data_fw_ptr;
	uint64_t data_start_addr;
	uint32_t data_size;

	uint32_t wb_index;
	uint64_t sch_ctx_gpu_addr;
	uint32_t *sch_ctx_cpu_addr;

	uint32_t vmid_mask_mm_vcn;
	uint32_t vmid_mask_mm_vpe;
	uint32_t engine_mask;
	uint32_t vcn0_hqd_mask;
	uint32_t vcn1_hqd_mask;
	uint32_t vcn_hqd_mask[2];
	uint32_t vpe_hqd_mask;
	uint32_t agdb_index[CONTEXT_PRIORITY_NUM_LEVELS];

	struct mutex mutex_hidden;
};

int amdgpu_umsch_mm_submit_pkt(struct amdgpu_umsch_mm *umsch, void *pkt, int ndws);
int amdgpu_umsch_mm_query_fence(struct amdgpu_umsch_mm *umsch);

int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch);
int amdgpu_umsch_mm_allocate_ucode_buffer(struct amdgpu_umsch_mm *umsch);
int amdgpu_umsch_mm_allocate_ucode_data_buffer(struct amdgpu_umsch_mm *umsch);

int amdgpu_umsch_mm_ring_init(struct amdgpu_umsch_mm *umsch);

#define umsch_mm_set_hw_resources(umsch) \
	((umsch)->funcs->set_hw_resources ? (umsch)->funcs->set_hw_resources((umsch)) : 0)
#define umsch_mm_add_queue(umsch, input) \
	((umsch)->funcs->add_queue ? (umsch)->funcs->add_queue((umsch), (input)) : 0)
#define umsch_mm_remove_queue(umsch, input) \
	((umsch)->funcs->remove_queue ? (umsch)->funcs->remove_queue((umsch), (input)) : 0)

#define umsch_mm_set_regs(umsch) \
	((umsch)->funcs->set_regs ? (umsch)->funcs->set_regs((umsch)) : 0)
#define umsch_mm_init_microcode(umsch) \
	((umsch)->funcs->init_microcode ? (umsch)->funcs->init_microcode((umsch)) : 0)
#define umsch_mm_load_microcode(umsch) \
	((umsch)->funcs->load_microcode ? (umsch)->funcs->load_microcode((umsch)) : 0)

#define umsch_mm_ring_init(umsch) \
	((umsch)->funcs->ring_init ? (umsch)->funcs->ring_init((umsch)) : 0)
#define umsch_mm_ring_start(umsch) \
	((umsch)->funcs->ring_start ? (umsch)->funcs->ring_start((umsch)) : 0)
#define umsch_mm_ring_stop(umsch) \
	((umsch)->funcs->ring_stop ? (umsch)->funcs->ring_stop((umsch)) : 0)
#define umsch_mm_ring_fini(umsch) \
	((umsch)->funcs->ring_fini ? (umsch)->funcs->ring_fini((umsch)) : 0)

static inline void amdgpu_umsch_mm_lock(struct amdgpu_umsch_mm *umsch)
{
	mutex_lock(&umsch->mutex_hidden);
}

static inline void amdgpu_umsch_mm_unlock(struct amdgpu_umsch_mm *umsch)
{
	mutex_unlock(&umsch->mutex_hidden);
}

extern const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block;

#endif
drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c (new file, +385)
// SPDX-License-Identifier: MIT
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include "amdgpu.h"
#include "soc15_common.h"
#include "soc21.h"
#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"

#include "amdgpu_umsch_mm.h"
#include "umsch_mm_4_0_api_def.h"
#include "umsch_mm_v4_0.h"

static int umsch_mm_v4_0_load_microcode(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_device *adev = umsch->ring.adev;
	uint32_t data;
	int r;

	r = amdgpu_umsch_mm_allocate_ucode_buffer(umsch);
	if (r)
		return r;

	r = amdgpu_umsch_mm_allocate_ucode_data_buffer(umsch);
	if (r)
		goto err_free_ucode_bo;

	data = RREG32_SOC15(VCN, 0, regUMSCH_MES_RESET_CTRL);
	data = REG_SET_FIELD(data, UMSCH_MES_RESET_CTRL, MES_CORE_SOFT_RESET, 0);
	WREG32_SOC15(VCN, 0, regUMSCH_MES_RESET_CTRL, data);

	data = RREG32_SOC15(VCN, 0, regVCN_MES_CNTL);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_INVALIDATE_ICACHE, 1);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_PIPE0_RESET, 1);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_PIPE0_ACTIVE, 0);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_HALT, 1);
	WREG32_SOC15(VCN, 0, regVCN_MES_CNTL, data);

	data = RREG32_SOC15(VCN, 0, regVCN_MES_IC_BASE_CNTL);
	data = REG_SET_FIELD(data, VCN_MES_IC_BASE_CNTL, VMID, 0);
	data = REG_SET_FIELD(data, VCN_MES_IC_BASE_CNTL, EXE_DISABLE, 0);
	data = REG_SET_FIELD(data, VCN_MES_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(VCN, 0, regVCN_MES_IC_BASE_CNTL, data);

	WREG32_SOC15(VCN, 0, regVCN_MES_INTR_ROUTINE_START,
		     lower_32_bits(adev->umsch_mm.irq_start_addr >> 2));
	WREG32_SOC15(VCN, 0, regVCN_MES_INTR_ROUTINE_START_HI,
		     upper_32_bits(adev->umsch_mm.irq_start_addr >> 2));

	WREG32_SOC15(VCN, 0, regVCN_MES_PRGRM_CNTR_START,
		     lower_32_bits(adev->umsch_mm.uc_start_addr >> 2));
	WREG32_SOC15(VCN, 0, regVCN_MES_PRGRM_CNTR_START_HI,
		     upper_32_bits(adev->umsch_mm.uc_start_addr >> 2));

	WREG32_SOC15(VCN, 0, regVCN_MES_LOCAL_INSTR_BASE_LO, 0);
	WREG32_SOC15(VCN, 0, regVCN_MES_LOCAL_INSTR_BASE_HI, 0);

	data = adev->umsch_mm.uc_start_addr + adev->umsch_mm.ucode_size - 1;
	WREG32_SOC15(VCN, 0, regVCN_MES_LOCAL_INSTR_MASK_LO, lower_32_bits(data));
	WREG32_SOC15(VCN, 0, regVCN_MES_LOCAL_INSTR_MASK_HI, upper_32_bits(data));

	WREG32_SOC15(VCN, 0, regVCN_MES_IC_BASE_LO,
		     lower_32_bits(adev->umsch_mm.ucode_fw_gpu_addr));
	WREG32_SOC15(VCN, 0, regVCN_MES_IC_BASE_HI,
		     upper_32_bits(adev->umsch_mm.ucode_fw_gpu_addr));

	WREG32_SOC15(VCN, 0, regVCN_MES_MIBOUND_LO, 0x1FFFFF);

	WREG32_SOC15(VCN, 0, regVCN_MES_LOCAL_BASE0_LO,
		     lower_32_bits(adev->umsch_mm.data_start_addr));
	WREG32_SOC15(VCN, 0, regVCN_MES_LOCAL_BASE0_HI,
		     upper_32_bits(adev->umsch_mm.data_start_addr));

	WREG32_SOC15(VCN, 0, regVCN_MES_LOCAL_MASK0_LO,
		     lower_32_bits(adev->umsch_mm.data_size - 1));
	WREG32_SOC15(VCN, 0, regVCN_MES_LOCAL_MASK0_HI,
		     upper_32_bits(adev->umsch_mm.data_size - 1));

	WREG32_SOC15(VCN, 0, regVCN_MES_DC_BASE_LO,
		     lower_32_bits(adev->umsch_mm.data_fw_gpu_addr));
	WREG32_SOC15(VCN, 0, regVCN_MES_DC_BASE_HI,
		     upper_32_bits(adev->umsch_mm.data_fw_gpu_addr));

	WREG32_SOC15(VCN, 0, regVCN_MES_MDBOUND_LO, 0x3FFFF);

	data = RREG32_SOC15(VCN, 0, regUVD_UMSCH_FORCE);
	data = REG_SET_FIELD(data, UVD_UMSCH_FORCE, IC_FORCE_GPUVM, 1);
	data = REG_SET_FIELD(data, UVD_UMSCH_FORCE, DC_FORCE_GPUVM, 1);
	WREG32_SOC15(VCN, 0, regUVD_UMSCH_FORCE, data);

	data = RREG32_SOC15(VCN, 0, regVCN_MES_IC_OP_CNTL);
	data = REG_SET_FIELD(data, VCN_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
	data = REG_SET_FIELD(data, VCN_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(VCN, 0, regVCN_MES_IC_OP_CNTL, data);

	data = RREG32_SOC15(VCN, 0, regVCN_MES_IC_OP_CNTL);
	data = REG_SET_FIELD(data, VCN_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
	WREG32_SOC15(VCN, 0, regVCN_MES_IC_OP_CNTL, data);

	WREG32_SOC15(VCN, 0, regVCN_MES_GP0_LO, 0);
	WREG32_SOC15(VCN, 0, regVCN_MES_GP0_HI, 0);

	WREG32_SOC15(VCN, 0, regVCN_MES_GP1_LO, 0);
	WREG32_SOC15(VCN, 0, regVCN_MES_GP1_HI, 0);

	data = RREG32_SOC15(VCN, 0, regVCN_MES_CNTL);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_INVALIDATE_ICACHE, 0);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_PIPE0_RESET, 0);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_HALT, 0);
	data = REG_SET_FIELD(data, VCN_MES_CNTL, MES_PIPE0_ACTIVE, 1);
	WREG32_SOC15(VCN, 0, regVCN_MES_CNTL, data);

	r = SOC15_WAIT_ON_RREG(VCN, 0, regVCN_MES_MSTATUS_LO, 0xAAAAAAAA, 0xFFFFFFFF);
	if (r) {
		dev_err(adev->dev, "UMSCH FW Load: Failed, regVCN_MES_MSTATUS_LO: 0x%08x\n",
			RREG32_SOC15(VCN, 0, regVCN_MES_MSTATUS_LO));
		goto err_free_data_bo;
	}

	return 0;

err_free_data_bo:
	amdgpu_bo_free_kernel(&adev->umsch_mm.data_fw_obj,
			      &adev->umsch_mm.data_fw_gpu_addr,
			      (void **)&adev->umsch_mm.data_fw_ptr);
err_free_ucode_bo:
	amdgpu_bo_free_kernel(&adev->umsch_mm.ucode_fw_obj,
			      &adev->umsch_mm.ucode_fw_gpu_addr,
			      (void **)&adev->umsch_mm.ucode_fw_ptr);
	return r;
}

static void umsch_mm_v4_0_aggregated_doorbell_init(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_device *adev = umsch->ring.adev;
	uint32_t data;

	data = RREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL0);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL0, OFFSET,
			     umsch->agdb_index[CONTEXT_PRIORITY_LEVEL_REALTIME]);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL0, EN, 1);
	WREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL0, data);

	data = RREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL1);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL1, OFFSET,
			     umsch->agdb_index[CONTEXT_PRIORITY_LEVEL_FOCUS]);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL1, EN, 1);
	WREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL1, data);

	data = RREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL2);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL2, OFFSET,
			     umsch->agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL]);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL2, EN, 1);
	WREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL2, data);

	data = RREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL3);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL3, OFFSET,
			     umsch->agdb_index[CONTEXT_PRIORITY_LEVEL_IDLE]);
	data = REG_SET_FIELD(data, VCN_AGDB_CTRL3, EN, 1);
	WREG32_SOC15(VCN, 0, regVCN_AGDB_CTRL3, data);
}

static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_ring *ring = &umsch->ring;
	struct amdgpu_device *adev = ring->adev;
	uint32_t data;

	data = RREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL);
	data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, OFFSET, ring->doorbell_index);
	data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, EN, 1);
	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL, data);

	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, ring->doorbell_index, 0);

	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_BASE_HI, upper_32_bits(ring->gpu_addr));

	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size);

	data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
	data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK);
	WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);

	umsch_mm_v4_0_aggregated_doorbell_init(umsch);

	return 0;
}

static int umsch_mm_v4_0_ring_stop(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_ring *ring = &umsch->ring;
	struct amdgpu_device *adev = ring->adev;
	uint32_t data;

	data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
	data = REG_SET_FIELD(data, VCN_RB_ENABLE, UMSCH_RB_EN, 0);
	WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);

	data = RREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL);
	data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, EN, 0);
	WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL, data);

	return 0;
}

static int umsch_mm_v4_0_set_hw_resources(struct amdgpu_umsch_mm *umsch)
{
	union UMSCHAPI__SET_HW_RESOURCES set_hw_resources = {};
	struct amdgpu_device *adev = umsch->ring.adev;
	int r;

	set_hw_resources.header.type = UMSCH_API_TYPE_SCHEDULER;
	set_hw_resources.header.opcode = UMSCH_API_SET_HW_RSRC;
	set_hw_resources.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	set_hw_resources.vmid_mask_mm_vcn = umsch->vmid_mask_mm_vcn;
	set_hw_resources.vmid_mask_mm_vpe = umsch->vmid_mask_mm_vpe;
	set_hw_resources.engine_mask = umsch->engine_mask;

	set_hw_resources.vcn0_hqd_mask[0] = umsch->vcn0_hqd_mask;
	set_hw_resources.vcn1_hqd_mask[0] = umsch->vcn1_hqd_mask;
	set_hw_resources.vcn_hqd_mask[0] = umsch->vcn_hqd_mask[0];
	set_hw_resources.vcn_hqd_mask[1] = umsch->vcn_hqd_mask[1];
	set_hw_resources.vpe_hqd_mask[0] = umsch->vpe_hqd_mask;

	set_hw_resources.g_sch_ctx_gpu_mc_ptr = umsch->sch_ctx_gpu_addr;

	set_hw_resources.enable_level_process_quantum_check = 1;

	memcpy(set_hw_resources.mmhub_base, adev->reg_offset[MMHUB_HWIP][0],
	       sizeof(uint32_t) * 5);
	set_hw_resources.mmhub_version = adev->ip_versions[MMHUB_HWIP][0];

	memcpy(set_hw_resources.osssys_base, adev->reg_offset[OSSSYS_HWIP][0],
	       sizeof(uint32_t) * 5);
	set_hw_resources.osssys_version = adev->ip_versions[OSSSYS_HWIP][0];

	set_hw_resources.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr;
	set_hw_resources.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;

	r = amdgpu_umsch_mm_submit_pkt(umsch, &set_hw_resources.max_dwords_in_api,
				       API_FRAME_SIZE_IN_DWORDS);
	if (r)
		return r;

	r = amdgpu_umsch_mm_query_fence(umsch);
	if (r) {
		dev_err(adev->dev, "UMSCH SET_HW_RESOURCES: Failed\n");
		return r;
	}

	return 0;
}

static int umsch_mm_v4_0_add_queue(struct amdgpu_umsch_mm *umsch,
				   struct umsch_mm_add_queue_input *input_ptr)
{
	struct amdgpu_device *adev = umsch->ring.adev;
	union UMSCHAPI__ADD_QUEUE add_queue = {};
	int r;

	add_queue.header.type = UMSCH_API_TYPE_SCHEDULER;
	add_queue.header.opcode = UMSCH_API_ADD_QUEUE;
	add_queue.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	add_queue.process_id = input_ptr->process_id;
	add_queue.page_table_base_addr = input_ptr->page_table_base_addr;
	add_queue.process_va_start = input_ptr->process_va_start;
	add_queue.process_va_end = input_ptr->process_va_end;
	add_queue.process_quantum = input_ptr->process_quantum;
	add_queue.process_csa_addr = input_ptr->process_csa_addr;
	add_queue.context_quantum = input_ptr->context_quantum;
	add_queue.context_csa_addr = input_ptr->context_csa_addr;
	add_queue.inprocess_context_priority = input_ptr->inprocess_context_priority;
	add_queue.context_global_priority_level =
		(enum UMSCH_AMD_PRIORITY_LEVEL)input_ptr->context_global_priority_level;
	add_queue.doorbell_offset_0 = input_ptr->doorbell_offset_0;
	add_queue.doorbell_offset_1 = input_ptr->doorbell_offset_1;
	add_queue.affinity.u32All = input_ptr->affinity;
	add_queue.mqd_addr = input_ptr->mqd_addr;
	add_queue.engine_type = (enum UMSCH_ENGINE_TYPE)input_ptr->engine_type;
	add_queue.h_context = input_ptr->h_context;
	add_queue.h_queue = input_ptr->h_queue;
	add_queue.vm_context_cntl = input_ptr->vm_context_cntl;
	add_queue.is_context_suspended = input_ptr->is_context_suspended;

	add_queue.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr;
	add_queue.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;

	r = amdgpu_umsch_mm_submit_pkt(umsch, &add_queue.max_dwords_in_api,
				       API_FRAME_SIZE_IN_DWORDS);
	if (r)
		return r;

	r = amdgpu_umsch_mm_query_fence(umsch);
	if (r) {
		dev_err(adev->dev, "UMSCH ADD_QUEUE: Failed\n");
		return r;
	}

	return 0;
}

static int umsch_mm_v4_0_remove_queue(struct amdgpu_umsch_mm *umsch,
				      struct umsch_mm_remove_queue_input *input_ptr)
{
	union UMSCHAPI__REMOVE_QUEUE remove_queue = {};
	struct amdgpu_device *adev = umsch->ring.adev;
	int r;

	remove_queue.header.type = UMSCH_API_TYPE_SCHEDULER;
	remove_queue.header.opcode = UMSCH_API_REMOVE_QUEUE;
	remove_queue.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	remove_queue.doorbell_offset_0 = input_ptr->doorbell_offset_0;
	remove_queue.doorbell_offset_1 = input_ptr->doorbell_offset_1;
	remove_queue.context_csa_addr = input_ptr->context_csa_addr;

	remove_queue.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr;
	remove_queue.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;

	r = amdgpu_umsch_mm_submit_pkt(umsch, &remove_queue.max_dwords_in_api,
				       API_FRAME_SIZE_IN_DWORDS);
	if (r)
		return r;

	r = amdgpu_umsch_mm_query_fence(umsch);
	if (r) {
		dev_err(adev->dev, "UMSCH REMOVE_QUEUE: Failed\n");
		return r;
	}

	return 0;
}

static int umsch_mm_v4_0_set_regs(struct amdgpu_umsch_mm *umsch)
{
	struct amdgpu_device *adev = container_of(umsch, struct amdgpu_device, umsch_mm);

	umsch->rb_wptr = SOC15_REG_OFFSET(VCN, 0, regVCN_UMSCH_RB_WPTR);
	umsch->rb_rptr = SOC15_REG_OFFSET(VCN, 0, regVCN_UMSCH_RB_RPTR);

	return 0;
}

static const struct umsch_mm_funcs umsch_mm_v4_0_funcs = {
	.set_hw_resources = umsch_mm_v4_0_set_hw_resources,
	.add_queue = umsch_mm_v4_0_add_queue,
	.remove_queue = umsch_mm_v4_0_remove_queue,
	.set_regs = umsch_mm_v4_0_set_regs,
	.init_microcode = amdgpu_umsch_mm_init_microcode,
	.load_microcode = umsch_mm_v4_0_load_microcode,
	.ring_init = amdgpu_umsch_mm_ring_init,
	.ring_start = umsch_mm_v4_0_ring_start,
	.ring_stop = umsch_mm_v4_0_ring_stop,
};

void umsch_mm_v4_0_set_funcs(struct amdgpu_umsch_mm *umsch)
{
	umsch->funcs = &umsch_mm_v4_0_funcs;
}
drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.h (new file, +30)
/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __UMSCH_MM_V4_0_H__
#define __UMSCH_MM_V4_0_H__

void umsch_mm_v4_0_set_funcs(struct amdgpu_umsch_mm *umsch);

#endif