/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include "amdgpu.h"
#include "soc15_common.h"
#include "soc21.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "gc/gc_11_0_0_default.h"
#include "v11_structs.h"
#include "mes_v11_api_def.h"

MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_1_mes1.bin");


static int mes_v11_0_hw_fini(void *handle);
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);

#define MES_EOP_SIZE		2048
#define GFX_MES_DRAM_SIZE	0x80000

static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
			     ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG();
	}
}

static u64 mes_v11_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	return *ring->rptr_cpu_addr;
}

static u64 mes_v11_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	u64 wptr;

	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
	else
		BUG();
	return wptr;
}

static const struct amdgpu_ring_funcs mes_v11_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_MES,
	.align_mask = 1,
	.nop = 0,
	.support_64bit_ptrs = true,
	.get_rptr = mes_v11_0_ring_get_rptr,
	.get_wptr = mes_v11_0_ring_get_wptr,
	.set_wptr = mes_v11_0_ring_set_wptr,
	.insert_nop = amdgpu_ring_insert_nop,
};

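/*
 * Human-readable names for the scheduler API opcodes and the MISC
 * sub-opcodes, indexed by their MES_SCH_API_* / MESAPI_MISC__* values.
 * They are only used to make the dbg/err messages below readable.
 */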
static const char *mes_v11_0_opcodes[] = {
	"SET_HW_RSRC",
	"SET_SCHEDULING_CONFIG",
	"ADD_QUEUE",
	"REMOVE_QUEUE",
	"PERFORM_YIELD",
	"SET_GANG_PRIORITY_LEVEL",
	"SUSPEND",
	"RESUME",
	"RESET",
	"SET_LOG_BUFFER",
	"CHANGE_GANG_PRORITY",
	"QUERY_SCHEDULER_STATUS",
	"PROGRAM_GDS",
	"SET_DEBUG_VMID",
	"MISC",
	"UPDATE_ROOT_PAGE_TABLE",
	"AMD_LOG",
};

static const char *mes_v11_0_misc_opcodes[] = {
	"WRITE_REG",
	"INV_GART",
	"QUERY_STATUS",
	"READ_REG",
	"WAIT_REG_MEM",
	"SET_SHADER_DEBUGGER",
};

static const char *mes_v11_0_get_op_string(union MESAPI__MISC *x_pkt)
{
	const char *op_str = NULL;

	if (x_pkt->header.opcode < ARRAY_SIZE(mes_v11_0_opcodes))
		op_str = mes_v11_0_opcodes[x_pkt->header.opcode];

	return op_str;
}

static const char *mes_v11_0_get_misc_op_string(union MESAPI__MISC *x_pkt)
{
	const char *op_str = NULL;

	if ((x_pkt->header.opcode == MES_SCH_API_MISC) &&
	    (x_pkt->opcode < ARRAY_SIZE(mes_v11_0_misc_opcodes)))
		op_str = mes_v11_0_misc_opcodes[x_pkt->opcode];

	return op_str;
}

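/*
 * Submit one API packet to the MES scheduler ring and wait for it to be
 * consumed.  A writeback slot is used as the packet's completion fence,
 * and a trailing QUERY_SCHEDULER_STATUS packet bumps the ring's fence
 * sequence so completion can be detected by polling the ring fence.  If
 * neither signals within the timeout, the call fails with -ETIMEDOUT.
 */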
static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
						    void *pkt, int size,
						    int api_status_off)
{
	union MESAPI__QUERY_MES_STATUS mes_status_pkt;
	signed long timeout = 3000000; /* 3000 ms */
	struct amdgpu_device *adev = mes->adev;
	struct amdgpu_ring *ring = &mes->ring;
	struct MES_API_STATUS *api_status;
	union MESAPI__MISC *x_pkt = pkt;
	const char *op_str, *misc_op_str;
	unsigned long flags;
	u64 status_gpu_addr;
	u32 status_offset;
	u64 *status_ptr;
	signed long r;
	int ret;

	if (x_pkt->header.opcode >= MES_SCH_API_MAX)
		return -EINVAL;

	if (amdgpu_emu_mode) {
		timeout *= 100;
	} else if (amdgpu_sriov_vf(adev)) {
		/* Worst case in SR-IOV: all other 15 VFs time out, each needing about 600ms */
		timeout = 15 * 600 * 1000;
	}

	ret = amdgpu_device_wb_get(adev, &status_offset);
	if (ret)
		return ret;

	status_gpu_addr = adev->wb.gpu_addr + (status_offset * 4);
	status_ptr = (u64 *)&adev->wb.wb[status_offset];
	*status_ptr = 0;

	spin_lock_irqsave(&mes->ring_lock, flags);
	r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
	if (r)
		goto error_unlock_free;

	api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
	api_status->api_completion_fence_addr = status_gpu_addr;
	api_status->api_completion_fence_value = 1;

	amdgpu_ring_write_multiple(ring, pkt, size / 4);

	memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));
	mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
	mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
	mes_status_pkt.api_status.api_completion_fence_addr =
		ring->fence_drv.gpu_addr;
	mes_status_pkt.api_status.api_completion_fence_value =
		++ring->fence_drv.sync_seq;

	amdgpu_ring_write_multiple(ring, &mes_status_pkt,
				   sizeof(mes_status_pkt) / 4);

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&mes->ring_lock, flags);

	op_str = mes_v11_0_get_op_string(x_pkt);
	misc_op_str = mes_v11_0_get_misc_op_string(x_pkt);

	if (misc_op_str)
		dev_dbg(adev->dev, "MES msg=%s (%s) was emitted\n", op_str,
			misc_op_str);
	else if (op_str)
		dev_dbg(adev->dev, "MES msg=%s was emitted\n", op_str);
	else
		dev_dbg(adev->dev, "MES msg=%d was emitted\n",
			x_pkt->header.opcode);

	r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, timeout);
	if (r < 1 || !*status_ptr) {
		if (misc_op_str)
			dev_err(adev->dev, "MES failed to respond to msg=%s (%s)\n",
				op_str, misc_op_str);
		else if (op_str)
			dev_err(adev->dev, "MES failed to respond to msg=%s\n",
				op_str);
		else
			dev_err(adev->dev, "MES failed to respond to msg=%d\n",
				x_pkt->header.opcode);

		while (halt_if_hws_hang)
			schedule();

		r = -ETIMEDOUT;
		goto error_wb_free;
	}

	amdgpu_device_wb_free(adev, status_offset);
	return 0;

error_unlock_free:
	spin_unlock_irqrestore(&mes->ring_lock, flags);

error_wb_free:
	amdgpu_device_wb_free(adev, status_offset);
	return r;
}

static int convert_to_mes_queue_type(int queue_type)
{
	if (queue_type == AMDGPU_RING_TYPE_GFX)
		return MES_QUEUE_TYPE_GFX;
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		return MES_QUEUE_TYPE_COMPUTE;
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		return MES_QUEUE_TYPE_SDMA;
	else
		BUG();
	return -1;
}

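/*
 * Build an ADD_QUEUE packet from the generic amdgpu_mes input and hand the
 * queue over to the MES scheduler.  Firmware reporting an API version of 2
 * or newer expects the wptr MC address; older firmware takes
 * input->wptr_addr directly.
 */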
static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
				  struct mes_add_queue_input *input)
{
	struct amdgpu_device *adev = mes->adev;
	union MESAPI__ADD_QUEUE mes_add_queue_pkt;
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
	uint32_t vm_cntx_cntl = hub->vm_cntx_cntl;

	memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));

	mes_add_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_add_queue_pkt.header.opcode = MES_SCH_API_ADD_QUEUE;
	mes_add_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_add_queue_pkt.process_id = input->process_id;
	mes_add_queue_pkt.page_table_base_addr = input->page_table_base_addr;
	mes_add_queue_pkt.process_va_start = input->process_va_start;
	mes_add_queue_pkt.process_va_end = input->process_va_end;
	mes_add_queue_pkt.process_quantum = input->process_quantum;
	mes_add_queue_pkt.process_context_addr = input->process_context_addr;
	mes_add_queue_pkt.gang_quantum = input->gang_quantum;
	mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
	mes_add_queue_pkt.inprocess_gang_priority =
		input->inprocess_gang_priority;
	mes_add_queue_pkt.gang_global_priority_level =
		input->gang_global_priority_level;
	mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_add_queue_pkt.mqd_addr = input->mqd_addr;

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
	     AMDGPU_MES_API_VERSION_SHIFT) >= 2)
		mes_add_queue_pkt.wptr_addr = input->wptr_mc_addr;
	else
		mes_add_queue_pkt.wptr_addr = input->wptr_addr;

	mes_add_queue_pkt.queue_type =
		convert_to_mes_queue_type(input->queue_type);
	mes_add_queue_pkt.paging = input->paging;
	mes_add_queue_pkt.vm_context_cntl = vm_cntx_cntl;
	mes_add_queue_pkt.gws_base = input->gws_base;
	mes_add_queue_pkt.gws_size = input->gws_size;
	mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
	mes_add_queue_pkt.tma_addr = input->tma_addr;
	mes_add_queue_pkt.trap_en = input->trap_en;
	mes_add_queue_pkt.skip_process_ctx_clear = input->skip_process_ctx_clear;
	mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;

	/* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
	mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
	mes_add_queue_pkt.gds_size = input->queue_size;

	mes_add_queue_pkt.exclusively_scheduled = input->exclusively_scheduled;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
			offsetof(union MESAPI__ADD_QUEUE, api_status));
}

static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
				     struct mes_remove_queue_input *input)
{
	union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;

	memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));

	mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
	mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
			offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}

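/*
 * Legacy (kernel-managed) queues are not owned by the scheduler, so they
 * are torn down with a REMOVE_QUEUE packet carrying no gang context.  With
 * PREEMPT_QUEUES_NO_UNMAP the queue is only preempted and a trailing fence
 * value is written; otherwise it is fully unmapped.
 */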
static int mes_v11_0_unmap_legacy_queue(struct amdgpu_mes *mes,
					struct mes_unmap_legacy_queue_input *input)
{
	union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;

	memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));

	mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
	mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_remove_queue_pkt.gang_context_addr = 0;

	mes_remove_queue_pkt.pipe_id = input->pipe_id;
	mes_remove_queue_pkt.queue_id = input->queue_id;

	if (input->action == PREEMPT_QUEUES_NO_UNMAP) {
		mes_remove_queue_pkt.preempt_legacy_gfx_queue = 1;
		mes_remove_queue_pkt.tf_addr = input->trail_fence_addr;
		mes_remove_queue_pkt.tf_data =
			lower_32_bits(input->trail_fence_data);
	} else {
		mes_remove_queue_pkt.unmap_legacy_queue = 1;
		mes_remove_queue_pkt.queue_type =
			convert_to_mes_queue_type(input->queue_type);
	}

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
			offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}

static int mes_v11_0_suspend_gang(struct amdgpu_mes *mes,
				  struct mes_suspend_gang_input *input)
{
	return 0;
}

static int mes_v11_0_resume_gang(struct amdgpu_mes *mes,
				 struct mes_resume_gang_input *input)
{
	return 0;
}

static int mes_v11_0_query_sched_status(struct amdgpu_mes *mes)
{
	union MESAPI__QUERY_MES_STATUS mes_status_pkt;

	memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));

	mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
	mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_status_pkt, sizeof(mes_status_pkt),
			offsetof(union MESAPI__QUERY_MES_STATUS, api_status));
}

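/*
 * Translate a generic mes_misc_op_input into a MISC API packet (register
 * read/write, write-and-wait, or shader debugger setup).  As an
 * illustration only (the variable names below are hypothetical, not taken
 * from this file), a register read request is typically filled in like
 * this before calling adev->mes.funcs->misc_op:
 *
 *	struct mes_misc_op_input op = {
 *		.op = MES_MISC_OP_READ_REG,
 *		.read_reg.reg_offset = reg,
 *		.read_reg.buffer_addr = wb_gpu_addr,
 *	};
 */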
static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
			     struct mes_misc_op_input *input)
{
	union MESAPI__MISC misc_pkt;

	memset(&misc_pkt, 0, sizeof(misc_pkt));

	misc_pkt.header.type = MES_API_TYPE_SCHEDULER;
	misc_pkt.header.opcode = MES_SCH_API_MISC;
	misc_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	switch (input->op) {
	case MES_MISC_OP_READ_REG:
		misc_pkt.opcode = MESAPI_MISC__READ_REG;
		misc_pkt.read_reg.reg_offset = input->read_reg.reg_offset;
		misc_pkt.read_reg.buffer_addr = input->read_reg.buffer_addr;
		break;
	case MES_MISC_OP_WRITE_REG:
		misc_pkt.opcode = MESAPI_MISC__WRITE_REG;
		misc_pkt.write_reg.reg_offset = input->write_reg.reg_offset;
		misc_pkt.write_reg.reg_value = input->write_reg.reg_value;
		break;
	case MES_MISC_OP_WRM_REG_WAIT:
		misc_pkt.opcode = MESAPI_MISC__WAIT_REG_MEM;
		misc_pkt.wait_reg_mem.op = WRM_OPERATION__WAIT_REG_MEM;
		misc_pkt.wait_reg_mem.reference = input->wrm_reg.ref;
		misc_pkt.wait_reg_mem.mask = input->wrm_reg.mask;
		misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0;
		misc_pkt.wait_reg_mem.reg_offset2 = 0;
		break;
	case MES_MISC_OP_WRM_REG_WR_WAIT:
		misc_pkt.opcode = MESAPI_MISC__WAIT_REG_MEM;
		misc_pkt.wait_reg_mem.op = WRM_OPERATION__WR_WAIT_WR_REG;
		misc_pkt.wait_reg_mem.reference = input->wrm_reg.ref;
		misc_pkt.wait_reg_mem.mask = input->wrm_reg.mask;
		misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0;
		misc_pkt.wait_reg_mem.reg_offset2 = input->wrm_reg.reg1;
		break;
	case MES_MISC_OP_SET_SHADER_DEBUGGER:
		misc_pkt.opcode = MESAPI_MISC__SET_SHADER_DEBUGGER;
		misc_pkt.set_shader_debugger.process_context_addr =
			input->set_shader_debugger.process_context_addr;
		misc_pkt.set_shader_debugger.flags.u32all =
			input->set_shader_debugger.flags.u32all;
		misc_pkt.set_shader_debugger.spi_gdbg_per_vmid_cntl =
			input->set_shader_debugger.spi_gdbg_per_vmid_cntl;
		memcpy(misc_pkt.set_shader_debugger.tcp_watch_cntl,
		       input->set_shader_debugger.tcp_watch_cntl,
		       sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl));
		misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en;
		break;
	default:
		DRM_ERROR("unsupported misc op (%d) \n", input->op);
		return -EINVAL;
	}

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&misc_pkt, sizeof(misc_pkt),
			offsetof(union MESAPI__MISC, api_status));
}

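/*
 * Tell the scheduler firmware which hardware resources it owns: VMID masks
 * for both hubs, the compute/gfx/SDMA HQD masks, aggregated doorbells, the
 * scheduler context and query-status fence buffers, and the register
 * apertures (GC/MMHUB/OSSSYS bases).  Event logging is only enabled when
 * amdgpu_mes_log_enable is set.
 */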
static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
{
	int i;
	struct amdgpu_device *adev = mes->adev;
	union MESAPI_SET_HW_RESOURCES mes_set_hw_res_pkt;

	memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));

	mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC;
	mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub;
	mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
	mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
	mes_set_hw_res_pkt.paging_vmid = 0;
	mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr;
	mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
		mes->query_status_fence_gpu_addr;

	for (i = 0; i < MAX_COMPUTE_PIPES; i++)
		mes_set_hw_res_pkt.compute_hqd_mask[i] =
			mes->compute_hqd_mask[i];

	for (i = 0; i < MAX_GFX_PIPES; i++)
		mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i];

	for (i = 0; i < MAX_SDMA_PIPES; i++)
		mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i];

	for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
		mes_set_hw_res_pkt.aggregated_doorbells[i] =
			mes->aggregated_doorbells[i];

	for (i = 0; i < 5; i++) {
		mes_set_hw_res_pkt.gc_base[i] = adev->reg_offset[GC_HWIP][0][i];
		mes_set_hw_res_pkt.mmhub_base[i] =
			adev->reg_offset[MMHUB_HWIP][0][i];
		mes_set_hw_res_pkt.osssys_base[i] =
			adev->reg_offset[OSSSYS_HWIP][0][i];
	}

	mes_set_hw_res_pkt.disable_reset = 1;
	mes_set_hw_res_pkt.disable_mes_log = 1;
	mes_set_hw_res_pkt.use_different_vmid_compute = 1;
	mes_set_hw_res_pkt.enable_reg_active_poll = 1;
	mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
	mes_set_hw_res_pkt.oversubscription_timer = 50;
	if (amdgpu_mes_log_enable) {
		mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
		mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
			mes->event_log_gpu_addr;
	}

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
			offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
}

static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes)
{
	int size = 128 * PAGE_SIZE;
	int ret = 0;
	struct amdgpu_device *adev = mes->adev;
	union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_pkt;

	memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));

	mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1;
	mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
	mes_set_hw_res_pkt.enable_mes_info_ctx = 1;

	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &mes->resource_1,
				      &mes->resource_1_gpu_addr,
				      &mes->resource_1_addr);
	if (ret) {
		dev_err(adev->dev, "(%d) failed to create mes resource_1 bo\n", ret);
		return ret;
	}

	mes_set_hw_res_pkt.mes_info_ctx_mc_addr = mes->resource_1_gpu_addr;
	mes_set_hw_res_pkt.mes_info_ctx_size = mes->resource_1->tbo.base.size;

	return mes_v11_0_submit_pkt_and_poll_completion(mes,
			&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
			offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
}

static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
	.add_hw_queue = mes_v11_0_add_hw_queue,
	.remove_hw_queue = mes_v11_0_remove_hw_queue,
	.unmap_legacy_queue = mes_v11_0_unmap_legacy_queue,
	.suspend_gang = mes_v11_0_suspend_gang,
	.resume_gang = mes_v11_0_resume_gang,
	.misc_op = mes_v11_0_misc_op,
};

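/*
 * Back-door firmware loading helpers: copy the MES instruction and data
 * images out of the request_firmware() blob into dedicated VRAM/GTT buffer
 * objects whose GPU addresses are later programmed into the CP base
 * registers (see mes_v11_0_load_microcode()).
 */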
static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
					   enum admgpu_mes_pipe pipe)
{
	int r;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	const __le32 *fw_data;
	unsigned fw_size;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;

	fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
		   le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
	fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, fw_size,
				      PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->mes.ucode_fw_obj[pipe],
				      &adev->mes.ucode_fw_gpu_addr[pipe],
				      (void **)&adev->mes.ucode_fw_ptr[pipe]);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mes fw bo\n", r);
		return r;
	}

	memcpy(adev->mes.ucode_fw_ptr[pipe], fw_data, fw_size);

	amdgpu_bo_kunmap(adev->mes.ucode_fw_obj[pipe]);
	amdgpu_bo_unreserve(adev->mes.ucode_fw_obj[pipe]);

	return 0;
}

static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
						enum admgpu_mes_pipe pipe)
{
	int r;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	const __le32 *fw_data;
	unsigned fw_size;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;

	fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
		   le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
	fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

	if (fw_size > GFX_MES_DRAM_SIZE) {
		dev_err(adev->dev, "PIPE%d ucode data fw size (%d) is greater than dram size (%d)\n",
			pipe, fw_size, GFX_MES_DRAM_SIZE);
		return -EINVAL;
	}

	r = amdgpu_bo_create_reserved(adev, GFX_MES_DRAM_SIZE,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->mes.data_fw_obj[pipe],
				      &adev->mes.data_fw_gpu_addr[pipe],
				      (void **)&adev->mes.data_fw_ptr[pipe]);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mes data fw bo\n", r);
		return r;
	}

	memcpy(adev->mes.data_fw_ptr[pipe], fw_data, fw_size);

	amdgpu_bo_kunmap(adev->mes.data_fw_obj[pipe]);
	amdgpu_bo_unreserve(adev->mes.data_fw_obj[pipe]);

	return 0;
}

static void mes_v11_0_free_ucode_buffers(struct amdgpu_device *adev,
					 enum admgpu_mes_pipe pipe)
{
	amdgpu_bo_free_kernel(&adev->mes.data_fw_obj[pipe],
			      &adev->mes.data_fw_gpu_addr[pipe],
			      (void **)&adev->mes.data_fw_ptr[pipe]);

	amdgpu_bo_free_kernel(&adev->mes.ucode_fw_obj[pipe],
			      &adev->mes.ucode_fw_gpu_addr[pipe],
			      (void **)&adev->mes.ucode_fw_ptr[pipe]);
}

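/*
 * Take the MES pipes out of (or put them into) reset.  On enable, the
 * per-pipe ucode start address is programmed before the pipes are unhalted
 * and activated; pipe1 is only touched when the MES KIQ is in use.  On
 * disable, both pipes are deactivated, the instruction cache is
 * invalidated and MES is halted.
 */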
static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
{
	uint64_t ucode_addr;
	uint32_t pipe, data = 0;

	if (enable) {
		data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
		data = REG_SET_FIELD(data, CP_MES_CNTL,
				     MES_PIPE1_RESET, adev->enable_mes_kiq ? 1 : 0);
		WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);

		mutex_lock(&adev->srbm_mutex);
		for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
			if (!adev->enable_mes_kiq &&
			    pipe == AMDGPU_MES_KIQ_PIPE)
				continue;

			soc21_grbm_select(adev, 3, pipe, 0, 0);

			ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
			WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
				     lower_32_bits(ucode_addr));
			WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
				     upper_32_bits(ucode_addr));
		}
		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		/* unhalt MES and activate pipe0 */
		data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE,
				     adev->enable_mes_kiq ? 1 : 0);
		WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);

		if (amdgpu_emu_mode)
			msleep(100);
		else
			udelay(50);
	} else {
		data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_ACTIVE, 0);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 0);
		data = REG_SET_FIELD(data, CP_MES_CNTL,
				     MES_INVALIDATE_ICACHE, 1);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET,
				     adev->enable_mes_kiq ? 1 : 0);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_HALT, 1);
		WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
	}
}

/* This function is for backdoor MES firmware */
static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
				    enum admgpu_mes_pipe pipe, bool prime_icache)
{
	int r;
	uint32_t data;
	uint64_t ucode_addr;

	mes_v11_0_enable(adev, false);

	if (!adev->mes.fw[pipe])
		return -EINVAL;

	r = mes_v11_0_allocate_ucode_buffer(adev, pipe);
	if (r)
		return r;

	r = mes_v11_0_allocate_ucode_data_buffer(adev, pipe);
	if (r) {
		mes_v11_0_free_ucode_buffers(adev, pipe);
		return r;
	}

	mutex_lock(&adev->srbm_mutex);
	/* select me=3, the given pipe, queue=0 */
	soc21_grbm_select(adev, 3, pipe, 0, 0);

	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_CNTL, 0);

	/* set ucode start address */
	ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
	WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
		     lower_32_bits(ucode_addr));
	WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
		     upper_32_bits(ucode_addr));

	/* set ucode firmware address */
	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_LO,
		     lower_32_bits(adev->mes.ucode_fw_gpu_addr[pipe]));
	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_HI,
		     upper_32_bits(adev->mes.ucode_fw_gpu_addr[pipe]));

	/* set ucode instruction cache boundary to 2M-1 */
	WREG32_SOC15(GC, 0, regCP_MES_MIBOUND_LO, 0x1FFFFF);

	/* set ucode data firmware address */
	WREG32_SOC15(GC, 0, regCP_MES_MDBASE_LO,
		     lower_32_bits(adev->mes.data_fw_gpu_addr[pipe]));
	WREG32_SOC15(GC, 0, regCP_MES_MDBASE_HI,
		     upper_32_bits(adev->mes.data_fw_gpu_addr[pipe]));

	/* Set 0x7FFFF (512K-1) to CP_MES_MDBOUND_LO */
	WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x7FFFF);

	if (prime_icache) {
		/* invalidate ICACHE */
		data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
		WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);

		/* prime the ICACHE. */
		data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
		WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
	}

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	return 0;
}

static int mes_v11_0_allocate_eop_buf(struct amdgpu_device *adev,
				      enum admgpu_mes_pipe pipe)
{
	int r;
	u32 *eop;

	r = amdgpu_bo_create_reserved(adev, MES_EOP_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->mes.eop_gpu_obj[pipe],
				      &adev->mes.eop_gpu_addr[pipe],
				      (void **)&eop);
	if (r) {
		dev_warn(adev->dev, "(%d) create EOP bo failed\n", r);
		return r;
	}

	memset(eop, 0,
	       adev->mes.eop_gpu_obj[pipe]->tbo.base.size);

	amdgpu_bo_kunmap(adev->mes.eop_gpu_obj[pipe]);
	amdgpu_bo_unreserve(adev->mes.eop_gpu_obj[pipe]);

	return 0;
}

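/*
 * Initialize the memory queue descriptor (MQD) for the MES ring.  The MQD
 * holds the CP_HQD_* register values (EOP buffer, ring base, rptr/wptr
 * writeback addresses, doorbell setup) that either the KIQ or
 * mes_v11_0_queue_init_register() later programs into the hardware queue
 * slot.
 */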
static int mes_v11_0_mqd_init(struct amdgpu_ring *ring)
{
	struct v11_compute_mqd *mqd = ring->mqd_ptr;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	memset(mqd, 0, sizeof(*mqd));

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000007;

	eop_base_addr = ring->eop_gpu_addr >> 8;

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			    (order_base_2(MES_EOP_SIZE / 4) - 1));

	mqd->cp_hqd_eop_base_addr_lo = lower_32_bits(eop_base_addr);
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
	mqd->cp_hqd_eop_control = tmp;

	/* disable the queue if it's active */
	ring->wptr = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = regCP_MQD_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = lower_32_bits(hqd_gpu_addr);
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = ring->rptr_gpu_addr;
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = ring->wptr_gpu_addr;
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffff8;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			    ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, NO_UPDATE_RPTR, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* enable doorbell */
	tmp = 0;
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	} else
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	mqd->cp_hqd_pq_doorbell_control = tmp;

	mqd->cp_hqd_vmid = 0;
	/* activate the queue */
	mqd->cp_hqd_active = 1;

	tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE,
			    PRELOAD_SIZE, 0x55);
	mqd->cp_hqd_persistent_state = tmp;

	mqd->cp_hqd_ib_control = regCP_HQD_IB_CONTROL_DEFAULT;
	mqd->cp_hqd_iq_timer = regCP_HQD_IQ_TIMER_DEFAULT;
	mqd->cp_hqd_quantum = regCP_HQD_QUANTUM_DEFAULT;

	amdgpu_device_flush_hdp(ring->adev, NULL);
	return 0;
}

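/*
 * Program the MQD contents into the hardware queue registers directly via
 * MMIO.  This path is used for the KIQ pipe; the scheduler pipe is mapped
 * through the KIQ instead (see mes_v11_0_kiq_enable_queue()).
 */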
static void mes_v11_0_queue_init_register(struct amdgpu_ring *ring)
{
	struct v11_compute_mqd *mqd = ring->mqd_ptr;
	struct amdgpu_device *adev = ring->adev;
	uint32_t data = 0;

	mutex_lock(&adev->srbm_mutex);
	soc21_grbm_select(adev, 3, ring->pipe, 0, 0);

	/* set CP_HQD_VMID.VMID = 0. */
	data = RREG32_SOC15(GC, 0, regCP_HQD_VMID);
	data = REG_SET_FIELD(data, CP_HQD_VMID, VMID, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_VMID, data);

	/* set CP_HQD_PQ_DOORBELL_CONTROL.DOORBELL_EN=0 */
	data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
	data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
			     DOORBELL_EN, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* set CP_MQD_BASE_ADDR/HI with the MQD base address */
	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);

	/* set CP_MQD_CONTROL.VMID=0 */
	data = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
	data = REG_SET_FIELD(data, CP_MQD_CONTROL, VMID, 0);
	WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, 0);

	/* set CP_HQD_PQ_BASE/HI with the ring buffer base address */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);

	/* set CP_HQD_PQ_RPTR_REPORT_ADDR/HI */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
		     mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		     mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* set CP_HQD_PQ_CONTROL */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control);

	/* set CP_HQD_PQ_WPTR_POLL_ADDR/HI */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
		     mqd->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
		     mqd->cp_hqd_pq_wptr_poll_addr_hi);

	/* set CP_HQD_PQ_DOORBELL_CONTROL */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
		     mqd->cp_hqd_pq_doorbell_control);

	/* set CP_HQD_PERSISTENT_STATE (PRELOAD_SIZE comes from the MQD) */
	WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state);

	/* set CP_HQD_ACTIVE.ACTIVE=1 */
	WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, mqd->cp_hqd_active);

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
	int r;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
		return -EINVAL;

	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}

	kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring);

	return amdgpu_ring_test_helper(kiq_ring);
}

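/*
 * Bring up the queue for the given MES pipe: initialize its MQD, map it
 * (through the KIQ for the scheduler pipe, via direct register writes for
 * the KIQ pipe itself) and then read the firmware version back from
 * CP_MES_GP3_LO.
 */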
static int mes_v11_0_queue_init(struct amdgpu_device *adev,
				enum admgpu_mes_pipe pipe)
{
	struct amdgpu_ring *ring;
	int r;

	if (pipe == AMDGPU_MES_KIQ_PIPE)
		ring = &adev->gfx.kiq[0].ring;
	else if (pipe == AMDGPU_MES_SCHED_PIPE)
		ring = &adev->mes.ring;
	else
		BUG();

	if ((pipe == AMDGPU_MES_SCHED_PIPE) &&
	    (amdgpu_in_reset(adev) || adev->in_suspend)) {
		*(ring->wptr_cpu_addr) = 0;
		*(ring->rptr_cpu_addr) = 0;
		amdgpu_ring_clear_ring(ring);
	}

	r = mes_v11_0_mqd_init(ring);
	if (r)
		return r;

	if (pipe == AMDGPU_MES_SCHED_PIPE) {
		r = mes_v11_0_kiq_enable_queue(adev);
		if (r)
			return r;
	} else {
		mes_v11_0_queue_init_register(ring);
	}

	/* get MES scheduler/KIQ versions */
	mutex_lock(&adev->srbm_mutex);
	soc21_grbm_select(adev, 3, pipe, 0, 0);

	if (pipe == AMDGPU_MES_SCHED_PIPE)
		adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
	else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
		adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	return 0;
}

static int mes_v11_0_ring_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;

	ring = &adev->mes.ring;

	ring->funcs = &mes_v11_0_ring_funcs;

	ring->me = 3;
	ring->pipe = 0;
	ring->queue = 0;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.mes_ring0 << 1;
	ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_SCHED_PIPE];
	ring->no_scheduler = true;
	sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
				AMDGPU_RING_PRIO_DEFAULT, NULL);
}

static int mes_v11_0_kiq_ring_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;

	spin_lock_init(&adev->gfx.kiq[0].ring_lock);

	ring = &adev->gfx.kiq[0].ring;

	ring->me = 3;
	ring->pipe = 1;
	ring->queue = 0;

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.mes_ring1 << 1;
	ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_KIQ_PIPE];
	ring->no_scheduler = true;
	sprintf(ring->name, "mes_kiq_%d.%d.%d",
		ring->me, ring->pipe, ring->queue);

	return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
				AMDGPU_RING_PRIO_DEFAULT, NULL);
}

static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
				 enum admgpu_mes_pipe pipe)
{
	int r, mqd_size = sizeof(struct v11_compute_mqd);
	struct amdgpu_ring *ring;

	if (pipe == AMDGPU_MES_KIQ_PIPE)
		ring = &adev->gfx.kiq[0].ring;
	else if (pipe == AMDGPU_MES_SCHED_PIPE)
		ring = &adev->mes.ring;
	else
		BUG();

	if (ring->mqd_obj)
		return 0;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
				    &ring->mqd_gpu_addr, &ring->mqd_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
		return r;
	}

	memset(ring->mqd_ptr, 0, mqd_size);

	/* prepare MQD backup */
	adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL);
	if (!adev->mes.mqd_backup[pipe]) {
		dev_warn(adev->dev,
			 "no memory to create MQD backup for ring %s\n",
			 ring->name);
		return -ENOMEM;
	}

	return 0;
}

static int mes_v11_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int pipe, r;

	adev->mes.funcs = &mes_v11_0_funcs;
	adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init;
	adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini;

	r = amdgpu_mes_init(adev);
	if (r)
		return r;

	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
			continue;

		r = mes_v11_0_allocate_eop_buf(adev, pipe);
		if (r)
			return r;

		r = mes_v11_0_mqd_sw_init(adev, pipe);
		if (r)
			return r;
	}

	if (adev->enable_mes_kiq) {
		r = mes_v11_0_kiq_ring_init(adev);
		if (r)
			return r;
	}

	r = mes_v11_0_ring_init(adev);
	if (r)
		return r;

	return 0;
}

static int mes_v11_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int pipe;

	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);

	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
		kfree(adev->mes.mqd_backup[pipe]);

		amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
				      &adev->mes.eop_gpu_addr[pipe],
				      NULL);
		amdgpu_ucode_release(&adev->mes.fw[pipe]);
	}

	amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
			      &adev->gfx.kiq[0].ring.mqd_gpu_addr,
			      &adev->gfx.kiq[0].ring.mqd_ptr);

	amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
			      &adev->mes.ring.mqd_gpu_addr,
			      &adev->mes.ring.mqd_ptr);

	amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
	amdgpu_ring_fini(&adev->mes.ring);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		mes_v11_0_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE);
		mes_v11_0_free_ucode_buffers(adev, AMDGPU_MES_SCHED_PIPE);
	}

	amdgpu_mes_fini(adev);
	return 0;
}

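/*
 * Deactivate a queue that was programmed through mes_v11_0_queue_init():
 * request a dequeue, poll CP_HQD_ACTIVE until the queue drains (bounded by
 * adev->usec_timeout), then clear the doorbell control and ring pointers.
 */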
static void mes_v11_0_kiq_dequeue(struct amdgpu_ring *ring)
{
	uint32_t data;
	int i;
	struct amdgpu_device *adev = ring->adev;

	mutex_lock(&adev->srbm_mutex);
	soc21_grbm_select(adev, 3, ring->pipe, 0, 0);

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
	}
	data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
	data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
			     DOORBELL_EN, 0);
	data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
			     DOORBELL_HIT, 1);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);

	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0);

	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 0);

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is KIQ queue */
	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
}

static void mes_v11_0_kiq_clear(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* tell RLC which is KIQ dequeue */
	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
	tmp &= ~RLC_CP_SCHEDULERS__scheduler0_MASK;
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
}

static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {

		r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE, false);
		if (r) {
			DRM_ERROR("failed to load MES fw, r=%d\n", r);
			return r;
		}

		r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE, true);
		if (r) {
			DRM_ERROR("failed to load MES kiq fw, r=%d\n", r);
			return r;
		}

	}

	mes_v11_0_enable(adev, true);

	mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);

	r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
	if (r)
		goto failure;

	return r;

failure:
	mes_v11_0_hw_fini(adev);
	return r;
}

static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
{
	if (adev->mes.ring.sched.ready) {
		mes_v11_0_kiq_dequeue(&adev->mes.ring);
		adev->mes.ring.sched.ready = false;
	}

	if (amdgpu_sriov_vf(adev)) {
		mes_v11_0_kiq_dequeue(&adev->gfx.kiq[0].ring);
		mes_v11_0_kiq_clear(adev);
	}

	mes_v11_0_enable(adev, false);

	return 0;
}

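/*
 * Full MES bring-up for the scheduler pipe: load firmware and enable the
 * pipes (only when the KIQ path has not already done so), initialize the
 * scheduler queue, describe the hardware resources to the firmware and
 * verify that it responds.  Once MES is running, the driver stops using
 * the KIQ ring directly.
 */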
static int mes_v11_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->enable_mes_kiq) {
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			r = mes_v11_0_load_microcode(adev,
						     AMDGPU_MES_SCHED_PIPE, true);
			if (r) {
				DRM_ERROR("failed to load MES fw, r=%d\n", r);
				return r;
			}
		}

		mes_v11_0_enable(adev, true);
	}

	r = mes_v11_0_queue_init(adev, AMDGPU_MES_SCHED_PIPE);
	if (r)
		goto failure;

	r = mes_v11_0_set_hw_resources(&adev->mes);
	if (r)
		goto failure;

	if (amdgpu_sriov_is_mes_info_enable(adev)) {
		r = mes_v11_0_set_hw_resources_1(&adev->mes);
		if (r) {
			DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
			goto failure;
		}
	}

	r = mes_v11_0_query_sched_status(&adev->mes);
	if (r) {
		DRM_ERROR("MES is busy\n");
		goto failure;
	}

	/*
	 * Disable KIQ ring usage from the driver once MES is enabled.
	 * MES uses KIQ ring exclusively so driver cannot access KIQ ring
	 * with MES enabled.
	 */
	adev->gfx.kiq[0].ring.sched.ready = false;
	adev->mes.ring.sched.ready = true;

	return 0;

failure:
	mes_v11_0_hw_fini(adev);
	return r;
}

static int mes_v11_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_is_mes_info_enable(adev)) {
		amdgpu_bo_free_kernel(&adev->mes.resource_1, &adev->mes.resource_1_gpu_addr,
				      &adev->mes.resource_1_addr);
	}
	return 0;
}

static int mes_v11_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_mes_suspend(adev);
	if (r)
		return r;

	return mes_v11_0_hw_fini(adev);
}

static int mes_v11_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = mes_v11_0_hw_init(adev);
	if (r)
		return r;

	return amdgpu_mes_resume(adev);
}

static int mes_v11_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int pipe, r;

	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
			continue;
		r = amdgpu_mes_init_microcode(adev, pipe);
		if (r)
			return r;
	}

	return 0;
}

static int mes_v11_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* it's only intended for use in mes_self_test case, not for s0ix and reset */
	if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3)))
		amdgpu_mes_self_test(adev);

	return 0;
}

static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
	.name = "mes_v11_0",
	.early_init = mes_v11_0_early_init,
	.late_init = mes_v11_0_late_init,
	.sw_init = mes_v11_0_sw_init,
	.sw_fini = mes_v11_0_sw_fini,
	.hw_init = mes_v11_0_hw_init,
	.hw_fini = mes_v11_0_hw_fini,
	.suspend = mes_v11_0_suspend,
	.resume = mes_v11_0_resume,
	.dump_ip_state = NULL,
	.print_ip_state = NULL,
};

const struct amdgpu_ip_block_version mes_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_MES,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &mes_v11_0_ip_funcs,
};