1// SPDX-License-Identifier: GPL-2.0 OR MIT
2/*
3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 */
24
25#include <linux/ratelimit.h>
26#include <linux/printk.h>
27#include <linux/slab.h>
28#include <linux/list.h>
29#include <linux/types.h>
30#include <linux/bitops.h>
31#include <linux/sched.h>
32#include "kfd_priv.h"
33#include "kfd_device_queue_manager.h"
34#include "kfd_mqd_manager.h"
35#include "cik_regs.h"
36#include "kfd_kernel_queue.h"
37#include "amdgpu_amdkfd.h"
38#include "amdgpu_reset.h"
39#include "mes_v11_api_def.h"
40#include "kfd_debug.h"
41
42/* Size of the per-pipe EOP queue */
43#define CIK_HPD_EOP_BYTES_LOG2 11
44#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
45
46static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
47 u32 pasid, unsigned int vmid);
48
49static int execute_queues_cpsch(struct device_queue_manager *dqm,
50 enum kfd_unmap_queues_filter filter,
51 uint32_t filter_param,
52 uint32_t grace_period);
53static int unmap_queues_cpsch(struct device_queue_manager *dqm,
54 enum kfd_unmap_queues_filter filter,
55 uint32_t filter_param,
56 uint32_t grace_period,
57 bool reset);
58
59static int map_queues_cpsch(struct device_queue_manager *dqm);
60
61static void deallocate_sdma_queue(struct device_queue_manager *dqm,
62 struct queue *q);
63
64static inline void deallocate_hqd(struct device_queue_manager *dqm,
65 struct queue *q);
66static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
67static int allocate_sdma_queue(struct device_queue_manager *dqm,
68 struct queue *q, const uint32_t *restore_sdma_id);
69static void kfd_process_hw_exception(struct work_struct *work);
70
71static inline
72enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
73{
74 if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
75 return KFD_MQD_TYPE_SDMA;
76 return KFD_MQD_TYPE_CP;
77}
78
79static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
80{
81 int i;
82 int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec
83 + pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe;
84
85 /* queue is available for KFD usage if bit is 1 */
86 for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i)
87 if (test_bit(pipe_offset + i,
88 dqm->dev->kfd->shared_resources.cp_queue_bitmap))
89 return true;
90 return false;
91}
92
93unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
94{
95 return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap,
96 AMDGPU_MAX_QUEUES);
97}
98
99unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
100{
101 return dqm->dev->kfd->shared_resources.num_queue_per_pipe;
102}
103
104unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
105{
106 return dqm->dev->kfd->shared_resources.num_pipe_per_mec;
107}
108
109static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
110{
111 return kfd_get_num_sdma_engines(dqm->dev) +
112 kfd_get_num_xgmi_sdma_engines(dqm->dev);
113}
114
115unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
116{
117 return kfd_get_num_sdma_engines(dqm->dev) *
118 dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
119}
120
121unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
122{
123 return kfd_get_num_xgmi_sdma_engines(dqm->dev) *
124 dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
125}
126
127static void init_sdma_bitmaps(struct device_queue_manager *dqm)
128{
129 bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES);
130 bitmap_set(dqm->sdma_bitmap, 0, get_num_sdma_queues(dqm));
131
132 bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES);
133 bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm));
134
135 /* Mask out the reserved queues */
136 bitmap_andnot(dqm->sdma_bitmap, dqm->sdma_bitmap,
137 dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap,
138 KFD_MAX_SDMA_QUEUES);
139}
140
141void program_sh_mem_settings(struct device_queue_manager *dqm,
142 struct qcm_process_device *qpd)
143{
144 uint32_t xcc_mask = dqm->dev->xcc_mask;
145 int xcc_id;
146
147 for_each_inst(xcc_id, xcc_mask)
148 dqm->dev->kfd2kgd->program_sh_mem_settings(
149 dqm->dev->adev, qpd->vmid, qpd->sh_mem_config,
150 qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit,
151 qpd->sh_mem_bases, xcc_id);
152}
153
154static void kfd_hws_hang(struct device_queue_manager *dqm)
155{
156 /*
157 * Issue a GPU reset if HWS is unresponsive
158 */
159 schedule_work(&dqm->hw_exception_work);
160}
161
162static int convert_to_mes_queue_type(int queue_type)
163{
164 int mes_queue_type;
165
166 switch (queue_type) {
167 case KFD_QUEUE_TYPE_COMPUTE:
168 mes_queue_type = MES_QUEUE_TYPE_COMPUTE;
169 break;
170 case KFD_QUEUE_TYPE_SDMA:
171 mes_queue_type = MES_QUEUE_TYPE_SDMA;
172 break;
173 default:
174 WARN(1, "Invalid queue type %d", queue_type);
175 mes_queue_type = -EINVAL;
176 break;
177 }
178
179 return mes_queue_type;
180}
181
182static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
183 struct qcm_process_device *qpd)
184{
185 struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
186 struct kfd_process_device *pdd = qpd_to_pdd(qpd);
187 struct mes_add_queue_input queue_input;
188 int r, queue_type;
189 uint64_t wptr_addr_off;
190
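	/* Bail out if a GPU reset is in progress (the reset domain is held for write) */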
191 if (!down_read_trylock(&adev->reset_domain->sem))
192 return -EIO;
193
194 memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
195 queue_input.process_id = qpd->pqm->process->pasid;
196 queue_input.page_table_base_addr = qpd->page_table_base;
197 queue_input.process_va_start = 0;
198 queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
199 /* MES unit for quantum is 100ns */
200 queue_input.process_quantum = KFD_MES_PROCESS_QUANTUM; /* Equivalent to 10ms. */
201 queue_input.process_context_addr = pdd->proc_ctx_gpu_addr;
202 queue_input.gang_quantum = KFD_MES_GANG_QUANTUM; /* Equivalent to 1ms */
203 queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
204 queue_input.inprocess_gang_priority = q->properties.priority;
205 queue_input.gang_global_priority_level =
206 AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
207 queue_input.doorbell_offset = q->properties.doorbell_off;
208 queue_input.mqd_addr = q->gart_mqd_addr;
209 queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
210
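	/* Give MES the MC address of the write pointer: the BO's GPU offset plus the wptr's offset within the page */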
211 if (q->wptr_bo) {
212 wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
213 queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->wptr_bo) + wptr_addr_off;
214 }
215
216 queue_input.is_kfd_process = 1;
217 queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
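	/* properties.queue_size is in bytes; pass the size to MES in dwords */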
218 queue_input.queue_size = q->properties.queue_size >> 2;
219
220 queue_input.paging = false;
221 queue_input.tba_addr = qpd->tba_addr;
222 queue_input.tma_addr = qpd->tma_addr;
223 queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device);
224 queue_input.skip_process_ctx_clear =
225 qpd->pqm->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED &&
226 (qpd->pqm->process->debug_trap_enabled ||
227 kfd_dbg_has_ttmps_always_setup(q->device));
228
229 queue_type = convert_to_mes_queue_type(q->properties.type);
230 if (queue_type < 0) {
231 dev_err(adev->dev, "Queue type not supported with MES, queue:%d\n",
232 q->properties.type);
233 up_read(&adev->reset_domain->sem);
234 return -EINVAL;
235 }
236 queue_input.queue_type = (uint32_t)queue_type;
237
238 queue_input.exclusively_scheduled = q->properties.is_gws;
239
240 amdgpu_mes_lock(&adev->mes);
241 r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
242 amdgpu_mes_unlock(&adev->mes);
243 up_read(&adev->reset_domain->sem);
244 if (r) {
245 dev_err(adev->dev, "failed to add hardware queue to MES, doorbell=0x%x\n",
246 q->properties.doorbell_off);
247 dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
248 kfd_hws_hang(dqm);
249 }
250
251 return r;
252}
253
254static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
255 struct qcm_process_device *qpd)
256{
257 struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
258 int r;
259 struct mes_remove_queue_input queue_input;
260
261 if (!down_read_trylock(&adev->reset_domain->sem))
262 return -EIO;
263
264 memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
265 queue_input.doorbell_offset = q->properties.doorbell_off;
266 queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
267
268 amdgpu_mes_lock(&adev->mes);
269 r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
270 amdgpu_mes_unlock(&adev->mes);
271 up_read(&adev->reset_domain->sem);
272
273 if (r) {
274 dev_err(adev->dev, "failed to remove hardware queue from MES, doorbell=0x%x\n",
275 q->properties.doorbell_off);
276 dev_err(adev->dev, "MES might be in unrecoverable state, issue a GPU reset\n");
277 kfd_hws_hang(dqm);
278 }
279
280 return r;
281}
282
283static int remove_all_queues_mes(struct device_queue_manager *dqm)
284{
285 struct device_process_node *cur;
286 struct device *dev = dqm->dev->adev->dev;
287 struct qcm_process_device *qpd;
288 struct queue *q;
289 int retval = 0;
290
291 list_for_each_entry(cur, &dqm->queues, list) {
292 qpd = cur->qpd;
293 list_for_each_entry(q, &qpd->queues_list, list) {
294 if (q->properties.is_active) {
295 retval = remove_queue_mes(dqm, q, qpd);
296 if (retval) {
297 dev_err(dev, "%s: Failed to remove queue %d for dev %d",
298 __func__,
299 q->properties.queue_id,
300 dqm->dev->id);
301 return retval;
302 }
303 }
304 }
305 }
306
307 return retval;
308}
309
310static void increment_queue_count(struct device_queue_manager *dqm,
311 struct qcm_process_device *qpd,
312 struct queue *q)
313{
314 dqm->active_queue_count++;
315 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
316 q->properties.type == KFD_QUEUE_TYPE_DIQ)
317 dqm->active_cp_queue_count++;
318
319 if (q->properties.is_gws) {
320 dqm->gws_queue_count++;
321 qpd->mapped_gws_queue = true;
322 }
323}
324
325static void decrement_queue_count(struct device_queue_manager *dqm,
326 struct qcm_process_device *qpd,
327 struct queue *q)
328{
329 dqm->active_queue_count--;
330 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
331 q->properties.type == KFD_QUEUE_TYPE_DIQ)
332 dqm->active_cp_queue_count--;
333
334 if (q->properties.is_gws) {
335 dqm->gws_queue_count--;
336 qpd->mapped_gws_queue = false;
337 }
338}
339
340/*
341 * Allocate a doorbell ID to this queue.
342 * If a doorbell ID is passed in (restore_id), make sure the requested ID is valid, then allocate it.
343 */
344static int allocate_doorbell(struct qcm_process_device *qpd,
345 struct queue *q,
346 uint32_t const *restore_id)
347{
348 struct kfd_node *dev = qpd->dqm->dev;
349
350 if (!KFD_IS_SOC15(dev)) {
351 /* On pre-SOC15 chips we need to use the queue ID to
352 * preserve the user mode ABI.
353 */
354
355 if (restore_id && *restore_id != q->properties.queue_id)
356 return -EINVAL;
357
358 q->doorbell_id = q->properties.queue_id;
359 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
360 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
361 /* For SDMA queues on SOC15 with 8-byte doorbell, use static
362 * doorbell assignments based on the engine and queue id.
363	 * The doorbell index distance between RLC (2*i) and (2*i+1)
364	 * for an SDMA engine is 512.
365 */
366
367 uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx;
368
369 /*
370 * q->properties.sdma_engine_id corresponds to the virtual
371 * sdma engine number. However, for doorbell allocation,
372 * we need the physical sdma engine id in order to get the
373 * correct doorbell offset.
374 */
375 uint32_t valid_id = idx_offset[qpd->dqm->dev->node_id *
376 get_num_all_sdma_engines(qpd->dqm) +
377 q->properties.sdma_engine_id]
378 + (q->properties.sdma_queue_id & 1)
379 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
380 + (q->properties.sdma_queue_id >> 1);
381
382 if (restore_id && *restore_id != valid_id)
383 return -EINVAL;
384 q->doorbell_id = valid_id;
385 } else {
386 /* For CP queues on SOC15 */
387 if (restore_id) {
388 /* make sure that ID is free */
389 if (__test_and_set_bit(*restore_id, qpd->doorbell_bitmap))
390 return -EINVAL;
391
392 q->doorbell_id = *restore_id;
393 } else {
394 /* or reserve a free doorbell ID */
395 unsigned int found;
396
397 found = find_first_zero_bit(qpd->doorbell_bitmap,
398 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
399 if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
400 pr_debug("No doorbells available");
401 return -EBUSY;
402 }
403 set_bit(found, qpd->doorbell_bitmap);
404 q->doorbell_id = found;
405 }
406 }
407
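	/* Convert the per-process doorbell id into an absolute doorbell index on the doorbell BAR */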
408 q->properties.doorbell_off = amdgpu_doorbell_index_on_bar(dev->adev,
409 qpd->proc_doorbells,
410 q->doorbell_id,
411 dev->kfd->device_info.doorbell_size);
412 return 0;
413}
414
415static void deallocate_doorbell(struct qcm_process_device *qpd,
416 struct queue *q)
417{
418 unsigned int old;
419 struct kfd_node *dev = qpd->dqm->dev;
420
421 if (!KFD_IS_SOC15(dev) ||
422 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
423 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
424 return;
425
426 old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
427 WARN_ON(!old);
428}
429
430static void program_trap_handler_settings(struct device_queue_manager *dqm,
431 struct qcm_process_device *qpd)
432{
433 uint32_t xcc_mask = dqm->dev->xcc_mask;
434 int xcc_id;
435
436 if (dqm->dev->kfd2kgd->program_trap_handler_settings)
437 for_each_inst(xcc_id, xcc_mask)
438 dqm->dev->kfd2kgd->program_trap_handler_settings(
439 dqm->dev->adev, qpd->vmid, qpd->tba_addr,
440 qpd->tma_addr, xcc_id);
441}
442
443static int allocate_vmid(struct device_queue_manager *dqm,
444 struct qcm_process_device *qpd,
445 struct queue *q)
446{
447 struct device *dev = dqm->dev->adev->dev;
448 int allocated_vmid = -1, i;
449
450 for (i = dqm->dev->vm_info.first_vmid_kfd;
451 i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
452 if (!dqm->vmid_pasid[i]) {
453 allocated_vmid = i;
454 break;
455 }
456 }
457
458 if (allocated_vmid < 0) {
459 dev_err(dev, "no more vmid to allocate\n");
460 return -ENOSPC;
461 }
462
463 pr_debug("vmid allocated: %d\n", allocated_vmid);
464
465 dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
466
467 set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
468
469 qpd->vmid = allocated_vmid;
470 q->properties.vmid = allocated_vmid;
471
472 program_sh_mem_settings(dqm, qpd);
473
474 if (KFD_IS_SOC15(dqm->dev) && dqm->dev->kfd->cwsr_enabled)
475 program_trap_handler_settings(dqm, qpd);
476
477 /* qpd->page_table_base is set earlier when register_process()
478 * is called, i.e. when the first queue is created.
479 */
480 dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->adev,
481 qpd->vmid,
482 qpd->page_table_base);
483 /* invalidate the VM context after pasid and vmid mapping is set up */
484 kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
485
486 if (dqm->dev->kfd2kgd->set_scratch_backing_va)
487 dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev,
488 qpd->sh_hidden_private_base, qpd->vmid);
489
490 return 0;
491}
492
493static int flush_texture_cache_nocpsch(struct kfd_node *kdev,
494 struct qcm_process_device *qpd)
495{
496 const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf;
497 int ret;
498
499 if (!qpd->ib_kaddr)
500 return -ENOMEM;
501
502 ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
503 if (ret)
504 return ret;
505
506 return amdgpu_amdkfd_submit_ib(kdev->adev, KGD_ENGINE_MEC1, qpd->vmid,
507 qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
508 pmf->release_mem_size / sizeof(uint32_t));
509}
510
511static void deallocate_vmid(struct device_queue_manager *dqm,
512 struct qcm_process_device *qpd,
513 struct queue *q)
514{
515 struct device *dev = dqm->dev->adev->dev;
516
517 /* On GFX v7, CP doesn't flush TC at dequeue */
518 if (q->device->adev->asic_type == CHIP_HAWAII)
519 if (flush_texture_cache_nocpsch(q->device, qpd))
520 dev_err(dev, "Failed to flush TC\n");
521
522 kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
523
524 /* Release the vmid mapping */
525 set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
526 dqm->vmid_pasid[qpd->vmid] = 0;
527
528 qpd->vmid = 0;
529 q->properties.vmid = 0;
530}
531
532static int create_queue_nocpsch(struct device_queue_manager *dqm,
533 struct queue *q,
534 struct qcm_process_device *qpd,
535 const struct kfd_criu_queue_priv_data *qd,
536 const void *restore_mqd, const void *restore_ctl_stack)
537{
538 struct mqd_manager *mqd_mgr;
539 int retval;
540
541 dqm_lock(dqm);
542
543 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
544 pr_warn("Can't create new usermode queue because %d queues were already created\n",
545 dqm->total_queue_count);
546 retval = -EPERM;
547 goto out_unlock;
548 }
549
550 if (list_empty(&qpd->queues_list)) {
551 retval = allocate_vmid(dqm, qpd, q);
552 if (retval)
553 goto out_unlock;
554 }
555 q->properties.vmid = qpd->vmid;
556 /*
557 * Eviction state logic: mark all queues as evicted, even ones
558 * not currently active. Restoring inactive queues later only
559 * updates the is_evicted flag but is a no-op otherwise.
560 */
561 q->properties.is_evicted = !!qpd->evicted;
562
563 q->properties.tba_addr = qpd->tba_addr;
564 q->properties.tma_addr = qpd->tma_addr;
565
566 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
567 q->properties.type)];
568 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
569 retval = allocate_hqd(dqm, q);
570 if (retval)
571 goto deallocate_vmid;
572 pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
573 q->pipe, q->queue);
574 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
575 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
576 retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
577 if (retval)
578 goto deallocate_vmid;
579 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
580 }
581
582 retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
583 if (retval)
584 goto out_deallocate_hqd;
585
586 /* Temporarily release dqm lock to avoid a circular lock dependency */
587 dqm_unlock(dqm);
588 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
589 dqm_lock(dqm);
590
591 if (!q->mqd_mem_obj) {
592 retval = -ENOMEM;
593 goto out_deallocate_doorbell;
594 }
595
596 if (qd)
597 mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
598 &q->properties, restore_mqd, restore_ctl_stack,
599 qd->ctl_stack_size);
600 else
601 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
602 &q->gart_mqd_addr, &q->properties);
603
604 if (q->properties.is_active) {
605 if (!dqm->sched_running) {
606 WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
607 goto add_queue_to_list;
608 }
609
610 if (WARN(q->process->mm != current->mm,
611 "should only run in user thread"))
612 retval = -EFAULT;
613 else
614 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
615 q->queue, &q->properties, current->mm);
616 if (retval)
617 goto out_free_mqd;
618 }
619
620add_queue_to_list:
621 list_add(&q->list, &qpd->queues_list);
622 qpd->queue_count++;
623 if (q->properties.is_active)
624 increment_queue_count(dqm, qpd, q);
625
626 /*
627 * Unconditionally increment this counter, regardless of the queue's
628 * type or whether the queue is active.
629 */
630 dqm->total_queue_count++;
631 pr_debug("Total of %d queues are accountable so far\n",
632 dqm->total_queue_count);
633 goto out_unlock;
634
635out_free_mqd:
636 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
637out_deallocate_doorbell:
638 deallocate_doorbell(qpd, q);
639out_deallocate_hqd:
640 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
641 deallocate_hqd(dqm, q);
642 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
643 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
644 deallocate_sdma_queue(dqm, q);
645deallocate_vmid:
646 if (list_empty(&qpd->queues_list))
647 deallocate_vmid(dqm, qpd, q);
648out_unlock:
649 dqm_unlock(dqm);
650 return retval;
651}
652
653static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
654{
655 bool set;
656 int pipe, bit, i;
657
658 set = false;
659
660 for (pipe = dqm->next_pipe_to_allocate, i = 0;
661 i < get_pipes_per_mec(dqm);
662 pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
663
664 if (!is_pipe_enabled(dqm, 0, pipe))
665 continue;
666
667 if (dqm->allocated_queues[pipe] != 0) {
668 bit = ffs(dqm->allocated_queues[pipe]) - 1;
669 dqm->allocated_queues[pipe] &= ~(1 << bit);
670 q->pipe = pipe;
671 q->queue = bit;
672 set = true;
673 break;
674 }
675 }
676
677 if (!set)
678 return -EBUSY;
679
680 pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
681 /* horizontal hqd allocation */
682 dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
683
684 return 0;
685}
686
687static inline void deallocate_hqd(struct device_queue_manager *dqm,
688 struct queue *q)
689{
690 dqm->allocated_queues[q->pipe] |= (1 << q->queue);
691}
692
693#define SQ_IND_CMD_CMD_KILL 0x00000003
694#define SQ_IND_CMD_MODE_BROADCAST 0x00000001
695
696static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process *p)
697{
698 int status = 0;
699 unsigned int vmid;
700 uint16_t queried_pasid;
701 union SQ_CMD_BITS reg_sq_cmd;
702 union GRBM_GFX_INDEX_BITS reg_gfx_index;
703 struct kfd_process_device *pdd;
704 int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
705 int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
706 uint32_t xcc_mask = dev->xcc_mask;
707 int xcc_id;
708
709 reg_sq_cmd.u32All = 0;
710 reg_gfx_index.u32All = 0;
711
712 pr_debug("Killing all process wavefronts\n");
713
714 if (!dev->kfd2kgd->get_atc_vmid_pasid_mapping_info) {
715 dev_err(dev->adev->dev, "no vmid pasid mapping supported\n");
716 return -EOPNOTSUPP;
717 }
718
719 /* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
720 * ATC_VMID15_PASID_MAPPING
721 * to check which VMID the current process is mapped to.
722 */
723
724 for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
725 status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
726 (dev->adev, vmid, &queried_pasid);
727
728 if (status && queried_pasid == p->pasid) {
729 pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
730 vmid, p->pasid);
731 break;
732 }
733 }
734
735 if (vmid > last_vmid_to_scan) {
736 dev_err(dev->adev->dev, "Didn't find vmid for pasid 0x%x\n", p->pasid);
737 return -EFAULT;
738 }
739
740	/* take the VMID for that process the safe way, via the PDD */
741 pdd = kfd_get_process_device_data(dev, p);
742 if (!pdd)
743 return -EFAULT;
744
745 reg_gfx_index.bits.sh_broadcast_writes = 1;
746 reg_gfx_index.bits.se_broadcast_writes = 1;
747 reg_gfx_index.bits.instance_broadcast_writes = 1;
748 reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
749 reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
750 reg_sq_cmd.bits.vm_id = vmid;
751
752 for_each_inst(xcc_id, xcc_mask)
753 dev->kfd2kgd->wave_control_execute(
754 dev->adev, reg_gfx_index.u32All,
755 reg_sq_cmd.u32All, xcc_id);
756
757 return 0;
758}
759
760/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
761 * to avoid unsynchronized access
762 */
763static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
764 struct qcm_process_device *qpd,
765 struct queue *q)
766{
767 int retval;
768 struct mqd_manager *mqd_mgr;
769
770 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
771 q->properties.type)];
772
773 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
774 deallocate_hqd(dqm, q);
775 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
776 deallocate_sdma_queue(dqm, q);
777 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
778 deallocate_sdma_queue(dqm, q);
779 else {
780 pr_debug("q->properties.type %d is invalid\n",
781 q->properties.type);
782 return -EINVAL;
783 }
784 dqm->total_queue_count--;
785
786 deallocate_doorbell(qpd, q);
787
788 if (!dqm->sched_running) {
789 WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
790 return 0;
791 }
792
793 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
794 KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
795 KFD_UNMAP_LATENCY_MS,
796 q->pipe, q->queue);
797 if (retval == -ETIME)
798 qpd->reset_wavefronts = true;
799
800 list_del(&q->list);
801 if (list_empty(&qpd->queues_list)) {
802 if (qpd->reset_wavefronts) {
803 pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
804 dqm->dev);
805 /* dbgdev_wave_reset_wavefronts has to be called before
806 * deallocate_vmid(), i.e. when vmid is still in use.
807 */
808 dbgdev_wave_reset_wavefronts(dqm->dev,
809 qpd->pqm->process);
810 qpd->reset_wavefronts = false;
811 }
812
813 deallocate_vmid(dqm, qpd, q);
814 }
815 qpd->queue_count--;
816 if (q->properties.is_active)
817 decrement_queue_count(dqm, qpd, q);
818
819 return retval;
820}
821
822static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
823 struct qcm_process_device *qpd,
824 struct queue *q)
825{
826 int retval;
827 uint64_t sdma_val = 0;
828 struct device *dev = dqm->dev->adev->dev;
829 struct kfd_process_device *pdd = qpd_to_pdd(qpd);
830 struct mqd_manager *mqd_mgr =
831 dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];
832
833 /* Get the SDMA queue stats */
834 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
835 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
836 retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
837 &sdma_val);
838 if (retval)
839 dev_err(dev, "Failed to read SDMA queue counter for queue: %d\n",
840 q->properties.queue_id);
841 }
842
843 dqm_lock(dqm);
844 retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
845 if (!retval)
846 pdd->sdma_past_activity_counter += sdma_val;
847 dqm_unlock(dqm);
848
849 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
850
851 return retval;
852}
853
854static int update_queue(struct device_queue_manager *dqm, struct queue *q,
855 struct mqd_update_info *minfo)
856{
857 int retval = 0;
858 struct device *dev = dqm->dev->adev->dev;
859 struct mqd_manager *mqd_mgr;
860 struct kfd_process_device *pdd;
861 bool prev_active = false;
862
863 dqm_lock(dqm);
864 pdd = kfd_get_process_device_data(q->device, q->process);
865 if (!pdd) {
866 retval = -ENODEV;
867 goto out_unlock;
868 }
869 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
870 q->properties.type)];
871
872 /* Save previous activity state for counters */
873 prev_active = q->properties.is_active;
874
875 /* Make sure the queue is unmapped before updating the MQD */
876 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
877 if (!dqm->dev->kfd->shared_resources.enable_mes)
878 retval = unmap_queues_cpsch(dqm,
879 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
880 else if (prev_active)
881 retval = remove_queue_mes(dqm, q, &pdd->qpd);
882
883 if (retval) {
884 dev_err(dev, "unmap queue failed\n");
885 goto out_unlock;
886 }
887 } else if (prev_active &&
888 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
889 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
890 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
891
892 if (!dqm->sched_running) {
893 WARN_ONCE(1, "Update non-HWS queue while stopped\n");
894 goto out_unlock;
895 }
896
897 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
898 (dqm->dev->kfd->cwsr_enabled ?
899 KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
900 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
901 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
902 if (retval) {
903 dev_err(dev, "destroy mqd failed\n");
904 goto out_unlock;
905 }
906 }
907
908 mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties, minfo);
909
910 /*
911 * check active state vs. the previous state and modify
912 * counter accordingly. map_queues_cpsch uses the
913 * dqm->active_queue_count to determine whether a new runlist must be
914 * uploaded.
915 */
916 if (q->properties.is_active && !prev_active) {
917 increment_queue_count(dqm, &pdd->qpd, q);
918 } else if (!q->properties.is_active && prev_active) {
919 decrement_queue_count(dqm, &pdd->qpd, q);
920 } else if (q->gws && !q->properties.is_gws) {
921 if (q->properties.is_active) {
922 dqm->gws_queue_count++;
923 pdd->qpd.mapped_gws_queue = true;
924 }
925 q->properties.is_gws = true;
926 } else if (!q->gws && q->properties.is_gws) {
927 if (q->properties.is_active) {
928 dqm->gws_queue_count--;
929 pdd->qpd.mapped_gws_queue = false;
930 }
931 q->properties.is_gws = false;
932 }
933
934 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
935 if (!dqm->dev->kfd->shared_resources.enable_mes)
936 retval = map_queues_cpsch(dqm);
937 else if (q->properties.is_active)
938 retval = add_queue_mes(dqm, q, &pdd->qpd);
939 } else if (q->properties.is_active &&
940 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
941 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
942 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
943 if (WARN(q->process->mm != current->mm,
944 "should only run in user thread"))
945 retval = -EFAULT;
946 else
947 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
948 q->pipe, q->queue,
949 &q->properties, current->mm);
950 }
951
952out_unlock:
953 dqm_unlock(dqm);
954 return retval;
955}
956
957/* Unlike evict_process_queues_cpsch or evict_process_queues_nocpsch,
958 * suspend_single_queue does not lock the dqm. You should
959 * lock the dqm before calling, and unlock after calling.
960 *
961 * The reason we don't lock the dqm is because this function may be
962 * called on multiple queues in a loop, so rather than locking/unlocking
963 * multiple times, we will just keep the dqm locked for all of the calls.
964 */
965static int suspend_single_queue(struct device_queue_manager *dqm,
966 struct kfd_process_device *pdd,
967 struct queue *q)
968{
969 bool is_new;
970
971 if (q->properties.is_suspended)
972 return 0;
973
974 pr_debug("Suspending PASID %u queue [%i]\n",
975 pdd->process->pasid,
976 q->properties.queue_id);
977
978 is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW);
979
980 if (is_new || q->properties.is_being_destroyed) {
981 pr_debug("Suspend: skip %s queue id %i\n",
982 is_new ? "new" : "destroyed",
983 q->properties.queue_id);
984 return -EBUSY;
985 }
986
987 q->properties.is_suspended = true;
988 if (q->properties.is_active) {
989 if (dqm->dev->kfd->shared_resources.enable_mes) {
990 int r = remove_queue_mes(dqm, q, &pdd->qpd);
991
992 if (r)
993 return r;
994 }
995
996 decrement_queue_count(dqm, &pdd->qpd, q);
997 q->properties.is_active = false;
998 }
999
1000 return 0;
1001}
1002
1003/* Unlike restore_process_queues_cpsch or restore_process_queues_nocpsch,
1004 * resume_single_queue does not lock the dqm. You should
1005 * lock the dqm before calling, and unlock after calling.
1006 *
1007 * The reason we don't lock the dqm is because this function may be
1008 * called on multiple queues in a loop, so rather than locking/unlocking
1009 * multiple times, we will just keep the dqm locked for all of the calls.
1010 */
1011static int resume_single_queue(struct device_queue_manager *dqm,
1012 struct qcm_process_device *qpd,
1013 struct queue *q)
1014{
1015 struct kfd_process_device *pdd;
1016
1017 if (!q->properties.is_suspended)
1018 return 0;
1019
1020 pdd = qpd_to_pdd(qpd);
1021
1022 pr_debug("Restoring from suspend PASID %u queue [%i]\n",
1023 pdd->process->pasid,
1024 q->properties.queue_id);
1025
1026 q->properties.is_suspended = false;
1027
1028 if (QUEUE_IS_ACTIVE(q->properties)) {
1029 if (dqm->dev->kfd->shared_resources.enable_mes) {
1030 int r = add_queue_mes(dqm, q, &pdd->qpd);
1031
1032 if (r)
1033 return r;
1034 }
1035
1036 q->properties.is_active = true;
1037 increment_queue_count(dqm, qpd, q);
1038 }
1039
1040 return 0;
1041}
1042
1043static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
1044 struct qcm_process_device *qpd)
1045{
1046 struct queue *q;
1047 struct mqd_manager *mqd_mgr;
1048 struct kfd_process_device *pdd;
1049 int retval, ret = 0;
1050
1051 dqm_lock(dqm);
1052 if (qpd->evicted++ > 0) /* already evicted, do nothing */
1053 goto out;
1054
1055 pdd = qpd_to_pdd(qpd);
1056 pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
1057 pdd->process->pasid);
1058
1059 pdd->last_evict_timestamp = get_jiffies_64();
1060 /* Mark all queues as evicted. Deactivate all active queues on
1061 * the qpd.
1062 */
1063 list_for_each_entry(q, &qpd->queues_list, list) {
1064 q->properties.is_evicted = true;
1065 if (!q->properties.is_active)
1066 continue;
1067
1068 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1069 q->properties.type)];
1070 q->properties.is_active = false;
1071 decrement_queue_count(dqm, qpd, q);
1072
1073 if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
1074 continue;
1075
1076 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
1077 (dqm->dev->kfd->cwsr_enabled ?
1078 KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
1079 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
1080 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
1081 if (retval && !ret)
1082 /* Return the first error, but keep going to
1083 * maintain a consistent eviction state
1084 */
1085 ret = retval;
1086 }
1087
1088out:
1089 dqm_unlock(dqm);
1090 return ret;
1091}
1092
1093static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
1094 struct qcm_process_device *qpd)
1095{
1096 struct queue *q;
1097 struct device *dev = dqm->dev->adev->dev;
1098 struct kfd_process_device *pdd;
1099 int retval = 0;
1100
1101 dqm_lock(dqm);
1102 if (qpd->evicted++ > 0) /* already evicted, do nothing */
1103 goto out;
1104
1105 pdd = qpd_to_pdd(qpd);
1106
1107 /* The debugger creates processes that temporarily have not acquired
1108 * all VMs for all devices and has no VMs itself.
1109 * Skip queue eviction on process eviction.
1110 */
1111 if (!pdd->drm_priv)
1112 goto out;
1113
1114 pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
1115 pdd->process->pasid);
1116
1117 /* Mark all queues as evicted. Deactivate all active queues on
1118 * the qpd.
1119 */
1120 list_for_each_entry(q, &qpd->queues_list, list) {
1121 q->properties.is_evicted = true;
1122 if (!q->properties.is_active)
1123 continue;
1124
1125 q->properties.is_active = false;
1126 decrement_queue_count(dqm, qpd, q);
1127
1128 if (dqm->dev->kfd->shared_resources.enable_mes) {
1129 retval = remove_queue_mes(dqm, q, qpd);
1130 if (retval) {
1131 dev_err(dev, "Failed to evict queue %d\n",
1132 q->properties.queue_id);
1133 goto out;
1134 }
1135 }
1136 }
1137 pdd->last_evict_timestamp = get_jiffies_64();
1138 if (!dqm->dev->kfd->shared_resources.enable_mes)
1139 retval = execute_queues_cpsch(dqm,
1140 qpd->is_debug ?
1141 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
1142 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
1143 USE_DEFAULT_GRACE_PERIOD);
1144
1145out:
1146 dqm_unlock(dqm);
1147 return retval;
1148}
1149
1150static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
1151 struct qcm_process_device *qpd)
1152{
1153 struct mm_struct *mm = NULL;
1154 struct queue *q;
1155 struct mqd_manager *mqd_mgr;
1156 struct kfd_process_device *pdd;
1157 uint64_t pd_base;
1158 uint64_t eviction_duration;
1159 int retval, ret = 0;
1160
1161 pdd = qpd_to_pdd(qpd);
1162 /* Retrieve PD base */
1163 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
1164
1165 dqm_lock(dqm);
1166 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
1167 goto out;
1168 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
1169 qpd->evicted--;
1170 goto out;
1171 }
1172
1173 pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
1174 pdd->process->pasid);
1175
1176 /* Update PD Base in QPD */
1177 qpd->page_table_base = pd_base;
1178 pr_debug("Updated PD address to 0x%llx\n", pd_base);
1179
1180 if (!list_empty(&qpd->queues_list)) {
1181 dqm->dev->kfd2kgd->set_vm_context_page_table_base(
1182 dqm->dev->adev,
1183 qpd->vmid,
1184 qpd->page_table_base);
1185 kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
1186 }
1187
1188 /* Take a safe reference to the mm_struct, which may otherwise
1189 * disappear even while the kfd_process is still referenced.
1190 */
1191 mm = get_task_mm(pdd->process->lead_thread);
1192 if (!mm) {
1193 ret = -EFAULT;
1194 goto out;
1195 }
1196
1197 /* Remove the eviction flags. Activate queues that are not
1198 * inactive for other reasons.
1199 */
1200 list_for_each_entry(q, &qpd->queues_list, list) {
1201 q->properties.is_evicted = false;
1202 if (!QUEUE_IS_ACTIVE(q->properties))
1203 continue;
1204
1205 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1206 q->properties.type)];
1207 q->properties.is_active = true;
1208 increment_queue_count(dqm, qpd, q);
1209
1210 if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
1211 continue;
1212
1213 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
1214 q->queue, &q->properties, mm);
1215 if (retval && !ret)
1216 /* Return the first error, but keep going to
1217 * maintain a consistent eviction state
1218 */
1219 ret = retval;
1220 }
1221 qpd->evicted = 0;
1222 eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
1223 atomic64_add(eviction_duration, &pdd->evict_duration_counter);
1224out:
1225 if (mm)
1226 mmput(mm);
1227 dqm_unlock(dqm);
1228 return ret;
1229}
1230
1231static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
1232 struct qcm_process_device *qpd)
1233{
1234 struct queue *q;
1235 struct device *dev = dqm->dev->adev->dev;
1236 struct kfd_process_device *pdd;
1237 uint64_t eviction_duration;
1238 int retval = 0;
1239
1240 pdd = qpd_to_pdd(qpd);
1241
1242 dqm_lock(dqm);
1243 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
1244 goto out;
1245 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
1246 qpd->evicted--;
1247 goto out;
1248 }
1249
1250 /* The debugger creates processes that temporarily have not acquired
1251 * all VMs for all devices and has no VMs itself.
1252 * Skip queue restore on process restore.
1253 */
1254 if (!pdd->drm_priv)
1255 goto vm_not_acquired;
1256
1257 pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
1258 pdd->process->pasid);
1259
1260 /* Update PD Base in QPD */
1261 qpd->page_table_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
1262 pr_debug("Updated PD address to 0x%llx\n", qpd->page_table_base);
1263
1264 /* activate all active queues on the qpd */
1265 list_for_each_entry(q, &qpd->queues_list, list) {
1266 q->properties.is_evicted = false;
1267 if (!QUEUE_IS_ACTIVE(q->properties))
1268 continue;
1269
1270 q->properties.is_active = true;
1271 increment_queue_count(dqm, &pdd->qpd, q);
1272
1273 if (dqm->dev->kfd->shared_resources.enable_mes) {
1274 retval = add_queue_mes(dqm, q, qpd);
1275 if (retval) {
1276 dev_err(dev, "Failed to restore queue %d\n",
1277 q->properties.queue_id);
1278 goto out;
1279 }
1280 }
1281 }
1282 if (!dqm->dev->kfd->shared_resources.enable_mes)
1283 retval = execute_queues_cpsch(dqm,
1284 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
1285 eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
1286 atomic64_add(eviction_duration, &pdd->evict_duration_counter);
1287vm_not_acquired:
1288 qpd->evicted = 0;
1289out:
1290 dqm_unlock(dqm);
1291 return retval;
1292}
1293
1294static int register_process(struct device_queue_manager *dqm,
1295 struct qcm_process_device *qpd)
1296{
1297 struct device_process_node *n;
1298 struct kfd_process_device *pdd;
1299 uint64_t pd_base;
1300 int retval;
1301
1302 n = kzalloc(sizeof(*n), GFP_KERNEL);
1303 if (!n)
1304 return -ENOMEM;
1305
1306 n->qpd = qpd;
1307
1308 pdd = qpd_to_pdd(qpd);
1309 /* Retrieve PD base */
1310 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
1311
1312 dqm_lock(dqm);
1313 list_add(&n->list, &dqm->queues);
1314
1315 /* Update PD Base in QPD */
1316 qpd->page_table_base = pd_base;
1317 pr_debug("Updated PD address to 0x%llx\n", pd_base);
1318
1319 retval = dqm->asic_ops.update_qpd(dqm, qpd);
1320
1321 dqm->processes_count++;
1322
1323 dqm_unlock(dqm);
1324
1325 /* Outside the DQM lock because under the DQM lock we can't do
1326 * reclaim or take other locks that others hold while reclaiming.
1327 */
1328 kfd_inc_compute_active(dqm->dev);
1329
1330 return retval;
1331}
1332
1333static int unregister_process(struct device_queue_manager *dqm,
1334 struct qcm_process_device *qpd)
1335{
1336 int retval;
1337 struct device_process_node *cur, *next;
1338
1339 pr_debug("qpd->queues_list is %s\n",
1340 list_empty(&qpd->queues_list) ? "empty" : "not empty");
1341
1342 retval = 0;
1343 dqm_lock(dqm);
1344
1345 list_for_each_entry_safe(cur, next, &dqm->queues, list) {
1346 if (qpd == cur->qpd) {
1347 list_del(&cur->list);
1348 kfree(cur);
1349 dqm->processes_count--;
1350 goto out;
1351 }
1352 }
1353 /* qpd not found in dqm list */
1354 retval = 1;
1355out:
1356 dqm_unlock(dqm);
1357
1358 /* Outside the DQM lock because under the DQM lock we can't do
1359 * reclaim or take other locks that others hold while reclaiming.
1360 */
1361 if (!retval)
1362 kfd_dec_compute_active(dqm->dev);
1363
1364 return retval;
1365}
1366
1367static int
1368set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
1369 unsigned int vmid)
1370{
1371 uint32_t xcc_mask = dqm->dev->xcc_mask;
1372 int xcc_id, ret;
1373
1374 for_each_inst(xcc_id, xcc_mask) {
1375 ret = dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
1376 dqm->dev->adev, pasid, vmid, xcc_id);
1377 if (ret)
1378 break;
1379 }
1380
1381 return ret;
1382}
1383
1384static void init_interrupts(struct device_queue_manager *dqm)
1385{
1386 uint32_t xcc_mask = dqm->dev->xcc_mask;
1387 unsigned int i, xcc_id;
1388
1389 for_each_inst(xcc_id, xcc_mask) {
1390 for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) {
1391 if (is_pipe_enabled(dqm, 0, i)) {
1392 dqm->dev->kfd2kgd->init_interrupts(
1393 dqm->dev->adev, i, xcc_id);
1394 }
1395 }
1396 }
1397}
1398
1399static int initialize_nocpsch(struct device_queue_manager *dqm)
1400{
1401 int pipe, queue;
1402
1403 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
1404
1405 dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
1406 sizeof(unsigned int), GFP_KERNEL);
1407 if (!dqm->allocated_queues)
1408 return -ENOMEM;
1409
1410 mutex_init(&dqm->lock_hidden);
1411 INIT_LIST_HEAD(&dqm->queues);
1412 dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
1413 dqm->active_cp_queue_count = 0;
1414 dqm->gws_queue_count = 0;
1415
1416 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
1417 int pipe_offset = pipe * get_queues_per_pipe(dqm);
1418
1419 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
1420 if (test_bit(pipe_offset + queue,
1421 dqm->dev->kfd->shared_resources.cp_queue_bitmap))
1422 dqm->allocated_queues[pipe] |= 1 << queue;
1423 }
1424
1425 memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
1426
1427 init_sdma_bitmaps(dqm);
1428
1429 return 0;
1430}
1431
1432static void uninitialize(struct device_queue_manager *dqm)
1433{
1434 int i;
1435
1436 WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
1437
1438 kfree(dqm->allocated_queues);
1439 for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
1440 kfree(dqm->mqd_mgrs[i]);
1441 mutex_destroy(&dqm->lock_hidden);
1442}
1443
1444static int start_nocpsch(struct device_queue_manager *dqm)
1445{
1446 int r = 0;
1447
1448 pr_info("SW scheduler is used");
1449 init_interrupts(dqm);
1450
1451 if (dqm->dev->adev->asic_type == CHIP_HAWAII)
1452 r = pm_init(&dqm->packet_mgr, dqm);
1453 if (!r)
1454 dqm->sched_running = true;
1455
1456 return r;
1457}
1458
1459static int stop_nocpsch(struct device_queue_manager *dqm)
1460{
1461 dqm_lock(dqm);
1462 if (!dqm->sched_running) {
1463 dqm_unlock(dqm);
1464 return 0;
1465 }
1466
1467 if (dqm->dev->adev->asic_type == CHIP_HAWAII)
1468 pm_uninit(&dqm->packet_mgr);
1469 dqm->sched_running = false;
1470 dqm_unlock(dqm);
1471
1472 return 0;
1473}
1474
1475static int allocate_sdma_queue(struct device_queue_manager *dqm,
1476 struct queue *q, const uint32_t *restore_sdma_id)
1477{
1478 struct device *dev = dqm->dev->adev->dev;
1479 int bit;
1480
1481 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1482 if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
1483 dev_err(dev, "No more SDMA queue to allocate\n");
1484 return -ENOMEM;
1485 }
1486
1487 if (restore_sdma_id) {
1488 /* Re-use existing sdma_id */
1489 if (!test_bit(*restore_sdma_id, dqm->sdma_bitmap)) {
1490 dev_err(dev, "SDMA queue already in use\n");
1491 return -EBUSY;
1492 }
1493 clear_bit(*restore_sdma_id, dqm->sdma_bitmap);
1494 q->sdma_id = *restore_sdma_id;
1495 } else {
1496 /* Find first available sdma_id */
1497 bit = find_first_bit(dqm->sdma_bitmap,
1498 get_num_sdma_queues(dqm));
1499 clear_bit(bit, dqm->sdma_bitmap);
1500 q->sdma_id = bit;
1501 }
1502
1503 q->properties.sdma_engine_id =
1504 q->sdma_id % kfd_get_num_sdma_engines(dqm->dev);
1505 q->properties.sdma_queue_id = q->sdma_id /
1506 kfd_get_num_sdma_engines(dqm->dev);
1507 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1508 if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
1509 dev_err(dev, "No more XGMI SDMA queue to allocate\n");
1510 return -ENOMEM;
1511 }
1512 if (restore_sdma_id) {
1513 /* Re-use existing sdma_id */
1514 if (!test_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap)) {
1515 dev_err(dev, "SDMA queue already in use\n");
1516 return -EBUSY;
1517 }
1518 clear_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap);
1519 q->sdma_id = *restore_sdma_id;
1520 } else {
1521 bit = find_first_bit(dqm->xgmi_sdma_bitmap,
1522 get_num_xgmi_sdma_queues(dqm));
1523 clear_bit(bit, dqm->xgmi_sdma_bitmap);
1524 q->sdma_id = bit;
1525 }
1526 /* sdma_engine_id is a global SDMA id covering
1527 * both PCIe-optimized and XGMI-optimized
1528 * SDMAs. The calculation below assumes the
1529 * first N engines are always the
1530 * PCIe-optimized ones.
1531 */
1532 q->properties.sdma_engine_id =
1533 kfd_get_num_sdma_engines(dqm->dev) +
1534 q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
1535 q->properties.sdma_queue_id = q->sdma_id /
1536 kfd_get_num_xgmi_sdma_engines(dqm->dev);
1537 }
1538
1539 pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
1540 pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
1541
1542 return 0;
1543}
1544
1545static void deallocate_sdma_queue(struct device_queue_manager *dqm,
1546 struct queue *q)
1547{
1548 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1549 if (q->sdma_id >= get_num_sdma_queues(dqm))
1550 return;
1551 set_bit(q->sdma_id, dqm->sdma_bitmap);
1552 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1553 if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
1554 return;
1555 set_bit(q->sdma_id, dqm->xgmi_sdma_bitmap);
1556 }
1557}
1558
1559/*
1560 * Device Queue Manager implementation for cp scheduler
1561 */
1562
1563static int set_sched_resources(struct device_queue_manager *dqm)
1564{
1565 int i, mec;
1566 struct scheduling_resources res;
1567 struct device *dev = dqm->dev->adev->dev;
1568
1569 res.vmid_mask = dqm->dev->compute_vmid_bitmap;
1570
1571 res.queue_mask = 0;
1572 for (i = 0; i < AMDGPU_MAX_QUEUES; ++i) {
1573 mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe)
1574 / dqm->dev->kfd->shared_resources.num_pipe_per_mec;
1575
1576 if (!test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap))
1577 continue;
1578
1579 /* only acquire queues from the first MEC */
1580 if (mec > 0)
1581 continue;
1582
1583 /* This situation may be hit in the future if a new HW
1584 * generation exposes more than 64 queues. If so, the
1585 * definition of res.queue_mask needs updating
1586 */
1587 if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
1588 dev_err(dev, "Invalid queue enabled by amdgpu: %d\n", i);
1589 break;
1590 }
1591
1592 res.queue_mask |= 1ull
1593 << amdgpu_queue_mask_bit_to_set_resource_bit(
1594 dqm->dev->adev, i);
1595 }
1596 res.gws_mask = ~0ull;
1597 res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
1598
1599 pr_debug("Scheduling resources:\n"
1600 "vmid mask: 0x%8X\n"
1601 "queue mask: 0x%8llX\n",
1602 res.vmid_mask, res.queue_mask);
1603
1604 return pm_send_set_resources(&dqm->packet_mgr, &res);
1605}
1606
1607static int initialize_cpsch(struct device_queue_manager *dqm)
1608{
1609 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
1610
1611 mutex_init(&dqm->lock_hidden);
1612 INIT_LIST_HEAD(&dqm->queues);
1613 dqm->active_queue_count = dqm->processes_count = 0;
1614 dqm->active_cp_queue_count = 0;
1615 dqm->gws_queue_count = 0;
1616 dqm->active_runlist = false;
1617 INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
1618 dqm->trap_debug_vmid = 0;
1619
1620 init_sdma_bitmaps(dqm);
1621
1622 if (dqm->dev->kfd2kgd->get_iq_wait_times)
1623 dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev,
1624 &dqm->wait_times,
1625 ffs(dqm->dev->xcc_mask) - 1);
1626 return 0;
1627}
1628
1629static int start_cpsch(struct device_queue_manager *dqm)
1630{
1631 struct device *dev = dqm->dev->adev->dev;
1632 int retval;
1633
1634 retval = 0;
1635
1636 dqm_lock(dqm);
1637
1638 if (!dqm->dev->kfd->shared_resources.enable_mes) {
1639 retval = pm_init(&dqm->packet_mgr, dqm);
1640 if (retval)
1641 goto fail_packet_manager_init;
1642
1643 retval = set_sched_resources(dqm);
1644 if (retval)
1645 goto fail_set_sched_resources;
1646 }
1647 pr_debug("Allocating fence memory\n");
1648
1649 /* allocate fence memory on the gart */
1650 retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
1651 &dqm->fence_mem);
1652
1653 if (retval)
1654 goto fail_allocate_vidmem;
1655
1656 dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
1657 dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
1658
1659 init_interrupts(dqm);
1660
1661	/* clear hang status when the driver tries to start the hw scheduler */
1662 dqm->sched_running = true;
1663
1664 if (!dqm->dev->kfd->shared_resources.enable_mes)
1665 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
1666
1667 /* Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU */
1668 if (amdgpu_emu_mode == 0 && dqm->dev->adev->gmc.is_app_apu &&
1669 (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))) {
1670 uint32_t reg_offset = 0;
1671 uint32_t grace_period = 1;
1672
1673 retval = pm_update_grace_period(&dqm->packet_mgr,
1674 grace_period);
1675 if (retval)
1676 dev_err(dev, "Setting grace timeout failed\n");
1677 else if (dqm->dev->kfd2kgd->build_grace_period_packet_info)
1678 /* Update dqm->wait_times maintained in software */
1679 dqm->dev->kfd2kgd->build_grace_period_packet_info(
1680 dqm->dev->adev, dqm->wait_times,
1681 grace_period, &reg_offset,
1682 &dqm->wait_times);
1683 }
1684
1685 dqm_unlock(dqm);
1686
1687 return 0;
1688fail_allocate_vidmem:
1689fail_set_sched_resources:
1690 if (!dqm->dev->kfd->shared_resources.enable_mes)
1691 pm_uninit(&dqm->packet_mgr);
1692fail_packet_manager_init:
1693 dqm_unlock(dqm);
1694 return retval;
1695}
1696
1697static int stop_cpsch(struct device_queue_manager *dqm)
1698{
1699 dqm_lock(dqm);
1700 if (!dqm->sched_running) {
1701 dqm_unlock(dqm);
1702 return 0;
1703 }
1704
1705 if (!dqm->dev->kfd->shared_resources.enable_mes)
1706 unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
1707 else
1708 remove_all_queues_mes(dqm);
1709
1710 dqm->sched_running = false;
1711
1712 if (!dqm->dev->kfd->shared_resources.enable_mes)
1713 pm_release_ib(&dqm->packet_mgr);
1714
1715 kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
1716 if (!dqm->dev->kfd->shared_resources.enable_mes)
1717 pm_uninit(&dqm->packet_mgr);
1718 dqm_unlock(dqm);
1719
1720 return 0;
1721}
1722
1723static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
1724 struct kernel_queue *kq,
1725 struct qcm_process_device *qpd)
1726{
1727 dqm_lock(dqm);
1728 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1729 pr_warn("Can't create new kernel queue because %d queues were already created\n",
1730 dqm->total_queue_count);
1731 dqm_unlock(dqm);
1732 return -EPERM;
1733 }
1734
1735 /*
1736 * Unconditionally increment this counter, regardless of the queue's
1737 * type or whether the queue is active.
1738 */
1739 dqm->total_queue_count++;
1740 pr_debug("Total of %d queues are accountable so far\n",
1741 dqm->total_queue_count);
1742
1743 list_add(&kq->list, &qpd->priv_queue_list);
1744 increment_queue_count(dqm, qpd, kq->queue);
1745 qpd->is_debug = true;
1746 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
1747 USE_DEFAULT_GRACE_PERIOD);
1748 dqm_unlock(dqm);
1749
1750 return 0;
1751}
1752
1753static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
1754 struct kernel_queue *kq,
1755 struct qcm_process_device *qpd)
1756{
1757 dqm_lock(dqm);
1758 list_del(&kq->list);
1759 decrement_queue_count(dqm, qpd, kq->queue);
1760 qpd->is_debug = false;
1761 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
1762 USE_DEFAULT_GRACE_PERIOD);
1763 /*
1764 * Unconditionally decrement this counter, regardless of the queue's
1765 * type.
1766 */
1767 dqm->total_queue_count--;
1768 pr_debug("Total of %d queues are accountable so far\n",
1769 dqm->total_queue_count);
1770 dqm_unlock(dqm);
1771}
1772
1773static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
1774 struct qcm_process_device *qpd,
1775 const struct kfd_criu_queue_priv_data *qd,
1776 const void *restore_mqd, const void *restore_ctl_stack)
1777{
1778 int retval;
1779 struct mqd_manager *mqd_mgr;
1780
1781 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1782 pr_warn("Can't create new usermode queue because %d queues were already created\n",
1783 dqm->total_queue_count);
1784 retval = -EPERM;
1785 goto out;
1786 }
1787
1788 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1789 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1790 dqm_lock(dqm);
1791 retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
1792 dqm_unlock(dqm);
1793 if (retval)
1794 goto out;
1795 }
1796
1797 retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
1798 if (retval)
1799 goto out_deallocate_sdma_queue;
1800
1801 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1802 q->properties.type)];
1803
1804 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1805 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1806 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
1807 q->properties.tba_addr = qpd->tba_addr;
1808 q->properties.tma_addr = qpd->tma_addr;
1809 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
1810 if (!q->mqd_mem_obj) {
1811 retval = -ENOMEM;
1812 goto out_deallocate_doorbell;
1813 }
1814
1815 dqm_lock(dqm);
1816 /*
1817 * Eviction state logic: mark all queues as evicted, even ones
1818 * not currently active. Restoring inactive queues later only
1819 * updates the is_evicted flag but is a no-op otherwise.
1820 */
1821 q->properties.is_evicted = !!qpd->evicted;
1822 q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled &&
1823 kfd_dbg_has_cwsr_workaround(q->device);
1824
1825 if (qd)
1826 mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
1827 &q->properties, restore_mqd, restore_ctl_stack,
1828 qd->ctl_stack_size);
1829 else
1830 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
1831 &q->gart_mqd_addr, &q->properties);
1832
1833 list_add(&q->list, &qpd->queues_list);
1834 qpd->queue_count++;
1835
1836 if (q->properties.is_active) {
1837 increment_queue_count(dqm, qpd, q);
1838
1839 if (!dqm->dev->kfd->shared_resources.enable_mes)
1840 retval = execute_queues_cpsch(dqm,
1841 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
1842 else
1843 retval = add_queue_mes(dqm, q, qpd);
1844 if (retval)
1845 goto cleanup_queue;
1846 }
1847
1848 /*
1849 * Unconditionally increment this counter, regardless of the queue's
1850 * type or whether the queue is active.
1851 */
1852 dqm->total_queue_count++;
1853
1854 pr_debug("Total of %d queues are accountable so far\n",
1855 dqm->total_queue_count);
1856
1857 dqm_unlock(dqm);
1858 return retval;
1859
1860cleanup_queue:
1861 qpd->queue_count--;
1862 list_del(&q->list);
1863 if (q->properties.is_active)
1864 decrement_queue_count(dqm, qpd, q);
1865 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1866 dqm_unlock(dqm);
1867out_deallocate_doorbell:
1868 deallocate_doorbell(qpd, q);
1869out_deallocate_sdma_queue:
1870 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1871 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1872 dqm_lock(dqm);
1873 deallocate_sdma_queue(dqm, q);
1874 dqm_unlock(dqm);
1875 }
1876out:
1877 return retval;
1878}
1879
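/*
 * amdkfd_fence_wait_timeout() polls the fence location written back by the
 * scheduler until it reaches fence_value or timeout_ms expires.  It returns
 * -EIO early when a fatal error has been detected, and on timeout it can
 * spin forever (halt_if_hws_hang) so CP state is preserved for FW debugging.
 */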
1880int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
1881 uint64_t fence_value,
1882 unsigned int timeout_ms)
1883{
1884 unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
1885 struct device *dev = dqm->dev->adev->dev;
1886 uint64_t *fence_addr = dqm->fence_addr;
1887
1888 while (*fence_addr != fence_value) {
1889 /* Fatal err detected, this response won't come */
1890 if (amdgpu_amdkfd_is_fed(dqm->dev->adev))
1891 return -EIO;
1892
1893 if (time_after(jiffies, end_jiffies)) {
1894 dev_err(dev, "qcm fence wait loop timeout expired\n");
1895 /* In HWS case, this is used to halt the driver thread
1896 * in order not to mess up CP states before doing
1897 * scandumps for FW debugging.
1898 */
1899 while (halt_if_hws_hang)
1900 schedule();
1901
1902 return -ETIME;
1903 }
1904 schedule();
1905 }
1906
1907 return 0;
1908}
1909
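/*
 * map_queues_cpsch() sends the current runlist to the HW scheduler.  It is
 * a no-op while scheduling is stopped, when there are no active queues or
 * processes, or when a runlist is already active.
 */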
1910/* dqm->lock mutex has to be locked before calling this function */
1911static int map_queues_cpsch(struct device_queue_manager *dqm)
1912{
1913 struct device *dev = dqm->dev->adev->dev;
1914 int retval;
1915
1916 if (!dqm->sched_running)
1917 return 0;
1918 if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
1919 return 0;
1920 if (dqm->active_runlist)
1921 return 0;
1922
1923 retval = pm_send_runlist(&dqm->packet_mgr, &dqm->queues);
1924 pr_debug("%s sent runlist\n", __func__);
1925 if (retval) {
1926 dev_err(dev, "failed to execute runlist\n");
1927 return retval;
1928 }
1929 dqm->active_runlist = true;
1930
1931 return retval;
1932}
1933
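/*
 * unmap_queues_cpsch() preempts the queues matching the given filter: it
 * optionally programs a non-default grace period, sends the unmap packet,
 * then queries status and waits on the scheduler fence.  Afterwards it
 * checks the HIQ MQD for doorbells of queues that failed to preempt, since
 * the firmware does not report that case as an error (see below).
 */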
1934/* dqm->lock mutex has to be locked before calling this function */
1935static int unmap_queues_cpsch(struct device_queue_manager *dqm,
1936 enum kfd_unmap_queues_filter filter,
1937 uint32_t filter_param,
1938 uint32_t grace_period,
1939 bool reset)
1940{
1941 struct device *dev = dqm->dev->adev->dev;
1942 struct mqd_manager *mqd_mgr;
1943 int retval;
1944
1945 if (!dqm->sched_running)
1946 return 0;
1947 if (!dqm->active_runlist)
1948 return 0;
1949 if (!down_read_trylock(&dqm->dev->adev->reset_domain->sem))
1950 return -EIO;
1951
1952 if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
1953 retval = pm_update_grace_period(&dqm->packet_mgr, grace_period);
1954 if (retval)
1955 goto out;
1956 }
1957
1958 retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset);
1959 if (retval)
1960 goto out;
1961
1962 *dqm->fence_addr = KFD_FENCE_INIT;
1963 pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr,
1964 KFD_FENCE_COMPLETED);
1965	/* expected to time out if the preemption does not complete */
1966 retval = amdkfd_fence_wait_timeout(dqm, KFD_FENCE_COMPLETED,
1967 queue_preemption_timeout_ms);
1968 if (retval) {
1969 dev_err(dev, "The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
1970 kfd_hws_hang(dqm);
1971 goto out;
1972 }
1973
1974	/* In the current MEC firmware implementation, if a compute queue
1975	 * doesn't respond to the preemption request in time, the HIQ will
1976	 * abandon the unmap request without returning any timeout error
1977	 * to the driver. Instead, MEC firmware logs the doorbell of the
1978	 * unresponsive compute queue in the HIQ.MQD.queue_doorbell_id fields.
1979	 * To make sure the queue unmap was successful, the driver needs to
1980	 * check those fields.
1981	 */
1982 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
1983 if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) {
1984 while (halt_if_hws_hang)
1985 schedule();
1986 kfd_hws_hang(dqm);
1987 retval = -ETIME;
1988 goto out;
1989 }
1990
1991 /* We need to reset the grace period value for this device */
1992 if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
1993 if (pm_update_grace_period(&dqm->packet_mgr,
1994 USE_DEFAULT_GRACE_PERIOD))
1995 dev_err(dev, "Failed to reset grace period\n");
1996 }
1997
1998 pm_release_ib(&dqm->packet_mgr);
1999 dqm->active_runlist = false;
2000
2001out:
2002 up_read(&dqm->dev->adev->reset_domain->sem);
2003 return retval;
2004}
2005
2006/* Only for compute queues */
2007static int reset_queues_cpsch(struct device_queue_manager *dqm,
2008 uint16_t pasid)
2009{
2010 int retval;
2011
2012 dqm_lock(dqm);
2013
2014 retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
2015 pasid, USE_DEFAULT_GRACE_PERIOD, true);
2016
2017 dqm_unlock(dqm);
2018 return retval;
2019}
2020
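/*
 * execute_queues_cpsch() applies pending queue-list changes under HWS: it
 * unmaps queues according to the filter and then re-sends the runlist.
 * It returns -EIO while a GPU reset is in progress.
 */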
2021/* dqm->lock mutex has to be locked before calling this function */
2022static int execute_queues_cpsch(struct device_queue_manager *dqm,
2023 enum kfd_unmap_queues_filter filter,
2024 uint32_t filter_param,
2025 uint32_t grace_period)
2026{
2027 int retval;
2028
2029 if (!down_read_trylock(&dqm->dev->adev->reset_domain->sem))
2030 return -EIO;
2031 retval = unmap_queues_cpsch(dqm, filter, filter_param, grace_period, false);
2032 if (!retval)
2033 retval = map_queues_cpsch(dqm);
2034 up_read(&dqm->dev->adev->reset_domain->sem);
2035 return retval;
2036}
2037
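/*
 * wait_on_destroy_queue() marks the queue as being destroyed and, if the
 * debugger currently holds it suspended, temporarily drops the process and
 * DQM locks and waits for the queue to be resumed before destruction may
 * proceed.  Processes that own a debug kernel queue are exempt.
 */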
2038static int wait_on_destroy_queue(struct device_queue_manager *dqm,
2039 struct queue *q)
2040{
2041 struct kfd_process_device *pdd = kfd_get_process_device_data(q->device,
2042 q->process);
2043 int ret = 0;
2044
2045 if (pdd->qpd.is_debug)
2046 return ret;
2047
2048 q->properties.is_being_destroyed = true;
2049
2050 if (pdd->process->debug_trap_enabled && q->properties.is_suspended) {
2051 dqm_unlock(dqm);
2052 mutex_unlock(&q->process->mutex);
2053 ret = wait_event_interruptible(dqm->destroy_wait,
2054 !q->properties.is_suspended);
2055
2056 mutex_lock(&q->process->mutex);
2057 dqm_lock(dqm);
2058 }
2059
2060 return ret;
2061}
2062
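/*
 * destroy_queue_cpsch() tears down a user-mode queue: it samples the SDMA
 * usage counter, waits for any debugger suspension to clear, releases the
 * doorbell and SDMA resources, unlinks the queue, unmaps it via the runlist
 * (or MES), and frees the MQD outside the DQM lock.  Destroying a queue of
 * a process that is currently being debugged is rejected with -EBUSY.
 */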
2063static int destroy_queue_cpsch(struct device_queue_manager *dqm,
2064 struct qcm_process_device *qpd,
2065 struct queue *q)
2066{
2067 int retval;
2068 struct mqd_manager *mqd_mgr;
2069 uint64_t sdma_val = 0;
2070 struct kfd_process_device *pdd = qpd_to_pdd(qpd);
2071 struct device *dev = dqm->dev->adev->dev;
2072
2073 /* Get the SDMA queue stats */
2074 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
2075 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
2076 retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
2077 &sdma_val);
2078 if (retval)
2079 dev_err(dev, "Failed to read SDMA queue counter for queue: %d\n",
2080 q->properties.queue_id);
2081 }
2082
2083 /* remove queue from list to prevent rescheduling after preemption */
2084 dqm_lock(dqm);
2085
2086 retval = wait_on_destroy_queue(dqm, q);
2087
2088 if (retval) {
2089 dqm_unlock(dqm);
2090 return retval;
2091 }
2092
2093 if (qpd->is_debug) {
2094		/*
2095		 * Error: destroying a queue of a process that is currently
2096		 * being debugged is not allowed.
2097		 */
2098 retval = -EBUSY;
2099 goto failed_try_destroy_debugged_queue;
2100
2101 }
2102
2103 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
2104 q->properties.type)];
2105
2106 deallocate_doorbell(qpd, q);
2107
2108 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
2109 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
2110 deallocate_sdma_queue(dqm, q);
2111 pdd->sdma_past_activity_counter += sdma_val;
2112 }
2113
2114 list_del(&q->list);
2115 qpd->queue_count--;
2116 if (q->properties.is_active) {
2117 decrement_queue_count(dqm, qpd, q);
2118 if (!dqm->dev->kfd->shared_resources.enable_mes) {
2119 retval = execute_queues_cpsch(dqm,
2120 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
2121 USE_DEFAULT_GRACE_PERIOD);
2122 if (retval == -ETIME)
2123 qpd->reset_wavefronts = true;
2124 } else {
2125 retval = remove_queue_mes(dqm, q, qpd);
2126 }
2127 }
2128
2129 /*
2130 * Unconditionally decrement this counter, regardless of the queue's
2131 * type
2132 */
2133 dqm->total_queue_count--;
2134 pr_debug("Total of %d queues are accountable so far\n",
2135 dqm->total_queue_count);
2136
2137 dqm_unlock(dqm);
2138
2139 /*
2140 * Do free_mqd and raise delete event after dqm_unlock(dqm) to avoid
2141 * circular locking
2142 */
2143 kfd_dbg_ev_raise(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE),
2144 qpd->pqm->process, q->device,
2145 -1, false, NULL, 0);
2146
2147 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
2148
2149 return retval;
2150
2151failed_try_destroy_debugged_queue:
2152
2153 dqm_unlock(dqm);
2154 return retval;
2155}
2156
2157/*
2158 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
2159 * stay in user mode.
2160 */
2161#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
2162/* APE1 limit is inclusive and 64K aligned. */
2163#define APE1_LIMIT_ALIGNMENT 0xFFFF
2164
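/*
 * set_cache_memory_policy() validates and programs the APE1 alternate
 * aperture and then calls the ASIC-specific hook.  A size of 0 disables
 * APE1 by programming base > limit.  Otherwise base and (inclusive) limit
 * must fit the SH_MEM_APE1_* encoding above: for example, an aperture at
 * base 0x100000000 of size 0x10000 (64KiB) has limit 0x10000ffff, and both
 * are stored shifted right by 16 as 0x10000.  With the NO_HWS policy the
 * SH_MEM registers are reprogrammed immediately for the process VMID.
 */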
2165static bool set_cache_memory_policy(struct device_queue_manager *dqm,
2166 struct qcm_process_device *qpd,
2167 enum cache_policy default_policy,
2168 enum cache_policy alternate_policy,
2169 void __user *alternate_aperture_base,
2170 uint64_t alternate_aperture_size)
2171{
2172 bool retval = true;
2173
2174 if (!dqm->asic_ops.set_cache_memory_policy)
2175 return retval;
2176
2177 dqm_lock(dqm);
2178
2179 if (alternate_aperture_size == 0) {
2180 /* base > limit disables APE1 */
2181 qpd->sh_mem_ape1_base = 1;
2182 qpd->sh_mem_ape1_limit = 0;
2183 } else {
2184 /*
2185 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
2186 * SH_MEM_APE1_BASE[31:0], 0x0000 }
2187 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
2188 * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
2189 * Verify that the base and size parameters can be
2190 * represented in this format and convert them.
2191 * Additionally restrict APE1 to user-mode addresses.
2192 */
2193
2194 uint64_t base = (uintptr_t)alternate_aperture_base;
2195 uint64_t limit = base + alternate_aperture_size - 1;
2196
2197 if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
2198 (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
2199 retval = false;
2200 goto out;
2201 }
2202
2203 qpd->sh_mem_ape1_base = base >> 16;
2204 qpd->sh_mem_ape1_limit = limit >> 16;
2205 }
2206
2207 retval = dqm->asic_ops.set_cache_memory_policy(
2208 dqm,
2209 qpd,
2210 default_policy,
2211 alternate_policy,
2212 alternate_aperture_base,
2213 alternate_aperture_size);
2214
2215 if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
2216 program_sh_mem_settings(dqm, qpd);
2217
2218 pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
2219 qpd->sh_mem_config, qpd->sh_mem_ape1_base,
2220 qpd->sh_mem_ape1_limit);
2221
2222out:
2223 dqm_unlock(dqm);
2224 return retval;
2225}
2226
2227static int process_termination_nocpsch(struct device_queue_manager *dqm,
2228 struct qcm_process_device *qpd)
2229{
2230 struct queue *q;
2231 struct device_process_node *cur, *next_dpn;
2232 int retval = 0;
2233 bool found = false;
2234
2235 dqm_lock(dqm);
2236
2237 /* Clear all user mode queues */
2238 while (!list_empty(&qpd->queues_list)) {
2239 struct mqd_manager *mqd_mgr;
2240 int ret;
2241
2242 q = list_first_entry(&qpd->queues_list, struct queue, list);
2243 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
2244 q->properties.type)];
2245 ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
2246 if (ret)
2247 retval = ret;
2248 dqm_unlock(dqm);
2249 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
2250 dqm_lock(dqm);
2251 }
2252
2253 /* Unregister process */
2254 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
2255 if (qpd == cur->qpd) {
2256 list_del(&cur->list);
2257 kfree(cur);
2258 dqm->processes_count--;
2259 found = true;
2260 break;
2261 }
2262 }
2263
2264 dqm_unlock(dqm);
2265
2266 /* Outside the DQM lock because under the DQM lock we can't do
2267 * reclaim or take other locks that others hold while reclaiming.
2268 */
2269 if (found)
2270 kfd_dec_compute_active(dqm->dev);
2271
2272 return retval;
2273}
2274
2275static int get_wave_state(struct device_queue_manager *dqm,
2276 struct queue *q,
2277 void __user *ctl_stack,
2278 u32 *ctl_stack_used_size,
2279 u32 *save_area_used_size)
2280{
2281 struct mqd_manager *mqd_mgr;
2282
2283 dqm_lock(dqm);
2284
2285 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
2286
2287 if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
2288 q->properties.is_active || !q->device->kfd->cwsr_enabled ||
2289 !mqd_mgr->get_wave_state) {
2290 dqm_unlock(dqm);
2291 return -EINVAL;
2292 }
2293
2294 dqm_unlock(dqm);
2295
2296 /*
2297 * get_wave_state is outside the dqm lock to prevent circular locking
2298 * and the queue should be protected against destruction by the process
2299 * lock.
2300 */
2301 return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, &q->properties,
2302 ctl_stack, ctl_stack_used_size, save_area_used_size);
2303}
2304
2305static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
2306 const struct queue *q,
2307 u32 *mqd_size,
2308 u32 *ctl_stack_size)
2309{
2310 struct mqd_manager *mqd_mgr;
2311 enum KFD_MQD_TYPE mqd_type =
2312 get_mqd_type_from_queue_type(q->properties.type);
2313
2314 dqm_lock(dqm);
2315 mqd_mgr = dqm->mqd_mgrs[mqd_type];
2316 *mqd_size = mqd_mgr->mqd_size;
2317 *ctl_stack_size = 0;
2318
2319 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
2320 mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size);
2321
2322 dqm_unlock(dqm);
2323}
2324
2325static int checkpoint_mqd(struct device_queue_manager *dqm,
2326 const struct queue *q,
2327 void *mqd,
2328 void *ctl_stack)
2329{
2330 struct mqd_manager *mqd_mgr;
2331 int r = 0;
2332 enum KFD_MQD_TYPE mqd_type =
2333 get_mqd_type_from_queue_type(q->properties.type);
2334
2335 dqm_lock(dqm);
2336
2337 if (q->properties.is_active || !q->device->kfd->cwsr_enabled) {
2338 r = -EINVAL;
2339 goto dqm_unlock;
2340 }
2341
2342 mqd_mgr = dqm->mqd_mgrs[mqd_type];
2343 if (!mqd_mgr->checkpoint_mqd) {
2344 r = -EOPNOTSUPP;
2345 goto dqm_unlock;
2346 }
2347
2348 mqd_mgr->checkpoint_mqd(mqd_mgr, q->mqd, mqd, ctl_stack);
2349
2350dqm_unlock:
2351 dqm_unlock(dqm);
2352 return r;
2353}
2354
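/*
 * process_termination_cpsch() removes all queues of a terminating process
 * from the device: kernel queues are dropped first, user-mode queues have
 * their SDMA resources released and are unmapped (via the runlist or MES),
 * the process is unregistered from the DQM, wavefronts are reset if needed,
 * and the MQDs are freed outside the DQM lock.
 */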
2355static int process_termination_cpsch(struct device_queue_manager *dqm,
2356 struct qcm_process_device *qpd)
2357{
2358 int retval;
2359 struct queue *q;
2360 struct device *dev = dqm->dev->adev->dev;
2361 struct kernel_queue *kq, *kq_next;
2362 struct mqd_manager *mqd_mgr;
2363 struct device_process_node *cur, *next_dpn;
2364 enum kfd_unmap_queues_filter filter =
2365 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
2366 bool found = false;
2367
2368 retval = 0;
2369
2370 dqm_lock(dqm);
2371
2372 /* Clean all kernel queues */
2373 list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
2374 list_del(&kq->list);
2375 decrement_queue_count(dqm, qpd, kq->queue);
2376 qpd->is_debug = false;
2377 dqm->total_queue_count--;
2378 filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
2379 }
2380
2381 /* Clear all user mode queues */
2382 list_for_each_entry(q, &qpd->queues_list, list) {
2383 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
2384 deallocate_sdma_queue(dqm, q);
2385 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
2386 deallocate_sdma_queue(dqm, q);
2387
2388 if (q->properties.is_active) {
2389 decrement_queue_count(dqm, qpd, q);
2390
2391 if (dqm->dev->kfd->shared_resources.enable_mes) {
2392 retval = remove_queue_mes(dqm, q, qpd);
2393 if (retval)
2394 dev_err(dev, "Failed to remove queue %d\n",
2395 q->properties.queue_id);
2396 }
2397 }
2398
2399 dqm->total_queue_count--;
2400 }
2401
2402 /* Unregister process */
2403 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
2404 if (qpd == cur->qpd) {
2405 list_del(&cur->list);
2406 kfree(cur);
2407 dqm->processes_count--;
2408 found = true;
2409 break;
2410 }
2411 }
2412
2413 if (!dqm->dev->kfd->shared_resources.enable_mes)
2414 retval = execute_queues_cpsch(dqm, filter, 0, USE_DEFAULT_GRACE_PERIOD);
2415
2416 if ((retval || qpd->reset_wavefronts) &&
2417 down_read_trylock(&dqm->dev->adev->reset_domain->sem)) {
2418 pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
2419 dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
2420 qpd->reset_wavefronts = false;
2421 up_read(&dqm->dev->adev->reset_domain->sem);
2422 }
2423
2424 /* Lastly, free mqd resources.
2425 * Do free_mqd() after dqm_unlock to avoid circular locking.
2426 */
2427 while (!list_empty(&qpd->queues_list)) {
2428 q = list_first_entry(&qpd->queues_list, struct queue, list);
2429 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
2430 q->properties.type)];
2431 list_del(&q->list);
2432 qpd->queue_count--;
2433 dqm_unlock(dqm);
2434 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
2435 dqm_lock(dqm);
2436 }
2437 dqm_unlock(dqm);
2438
2439 /* Outside the DQM lock because under the DQM lock we can't do
2440 * reclaim or take other locks that others hold while reclaiming.
2441 */
2442 if (found)
2443 kfd_dec_compute_active(dqm->dev);
2444
2445 return retval;
2446}
2447
2448static int init_mqd_managers(struct device_queue_manager *dqm)
2449{
2450 int i, j;
2451 struct device *dev = dqm->dev->adev->dev;
2452 struct mqd_manager *mqd_mgr;
2453
2454 for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
2455 mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
2456 if (!mqd_mgr) {
2457 dev_err(dev, "mqd manager [%d] initialization failed\n", i);
2458 goto out_free;
2459 }
2460 dqm->mqd_mgrs[i] = mqd_mgr;
2461 }
2462
2463 return 0;
2464
2465out_free:
2466 for (j = 0; j < i; j++) {
2467 kfree(dqm->mqd_mgrs[j]);
2468 dqm->mqd_mgrs[j] = NULL;
2469 }
2470
2471 return -ENOMEM;
2472}
2473
2474/* Allocate one HIQ MQD (HWS) and all SDMA MQDs in one contiguous chunk */
2475static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
2476{
2477 int retval;
2478 struct kfd_node *dev = dqm->dev;
2479 struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
2480 uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
2481 get_num_all_sdma_engines(dqm) *
2482 dev->kfd->device_info.num_sdma_queues_per_engine +
2483 (dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size *
2484 NUM_XCC(dqm->dev->xcc_mask));
2485
2486 retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size,
2487 &(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
2488 (void *)&(mem_obj->cpu_ptr), false);
2489
2490 return retval;
2491}
2492
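/*
 * device_queue_manager_init() allocates and sets up the DQM for a KFD node:
 * it picks the scheduling policy (forcing NO_HWS where HWS or CWSR is not
 * usable), fills in the matching ops table, selects the ASIC-specific
 * asic_ops by family or GC IP version, creates the MQD managers and, when
 * MES is not in use, the HIQ/SDMA MQD buffer.
 */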
2493struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
2494{
2495 struct device_queue_manager *dqm;
2496
2497 pr_debug("Loading device queue manager\n");
2498
2499 dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
2500 if (!dqm)
2501 return NULL;
2502
2503 switch (dev->adev->asic_type) {
2504 /* HWS is not available on Hawaii. */
2505 case CHIP_HAWAII:
2506 /* HWS depends on CWSR for timely dequeue. CWSR is not
2507 * available on Tonga.
2508 *
2509 * FIXME: This argument also applies to Kaveri.
2510 */
2511 case CHIP_TONGA:
2512 dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
2513 break;
2514 default:
2515 dqm->sched_policy = sched_policy;
2516 break;
2517 }
2518
2519 dqm->dev = dev;
2520 switch (dqm->sched_policy) {
2521 case KFD_SCHED_POLICY_HWS:
2522 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
2523 /* initialize dqm for cp scheduling */
2524 dqm->ops.create_queue = create_queue_cpsch;
2525 dqm->ops.initialize = initialize_cpsch;
2526 dqm->ops.start = start_cpsch;
2527 dqm->ops.stop = stop_cpsch;
2528 dqm->ops.destroy_queue = destroy_queue_cpsch;
2529 dqm->ops.update_queue = update_queue;
2530 dqm->ops.register_process = register_process;
2531 dqm->ops.unregister_process = unregister_process;
2532 dqm->ops.uninitialize = uninitialize;
2533 dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
2534 dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
2535 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
2536 dqm->ops.process_termination = process_termination_cpsch;
2537 dqm->ops.evict_process_queues = evict_process_queues_cpsch;
2538 dqm->ops.restore_process_queues = restore_process_queues_cpsch;
2539 dqm->ops.get_wave_state = get_wave_state;
2540 dqm->ops.reset_queues = reset_queues_cpsch;
2541 dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
2542 dqm->ops.checkpoint_mqd = checkpoint_mqd;
2543 break;
2544 case KFD_SCHED_POLICY_NO_HWS:
2545 /* initialize dqm for no cp scheduling */
2546 dqm->ops.start = start_nocpsch;
2547 dqm->ops.stop = stop_nocpsch;
2548 dqm->ops.create_queue = create_queue_nocpsch;
2549 dqm->ops.destroy_queue = destroy_queue_nocpsch;
2550 dqm->ops.update_queue = update_queue;
2551 dqm->ops.register_process = register_process;
2552 dqm->ops.unregister_process = unregister_process;
2553 dqm->ops.initialize = initialize_nocpsch;
2554 dqm->ops.uninitialize = uninitialize;
2555 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
2556 dqm->ops.process_termination = process_termination_nocpsch;
2557 dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
2558 dqm->ops.restore_process_queues =
2559 restore_process_queues_nocpsch;
2560 dqm->ops.get_wave_state = get_wave_state;
2561 dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
2562 dqm->ops.checkpoint_mqd = checkpoint_mqd;
2563 break;
2564 default:
2565 dev_err(dev->adev->dev, "Invalid scheduling policy %d\n", dqm->sched_policy);
2566 goto out_free;
2567 }
2568
2569 switch (dev->adev->asic_type) {
2570 case CHIP_KAVERI:
2571 case CHIP_HAWAII:
2572 device_queue_manager_init_cik(&dqm->asic_ops);
2573 break;
2574
2575 case CHIP_CARRIZO:
2576 case CHIP_TONGA:
2577 case CHIP_FIJI:
2578 case CHIP_POLARIS10:
2579 case CHIP_POLARIS11:
2580 case CHIP_POLARIS12:
2581 case CHIP_VEGAM:
2582 device_queue_manager_init_vi(&dqm->asic_ops);
2583 break;
2584
2585 default:
2586 if (KFD_GC_VERSION(dev) >= IP_VERSION(12, 0, 0))
2587 device_queue_manager_init_v12(&dqm->asic_ops);
2588 else if (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0))
2589 device_queue_manager_init_v11(&dqm->asic_ops);
2590 else if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
2591 device_queue_manager_init_v10(&dqm->asic_ops);
2592 else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
2593 device_queue_manager_init_v9(&dqm->asic_ops);
2594 else {
2595 WARN(1, "Unexpected ASIC family %u",
2596 dev->adev->asic_type);
2597 goto out_free;
2598 }
2599 }
2600
2601 if (init_mqd_managers(dqm))
2602 goto out_free;
2603
2604 if (!dev->kfd->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) {
2605		dev_err(dev->adev->dev, "Failed to allocate hiq sdma mqd chunk buffer\n");
2606 goto out_free;
2607 }
2608
2609 if (!dqm->ops.initialize(dqm)) {
2610 init_waitqueue_head(&dqm->destroy_wait);
2611 return dqm;
2612 }
2613
2614out_free:
2615 kfree(dqm);
2616 return NULL;
2617}
2618
2619static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
2620 struct kfd_mem_obj *mqd)
2621{
2622	WARN(!mqd, "No hiq sdma mqd chunk to free");
2623
2624 amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem);
2625}
2626
2627void device_queue_manager_uninit(struct device_queue_manager *dqm)
2628{
2629 dqm->ops.stop(dqm);
2630 dqm->ops.uninitialize(dqm);
2631 if (!dqm->dev->kfd->shared_resources.enable_mes)
2632 deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
2633 kfree(dqm);
2634}
2635
2636int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
2637{
2638 struct kfd_process_device *pdd;
2639 struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
2640 int ret = 0;
2641
2642 if (!p)
2643 return -EINVAL;
2644 WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
2645 pdd = kfd_get_process_device_data(dqm->dev, p);
2646 if (pdd)
2647 ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
2648 kfd_unref_process(p);
2649
2650 return ret;
2651}
2652
2653static void kfd_process_hw_exception(struct work_struct *work)
2654{
2655 struct device_queue_manager *dqm = container_of(work,
2656 struct device_queue_manager, hw_exception_work);
2657 amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
2658}
2659
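/*
 * reserve_debug_trap_vmid() removes the last KFD VMID from the compute VMID
 * bitmap and reserves it for the trap debugger: all queues are unmapped,
 * the updated VMID mask is handed to the scheduler via set_sched_resources()
 * and the runlist is mapped again.  Only supported with a HW scheduler.
 */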
2660int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
2661 struct qcm_process_device *qpd)
2662{
2663 int r;
2664 struct device *dev = dqm->dev->adev->dev;
2665 int updated_vmid_mask;
2666
2667 if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
2668 dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
2669 return -EINVAL;
2670 }
2671
2672 dqm_lock(dqm);
2673
2674 if (dqm->trap_debug_vmid != 0) {
2675 dev_err(dev, "Trap debug id already reserved\n");
2676 r = -EBUSY;
2677 goto out_unlock;
2678 }
2679
2680 r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
2681 USE_DEFAULT_GRACE_PERIOD, false);
2682 if (r)
2683 goto out_unlock;
2684
2685 updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
2686 updated_vmid_mask &= ~(1 << dqm->dev->vm_info.last_vmid_kfd);
2687
2688 dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
2689 dqm->trap_debug_vmid = dqm->dev->vm_info.last_vmid_kfd;
2690 r = set_sched_resources(dqm);
2691 if (r)
2692 goto out_unlock;
2693
2694 r = map_queues_cpsch(dqm);
2695 if (r)
2696 goto out_unlock;
2697
2698 pr_debug("Reserved VMID for trap debug: %i\n", dqm->trap_debug_vmid);
2699
2700out_unlock:
2701 dqm_unlock(dqm);
2702 return r;
2703}
2704
2705/*
2706 * Release the VMID previously reserved for the trap debugger.
2707 */
2708int release_debug_trap_vmid(struct device_queue_manager *dqm,
2709 struct qcm_process_device *qpd)
2710{
2711 struct device *dev = dqm->dev->adev->dev;
2712 int r;
2713 int updated_vmid_mask;
2714 uint32_t trap_debug_vmid;
2715
2716 if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
2717 dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
2718 return -EINVAL;
2719 }
2720
2721 dqm_lock(dqm);
2722 trap_debug_vmid = dqm->trap_debug_vmid;
2723 if (dqm->trap_debug_vmid == 0) {
2724 dev_err(dev, "Trap debug id is not reserved\n");
2725 r = -EINVAL;
2726 goto out_unlock;
2727 }
2728
2729 r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
2730 USE_DEFAULT_GRACE_PERIOD, false);
2731 if (r)
2732 goto out_unlock;
2733
2734 updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
2735 updated_vmid_mask |= (1 << dqm->dev->vm_info.last_vmid_kfd);
2736
2737 dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
2738 dqm->trap_debug_vmid = 0;
2739 r = set_sched_resources(dqm);
2740 if (r)
2741 goto out_unlock;
2742
2743 r = map_queues_cpsch(dqm);
2744 if (r)
2745 goto out_unlock;
2746
2747 pr_debug("Released VMID for trap debug: %i\n", trap_debug_vmid);
2748
2749out_unlock:
2750 dqm_unlock(dqm);
2751 return r;
2752}
2753
2754#define QUEUE_NOT_FOUND -1
2755/* mark every queue ID in the array as invalid */
2756static void q_array_invalidate(uint32_t num_queues, uint32_t *queue_ids)
2757{
2758 int i;
2759
2760 for (i = 0; i < num_queues; i++)
2761 queue_ids[i] |= KFD_DBG_QUEUE_INVALID_MASK;
2762}
2763
2764/* find queue index in array */
2765static int q_array_get_index(unsigned int queue_id,
2766 uint32_t num_queues,
2767 uint32_t *queue_ids)
2768{
2769 int i;
2770
2771 for (i = 0; i < num_queues; i++)
2772 if (queue_id == (queue_ids[i] & ~KFD_DBG_QUEUE_INVALID_MASK))
2773 return i;
2774
2775 return QUEUE_NOT_FOUND;
2776}
2777
2778struct copy_context_work_handler_workarea {
2779 struct work_struct copy_context_work;
2780 struct kfd_process *p;
2781};
2782
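/*
 * copy_context_work_handler() runs from a workqueue with the process' mm
 * attached and calls get_wave_state() for every queue on every device of
 * the process, saving each queue's wave state into its user-mode context
 * save/restore area.  It is flushed from suspend_queues() once the queues
 * have been taken off the hardware.
 */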
2783static void copy_context_work_handler(struct work_struct *work)
2784{
2785 struct copy_context_work_handler_workarea *workarea;
2786 struct mqd_manager *mqd_mgr;
2787 struct queue *q;
2788 struct mm_struct *mm;
2789 struct kfd_process *p;
2790 uint32_t tmp_ctl_stack_used_size, tmp_save_area_used_size;
2791 int i;
2792
2793 workarea = container_of(work,
2794 struct copy_context_work_handler_workarea,
2795 copy_context_work);
2796
2797 p = workarea->p;
2798 mm = get_task_mm(p->lead_thread);
2799
2800 if (!mm)
2801 return;
2802
2803 kthread_use_mm(mm);
2804 for (i = 0; i < p->n_pdds; i++) {
2805 struct kfd_process_device *pdd = p->pdds[i];
2806 struct device_queue_manager *dqm = pdd->dev->dqm;
2807 struct qcm_process_device *qpd = &pdd->qpd;
2808
2809 list_for_each_entry(q, &qpd->queues_list, list) {
2810 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
2811
2812 /* We ignore the return value from get_wave_state
2813 * because
2814 * i) right now, it always returns 0, and
2815 * ii) if we hit an error, we would continue to the
2816 * next queue anyway.
2817 */
2818 mqd_mgr->get_wave_state(mqd_mgr,
2819 q->mqd,
2820 &q->properties,
2821 (void __user *) q->properties.ctx_save_restore_area_address,
2822 &tmp_ctl_stack_used_size,
2823 &tmp_save_area_used_size);
2824 }
2825 }
2826 kthread_unuse_mm(mm);
2827 mmput(mm);
2828}
2829
2830static uint32_t *get_queue_ids(uint32_t num_queues, uint32_t *usr_queue_id_array)
2831{
2832 size_t array_size = num_queues * sizeof(uint32_t);
2833
2834 if (!usr_queue_id_array)
2835 return NULL;
2836
2837 return memdup_user(usr_queue_id_array, array_size);
2838}
2839
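/*
 * resume_queues() resumes the queues named in usr_queue_id_array (or all of
 * the process' queues when the array is NULL).  Each requested ID is first
 * marked invalid and unmasked only once its queue has been resumed; failures
 * are flagged with KFD_DBG_QUEUE_ERROR_MASK.  The updated array is copied
 * back to user space and the number of resumed queues is returned.
 */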
2840int resume_queues(struct kfd_process *p,
2841 uint32_t num_queues,
2842 uint32_t *usr_queue_id_array)
2843{
2844 uint32_t *queue_ids = NULL;
2845 int total_resumed = 0;
2846 int i;
2847
2848 if (usr_queue_id_array) {
2849 queue_ids = get_queue_ids(num_queues, usr_queue_id_array);
2850
2851 if (IS_ERR(queue_ids))
2852 return PTR_ERR(queue_ids);
2853
2854 /* mask all queues as invalid. unmask per successful request */
2855 q_array_invalidate(num_queues, queue_ids);
2856 }
2857
2858 for (i = 0; i < p->n_pdds; i++) {
2859 struct kfd_process_device *pdd = p->pdds[i];
2860 struct device_queue_manager *dqm = pdd->dev->dqm;
2861 struct device *dev = dqm->dev->adev->dev;
2862 struct qcm_process_device *qpd = &pdd->qpd;
2863 struct queue *q;
2864 int r, per_device_resumed = 0;
2865
2866 dqm_lock(dqm);
2867
2868 /* unmask queues that resume or already resumed as valid */
2869 list_for_each_entry(q, &qpd->queues_list, list) {
2870 int q_idx = QUEUE_NOT_FOUND;
2871
2872 if (queue_ids)
2873 q_idx = q_array_get_index(
2874 q->properties.queue_id,
2875 num_queues,
2876 queue_ids);
2877
2878 if (!queue_ids || q_idx != QUEUE_NOT_FOUND) {
2879 int err = resume_single_queue(dqm, &pdd->qpd, q);
2880
2881 if (queue_ids) {
2882 if (!err) {
2883 queue_ids[q_idx] &=
2884 ~KFD_DBG_QUEUE_INVALID_MASK;
2885 } else {
2886 queue_ids[q_idx] |=
2887 KFD_DBG_QUEUE_ERROR_MASK;
2888 break;
2889 }
2890 }
2891
2892 if (dqm->dev->kfd->shared_resources.enable_mes) {
2893 wake_up_all(&dqm->destroy_wait);
2894 if (!err)
2895 total_resumed++;
2896 } else {
2897 per_device_resumed++;
2898 }
2899 }
2900 }
2901
2902 if (!per_device_resumed) {
2903 dqm_unlock(dqm);
2904 continue;
2905 }
2906
2907 r = execute_queues_cpsch(dqm,
2908 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
2909 0,
2910 USE_DEFAULT_GRACE_PERIOD);
2911 if (r) {
2912 dev_err(dev, "Failed to resume process queues\n");
2913 if (queue_ids) {
2914 list_for_each_entry(q, &qpd->queues_list, list) {
2915 int q_idx = q_array_get_index(
2916 q->properties.queue_id,
2917 num_queues,
2918 queue_ids);
2919
2920 /* mask queue as error on resume fail */
2921 if (q_idx != QUEUE_NOT_FOUND)
2922 queue_ids[q_idx] |=
2923 KFD_DBG_QUEUE_ERROR_MASK;
2924 }
2925 }
2926 } else {
2927 wake_up_all(&dqm->destroy_wait);
2928 total_resumed += per_device_resumed;
2929 }
2930
2931 dqm_unlock(dqm);
2932 }
2933
2934 if (queue_ids) {
2935 if (copy_to_user((void __user *)usr_queue_id_array, queue_ids,
2936 num_queues * sizeof(uint32_t)))
2937 pr_err("copy_to_user failed on queue resume\n");
2938
2939 kfree(queue_ids);
2940 }
2941
2942 return total_resumed;
2943}
2944
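/*
 * suspend_queues() suspends the requested queues using the given preemption
 * grace period, optionally clearing exception status bits.  Per-queue
 * success or failure is reported back through usr_queue_id_array in the
 * same way as in resume_queues().  Once the queues are off the hardware, a
 * worker is flushed to copy their wave state into the user-mode save areas,
 * and the number of suspended queues is returned.
 */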
2945int suspend_queues(struct kfd_process *p,
2946 uint32_t num_queues,
2947 uint32_t grace_period,
2948 uint64_t exception_clear_mask,
2949 uint32_t *usr_queue_id_array)
2950{
2951 uint32_t *queue_ids = get_queue_ids(num_queues, usr_queue_id_array);
2952 int total_suspended = 0;
2953 int i;
2954
2955 if (IS_ERR(queue_ids))
2956 return PTR_ERR(queue_ids);
2957
2958	/* mask all queues as invalid. unmask on successful request */
2959 q_array_invalidate(num_queues, queue_ids);
2960
2961 for (i = 0; i < p->n_pdds; i++) {
2962 struct kfd_process_device *pdd = p->pdds[i];
2963 struct device_queue_manager *dqm = pdd->dev->dqm;
2964 struct device *dev = dqm->dev->adev->dev;
2965 struct qcm_process_device *qpd = &pdd->qpd;
2966 struct queue *q;
2967 int r, per_device_suspended = 0;
2968
2969 mutex_lock(&p->event_mutex);
2970 dqm_lock(dqm);
2971
2972 /* unmask queues that suspend or already suspended */
2973 list_for_each_entry(q, &qpd->queues_list, list) {
2974 int q_idx = q_array_get_index(q->properties.queue_id,
2975 num_queues,
2976 queue_ids);
2977
2978 if (q_idx != QUEUE_NOT_FOUND) {
2979 int err = suspend_single_queue(dqm, pdd, q);
2980 bool is_mes = dqm->dev->kfd->shared_resources.enable_mes;
2981
2982 if (!err) {
2983 queue_ids[q_idx] &= ~KFD_DBG_QUEUE_INVALID_MASK;
2984 if (exception_clear_mask && is_mes)
2985 q->properties.exception_status &=
2986 ~exception_clear_mask;
2987
2988 if (is_mes)
2989 total_suspended++;
2990 else
2991 per_device_suspended++;
2992 } else if (err != -EBUSY) {
2993 r = err;
2994 queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK;
2995 break;
2996 }
2997 }
2998 }
2999
3000 if (!per_device_suspended) {
3001 dqm_unlock(dqm);
3002 mutex_unlock(&p->event_mutex);
3003 if (total_suspended)
3004 amdgpu_amdkfd_debug_mem_fence(dqm->dev->adev);
3005 continue;
3006 }
3007
3008 r = execute_queues_cpsch(dqm,
3009 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
3010 grace_period);
3011
3012 if (r)
3013 dev_err(dev, "Failed to suspend process queues.\n");
3014 else
3015 total_suspended += per_device_suspended;
3016
3017 list_for_each_entry(q, &qpd->queues_list, list) {
3018 int q_idx = q_array_get_index(q->properties.queue_id,
3019 num_queues, queue_ids);
3020
3021 if (q_idx == QUEUE_NOT_FOUND)
3022 continue;
3023
3024 /* mask queue as error on suspend fail */
3025 if (r)
3026 queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK;
3027 else if (exception_clear_mask)
3028 q->properties.exception_status &=
3029 ~exception_clear_mask;
3030 }
3031
3032 dqm_unlock(dqm);
3033 mutex_unlock(&p->event_mutex);
3034 amdgpu_device_flush_hdp(dqm->dev->adev, NULL);
3035 }
3036
3037 if (total_suspended) {
3038 struct copy_context_work_handler_workarea copy_context_worker;
3039
3040 INIT_WORK_ONSTACK(
3041				&copy_context_worker.copy_context_work,
3042 copy_context_work_handler);
3043
3044 copy_context_worker.p = p;
3045
3046		schedule_work(&copy_context_worker.copy_context_work);
3047
3048
3049		flush_work(&copy_context_worker.copy_context_work);
3050		destroy_work_on_stack(&copy_context_worker.copy_context_work);
3051 }
3052
3053 if (copy_to_user((void __user *)usr_queue_id_array, queue_ids,
3054 num_queues * sizeof(uint32_t)))
3055 pr_err("copy_to_user failed on queue suspend\n");
3056
3057 kfree(queue_ids);
3058
3059 return total_suspended;
3060}
3061
3062static uint32_t set_queue_type_for_user(struct queue_properties *q_props)
3063{
3064 switch (q_props->type) {
3065 case KFD_QUEUE_TYPE_COMPUTE:
3066 return q_props->format == KFD_QUEUE_FORMAT_PM4
3067 ? KFD_IOC_QUEUE_TYPE_COMPUTE
3068 : KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
3069 case KFD_QUEUE_TYPE_SDMA:
3070 return KFD_IOC_QUEUE_TYPE_SDMA;
3071 case KFD_QUEUE_TYPE_SDMA_XGMI:
3072 return KFD_IOC_QUEUE_TYPE_SDMA_XGMI;
3073 default:
3074 WARN_ONCE(true, "queue type not recognized!");
3075 return 0xffffffff;
3076	}
3077}
3078
3079void set_queue_snapshot_entry(struct queue *q,
3080 uint64_t exception_clear_mask,
3081 struct kfd_queue_snapshot_entry *qss_entry)
3082{
3083 qss_entry->ring_base_address = q->properties.queue_address;
3084 qss_entry->write_pointer_address = (uint64_t)q->properties.write_ptr;
3085 qss_entry->read_pointer_address = (uint64_t)q->properties.read_ptr;
3086 qss_entry->ctx_save_restore_address =
3087 q->properties.ctx_save_restore_area_address;
3088 qss_entry->ctx_save_restore_area_size =
3089 q->properties.ctx_save_restore_area_size;
3090 qss_entry->exception_status = q->properties.exception_status;
3091 qss_entry->queue_id = q->properties.queue_id;
3092 qss_entry->gpu_id = q->device->id;
3093 qss_entry->ring_size = (uint32_t)q->properties.queue_size;
3094 qss_entry->queue_type = set_queue_type_for_user(&q->properties);
3095 q->properties.exception_status &= ~exception_clear_mask;
3096}
3097
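/*
 * debug_lock_and_unmap() and debug_map_and_unlock() bracket debugger updates
 * on ASICs with per-VMID debug support (otherwise they are no-ops): the
 * former takes the DQM lock and unmaps all queues, the latter maps them
 * again and drops the lock.  debug_refresh_runlist() runs the two in turn.
 */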
3098int debug_lock_and_unmap(struct device_queue_manager *dqm)
3099{
3100 struct device *dev = dqm->dev->adev->dev;
3101 int r;
3102
3103 if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
3104 dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
3105 return -EINVAL;
3106 }
3107
3108 if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
3109 return 0;
3110
3111 dqm_lock(dqm);
3112
3113 r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 0, false);
3114 if (r)
3115 dqm_unlock(dqm);
3116
3117 return r;
3118}
3119
3120int debug_map_and_unlock(struct device_queue_manager *dqm)
3121{
3122 struct device *dev = dqm->dev->adev->dev;
3123 int r;
3124
3125 if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
3126 dev_err(dev, "Unsupported on sched_policy: %i\n", dqm->sched_policy);
3127 return -EINVAL;
3128 }
3129
3130 if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
3131 return 0;
3132
3133 r = map_queues_cpsch(dqm);
3134
3135 dqm_unlock(dqm);
3136
3137 return r;
3138}
3139
3140int debug_refresh_runlist(struct device_queue_manager *dqm)
3141{
3142 int r = debug_lock_and_unmap(dqm);
3143
3144 if (r)
3145 return r;
3146
3147 return debug_map_and_unlock(dqm);
3148}
3149
3150#if defined(CONFIG_DEBUG_FS)
3151
3152static void seq_reg_dump(struct seq_file *m,
3153 uint32_t (*dump)[2], uint32_t n_regs)
3154{
3155 uint32_t i, count;
3156
3157 for (i = 0, count = 0; i < n_regs; i++) {
3158 if (count == 0 ||
3159 dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
3160 seq_printf(m, "%s %08x: %08x",
3161 i ? "\n" : "",
3162 dump[i][0], dump[i][1]);
3163 count = 7;
3164 } else {
3165 seq_printf(m, " %08x", dump[i][1]);
3166 count--;
3167 }
3168 }
3169
3170 seq_puts(m, "\n");
3171}
3172
3173int dqm_debugfs_hqds(struct seq_file *m, void *data)
3174{
3175 struct device_queue_manager *dqm = data;
3176 uint32_t xcc_mask = dqm->dev->xcc_mask;
3177 uint32_t (*dump)[2], n_regs;
3178 int pipe, queue;
3179 int r = 0, xcc_id;
3180 uint32_t sdma_engine_start;
3181
3182 if (!dqm->sched_running) {
3183 seq_puts(m, " Device is stopped\n");
3184 return 0;
3185 }
3186
3187 for_each_inst(xcc_id, xcc_mask) {
3188 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
3189 KFD_CIK_HIQ_PIPE,
3190 KFD_CIK_HIQ_QUEUE, &dump,
3191 &n_regs, xcc_id);
3192 if (!r) {
3193 seq_printf(
3194 m,
3195 " Inst %d, HIQ on MEC %d Pipe %d Queue %d\n",
3196 xcc_id,
3197 KFD_CIK_HIQ_PIPE / get_pipes_per_mec(dqm) + 1,
3198 KFD_CIK_HIQ_PIPE % get_pipes_per_mec(dqm),
3199 KFD_CIK_HIQ_QUEUE);
3200 seq_reg_dump(m, dump, n_regs);
3201
3202 kfree(dump);
3203 }
3204
3205 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
3206 int pipe_offset = pipe * get_queues_per_pipe(dqm);
3207
3208 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
3209 if (!test_bit(pipe_offset + queue,
3210 dqm->dev->kfd->shared_resources.cp_queue_bitmap))
3211 continue;
3212
3213 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
3214 pipe, queue,
3215 &dump, &n_regs,
3216 xcc_id);
3217 if (r)
3218 break;
3219
3220 seq_printf(m,
3221 " Inst %d, CP Pipe %d, Queue %d\n",
3222 xcc_id, pipe, queue);
3223 seq_reg_dump(m, dump, n_regs);
3224
3225 kfree(dump);
3226 }
3227 }
3228 }
3229
3230 sdma_engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm);
3231 for (pipe = sdma_engine_start;
3232 pipe < (sdma_engine_start + get_num_all_sdma_engines(dqm));
3233 pipe++) {
3234 for (queue = 0;
3235 queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
3236 queue++) {
3237 r = dqm->dev->kfd2kgd->hqd_sdma_dump(
3238 dqm->dev->adev, pipe, queue, &dump, &n_regs);
3239 if (r)
3240 break;
3241
3242 seq_printf(m, " SDMA Engine %d, RLC %d\n",
3243 pipe, queue);
3244 seq_reg_dump(m, dump, n_regs);
3245
3246 kfree(dump);
3247 }
3248 }
3249
3250 return r;
3251}
3252
3253int dqm_debugfs_hang_hws(struct device_queue_manager *dqm)
3254{
3255 int r = 0;
3256
3257 dqm_lock(dqm);
3258 r = pm_debugfs_hang_hws(&dqm->packet_mgr);
3259 if (r) {
3260 dqm_unlock(dqm);
3261 return r;
3262 }
3263 dqm->active_runlist = true;
3264 r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
3265 0, USE_DEFAULT_GRACE_PERIOD);
3266 dqm_unlock(dqm);
3267
3268 return r;
3269}
3270
3271#endif