1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/ratelimit.h>
25#include <linux/printk.h>
26#include <linux/slab.h>
27#include <linux/list.h>
28#include <linux/types.h>
29#include <linux/bitops.h>
30#include <linux/sched.h>
31#include "kfd_priv.h"
32#include "kfd_device_queue_manager.h"
33#include "kfd_mqd_manager.h"
34#include "cik_regs.h"
35#include "kfd_kernel_queue.h"
36#include "amdgpu_amdkfd.h"
37
38/* Size of the per-pipe EOP queue */
39#define CIK_HPD_EOP_BYTES_LOG2 11
40#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
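/* i.e. each pipe gets a 1 << 11 = 2048 byte EOP buffer */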
41
42static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
43 unsigned int pasid, unsigned int vmid);
44
45static int execute_queues_cpsch(struct device_queue_manager *dqm,
46 enum kfd_unmap_queues_filter filter,
47 uint32_t filter_param);
48static int unmap_queues_cpsch(struct device_queue_manager *dqm,
49 enum kfd_unmap_queues_filter filter,
50 uint32_t filter_param);
51
52static int map_queues_cpsch(struct device_queue_manager *dqm);
53
54static void deallocate_sdma_queue(struct device_queue_manager *dqm,
55 struct queue *q);
56
57static inline void deallocate_hqd(struct device_queue_manager *dqm,
58 struct queue *q);
59static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
60static int allocate_sdma_queue(struct device_queue_manager *dqm,
61 struct queue *q);
62static void kfd_process_hw_exception(struct work_struct *work);
63
64static inline
65enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
66{
67 if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
68 return KFD_MQD_TYPE_SDMA;
69 return KFD_MQD_TYPE_CP;
70}
71
72static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
73{
74 int i;
75 int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
76 + pipe * dqm->dev->shared_resources.num_queue_per_pipe;
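	/* With mec == 0 (the only value callers in this file pass),
	 * pipe_offset is the cp_queue_bitmap index of this pipe's first
	 * queue.
	 */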
77
78 /* queue is available for KFD usage if bit is 1 */
79 for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
80 if (test_bit(pipe_offset + i,
81 dqm->dev->shared_resources.cp_queue_bitmap))
82 return true;
83 return false;
84}
85
86unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
87{
88 return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
89 KGD_MAX_QUEUES);
90}
91
92unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
93{
94 return dqm->dev->shared_resources.num_queue_per_pipe;
95}
96
97unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
98{
99 return dqm->dev->shared_resources.num_pipe_per_mec;
100}
101
102static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
103{
104 return dqm->dev->device_info->num_sdma_engines;
105}
106
107static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
108{
109 return dqm->dev->device_info->num_xgmi_sdma_engines;
110}
111
112static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
113{
114 return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm);
115}
116
117unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
118{
119 return dqm->dev->device_info->num_sdma_engines
120 * dqm->dev->device_info->num_sdma_queues_per_engine;
121}
122
123unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
124{
125 return dqm->dev->device_info->num_xgmi_sdma_engines
126 * dqm->dev->device_info->num_sdma_queues_per_engine;
127}
128
129void program_sh_mem_settings(struct device_queue_manager *dqm,
130 struct qcm_process_device *qpd)
131{
132 return dqm->dev->kfd2kgd->program_sh_mem_settings(
133 dqm->dev->kgd, qpd->vmid,
134 qpd->sh_mem_config,
135 qpd->sh_mem_ape1_base,
136 qpd->sh_mem_ape1_limit,
137 qpd->sh_mem_bases);
138}
139
140void increment_queue_count(struct device_queue_manager *dqm,
141 enum kfd_queue_type type)
142{
143 dqm->active_queue_count++;
144 if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
145 dqm->active_cp_queue_count++;
146}
147
148void decrement_queue_count(struct device_queue_manager *dqm,
149 enum kfd_queue_type type)
150{
151 dqm->active_queue_count--;
152 if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
153 dqm->active_cp_queue_count--;
154}
155
156static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
157{
158 struct kfd_dev *dev = qpd->dqm->dev;
159
160 if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
161 /* On pre-SOC15 chips we need to use the queue ID to
162 * preserve the user mode ABI.
163 */
164 q->doorbell_id = q->properties.queue_id;
165 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
166 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		/* For SDMA queues on SOC15 with 8-byte doorbells, use static
		 * doorbell assignments based on the engine and queue id.
		 * The doorbell index distance between RLC (2*i) and (2*i+1)
		 * for an SDMA engine is 512.
		 */
172 uint32_t *idx_offset =
173 dev->shared_resources.sdma_doorbell_idx;
174
175 q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
176 + (q->properties.sdma_queue_id & 1)
177 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
178 + (q->properties.sdma_queue_id >> 1);
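		/* Rough worked example with hypothetical values: if
		 * idx_offset[engine] is 128 and sdma_queue_id is 5, with
		 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET being the 512 mentioned
		 * above, the doorbell id is 128 + 1 * 512 + 2 = 642.
		 */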
179 } else {
180 /* For CP queues on SOC15 reserve a free doorbell ID */
181 unsigned int found;
182
183 found = find_first_zero_bit(qpd->doorbell_bitmap,
184 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
185 if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
186 pr_debug("No doorbells available");
187 return -EBUSY;
188 }
189 set_bit(found, qpd->doorbell_bitmap);
190 q->doorbell_id = found;
191 }
192
193 q->properties.doorbell_off =
194 kfd_get_doorbell_dw_offset_in_bar(dev, q->process,
195 q->doorbell_id);
196
197 return 0;
198}
199
200static void deallocate_doorbell(struct qcm_process_device *qpd,
201 struct queue *q)
202{
203 unsigned int old;
204 struct kfd_dev *dev = qpd->dqm->dev;
205
206 if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
207 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
208 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
209 return;
210
211 old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
212 WARN_ON(!old);
213}
214
215static int allocate_vmid(struct device_queue_manager *dqm,
216 struct qcm_process_device *qpd,
217 struct queue *q)
218{
219 int allocated_vmid = -1, i;
220
221 for (i = dqm->dev->vm_info.first_vmid_kfd;
222 i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
223 if (!dqm->vmid_pasid[i]) {
224 allocated_vmid = i;
225 break;
226 }
227 }
228
229 if (allocated_vmid < 0) {
230 pr_err("no more vmid to allocate\n");
231 return -ENOSPC;
232 }
233
234 pr_debug("vmid allocated: %d\n", allocated_vmid);
235
236 dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
237
238 set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
239
240 qpd->vmid = allocated_vmid;
241 q->properties.vmid = allocated_vmid;
242
243 program_sh_mem_settings(dqm, qpd);
244
245 /* qpd->page_table_base is set earlier when register_process()
246 * is called, i.e. when the first queue is created.
247 */
248 dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
249 qpd->vmid,
250 qpd->page_table_base);
251 /* invalidate the VM context after pasid and vmid mapping is set up */
252 kfd_flush_tlb(qpd_to_pdd(qpd));
253
254 if (dqm->dev->kfd2kgd->set_scratch_backing_va)
255 dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
256 qpd->sh_hidden_private_base, qpd->vmid);
257
258 return 0;
259}
260
261static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
262 struct qcm_process_device *qpd)
263{
264 const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
265 int ret;
266
267 if (!qpd->ib_kaddr)
268 return -ENOMEM;
269
270 ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
271 if (ret)
272 return ret;
273
274 return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
275 qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
276 pmf->release_mem_size / sizeof(uint32_t));
277}
278
279static void deallocate_vmid(struct device_queue_manager *dqm,
280 struct qcm_process_device *qpd,
281 struct queue *q)
282{
283 /* On GFX v7, CP doesn't flush TC at dequeue */
284 if (q->device->device_info->asic_family == CHIP_HAWAII)
285 if (flush_texture_cache_nocpsch(q->device, qpd))
286 pr_err("Failed to flush TC\n");
287
288 kfd_flush_tlb(qpd_to_pdd(qpd));
289
290 /* Release the vmid mapping */
291 set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
292 dqm->vmid_pasid[qpd->vmid] = 0;
293
294 qpd->vmid = 0;
295 q->properties.vmid = 0;
296}
297
298static int create_queue_nocpsch(struct device_queue_manager *dqm,
299 struct queue *q,
300 struct qcm_process_device *qpd)
301{
302 struct mqd_manager *mqd_mgr;
303 int retval;
304
305 dqm_lock(dqm);
306
307 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
308 pr_warn("Can't create new usermode queue because %d queues were already created\n",
309 dqm->total_queue_count);
310 retval = -EPERM;
311 goto out_unlock;
312 }
313
314 if (list_empty(&qpd->queues_list)) {
315 retval = allocate_vmid(dqm, qpd, q);
316 if (retval)
317 goto out_unlock;
318 }
319 q->properties.vmid = qpd->vmid;
320 /*
321 * Eviction state logic: mark all queues as evicted, even ones
322 * not currently active. Restoring inactive queues later only
323 * updates the is_evicted flag but is a no-op otherwise.
324 */
325 q->properties.is_evicted = !!qpd->evicted;
326
327 q->properties.tba_addr = qpd->tba_addr;
328 q->properties.tma_addr = qpd->tma_addr;
329
330 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
331 q->properties.type)];
332 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
333 retval = allocate_hqd(dqm, q);
334 if (retval)
335 goto deallocate_vmid;
336 pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
337 q->pipe, q->queue);
338 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
339 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
340 retval = allocate_sdma_queue(dqm, q);
341 if (retval)
342 goto deallocate_vmid;
343 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
344 }
345
346 retval = allocate_doorbell(qpd, q);
347 if (retval)
348 goto out_deallocate_hqd;
349
350 /* Temporarily release dqm lock to avoid a circular lock dependency */
351 dqm_unlock(dqm);
352 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
353 dqm_lock(dqm);
354
355 if (!q->mqd_mem_obj) {
356 retval = -ENOMEM;
357 goto out_deallocate_doorbell;
358 }
359 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
360 &q->gart_mqd_addr, &q->properties);
361 if (q->properties.is_active) {
362 if (!dqm->sched_running) {
363 WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
364 goto add_queue_to_list;
365 }
366
367 if (WARN(q->process->mm != current->mm,
368 "should only run in user thread"))
369 retval = -EFAULT;
370 else
371 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
372 q->queue, &q->properties, current->mm);
373 if (retval)
374 goto out_free_mqd;
375 }
376
377add_queue_to_list:
378 list_add(&q->list, &qpd->queues_list);
379 qpd->queue_count++;
380 if (q->properties.is_active)
381 increment_queue_count(dqm, q->properties.type);
382
383 /*
384 * Unconditionally increment this counter, regardless of the queue's
385 * type or whether the queue is active.
386 */
387 dqm->total_queue_count++;
388 pr_debug("Total of %d queues are accountable so far\n",
389 dqm->total_queue_count);
390 goto out_unlock;
391
392out_free_mqd:
393 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
394out_deallocate_doorbell:
395 deallocate_doorbell(qpd, q);
396out_deallocate_hqd:
397 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
398 deallocate_hqd(dqm, q);
399 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
400 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
401 deallocate_sdma_queue(dqm, q);
402deallocate_vmid:
403 if (list_empty(&qpd->queues_list))
404 deallocate_vmid(dqm, qpd, q);
405out_unlock:
406 dqm_unlock(dqm);
407 return retval;
408}
409
410static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
411{
412 bool set;
413 int pipe, bit, i;
414
415 set = false;
416
417 for (pipe = dqm->next_pipe_to_allocate, i = 0;
418 i < get_pipes_per_mec(dqm);
419 pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
420
421 if (!is_pipe_enabled(dqm, 0, pipe))
422 continue;
423
424 if (dqm->allocated_queues[pipe] != 0) {
425 bit = ffs(dqm->allocated_queues[pipe]) - 1;
426 dqm->allocated_queues[pipe] &= ~(1 << bit);
427 q->pipe = pipe;
428 q->queue = bit;
429 set = true;
430 break;
431 }
432 }
433
434 if (!set)
435 return -EBUSY;
436
437 pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* "Horizontal" HQD allocation: rotate the starting pipe so queues
	 * are spread round-robin across pipes.
	 */
439 dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
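	/* e.g. with 4 pipes per MEC (hypothetical), allocating on pipe 2
	 * makes the next allocation start its search at pipe 3 and wrap
	 * back to pipe 0 afterwards.
	 */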
440
441 return 0;
442}
443
444static inline void deallocate_hqd(struct device_queue_manager *dqm,
445 struct queue *q)
446{
447 dqm->allocated_queues[q->pipe] |= (1 << q->queue);
448}
449
/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
 */
453static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
454 struct qcm_process_device *qpd,
455 struct queue *q)
456{
457 int retval;
458 struct mqd_manager *mqd_mgr;
459
460 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
461 q->properties.type)];
462
463 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
464 deallocate_hqd(dqm, q);
465 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
466 deallocate_sdma_queue(dqm, q);
467 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
468 deallocate_sdma_queue(dqm, q);
469 else {
470 pr_debug("q->properties.type %d is invalid\n",
471 q->properties.type);
472 return -EINVAL;
473 }
474 dqm->total_queue_count--;
475
476 deallocate_doorbell(qpd, q);
477
478 if (!dqm->sched_running) {
479 WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
480 return 0;
481 }
482
483 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
484 KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
485 KFD_UNMAP_LATENCY_MS,
486 q->pipe, q->queue);
487 if (retval == -ETIME)
488 qpd->reset_wavefronts = true;
489
490 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
491
492 list_del(&q->list);
493 if (list_empty(&qpd->queues_list)) {
494 if (qpd->reset_wavefronts) {
495 pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
496 dqm->dev);
497 /* dbgdev_wave_reset_wavefronts has to be called before
498 * deallocate_vmid(), i.e. when vmid is still in use.
499 */
500 dbgdev_wave_reset_wavefronts(dqm->dev,
501 qpd->pqm->process);
502 qpd->reset_wavefronts = false;
503 }
504
505 deallocate_vmid(dqm, qpd, q);
506 }
507 qpd->queue_count--;
508 if (q->properties.is_active) {
509 decrement_queue_count(dqm, q->properties.type);
510 if (q->properties.is_gws) {
511 dqm->gws_queue_count--;
512 qpd->mapped_gws_queue = false;
513 }
514 }
515
516 return retval;
517}
518
519static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
520 struct qcm_process_device *qpd,
521 struct queue *q)
522{
523 int retval;
524
525 dqm_lock(dqm);
526 retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
527 dqm_unlock(dqm);
528
529 return retval;
530}
531
532static int update_queue(struct device_queue_manager *dqm, struct queue *q)
533{
534 int retval = 0;
535 struct mqd_manager *mqd_mgr;
536 struct kfd_process_device *pdd;
537 bool prev_active = false;
538
539 dqm_lock(dqm);
540 pdd = kfd_get_process_device_data(q->device, q->process);
541 if (!pdd) {
542 retval = -ENODEV;
543 goto out_unlock;
544 }
545 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
546 q->properties.type)];
547
548 /* Save previous activity state for counters */
549 prev_active = q->properties.is_active;
550
551 /* Make sure the queue is unmapped before updating the MQD */
552 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
553 retval = unmap_queues_cpsch(dqm,
554 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
555 if (retval) {
556 pr_err("unmap queue failed\n");
557 goto out_unlock;
558 }
559 } else if (prev_active &&
560 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
561 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
562 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
563
564 if (!dqm->sched_running) {
565 WARN_ONCE(1, "Update non-HWS queue while stopped\n");
566 goto out_unlock;
567 }
568
569 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
570 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
571 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
572 if (retval) {
573 pr_err("destroy mqd failed\n");
574 goto out_unlock;
575 }
576 }
577
578 mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
579
580 /*
581 * check active state vs. the previous state and modify
582 * counter accordingly. map_queues_cpsch uses the
583 * dqm->active_queue_count to determine whether a new runlist must be
584 * uploaded.
585 */
586 if (q->properties.is_active && !prev_active)
587 increment_queue_count(dqm, q->properties.type);
588 else if (!q->properties.is_active && prev_active)
589 decrement_queue_count(dqm, q->properties.type);
590
591 if (q->gws && !q->properties.is_gws) {
592 if (q->properties.is_active) {
593 dqm->gws_queue_count++;
594 pdd->qpd.mapped_gws_queue = true;
595 }
596 q->properties.is_gws = true;
597 } else if (!q->gws && q->properties.is_gws) {
598 if (q->properties.is_active) {
599 dqm->gws_queue_count--;
600 pdd->qpd.mapped_gws_queue = false;
601 }
602 q->properties.is_gws = false;
603 }
604
605 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
606 retval = map_queues_cpsch(dqm);
607 else if (q->properties.is_active &&
608 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
609 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
610 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
611 if (WARN(q->process->mm != current->mm,
612 "should only run in user thread"))
613 retval = -EFAULT;
614 else
615 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
616 q->pipe, q->queue,
617 &q->properties, current->mm);
618 }
619
620out_unlock:
621 dqm_unlock(dqm);
622 return retval;
623}
624
625static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
626 struct qcm_process_device *qpd)
627{
628 struct queue *q;
629 struct mqd_manager *mqd_mgr;
630 struct kfd_process_device *pdd;
631 int retval, ret = 0;
632
633 dqm_lock(dqm);
634 if (qpd->evicted++ > 0) /* already evicted, do nothing */
635 goto out;
636
637 pdd = qpd_to_pdd(qpd);
638 pr_info_ratelimited("Evicting PASID 0x%x queues\n",
639 pdd->process->pasid);
640
641 /* Mark all queues as evicted. Deactivate all active queues on
642 * the qpd.
643 */
644 list_for_each_entry(q, &qpd->queues_list, list) {
645 q->properties.is_evicted = true;
646 if (!q->properties.is_active)
647 continue;
648
649 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
650 q->properties.type)];
651 q->properties.is_active = false;
652 decrement_queue_count(dqm, q->properties.type);
653 if (q->properties.is_gws) {
654 dqm->gws_queue_count--;
655 qpd->mapped_gws_queue = false;
656 }
657
658 if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
659 continue;
660
661 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
662 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
663 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
664 if (retval && !ret)
665 /* Return the first error, but keep going to
666 * maintain a consistent eviction state
667 */
668 ret = retval;
669 }
670
671out:
672 dqm_unlock(dqm);
673 return ret;
674}
675
676static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
677 struct qcm_process_device *qpd)
678{
679 struct queue *q;
680 struct kfd_process_device *pdd;
681 int retval = 0;
682
683 dqm_lock(dqm);
684 if (qpd->evicted++ > 0) /* already evicted, do nothing */
685 goto out;
686
687 pdd = qpd_to_pdd(qpd);
688 pr_info_ratelimited("Evicting PASID 0x%x queues\n",
689 pdd->process->pasid);
690
691 /* Mark all queues as evicted. Deactivate all active queues on
692 * the qpd.
693 */
694 list_for_each_entry(q, &qpd->queues_list, list) {
695 q->properties.is_evicted = true;
696 if (!q->properties.is_active)
697 continue;
698
699 q->properties.is_active = false;
700 decrement_queue_count(dqm, q->properties.type);
701 }
702 retval = execute_queues_cpsch(dqm,
703 qpd->is_debug ?
704 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
705 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
706
707out:
708 dqm_unlock(dqm);
709 return retval;
710}
711
712static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
713 struct qcm_process_device *qpd)
714{
715 struct mm_struct *mm = NULL;
716 struct queue *q;
717 struct mqd_manager *mqd_mgr;
718 struct kfd_process_device *pdd;
719 uint64_t pd_base;
720 int retval, ret = 0;
721
722 pdd = qpd_to_pdd(qpd);
723 /* Retrieve PD base */
724 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
725
726 dqm_lock(dqm);
727 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
728 goto out;
729 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
730 qpd->evicted--;
731 goto out;
732 }
733
734 pr_info_ratelimited("Restoring PASID 0x%x queues\n",
735 pdd->process->pasid);
736
737 /* Update PD Base in QPD */
738 qpd->page_table_base = pd_base;
739 pr_debug("Updated PD address to 0x%llx\n", pd_base);
740
741 if (!list_empty(&qpd->queues_list)) {
742 dqm->dev->kfd2kgd->set_vm_context_page_table_base(
743 dqm->dev->kgd,
744 qpd->vmid,
745 qpd->page_table_base);
746 kfd_flush_tlb(pdd);
747 }
748
749 /* Take a safe reference to the mm_struct, which may otherwise
750 * disappear even while the kfd_process is still referenced.
751 */
752 mm = get_task_mm(pdd->process->lead_thread);
753 if (!mm) {
754 ret = -EFAULT;
755 goto out;
756 }
757
758 /* Remove the eviction flags. Activate queues that are not
759 * inactive for other reasons.
760 */
761 list_for_each_entry(q, &qpd->queues_list, list) {
762 q->properties.is_evicted = false;
763 if (!QUEUE_IS_ACTIVE(q->properties))
764 continue;
765
766 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
767 q->properties.type)];
768 q->properties.is_active = true;
769 increment_queue_count(dqm, q->properties.type);
770 if (q->properties.is_gws) {
771 dqm->gws_queue_count++;
772 qpd->mapped_gws_queue = true;
773 }
774
775 if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
776 continue;
777
778 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
779 q->queue, &q->properties, mm);
780 if (retval && !ret)
781 /* Return the first error, but keep going to
782 * maintain a consistent eviction state
783 */
784 ret = retval;
785 }
786 qpd->evicted = 0;
787out:
788 if (mm)
789 mmput(mm);
790 dqm_unlock(dqm);
791 return ret;
792}
793
794static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
795 struct qcm_process_device *qpd)
796{
797 struct queue *q;
798 struct kfd_process_device *pdd;
799 uint64_t pd_base;
800 int retval = 0;
801
802 pdd = qpd_to_pdd(qpd);
803 /* Retrieve PD base */
804 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
805
806 dqm_lock(dqm);
807 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
808 goto out;
809 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
810 qpd->evicted--;
811 goto out;
812 }
813
814 pr_info_ratelimited("Restoring PASID 0x%x queues\n",
815 pdd->process->pasid);
816
817 /* Update PD Base in QPD */
818 qpd->page_table_base = pd_base;
819 pr_debug("Updated PD address to 0x%llx\n", pd_base);
820
821 /* activate all active queues on the qpd */
822 list_for_each_entry(q, &qpd->queues_list, list) {
823 q->properties.is_evicted = false;
824 if (!QUEUE_IS_ACTIVE(q->properties))
825 continue;
826
827 q->properties.is_active = true;
828 increment_queue_count(dqm, q->properties.type);
829 }
830 retval = execute_queues_cpsch(dqm,
831 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
832 qpd->evicted = 0;
833out:
834 dqm_unlock(dqm);
835 return retval;
836}
837
838static int register_process(struct device_queue_manager *dqm,
839 struct qcm_process_device *qpd)
840{
841 struct device_process_node *n;
842 struct kfd_process_device *pdd;
843 uint64_t pd_base;
844 int retval;
845
846 n = kzalloc(sizeof(*n), GFP_KERNEL);
847 if (!n)
848 return -ENOMEM;
849
850 n->qpd = qpd;
851
852 pdd = qpd_to_pdd(qpd);
853 /* Retrieve PD base */
854 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
855
856 dqm_lock(dqm);
857 list_add(&n->list, &dqm->queues);
858
859 /* Update PD Base in QPD */
860 qpd->page_table_base = pd_base;
861 pr_debug("Updated PD address to 0x%llx\n", pd_base);
862
863 retval = dqm->asic_ops.update_qpd(dqm, qpd);
864
865 dqm->processes_count++;
866
867 dqm_unlock(dqm);
868
869 /* Outside the DQM lock because under the DQM lock we can't do
870 * reclaim or take other locks that others hold while reclaiming.
871 */
872 kfd_inc_compute_active(dqm->dev);
873
874 return retval;
875}
876
877static int unregister_process(struct device_queue_manager *dqm,
878 struct qcm_process_device *qpd)
879{
880 int retval;
881 struct device_process_node *cur, *next;
882
883 pr_debug("qpd->queues_list is %s\n",
884 list_empty(&qpd->queues_list) ? "empty" : "not empty");
885
886 retval = 0;
887 dqm_lock(dqm);
888
889 list_for_each_entry_safe(cur, next, &dqm->queues, list) {
890 if (qpd == cur->qpd) {
891 list_del(&cur->list);
892 kfree(cur);
893 dqm->processes_count--;
894 goto out;
895 }
896 }
897 /* qpd not found in dqm list */
898 retval = 1;
899out:
900 dqm_unlock(dqm);
901
902 /* Outside the DQM lock because under the DQM lock we can't do
903 * reclaim or take other locks that others hold while reclaiming.
904 */
905 if (!retval)
906 kfd_dec_compute_active(dqm->dev);
907
908 return retval;
909}
910
911static int
912set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
913 unsigned int vmid)
914{
915 return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
916 dqm->dev->kgd, pasid, vmid);
917}
918
919static void init_interrupts(struct device_queue_manager *dqm)
920{
921 unsigned int i;
922
923 for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
924 if (is_pipe_enabled(dqm, 0, i))
925 dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
926}
927
928static int initialize_nocpsch(struct device_queue_manager *dqm)
929{
930 int pipe, queue;
931
932 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
933
934 dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
935 sizeof(unsigned int), GFP_KERNEL);
936 if (!dqm->allocated_queues)
937 return -ENOMEM;
938
939 mutex_init(&dqm->lock_hidden);
940 INIT_LIST_HEAD(&dqm->queues);
941 dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
942 dqm->active_cp_queue_count = 0;
943 dqm->gws_queue_count = 0;
944
945 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
946 int pipe_offset = pipe * get_queues_per_pipe(dqm);
947
948 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
949 if (test_bit(pipe_offset + queue,
950 dqm->dev->shared_resources.cp_queue_bitmap))
951 dqm->allocated_queues[pipe] |= 1 << queue;
952 }
953
954 memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
955
956 dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
957 dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
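	/* Sketch of the bitmap math, assuming 2 SDMA engines with 2 queues
	 * each (hypothetical): get_num_sdma_queues() returns 4, so
	 * ~0ULL >> (64 - 4) = 0xF marks queues 0-3 as free. The expression
	 * assumes the queue count is between 1 and 64.
	 */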
958
959 return 0;
960}
961
962static void uninitialize(struct device_queue_manager *dqm)
963{
964 int i;
965
966 WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
967
968 kfree(dqm->allocated_queues);
969 for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
970 kfree(dqm->mqd_mgrs[i]);
971 mutex_destroy(&dqm->lock_hidden);
972}
973
974static int start_nocpsch(struct device_queue_manager *dqm)
975{
976 pr_info("SW scheduler is used");
977 init_interrupts(dqm);
978
979 if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
980 return pm_init(&dqm->packets, dqm);
981 dqm->sched_running = true;
982
983 return 0;
984}
985
986static int stop_nocpsch(struct device_queue_manager *dqm)
987{
988 if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
989 pm_uninit(&dqm->packets, false);
990 dqm->sched_running = false;
991
992 return 0;
993}
994
995static void pre_reset(struct device_queue_manager *dqm)
996{
997 dqm_lock(dqm);
998 dqm->is_resetting = true;
999 dqm_unlock(dqm);
1000}
1001
1002static int allocate_sdma_queue(struct device_queue_manager *dqm,
1003 struct queue *q)
1004{
1005 int bit;
1006
1007 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1008 if (dqm->sdma_bitmap == 0) {
1009 pr_err("No more SDMA queue to allocate\n");
1010 return -ENOMEM;
1011 }
1012
1013 bit = __ffs64(dqm->sdma_bitmap);
1014 dqm->sdma_bitmap &= ~(1ULL << bit);
1015 q->sdma_id = bit;
1016 q->properties.sdma_engine_id = q->sdma_id %
1017 get_num_sdma_engines(dqm);
1018 q->properties.sdma_queue_id = q->sdma_id /
1019 get_num_sdma_engines(dqm);
1020 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1021 if (dqm->xgmi_sdma_bitmap == 0) {
1022 pr_err("No more XGMI SDMA queue to allocate\n");
1023 return -ENOMEM;
1024 }
1025 bit = __ffs64(dqm->xgmi_sdma_bitmap);
1026 dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
1027 q->sdma_id = bit;
		/* sdma_engine_id is a global SDMA engine id covering both
		 * PCIe-optimized and XGMI-optimized SDMAs. The calculation
		 * below assumes the first N engines are always the
		 * PCIe-optimized ones.
		 */
1034 q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
1035 q->sdma_id % get_num_xgmi_sdma_engines(dqm);
1036 q->properties.sdma_queue_id = q->sdma_id /
1037 get_num_xgmi_sdma_engines(dqm);
1038 }
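	/* Sketch of the id mapping, assuming 2 PCIe-optimized engines
	 * (hypothetical): sdma_id 5 maps to engine 5 % 2 = 1 and
	 * queue 5 / 2 = 2. XGMI queues use the same scheme, but their
	 * engine ids start after the PCIe-optimized ones.
	 */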
1039
1040 pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
1041 pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
1042
1043 return 0;
1044}
1045
1046static void deallocate_sdma_queue(struct device_queue_manager *dqm,
1047 struct queue *q)
1048{
1049 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1050 if (q->sdma_id >= get_num_sdma_queues(dqm))
1051 return;
1052 dqm->sdma_bitmap |= (1ULL << q->sdma_id);
1053 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1054 if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
1055 return;
1056 dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
1057 }
1058}
1059
1060/*
1061 * Device Queue Manager implementation for cp scheduler
1062 */
1063
1064static int set_sched_resources(struct device_queue_manager *dqm)
1065{
1066 int i, mec;
1067 struct scheduling_resources res;
1068
1069 res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
1070
1071 res.queue_mask = 0;
1072 for (i = 0; i < KGD_MAX_QUEUES; ++i) {
1073 mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
1074 / dqm->dev->shared_resources.num_pipe_per_mec;
1075
1076 if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
1077 continue;
1078
1079 /* only acquire queues from the first MEC */
1080 if (mec > 0)
1081 continue;
1082
1083 /* This situation may be hit in the future if a new HW
1084 * generation exposes more than 64 queues. If so, the
1085 * definition of res.queue_mask needs updating
1086 */
1087 if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
1088 pr_err("Invalid queue enabled by amdgpu: %d\n", i);
1089 break;
1090 }
1091
1092 res.queue_mask |= 1ull
1093 << amdgpu_queue_mask_bit_to_set_resource_bit(
1094 (struct amdgpu_device *)dqm->dev->kgd, i);
1095 }
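	/* For example, with 8 queues per pipe and 4 pipes per MEC
	 * (hypothetical), only global queue indices 0-31 fall on MEC 0 and
	 * are eligible here. The helper presumably remaps the driver's
	 * bitmap index to the bit layout the SET_RESOURCES packet expects.
	 */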
1096 res.gws_mask = ~0ull;
1097 res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
1098
1099 pr_debug("Scheduling resources:\n"
1100 "vmid mask: 0x%8X\n"
1101 "queue mask: 0x%8llX\n",
1102 res.vmid_mask, res.queue_mask);
1103
1104 return pm_send_set_resources(&dqm->packets, &res);
1105}
1106
1107static int initialize_cpsch(struct device_queue_manager *dqm)
1108{
1109 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
1110
1111 mutex_init(&dqm->lock_hidden);
1112 INIT_LIST_HEAD(&dqm->queues);
1113 dqm->active_queue_count = dqm->processes_count = 0;
1114 dqm->active_cp_queue_count = 0;
1115 dqm->gws_queue_count = 0;
1116 dqm->active_runlist = false;
1117 dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
1118 dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
1119
1120 INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
1121
1122 return 0;
1123}
1124
1125static int start_cpsch(struct device_queue_manager *dqm)
1126{
1127 int retval;
1128
1129 retval = 0;
1130
1131 retval = pm_init(&dqm->packets, dqm);
1132 if (retval)
1133 goto fail_packet_manager_init;
1134
1135 retval = set_sched_resources(dqm);
1136 if (retval)
1137 goto fail_set_sched_resources;
1138
1139 pr_debug("Allocating fence memory\n");
1140
1141 /* allocate fence memory on the gart */
1142 retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
1143 &dqm->fence_mem);
1144
1145 if (retval)
1146 goto fail_allocate_vidmem;
1147
1148 dqm->fence_addr = dqm->fence_mem->cpu_ptr;
1149 dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
1150
1151 init_interrupts(dqm);
1152
1153 dqm_lock(dqm);
	/* Clear the hang status when the driver tries to start the HW scheduler */
1155 dqm->is_hws_hang = false;
1156 dqm->is_resetting = false;
1157 dqm->sched_running = true;
1158 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1159 dqm_unlock(dqm);
1160
1161 return 0;
1162fail_allocate_vidmem:
1163fail_set_sched_resources:
1164 pm_uninit(&dqm->packets, false);
1165fail_packet_manager_init:
1166 return retval;
1167}
1168
1169static int stop_cpsch(struct device_queue_manager *dqm)
1170{
1171 bool hanging;
1172
1173 dqm_lock(dqm);
1174 if (!dqm->is_hws_hang)
1175 unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
1176 hanging = dqm->is_hws_hang || dqm->is_resetting;
1177 dqm->sched_running = false;
1178 dqm_unlock(dqm);
1179
1180 kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
1181 pm_uninit(&dqm->packets, hanging);
1182
1183 return 0;
1184}
1185
1186static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
1187 struct kernel_queue *kq,
1188 struct qcm_process_device *qpd)
1189{
1190 dqm_lock(dqm);
1191 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1192 pr_warn("Can't create new kernel queue because %d queues were already created\n",
1193 dqm->total_queue_count);
1194 dqm_unlock(dqm);
1195 return -EPERM;
1196 }
1197
1198 /*
1199 * Unconditionally increment this counter, regardless of the queue's
1200 * type or whether the queue is active.
1201 */
1202 dqm->total_queue_count++;
1203 pr_debug("Total of %d queues are accountable so far\n",
1204 dqm->total_queue_count);
1205
1206 list_add(&kq->list, &qpd->priv_queue_list);
1207 increment_queue_count(dqm, kq->queue->properties.type);
1208 qpd->is_debug = true;
1209 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1210 dqm_unlock(dqm);
1211
1212 return 0;
1213}
1214
1215static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
1216 struct kernel_queue *kq,
1217 struct qcm_process_device *qpd)
1218{
1219 dqm_lock(dqm);
1220 list_del(&kq->list);
1221 decrement_queue_count(dqm, kq->queue->properties.type);
1222 qpd->is_debug = false;
1223 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
1224 /*
1225 * Unconditionally decrement this counter, regardless of the queue's
1226 * type.
1227 */
1228 dqm->total_queue_count--;
1229 pr_debug("Total of %d queues are accountable so far\n",
1230 dqm->total_queue_count);
1231 dqm_unlock(dqm);
1232}
1233
1234static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
1235 struct qcm_process_device *qpd)
1236{
1237 int retval;
1238 struct mqd_manager *mqd_mgr;
1239
1240 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1241 pr_warn("Can't create new usermode queue because %d queues were already created\n",
1242 dqm->total_queue_count);
1243 retval = -EPERM;
1244 goto out;
1245 }
1246
1247 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1248 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1249 dqm_lock(dqm);
1250 retval = allocate_sdma_queue(dqm, q);
1251 dqm_unlock(dqm);
1252 if (retval)
1253 goto out;
1254 }
1255
1256 retval = allocate_doorbell(qpd, q);
1257 if (retval)
1258 goto out_deallocate_sdma_queue;
1259
1260 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1261 q->properties.type)];
1262
1263 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1264 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1265 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
1266 q->properties.tba_addr = qpd->tba_addr;
1267 q->properties.tma_addr = qpd->tma_addr;
1268 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
1269 if (!q->mqd_mem_obj) {
1270 retval = -ENOMEM;
1271 goto out_deallocate_doorbell;
1272 }
1273
1274 dqm_lock(dqm);
1275 /*
1276 * Eviction state logic: mark all queues as evicted, even ones
1277 * not currently active. Restoring inactive queues later only
1278 * updates the is_evicted flag but is a no-op otherwise.
1279 */
1280 q->properties.is_evicted = !!qpd->evicted;
1281 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
1282 &q->gart_mqd_addr, &q->properties);
1283
1284 list_add(&q->list, &qpd->queues_list);
1285 qpd->queue_count++;
1286
1287 if (q->properties.is_active) {
1288 increment_queue_count(dqm, q->properties.type);
1289
1290 retval = execute_queues_cpsch(dqm,
1291 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1292 }
1293
1294 /*
1295 * Unconditionally increment this counter, regardless of the queue's
1296 * type or whether the queue is active.
1297 */
1298 dqm->total_queue_count++;
1299
1300 pr_debug("Total of %d queues are accountable so far\n",
1301 dqm->total_queue_count);
1302
1303 dqm_unlock(dqm);
1304 return retval;
1305
1306out_deallocate_doorbell:
1307 deallocate_doorbell(qpd, q);
1308out_deallocate_sdma_queue:
1309 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1310 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1311 dqm_lock(dqm);
1312 deallocate_sdma_queue(dqm, q);
1313 dqm_unlock(dqm);
1314 }
1315out:
1316 return retval;
1317}
1318
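/* Typical usage, as in unmap_queues_cpsch() below: initialize the fence to
 * KFD_FENCE_INIT, ask the CP to write KFD_FENCE_COMPLETED once preemption has
 * finished, then poll here with queue_preemption_timeout_ms as the limit:
 *
 *	*dqm->fence_addr = KFD_FENCE_INIT;
 *	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
 *				KFD_FENCE_COMPLETED);
 *	retval = amdkfd_fence_wait_timeout(dqm->fence_addr,
 *				KFD_FENCE_COMPLETED,
 *				queue_preemption_timeout_ms);
 */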
1319int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
1320 unsigned int fence_value,
1321 unsigned int timeout_ms)
1322{
1323 unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
1324
1325 while (*fence_addr != fence_value) {
1326 if (time_after(jiffies, end_jiffies)) {
1327 pr_err("qcm fence wait loop timeout expired\n");
1328 /* In HWS case, this is used to halt the driver thread
1329 * in order not to mess up CP states before doing
1330 * scandumps for FW debugging.
1331 */
1332 while (halt_if_hws_hang)
1333 schedule();
1334
1335 return -ETIME;
1336 }
1337 schedule();
1338 }
1339
1340 return 0;
1341}
1342
1343/* dqm->lock mutex has to be locked before calling this function */
1344static int map_queues_cpsch(struct device_queue_manager *dqm)
1345{
1346 int retval;
1347
1348 if (!dqm->sched_running)
1349 return 0;
1350 if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
1351 return 0;
1352 if (dqm->active_runlist)
1353 return 0;
1354
1355 retval = pm_send_runlist(&dqm->packets, &dqm->queues);
1356 pr_debug("%s sent runlist\n", __func__);
1357 if (retval) {
1358 pr_err("failed to execute runlist\n");
1359 return retval;
1360 }
1361 dqm->active_runlist = true;
1362
1363 return retval;
1364}
1365
1366/* dqm->lock mutex has to be locked before calling this function */
1367static int unmap_queues_cpsch(struct device_queue_manager *dqm,
1368 enum kfd_unmap_queues_filter filter,
1369 uint32_t filter_param)
1370{
1371 int retval = 0;
1372
1373 if (!dqm->sched_running)
1374 return 0;
1375 if (dqm->is_hws_hang)
1376 return -EIO;
1377 if (!dqm->active_runlist)
1378 return retval;
1379
1380 retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
1381 filter, filter_param, false, 0);
1382 if (retval)
1383 return retval;
1384
1385 *dqm->fence_addr = KFD_FENCE_INIT;
1386 pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
1387 KFD_FENCE_COMPLETED);
	/* The CP is expected to write the fence before the timeout expires */
1389 retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
1390 queue_preemption_timeout_ms);
1391 if (retval) {
1392 pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
1393 dqm->is_hws_hang = true;
1394 /* It's possible we're detecting a HWS hang in the
1395 * middle of a GPU reset. No need to schedule another
1396 * reset in this case.
1397 */
1398 if (!dqm->is_resetting)
1399 schedule_work(&dqm->hw_exception_work);
1400 return retval;
1401 }
1402
1403 pm_release_ib(&dqm->packets);
1404 dqm->active_runlist = false;
1405
1406 return retval;
1407}
1408
1409/* dqm->lock mutex has to be locked before calling this function */
1410static int execute_queues_cpsch(struct device_queue_manager *dqm,
1411 enum kfd_unmap_queues_filter filter,
1412 uint32_t filter_param)
1413{
1414 int retval;
1415
1416 if (dqm->is_hws_hang)
1417 return -EIO;
1418 retval = unmap_queues_cpsch(dqm, filter, filter_param);
1419 if (retval)
1420 return retval;
1421
1422 return map_queues_cpsch(dqm);
1423}
1424
1425static int destroy_queue_cpsch(struct device_queue_manager *dqm,
1426 struct qcm_process_device *qpd,
1427 struct queue *q)
1428{
1429 int retval;
1430 struct mqd_manager *mqd_mgr;
1431
1432 retval = 0;
1433
1434 /* remove queue from list to prevent rescheduling after preemption */
1435 dqm_lock(dqm);
1436
1437 if (qpd->is_debug) {
		/*
		 * Error: destroying a queue that belongs to a process
		 * currently being debugged is not allowed.
		 */
1442 retval = -EBUSY;
1443 goto failed_try_destroy_debugged_queue;
1445 }
1446
1447 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1448 q->properties.type)];
1449
1450 deallocate_doorbell(qpd, q);
1451
1452 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
1453 deallocate_sdma_queue(dqm, q);
1454 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1455 deallocate_sdma_queue(dqm, q);
1456
1457 list_del(&q->list);
1458 qpd->queue_count--;
1459 if (q->properties.is_active) {
1460 decrement_queue_count(dqm, q->properties.type);
1461 retval = execute_queues_cpsch(dqm,
1462 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1463 if (retval == -ETIME)
1464 qpd->reset_wavefronts = true;
1465 if (q->properties.is_gws) {
1466 dqm->gws_queue_count--;
1467 qpd->mapped_gws_queue = false;
1468 }
1469 }
1470
1471 /*
1472 * Unconditionally decrement this counter, regardless of the queue's
1473 * type
1474 */
1475 dqm->total_queue_count--;
1476 pr_debug("Total of %d queues are accountable so far\n",
1477 dqm->total_queue_count);
1478
1479 dqm_unlock(dqm);
1480
1481 /* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
1482 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1483
1484 return retval;
1485
1486failed_try_destroy_debugged_queue:
1487
1488 dqm_unlock(dqm);
1489 return retval;
1490}
1491
1492/*
1493 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
1494 * stay in user mode.
1495 */
1496#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
1497/* APE1 limit is inclusive and 64K aligned. */
1498#define APE1_LIMIT_ALIGNMENT 0xFFFF
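/* Example of a valid aperture (hypothetical values): base = 0x10000 and
 * size = 0x20000 give limit = 0x2FFFF; base & APE1_FIXED_BITS_MASK is 0 and
 * limit & APE1_FIXED_BITS_MASK is the required 0xFFFF, so sh_mem_ape1_base
 * becomes 0x1 and sh_mem_ape1_limit becomes 0x2.
 */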
1499
1500static bool set_cache_memory_policy(struct device_queue_manager *dqm,
1501 struct qcm_process_device *qpd,
1502 enum cache_policy default_policy,
1503 enum cache_policy alternate_policy,
1504 void __user *alternate_aperture_base,
1505 uint64_t alternate_aperture_size)
1506{
1507 bool retval = true;
1508
1509 if (!dqm->asic_ops.set_cache_memory_policy)
1510 return retval;
1511
1512 dqm_lock(dqm);
1513
1514 if (alternate_aperture_size == 0) {
1515 /* base > limit disables APE1 */
1516 qpd->sh_mem_ape1_base = 1;
1517 qpd->sh_mem_ape1_limit = 0;
1518 } else {
1519 /*
1520 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
1521 * SH_MEM_APE1_BASE[31:0], 0x0000 }
1522 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
1523 * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
1524 * Verify that the base and size parameters can be
1525 * represented in this format and convert them.
1526 * Additionally restrict APE1 to user-mode addresses.
1527 */
1528
1529 uint64_t base = (uintptr_t)alternate_aperture_base;
1530 uint64_t limit = base + alternate_aperture_size - 1;
1531
1532 if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
1533 (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
1534 retval = false;
1535 goto out;
1536 }
1537
1538 qpd->sh_mem_ape1_base = base >> 16;
1539 qpd->sh_mem_ape1_limit = limit >> 16;
1540 }
1541
1542 retval = dqm->asic_ops.set_cache_memory_policy(
1543 dqm,
1544 qpd,
1545 default_policy,
1546 alternate_policy,
1547 alternate_aperture_base,
1548 alternate_aperture_size);
1549
1550 if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
1551 program_sh_mem_settings(dqm, qpd);
1552
1553 pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
1554 qpd->sh_mem_config, qpd->sh_mem_ape1_base,
1555 qpd->sh_mem_ape1_limit);
1556
1557out:
1558 dqm_unlock(dqm);
1559 return retval;
1560}
1561
1562static int set_trap_handler(struct device_queue_manager *dqm,
1563 struct qcm_process_device *qpd,
1564 uint64_t tba_addr,
1565 uint64_t tma_addr)
1566{
1567 uint64_t *tma;
1568
1569 if (dqm->dev->cwsr_enabled) {
1570 /* Jump from CWSR trap handler to user trap */
1571 tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1572 tma[0] = tba_addr;
1573 tma[1] = tma_addr;
1574 } else {
1575 qpd->tba_addr = tba_addr;
1576 qpd->tma_addr = tma_addr;
1577 }
1578
1579 return 0;
1580}
1581
1582static int process_termination_nocpsch(struct device_queue_manager *dqm,
1583 struct qcm_process_device *qpd)
1584{
1585 struct queue *q, *next;
1586 struct device_process_node *cur, *next_dpn;
1587 int retval = 0;
1588 bool found = false;
1589
1590 dqm_lock(dqm);
1591
1592 /* Clear all user mode queues */
1593 list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
1594 int ret;
1595
1596 ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
1597 if (ret)
1598 retval = ret;
1599 }
1600
1601 /* Unregister process */
1602 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1603 if (qpd == cur->qpd) {
1604 list_del(&cur->list);
1605 kfree(cur);
1606 dqm->processes_count--;
1607 found = true;
1608 break;
1609 }
1610 }
1611
1612 dqm_unlock(dqm);
1613
1614 /* Outside the DQM lock because under the DQM lock we can't do
1615 * reclaim or take other locks that others hold while reclaiming.
1616 */
1617 if (found)
1618 kfd_dec_compute_active(dqm->dev);
1619
1620 return retval;
1621}
1622
1623static int get_wave_state(struct device_queue_manager *dqm,
1624 struct queue *q,
1625 void __user *ctl_stack,
1626 u32 *ctl_stack_used_size,
1627 u32 *save_area_used_size)
1628{
1629 struct mqd_manager *mqd_mgr;
1630 int r;
1631
1632 dqm_lock(dqm);
1633
1634 if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
1635 q->properties.is_active || !q->device->cwsr_enabled) {
1636 r = -EINVAL;
1637 goto dqm_unlock;
1638 }
1639
1640 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
1641
1642 if (!mqd_mgr->get_wave_state) {
1643 r = -EINVAL;
1644 goto dqm_unlock;
1645 }
1646
1647 r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
1648 ctl_stack_used_size, save_area_used_size);
1649
1650dqm_unlock:
1651 dqm_unlock(dqm);
1652 return r;
1653}
1654
1655static int process_termination_cpsch(struct device_queue_manager *dqm,
1656 struct qcm_process_device *qpd)
1657{
1658 int retval;
1659 struct queue *q, *next;
1660 struct kernel_queue *kq, *kq_next;
1661 struct mqd_manager *mqd_mgr;
1662 struct device_process_node *cur, *next_dpn;
1663 enum kfd_unmap_queues_filter filter =
1664 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
1665 bool found = false;
1666
1667 retval = 0;
1668
1669 dqm_lock(dqm);
1670
1671 /* Clean all kernel queues */
1672 list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
1673 list_del(&kq->list);
1674 decrement_queue_count(dqm, kq->queue->properties.type);
1675 qpd->is_debug = false;
1676 dqm->total_queue_count--;
1677 filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
1678 }
1679
1680 /* Clear all user mode queues */
1681 list_for_each_entry(q, &qpd->queues_list, list) {
1682 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
1683 deallocate_sdma_queue(dqm, q);
1684 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1685 deallocate_sdma_queue(dqm, q);
1686
1687 if (q->properties.is_active) {
1688 decrement_queue_count(dqm, q->properties.type);
1689 if (q->properties.is_gws) {
1690 dqm->gws_queue_count--;
1691 qpd->mapped_gws_queue = false;
1692 }
1693 }
1694
1695 dqm->total_queue_count--;
1696 }
1697
1698 /* Unregister process */
1699 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1700 if (qpd == cur->qpd) {
1701 list_del(&cur->list);
1702 kfree(cur);
1703 dqm->processes_count--;
1704 found = true;
1705 break;
1706 }
1707 }
1708
1709 retval = execute_queues_cpsch(dqm, filter, 0);
1710 if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
1711 pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
1712 dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
1713 qpd->reset_wavefronts = false;
1714 }
1715
1716 dqm_unlock(dqm);
1717
1718 /* Outside the DQM lock because under the DQM lock we can't do
1719 * reclaim or take other locks that others hold while reclaiming.
1720 */
1721 if (found)
1722 kfd_dec_compute_active(dqm->dev);
1723
1724 /* Lastly, free mqd resources.
1725 * Do free_mqd() after dqm_unlock to avoid circular locking.
1726 */
1727 list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
1728 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1729 q->properties.type)];
1730 list_del(&q->list);
1731 qpd->queue_count--;
1732 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1733 }
1734
1735 return retval;
1736}
1737
1738static int init_mqd_managers(struct device_queue_manager *dqm)
1739{
1740 int i, j;
1741 struct mqd_manager *mqd_mgr;
1742
1743 for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
1744 mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
1745 if (!mqd_mgr) {
1746 pr_err("mqd manager [%d] initialization failed\n", i);
1747 goto out_free;
1748 }
1749 dqm->mqd_mgrs[i] = mqd_mgr;
1750 }
1751
1752 return 0;
1753
1754out_free:
1755 for (j = 0; j < i; j++) {
1756 kfree(dqm->mqd_mgrs[j]);
1757 dqm->mqd_mgrs[j] = NULL;
1758 }
1759
1760 return -ENOMEM;
1761}
1762
/* Allocate one HIQ MQD (HWS) and all SDMA MQDs in one contiguous chunk */
1764static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
1765{
1766 int retval;
1767 struct kfd_dev *dev = dqm->dev;
1768 struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
1769 uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
1770 get_num_all_sdma_engines(dqm) *
1771 dev->device_info->num_sdma_queues_per_engine +
1772 dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
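	/* Rough size sketch with hypothetical numbers: 8 SDMA engines in
	 * total, 8 queues per engine and 4 KiB per MQD give
	 * 4 KiB * 8 * 8 + 4 KiB = 260 KiB for the whole chunk.
	 */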
1773
1774 retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
1775 &(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
1776 (void *)&(mem_obj->cpu_ptr), false);
1777
1778 return retval;
1779}
1780
1781struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1782{
1783 struct device_queue_manager *dqm;
1784
1785 pr_debug("Loading device queue manager\n");
1786
1787 dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
1788 if (!dqm)
1789 return NULL;
1790
1791 switch (dev->device_info->asic_family) {
1792 /* HWS is not available on Hawaii. */
1793 case CHIP_HAWAII:
1794 /* HWS depends on CWSR for timely dequeue. CWSR is not
1795 * available on Tonga.
1796 *
1797 * FIXME: This argument also applies to Kaveri.
1798 */
1799 case CHIP_TONGA:
1800 dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
1801 break;
1802 default:
1803 dqm->sched_policy = sched_policy;
1804 break;
1805 }
1806
1807 dqm->dev = dev;
1808 switch (dqm->sched_policy) {
1809 case KFD_SCHED_POLICY_HWS:
1810 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
1811 /* initialize dqm for cp scheduling */
1812 dqm->ops.create_queue = create_queue_cpsch;
1813 dqm->ops.initialize = initialize_cpsch;
1814 dqm->ops.start = start_cpsch;
1815 dqm->ops.stop = stop_cpsch;
1816 dqm->ops.pre_reset = pre_reset;
1817 dqm->ops.destroy_queue = destroy_queue_cpsch;
1818 dqm->ops.update_queue = update_queue;
1819 dqm->ops.register_process = register_process;
1820 dqm->ops.unregister_process = unregister_process;
1821 dqm->ops.uninitialize = uninitialize;
1822 dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
1823 dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
1824 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1825 dqm->ops.set_trap_handler = set_trap_handler;
1826 dqm->ops.process_termination = process_termination_cpsch;
1827 dqm->ops.evict_process_queues = evict_process_queues_cpsch;
1828 dqm->ops.restore_process_queues = restore_process_queues_cpsch;
1829 dqm->ops.get_wave_state = get_wave_state;
1830 break;
1831 case KFD_SCHED_POLICY_NO_HWS:
1832 /* initialize dqm for no cp scheduling */
1833 dqm->ops.start = start_nocpsch;
1834 dqm->ops.stop = stop_nocpsch;
1835 dqm->ops.pre_reset = pre_reset;
1836 dqm->ops.create_queue = create_queue_nocpsch;
1837 dqm->ops.destroy_queue = destroy_queue_nocpsch;
1838 dqm->ops.update_queue = update_queue;
1839 dqm->ops.register_process = register_process;
1840 dqm->ops.unregister_process = unregister_process;
1841 dqm->ops.initialize = initialize_nocpsch;
1842 dqm->ops.uninitialize = uninitialize;
1843 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1844 dqm->ops.set_trap_handler = set_trap_handler;
1845 dqm->ops.process_termination = process_termination_nocpsch;
1846 dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
1847 dqm->ops.restore_process_queues =
1848 restore_process_queues_nocpsch;
1849 dqm->ops.get_wave_state = get_wave_state;
1850 break;
1851 default:
1852 pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
1853 goto out_free;
1854 }
1855
1856 switch (dev->device_info->asic_family) {
1857 case CHIP_CARRIZO:
1858 device_queue_manager_init_vi(&dqm->asic_ops);
1859 break;
1860
1861 case CHIP_KAVERI:
1862 device_queue_manager_init_cik(&dqm->asic_ops);
1863 break;
1864
1865 case CHIP_HAWAII:
1866 device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
1867 break;
1868
1869 case CHIP_TONGA:
1870 case CHIP_FIJI:
1871 case CHIP_POLARIS10:
1872 case CHIP_POLARIS11:
1873 case CHIP_POLARIS12:
1874 case CHIP_VEGAM:
1875 device_queue_manager_init_vi_tonga(&dqm->asic_ops);
1876 break;
1877
1878 case CHIP_VEGA10:
1879 case CHIP_VEGA12:
1880 case CHIP_VEGA20:
1881 case CHIP_RAVEN:
1882 case CHIP_RENOIR:
1883 case CHIP_ARCTURUS:
1884 device_queue_manager_init_v9(&dqm->asic_ops);
1885 break;
1886 case CHIP_NAVI10:
1887 case CHIP_NAVI12:
1888 case CHIP_NAVI14:
1889 device_queue_manager_init_v10_navi10(&dqm->asic_ops);
1890 break;
1891 default:
1892 WARN(1, "Unexpected ASIC family %u",
1893 dev->device_info->asic_family);
1894 goto out_free;
1895 }
1896
1897 if (init_mqd_managers(dqm))
1898 goto out_free;
1899
1900 if (allocate_hiq_sdma_mqd(dqm)) {
1901 pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
1902 goto out_free;
1903 }
1904
1905 if (!dqm->ops.initialize(dqm))
1906 return dqm;
1907
1908out_free:
1909 kfree(dqm);
1910 return NULL;
1911}
1912
1913static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
1914 struct kfd_mem_obj *mqd)
1915{
	WARN(!mqd, "No hiq sdma mqd chunk to free");
1917
1918 amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem);
1919}
1920
1921void device_queue_manager_uninit(struct device_queue_manager *dqm)
1922{
1923 dqm->ops.uninitialize(dqm);
1924 deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
1925 kfree(dqm);
1926}
1927
1928int kfd_process_vm_fault(struct device_queue_manager *dqm,
1929 unsigned int pasid)
1930{
1931 struct kfd_process_device *pdd;
1932 struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
1933 int ret = 0;
1934
1935 if (!p)
1936 return -EINVAL;
1937 pdd = kfd_get_process_device_data(dqm->dev, p);
1938 if (pdd)
1939 ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
1940 kfd_unref_process(p);
1941
1942 return ret;
1943}
1944
1945static void kfd_process_hw_exception(struct work_struct *work)
1946{
1947 struct device_queue_manager *dqm = container_of(work,
1948 struct device_queue_manager, hw_exception_work);
1949 amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
1950}
1951
1952#if defined(CONFIG_DEBUG_FS)
1953
1954static void seq_reg_dump(struct seq_file *m,
1955 uint32_t (*dump)[2], uint32_t n_regs)
1956{
1957 uint32_t i, count;
1958
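	/* Registers at consecutive offsets are grouped eight per line: the
	 * first value resets count to 7 and up to seven more values are
	 * appended before a new "address: value" line starts. A gap in the
	 * offsets also forces a new line.
	 */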
1959 for (i = 0, count = 0; i < n_regs; i++) {
1960 if (count == 0 ||
1961 dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
1962 seq_printf(m, "%s %08x: %08x",
1963 i ? "\n" : "",
1964 dump[i][0], dump[i][1]);
1965 count = 7;
1966 } else {
1967 seq_printf(m, " %08x", dump[i][1]);
1968 count--;
1969 }
1970 }
1971
1972 seq_puts(m, "\n");
1973}
1974
1975int dqm_debugfs_hqds(struct seq_file *m, void *data)
1976{
1977 struct device_queue_manager *dqm = data;
1978 uint32_t (*dump)[2], n_regs;
1979 int pipe, queue;
1980 int r = 0;
1981
1982 if (!dqm->sched_running) {
		seq_puts(m, " Device is stopped\n");
1984
1985 return 0;
1986 }
1987
1988 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
1989 KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
1990 &dump, &n_regs);
1991 if (!r) {
1992 seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n",
1993 KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
1994 KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
1995 KFD_CIK_HIQ_QUEUE);
1996 seq_reg_dump(m, dump, n_regs);
1997
1998 kfree(dump);
1999 }
2000
2001 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
2002 int pipe_offset = pipe * get_queues_per_pipe(dqm);
2003
2004 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
2005 if (!test_bit(pipe_offset + queue,
2006 dqm->dev->shared_resources.cp_queue_bitmap))
2007 continue;
2008
2009 r = dqm->dev->kfd2kgd->hqd_dump(
2010 dqm->dev->kgd, pipe, queue, &dump, &n_regs);
2011 if (r)
2012 break;
2013
2014 seq_printf(m, " CP Pipe %d, Queue %d\n",
2015 pipe, queue);
2016 seq_reg_dump(m, dump, n_regs);
2017
2018 kfree(dump);
2019 }
2020 }
2021
2022 for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
2023 for (queue = 0;
2024 queue < dqm->dev->device_info->num_sdma_queues_per_engine;
2025 queue++) {
2026 r = dqm->dev->kfd2kgd->hqd_sdma_dump(
2027 dqm->dev->kgd, pipe, queue, &dump, &n_regs);
2028 if (r)
2029 break;
2030
2031 seq_printf(m, " SDMA Engine %d, RLC %d\n",
2032 pipe, queue);
2033 seq_reg_dump(m, dump, n_regs);
2034
2035 kfree(dump);
2036 }
2037 }
2038
2039 return r;
2040}
2041
2042int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
2043{
2044 int r = 0;
2045
2046 dqm_lock(dqm);
2047 dqm->active_runlist = true;
2048 r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
2049 dqm_unlock(dqm);
2050
2051 return r;
2052}
2053
2054#endif