/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/memremap.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
#include <linux/device_cgroup.h>
#include <drm/drm_file.h>
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <drm/drm_ioctl.h>
#include <kgd_kfd_interface.h>
#include <linux/swap.h>

#include "amd_shared.h"
#include "amdgpu.h"

#define KFD_MAX_RING_ENTRY_SIZE	8

#define KFD_SYSFS_FILE_MODE 0444

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Use upper bits of mmap offset to store KFD driver-specific information.
 * BITS[63:62] - Encode mmap type
 * BITS[61:46] - Encode gpu_id, identifying the GPU the offset belongs to
 * BITS[45:0]  - mmap offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses an offset in pages. Hence, these
 * defines are w.r.t. PAGE_SIZE
 */
#define KFD_MMAP_TYPE_SHIFT	62
#define KFD_MMAP_TYPE_MASK	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS	(0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM	(0x1ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_MMIO	(0x0ULL << KFD_MMAP_TYPE_SHIFT)

#define KFD_MMAP_GPU_ID_SHIFT 46
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
				<< KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
				& KFD_MMAP_GPU_ID_MASK)
#define KFD_MMAP_GET_GPU_ID(offset)    ((offset & KFD_MMAP_GPU_ID_MASK) \
				>> KFD_MMAP_GPU_ID_SHIFT)
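
/*
 * Illustrative sketch (not from the original header): composing and decoding
 * an mmap offset with the macros above. "gpu_id" is a hypothetical value;
 * remember that vm_pgoff is in pages, so byte offsets must be converted
 * before being handed to userspace.
 *
 *	uint64_t offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id);
 *	...
 *	uint64_t type = offset & KFD_MMAP_TYPE_MASK;
 *	uint32_t gpu  = KFD_MMAP_GET_GPU_ID(offset);
 */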

/*
 * When working with the CP scheduler we should assign the HIQ manually or via
 * the amdgpu driver to a fixed HQD slot. Here are the fixed HIQ HQD slot
 * definitions for Kaveri. In Kaveri only the first ME's queues participate
 * in CP scheduling; with that in mind, we set the HIQ slot in the second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
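
/*
 * Usage sketch (illustrative, not from the original header): the typeof()
 * trick above derives both the allocation size and the cast from the
 * pointer's own type, so call sites never repeat the struct name. "pqn" is
 * a hypothetical variable.
 *
 *	struct process_queue_node *pqn = kfd_alloc_struct(pqn);
 *	if (!pqn)
 *		return -ENOMEM;
 */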

#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first chunk is the TBA used for the CWSR ISA code. The second
 * chunk is used as TMA for user-mode trap handler setup in daisy-chain mode.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET (PAGE_SIZE + 2048)

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
	(KFD_MAX_NUM_OF_PROCESSES *			\
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048

#define KFD_UNMAP_LATENCY_MS	(4000)

#define KFD_MAX_SDMA_QUEUES	128

/*
 * 512 = 0x200
 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
 * same SDMA engine on SOC15, which has 8-byte doorbells for SDMA.
 * 512 8-byte doorbell distance (i.e. one page away) ensures that SDMA RLC
 * (2*i+1) doorbells (in terms of the lower 12 bit address) lie exactly in
 * the OFFSET and SIZE set in registers like BIF_SDMA0_DOORBELL_RANGE.
 */
#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512

/**
 * enum kfd_ioctl_flags - KFD ioctl flags
 * Various flags that can be set in &amdkfd_ioctl_desc.flags to control how
 * userspace can use a given ioctl.
 */
enum kfd_ioctl_flags {
	/*
	 * @KFD_IOC_FLAG_CHECKPOINT_RESTORE:
	 * Certain KFD ioctls such as AMDKFD_IOC_CRIU_OP can potentially
	 * perform privileged operations and load arbitrary data into MQDs and
	 * eventually HQD registers when the queue is mapped by HWS. In order
	 * to prevent this we should perform additional security checks.
	 *
	 * This is equivalent to callers with the CHECKPOINT_RESTORE capability.
	 *
	 * Note: Since earlier versions of Docker do not support
	 * CHECKPOINT_RESTORE, we also allow ioctls with SYS_ADMIN capability.
	 */
	KFD_IOC_FLAG_CHECKPOINT_RESTORE = BIT(0),
};

/*
 * Kernel module parameter to specify the maximum number of supported queues
 * per device
 */
extern int max_num_of_queues_per_device;

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum process
 * number per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send sigterm to HSA process on
 * unhandled exception
 */
extern int send_sigterm;

/*
 * This kernel module parameter is used to simulate a large-BAR machine on
 * machines without large BAR support.
 */
extern int debug_largebar;

/* Set sh_mem_config.retry_disable on GFX v9 */
extern int amdgpu_noretry;

/* Halt if HWS hang is detected */
extern int halt_if_hws_hang;

/* Whether the MEC FW supports GWS barriers */
extern bool hws_gws_support;

/* Queue preemption timeout in ms */
extern int queue_preemption_timeout_ms;

/*
 * Don't evict process queues on vm fault
 */
extern int amdgpu_no_queue_eviction_on_vm_fault;

/* Enable eviction debug messages */
extern bool debug_evictions;

extern struct mutex kfd_processes_mutex;

enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

#define KFD_GC_VERSION(dev) (amdgpu_ip_version((dev)->adev, GC_HWIP, 0))
#define KFD_IS_SOC15(dev)   ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\
	((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) ||	\
	 (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) ||	\
	 (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)))

struct kfd_node;

struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_node *dev,
			const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
			bool *patched_flag);
	void (*interrupt_wq)(struct kfd_node *dev,
			const uint32_t *ih_ring_entry);
};

struct kfd_device_info {
	uint32_t gfx_target_version;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	unsigned int doorbell_size;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
	bool supports_cwsr;
	bool needs_pci_atomics;
	uint32_t no_atomic_fw_version;
	unsigned int num_sdma_queues_per_engine;
	unsigned int num_reserved_sdma_queues_per_engine;
	DECLARE_BITMAP(reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES);
};

unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev);
unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *kdev);

struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
	void *gtt_mem;
};

struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};

#define MAX_KFD_NODES	8

struct kfd_dev;

struct kfd_node {
	unsigned int node_id;
	struct amdgpu_device *adev;     /* Duplicated here along with keeping
					 * a copy in kfd_dev to save a hop
					 */
	const struct kfd2kgd_calls *kfd2kgd; /* Duplicated here along with
					      * keeping a copy in kfd_dev to
					      * save a hop
					      */
	struct kfd_vmid_info vm_info;
	unsigned int id;		/* topology stub index */
	uint32_t xcc_mask;		/* Instance mask of XCCs present */
	struct amdgpu_xcp *xcp;

	/* Interrupts */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;
	uint32_t interrupt_bitmap; /* Only used for GFX 9.4.3 */

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	/* Global GWS resource shared between processes */
	void *gws;
	bool gws_debug_workaround;

	/* Clients watching SMI events */
	struct list_head smi_clients;
	spinlock_t smi_lock;
	uint32_t reset_seq_num;

	/* SRAM ECC flag */
	atomic_t sram_ecc_flag;

	/* SPM process id */
	unsigned int spm_pasid;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	unsigned int compute_vmid_bitmap;

	struct kfd_local_mem_info local_mem_info;

	struct kfd_dev *kfd;
};

struct kfd_dev {
	struct amdgpu_device *adev;

	struct kfd_device_info device_info;

	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer to the doorbell
					   * page used by the kernel queue
					   */

	struct kgd2kfd_shared_resources shared_resources;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	bool init_complete;

	/* Firmware versions */
	uint16_t mec_fw_version;
	uint16_t mec2_fw_version;
	uint16_t sdma_fw_version;

	/* CWSR */
	bool cwsr_enabled;
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;

	/* xGMI */
	uint64_t hive_id;

	bool pci_atomic_requested;

	/* Compute Profile ref. count */
	atomic_t compute_profile;

	struct ida doorbell_ida;
	unsigned int max_doorbell_slices;

	int noretry;

	struct kfd_node *nodes[MAX_KFD_NODES];
	unsigned int num_nodes;

	/* Track per device allocated watch points */
	uint32_t alloc_watch_ids;
	spinlock_t watch_points_lock;

	/* Kernel doorbells for KFD device */
	struct amdgpu_bo *doorbells;

	/* bitmap for dynamic doorbell allocation from doorbell object */
	unsigned long *doorbell_bitmap;
};

enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);

/**
 * enum kfd_unmap_queues_filter - Enum for queue filters.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues
 * in the run list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to
 * a specific process.
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES = 1,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES = 2,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID = 3
};

/**
 * enum kfd_queue_type - Enum for various queue types.
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA_XGMI: Special SDMA queue for XGMI interface.
 */
enum kfd_queue_type {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ,
	KFD_QUEUE_TYPE_SDMA_XGMI
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};

enum KFD_QUEUE_PRIORITY {
	KFD_QUEUE_PRIORITY_MINIMUM = 0,
	KFD_QUEUE_PRIORITY_MAXIMUM = 15
};

/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity ranges from 0 to f, where f is the highest
 * priority. Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented and currently a zero in
 * this field defines that the queue is non-active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp read from the ring buffer. This field is updated automatically by the
 * H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of new packets written to the queue ring
 * buffer. This field should be similar to write_ptr and the user should
 * update this field after updating the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue means
 * that the queue can access both graphics and compute resources.
 *
 * @is_evicted: Defines if the queue is evicted. Only active queues
 * are evicted, rendering them inactive.
 *
 * @is_active: Defines if the queue is active or not. @is_active and
 * @is_evicted are protected by the DQM lock.
 *
 * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
 * @is_gws should be protected by the DQM lock, since changing it can yield the
 * possibility of updating DQM state on number of GWS queues.
 *
 * @vmid: If the scheduling mode is no cp scheduling the field defines the vmid
 * of the queue.
 *
 * This structure represents the queue properties for each queue, no matter
 * whether it's a user mode or kernel mode queue.
 */

struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	void __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_evicted;
	bool is_suspended;
	bool is_being_destroyed;
	bool is_active;
	bool is_gws;
	uint32_t pm4_target_xcc;
	bool is_dbg_wa;
	bool is_user_cu_masked;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint64_t tba_addr;
	uint64_t tma_addr;
	uint64_t exception_status;
};

#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
			    (q).queue_address != 0 &&	\
			    (q).queue_percent > 0 &&	\
			    !(q).is_evicted &&		\
			    !(q).is_suspended)
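
/*
 * Illustrative sketch (not from the original header): QUEUE_IS_ACTIVE()
 * takes the properties struct itself, not a pointer, so call sites
 * dereference first. "q" is a hypothetical struct queue pointer.
 *
 *	if (QUEUE_IS_ACTIVE(q->properties))
 *		active_count++;
 */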

enum mqd_update_flag {
	UPDATE_FLAG_DBG_WA_ENABLE = 1,
	UPDATE_FLAG_DBG_WA_DISABLE = 2,
	UPDATE_FLAG_IS_GWS = 4, /* quirk for gfx9 IP */
};

struct mqd_update_info {
	union {
		struct {
			uint32_t count; /* Must be a multiple of 32 */
			uint32_t *ptr;
		} cu_mask;
	};
	enum mqd_update_flag update_flag;
};
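
/*
 * Illustrative sketch (not from the original header): preparing a CU mask
 * update. cu_mask.count is a bit count and must be a multiple of 32, so a
 * 64-bit mask uses count = 64 and two 32-bit words. "mask" is a hypothetical
 * array.
 *
 *	static uint32_t mask[2] = { 0xffffffff, 0x0000ffff };
 *	struct mqd_update_info minfo = {
 *		.cu_mask = { .count = 64, .ptr = mask },
 *	};
 */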

/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD (memory queue descriptor).
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine
 * id that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * @gws: Points to the gws kgd_mem if this is a gws control queue; NULL
 * otherwise.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 */

struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;
	unsigned int doorbell_id;

	struct kfd_process *process;
	struct kfd_node *device;
	void *gws;

	/* procfs */
	struct kobject kobj;

	void *gang_ctx_bo;
	uint64_t gang_ctx_gpu_addr;
	void *gang_ctx_cpu_ptr;

	struct amdgpu_bo *wptr_bo;
};

enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_HIQ = 0,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_DIQ,		/* for diq */
	KFD_MQD_TYPE_MAX
};

enum KFD_PIPE_PRIORITY {
	KFD_PIPE_PRIORITY_CS_LOW = 0,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_HIGH
};

struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};

struct process_queue_manager {
	/* data */
	struct kfd_process *process;
	struct list_head queues;
	unsigned long *queue_slot_bitmap;
};

struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;
	unsigned int evicted; /* eviction counter, 0=active */

	/* This flag tells if we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/* This flag tells us if this process has a GWS-capable
	 * queue that will be mapped into the runlist. It's
	 * possible to request a GWS BO, but not have the queue
	 * currently mapped, and this changes how the MAP_PROCESS
	 * PM4 packet is configured.
	 */
	bool mapped_gws_queue;

	/* All the memory management data should be here too */
	uint64_t gds_context_area;
	/* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
	uint64_t page_table_base;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
	uint32_t sh_hidden_private_base;

	/* CWSR memory */
	struct kgd_mem *cwsr_mem;
	void *cwsr_kaddr;
	uint64_t cwsr_base;
	uint64_t tba_addr;
	uint64_t tma_addr;

	/* IB memory */
	struct kgd_mem *ib_mem;
	uint64_t ib_base;
	void *ib_kaddr;

	/* doorbells for kfd process */
	struct amdgpu_bo *proc_doorbells;

	/* bitmap for dynamic doorbell allocation from the bo */
	unsigned long *doorbell_bitmap;
};

/* KFD Memory Eviction */

/* Approx. wait time before attempting to restore evicted BOs */
#define PROCESS_RESTORE_TIME_MS 100
/* Approx. back off time if restore fails due to lack of memory */
#define PROCESS_BACK_OFF_TIME_MS 100
/* Approx. time before evicting the process again */
#define PROCESS_ACTIVE_TIME_MS 10

/* 8 byte handle containing GPU ID in the most significant 4 bytes and
 * idr_handle in the least significant 4 bytes
 */
#define MAKE_HANDLE(gpu_id, idr_handle) \
	(((uint64_t)(gpu_id) << 32) + idr_handle)
#define GET_GPU_ID(handle) (handle >> 32)
#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
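
/*
 * Illustrative sketch (not from the original header): packing and unpacking
 * a 64-bit buffer handle. "gpu_id" and "idr_handle" are hypothetical values.
 *
 *	uint64_t handle = MAKE_HANDLE(gpu_id, idr_handle);
 *	uint32_t gpu = GET_GPU_ID(handle);	 upper 32 bits
 *	uint32_t idr = GET_IDR_HANDLE(handle);	 lower 32 bits
 */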

enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};

#define MAX_SYSFS_FILENAME_LEN 15

/*
 * SDMA counter runs at 100MHz frequency.
 * We display SDMA activity in microsecond granularity in sysfs.
 * As a result, the divisor is 100.
 */
#define SDMA_ACTIVITY_DIVISOR  100
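
/*
 * Illustrative sketch (not from the original header): converting a raw
 * 100 MHz SDMA counter value to microseconds for sysfs. 100 counter ticks
 * elapse per microsecond, hence the divide. "counter" is hypothetical.
 *
 *	uint64_t activity_us = counter / SDMA_ACTIVITY_DIVISOR;
 */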

/* Data that is per-process, per-device. */
struct kfd_process_device {
	/* The device that owns this data. */
	struct kfd_node *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process, per-device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* VM context for GPUVM allocations */
	struct file *drm_file;
	void *drm_priv;

	/* GPUVM allocations storage */
	struct idr alloc_idr;

	/* Flag used to tell whether the pdd has dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it is already called in the IOMMU callback
	 * function.
	 */
	bool already_dequeued;
	bool runtime_inuse;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;

	/* VRAM usage */
	uint64_t vram_usage;
	struct attribute attr_vram;
	char vram_filename[MAX_SYSFS_FILENAME_LEN];

	/* SDMA activity tracking */
	uint64_t sdma_past_activity_counter;
	struct attribute attr_sdma;
	char sdma_filename[MAX_SYSFS_FILENAME_LEN];

	/* Eviction activity tracking */
	uint64_t last_evict_timestamp;
	atomic64_t evict_duration_counter;
	struct attribute attr_evict;

	struct kobject *kobj_stats;

	/*
	 * @cu_occupancy: Reports the occupancy of Compute Units (CU) of a
	 * process that is associated with the device encoded by "this" struct
	 * instance. The value reflects CU usage by all of the waves launched
	 * by this process on this device. An important property of this
	 * occupancy parameter is that its value is a snapshot of current use.
	 *
	 * The following is to be noted regarding how this parameter is
	 * reported:
	 *
	 * The number of waves that a CU can launch is limited by a couple of
	 * parameters. These are encoded by the struct amdgpu_cu_info instance
	 * that is part of every device definition. For GFX9 devices this
	 * translates to 40 waves (simd_per_cu * max_waves_per_simd) when waves
	 * do not use scratch memory and 32 waves (max_scratch_slots_per_cu)
	 * when they do use scratch memory. This could change for future
	 * devices, so this example should be considered a guide only.
	 *
	 * All CUs of a device are available to the process. This may not be
	 * true under certain conditions, e.g. CU masking.
	 *
	 * Finally, the number of CUs occupied by a process is affected by
	 * both the number of CUs a device has and the number of other
	 * competing processes.
	 */
	struct attribute attr_cu_occupancy;

	/* sysfs counters for GPU retry fault and page migration tracking */
	struct kobject *kobj_counters;
	struct attribute attr_faults;
	struct attribute attr_page_in;
	struct attribute attr_page_out;
	uint64_t faults;
	uint64_t page_in;
	uint64_t page_out;

	/* Exception code status */
	uint64_t exception_status;
	void *vm_fault_exc_data;
	size_t vm_fault_exc_data_size;

	/* Tracks debug per-vmid request settings */
	uint32_t spi_dbg_override;
	uint32_t spi_dbg_launch_mode;
	uint32_t watch_points[4];
	uint32_t alloc_watch_ids;

	/*
	 * If this process has been checkpointed before, then the user
	 * application will use the original gpu_id on the
	 * checkpointed node to refer to this device.
	 */
	uint32_t user_gpu_id;

	void *proc_ctx_bo;
	uint64_t proc_ctx_gpu_addr;
	void *proc_ctx_cpu_ptr;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
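
/*
 * Illustrative sketch (not from the original header): qpd_to_pdd() recovers
 * the enclosing kfd_process_device from a pointer to its embedded
 * qcm_process_device via container_of(). "qpd" is a hypothetical pointer.
 *
 *	struct qcm_process_device *qpd = ...;
 *	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
 */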

struct svm_range_list {
	struct mutex lock;
	struct rb_root_cached objects;
	struct list_head list;
	struct work_struct deferred_list_work;
	struct list_head deferred_range_list;
	struct list_head criu_svm_metadata_list;
	spinlock_t deferred_list_lock;
	atomic_t evicted_ranges;
	atomic_t drain_pagefaults;
	struct delayed_work restore_work;
	DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
	struct task_struct *faulting_task;
};

/* Process data */
struct kfd_process {
	/*
	 * kfd_process structures are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct kref ref;
	struct work_struct release_work;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	u32 pasid;

	/*
	 * Array of kfd_process_device pointers,
	 * one for each device the process is using.
	 */
	struct kfd_process_device *pdds[MAX_GPU_INSTANCE];
	uint32_t n_pdds;

	struct process_queue_manager pqm;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;
	/* Event page */
	u64 signal_handle;
	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;

	/* Information used for memory eviction */
	void *kgd_process_info;
	/* Eviction fence that is attached to all the BOs of this process. The
	 * fence will be triggered during eviction and a new one will be
	 * created during restore.
	 */
	struct dma_fence __rcu *ef;

	/* Work items for evicting and restoring BOs */
	struct delayed_work eviction_work;
	struct delayed_work restore_work;
	/* seqno of the last scheduled eviction */
	unsigned int last_eviction_seqno;
	/* Approx. the last timestamp (in jiffies) when the process was
	 * restored after an eviction
	 */
	unsigned long last_restore_timestamp;

	/* Indicates that the device process is debug-attached with a reserved
	 * vmid.
	 */
	bool debug_trap_enabled;

	/* per-process-per device debug event fd file */
	struct file *dbg_ev_file;

	/* If the process is a kfd debugger, we need to know so we can clean
	 * up at exit time. If a process enables debugging on itself, it does
	 * its own clean-up, so we don't set the flag here. We track this by
	 * counting the number of processes this process is debugging.
	 */
	atomic_t debugged_process_count;

	/* If the process is being debugged, this is the debugger process */
	struct kfd_process *debugger_process;

	/* Kobj for our procfs */
	struct kobject *kobj;
	struct kobject *kobj_queues;
	struct attribute attr_pasid;

	/* Keeps track of CWSR initialization */
	bool has_cwsr;

	/* Exception code enable mask and status */
	uint64_t exception_enable_mask;
	uint64_t exception_status;

	/* Used to drain stale interrupts */
	wait_queue_head_t wait_irq_drain;
	bool irq_drain_is_open;

	/* shared virtual memory registered by this process */
	struct svm_range_list svms;

	bool xnack_enabled;

	/* Work area for debugger event writer worker. */
	struct work_struct debug_event_workarea;

	/* Tracks debug per-vmid request for debug flags */
	u32 dbg_flags;

	atomic_t poison;
	/* Queues are in a paused state because we are in the process of doing
	 * a CRIU checkpoint
	 */
	bool queues_paused;

	/* Tracks runtime enable status */
	struct semaphore runtime_enable_sema;
	bool is_runtime_retry;
	struct kfd_runtime_info runtime_info;
};

#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
extern struct srcu_struct kfd_processes_srcu;

/**
 * typedef amdkfd_ioctl_t - typedef for ioctl function pointer.
 *
 * @filep: pointer to file structure.
 * @p: amdkfd process pointer.
 * @data: pointer to arg that was copied from user.
 *
 * Return: returns ioctl completion code.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};
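
/*
 * Illustrative sketch (not from the original header): a descriptor entry
 * gated by KFD_IOC_FLAG_CHECKPOINT_RESTORE. The initializer below is a
 * hypothetical example; the actual descriptor table lives in kfd_chardev.c.
 *
 *	static const struct amdkfd_ioctl_desc desc = {
 *		.cmd   = AMDKFD_IOC_CRIU_OP,
 *		.flags = KFD_IOC_FLAG_CHECKPOINT_RESTORE,
 *		.func  = kfd_ioctl_criu,
 *		.name  = "AMDKFD_IOC_CRIU_OP",
 *	};
 */
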
bool kfd_dev_is_large_bar(struct kfd_node *dev);

int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
void kfd_cleanup_processes(void);
struct kfd_process *kfd_create_process(struct task_struct *thread);
struct kfd_process *kfd_get_process(const struct task_struct *task);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
int kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
				uint32_t *gpuid, uint32_t *gpuidx);
static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
				uint32_t gpuidx, uint32_t *gpuid) {
	return gpuidx < p->n_pdds ? p->pdds[gpuidx]->dev->id : -EINVAL;
}
static inline struct kfd_process_device *kfd_process_device_from_gpuidx(
				struct kfd_process *p, uint32_t gpuidx) {
	return gpuidx < p->n_pdds ? p->pdds[gpuidx] : NULL;
}

void kfd_unref_process(struct kfd_process *p);
int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger);
int kfd_process_restore_queues(struct kfd_process *p);
void kfd_suspend_all_processes(void);
int kfd_resume_all_processes(void);

struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *process,
							 uint32_t gpu_id);

int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id);

int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
						      struct kfd_process *p);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
						       struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
							  struct kfd_process *p);

bool kfd_process_xnack_mode(struct kfd_process *p, bool supported);

int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
			  struct vm_area_struct *vma);

/* KFD process API for creating and translating handles */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					void *mem);
void *kfd_process_device_translate_handle(struct kfd_process_device *p,
					int handle);
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					int handle);
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
u32 kfd_pasid_alloc(void);
void kfd_pasid_free(u32 pasid);

/* Doorbells */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process,
		      struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
					struct kfd_process_device *pdd,
					unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
				struct kfd_process_device *pdd);
void kfd_free_process_doorbells(struct kfd_dev *kfd,
				struct kfd_process_device *pdd);

/* GTT Sub-Allocator */

int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
			struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj);

extern struct device *kfd_device;

/* KFD's procfs */
void kfd_procfs_init(void);
void kfd_procfs_shutdown(void);
int kfd_procfs_add_queue(struct queue *q);
void kfd_procfs_del_queue(struct queue *q);

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_node *gpu);
int kfd_topology_remove_device(struct kfd_node *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
						uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
						uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_node *kfd_device_by_id(uint32_t gpu_id);
struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev);
static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id,
					uint32_t vmid)
{
	return (node->interrupt_bitmap & (1 << node_id)) != 0 &&
	       (node->compute_vmid_bitmap & (1 << vmid)) != 0;
}
static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
					uint32_t node_id, uint32_t vmid) {
	struct kfd_dev *dev = adev->kfd.dev;
	uint32_t i;

	if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3) &&
	    KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 4))
		return dev->nodes[0];

	for (i = 0; i < dev->num_nodes; i++)
		if (kfd_irq_is_from_node(dev->nodes[i], node_id, vmid))
			return dev->nodes[i];

	return NULL;
}
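
/*
 * Illustrative sketch (not from the original header): typical interrupt
 * dispatch using the helper above. On multi-node GFX 9.4.3/9.4.4 parts the
 * node is resolved from the IH node_id/vmid pair; other ASICs always route
 * to node 0. "node_id" and "vmid" are hypothetical values decoded from an
 * IH ring entry.
 *
 *	struct kfd_node *node = kfd_node_by_irq_ids(adev, node_id, vmid);
 *	if (!node)
 *		return;	 interrupt is not for a known KFD node
 */
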
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);

/* Interrupts */
#define	KFD_IRQ_FENCE_CLIENTID	0xff
#define	KFD_IRQ_FENCE_SOURCEID	0xff
#define	KFD_IRQ_IS_FENCE(client, source)			\
				((client) == KFD_IRQ_FENCE_CLIENTID &&	\
				(source) == KFD_IRQ_FENCE_SOURCEID)
int kfd_interrupt_init(struct kfd_node *dev);
void kfd_interrupt_exit(struct kfd_node *dev);
bool enqueue_ih_ring_entry(struct kfd_node *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_node *dev,
				const uint32_t *ih_ring_entry,
				uint32_t *patched_ihre, bool *flag);
int kfd_process_drain_interrupts(struct kfd_process_device *pdd);
void kfd_process_close_interrupt_drain(unsigned int pasid);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
				  uint64_t tba_addr,
				  uint64_t tma_addr);
void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
				     bool enabled);

/* CWSR initialization */
int kfd_process_init_cwsr_apu(struct kfd_process *process, struct file *filep);

/* CRIU */
/*
 * Need to increment KFD_CRIU_PRIV_VERSION each time a change is made to any of
 * the CRIU private structures:
 * kfd_criu_process_priv_data
 * kfd_criu_device_priv_data
 * kfd_criu_bo_priv_data
 * kfd_criu_queue_priv_data
 * kfd_criu_event_priv_data
 * kfd_criu_svm_range_priv_data
 */

#define KFD_CRIU_PRIV_VERSION 1

struct kfd_criu_process_priv_data {
	uint32_t version;
	uint32_t xnack_mode;
};

struct kfd_criu_device_priv_data {
	/* For future use */
	uint64_t reserved;
};

struct kfd_criu_bo_priv_data {
	uint64_t user_addr;
	uint32_t idr_handle;
	uint32_t mapped_gpuids[MAX_GPU_INSTANCE];
};

/*
 * The first 4 bytes of kfd_criu_queue_priv_data, kfd_criu_event_priv_data,
 * kfd_criu_svm_range_priv_data is the object type
 */
enum kfd_criu_object_type {
	KFD_CRIU_OBJECT_TYPE_QUEUE,
	KFD_CRIU_OBJECT_TYPE_EVENT,
	KFD_CRIU_OBJECT_TYPE_SVM_RANGE,
};

struct kfd_criu_svm_range_priv_data {
	uint32_t object_type;
	uint64_t start_addr;
	uint64_t size;
	/* Variable length array of attributes */
	struct kfd_ioctl_svm_attribute attrs[];
};
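
/*
 * Illustrative sketch (not from the original header): sizing an allocation
 * for the flexible attrs[] array at the end of the struct, using the
 * kernel's struct_size() helper. "nattrs" is a hypothetical attribute count.
 *
 *	struct kfd_criu_svm_range_priv_data *priv;
 *
 *	priv = kzalloc(struct_size(priv, attrs, nattrs), GFP_KERNEL);
 */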

struct kfd_criu_queue_priv_data {
	uint32_t object_type;
	uint64_t q_address;
	uint64_t q_size;
	uint64_t read_ptr_addr;
	uint64_t write_ptr_addr;
	uint64_t doorbell_off;
	uint64_t eop_ring_buffer_address;
	uint64_t ctx_save_restore_area_address;
	uint32_t gpu_id;
	uint32_t type;
	uint32_t format;
	uint32_t q_id;
	uint32_t priority;
	uint32_t q_percent;
	uint32_t doorbell_id;
	uint32_t gws;
	uint32_t sdma_id;
	uint32_t eop_ring_buffer_size;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint32_t mqd_size;
};

struct kfd_criu_event_priv_data {
	uint32_t object_type;
	uint64_t user_handle;
	uint32_t event_id;
	uint32_t auto_reset;
	uint32_t type;
	uint32_t signaled;

	union {
		struct kfd_hsa_memory_exception_data memory_exception_data;
		struct kfd_hsa_hw_exception_data hw_exception_data;
	};
};

int kfd_process_get_queue_info(struct kfd_process *p,
			       uint32_t *num_queues,
			       uint64_t *priv_data_sizes);

int kfd_criu_checkpoint_queues(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset);

int kfd_criu_restore_queue(struct kfd_process *p,
			   uint8_t __user *user_priv_data,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size);

int kfd_criu_checkpoint_events(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset);

int kfd_criu_restore_event(struct file *devkfd,
			   struct kfd_process *p,
			   uint8_t __user *user_priv_data,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size);
/* CRIU - End */

/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v12(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);

/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			    struct kfd_node *dev,
			    struct file *f,
			    struct queue_properties *properties,
			    unsigned int *qid,
			    struct amdgpu_bo *wptr_bo,
			    const struct kfd_criu_queue_priv_data *q_data,
			    const void *restore_mqd,
			    const void *restore_ctl_stack,
			    uint32_t *p_doorbell_offset_in_process);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue_properties(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
			struct mqd_update_info *minfo);
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
			void *gws);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
						unsigned int qid);
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
						unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size);
int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
			   uint64_t exception_clear_mask,
			   void __user *buf,
			   int *num_qss_entries,
			   uint32_t *entry_size);

int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
			      uint64_t fence_value,
			      unsigned int timeout_ms);

int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
				  unsigned int qid,
				  u32 *mqd_size,
				  u32 *ctl_stack_size);
/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT   (10)

struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct mutex lock;
	bool allocated;
	struct kfd_mem_obj *ib_buffer_obj;
	unsigned int ib_size_bytes;
	bool is_over_subscription;

	const struct packet_manager_funcs *pmf;
};

struct packet_manager_funcs {
	/* Support ASIC-specific packet formats for PM4 packets */
	int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
			struct qcm_process_device *qpd);
	int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain);
	int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
			struct scheduling_resources *res);
	int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
			struct queue *q, bool is_static);
	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset);
	int (*set_grace_period)(struct packet_manager *pm, uint32_t *buffer,
			uint32_t grace_period);
	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint64_t fence_value);
	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);

	/* Packet sizes */
	int map_process_size;
	int runlist_size;
	int set_resources_size;
	int map_queues_size;
	int unmap_queues_size;
	int set_grace_period_size;
	int query_status_size;
	int release_mem_size;
};
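
/*
 * Illustrative sketch (not from the original header): the packet manager
 * dispatches through the per-ASIC vtable above, writing a packet into a
 * runlist buffer and advancing by the ASIC-specific packet size (in bytes).
 * "rl_buffer", "rl_wptr", "q" and "is_static" are hypothetical.
 *
 *	retval = pm->pmf->map_queues(pm, &rl_buffer[rl_wptr], q, is_static);
 *	if (!retval)
 *		rl_wptr += pm->pmf->map_queues_size / sizeof(uint32_t);
 */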

extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;
extern const struct packet_manager_funcs kfd_aldebaran_pm_funcs;

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint64_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset);

void pm_release_ib(struct packet_manager *pm);

int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period);

/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);

/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9_4_3;
extern const struct kfd_event_interrupt_class event_interrupt_class_v10;
extern const struct kfd_event_interrupt_class event_interrupt_class_v11;

extern const struct kfd_device_global_init_class device_global_init_class_cik;

int kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t *user_timeout_ms,
		       uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_hw_exception_event(u32 pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset);

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index);

int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
				struct kfd_vm_fault_info *info,
				struct kfd_hsa_memory_exception_data *data);

void kfd_signal_reset_event(struct kfd_node *dev);

void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid);

static inline void kfd_flush_tlb(struct kfd_process_device *pdd,
				 enum TLB_FLUSH_TYPE type)
{
	struct amdgpu_device *adev = pdd->dev->adev;
	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);

	amdgpu_vm_flush_compute_tlb(adev, vm, type, pdd->dev->xcc_mask);
}

static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
	return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) ||
	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
}

int kfd_send_exception_to_runtime(struct kfd_process *p,
				unsigned int queue_id,
				uint64_t error_reason);
bool kfd_is_locked(void);

/* Compute profile */
void kfd_inc_compute_active(struct kfd_node *dev);
void kfd_dec_compute_active(struct kfd_node *dev);

/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_node *node)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
	struct drm_device *ddev;

	if (node->xcp)
		ddev = node->xcp->ddev;
	else
		ddev = adev_to_drm(node->adev);

	return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
					  ddev->render->index,
					  DEVCG_ACC_WRITE | DEVCG_ACC_READ);
#else
	return 0;
#endif
}

static inline bool kfd_is_first_node(struct kfd_node *node)
{
	return (node == node->kfd->nodes[0]);
}

/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

int kfd_debugfs_hang_hws(struct kfd_node *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}

#endif

#endif