1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef KFD_IOCTL_H_INCLUDED
24#define KFD_IOCTL_H_INCLUDED
25
26#include <drm/drm.h>
27#include <linux/ioctl.h>
28
29/*
30 * - 1.1 - initial version
31 * - 1.3 - Add SMI events support
32 * - 1.4 - Indicate new SRAM EDC bit in device properties
33 * - 1.5 - Add SVM API
34 * - 1.6 - Query clear flags in SVM get_attr API
35 * - 1.7 - Checkpoint Restore (CRIU) API
36 * - 1.8 - CRIU - Support for SDMA transfers with GTT BOs
37 * - 1.9 - Add available memory ioctl
38 * - 1.10 - Add SMI profiler event log
39 * - 1.11 - Add unified memory for ctx save/restore area
40 * - 1.12 - Add DMA buf export ioctl
41 * - 1.13 - Add debugger API
42 * - 1.14 - Update kfd_event_data
43 * - 1.15 - Enable managing mappings in compute VMs with GEM_VA ioctl
44 * - 1.16 - Add contiguous VRAM allocation flag
45 * - 1.17 - Add SDMA queue creation with target SDMA engine ID
46 */
47#define KFD_IOCTL_MAJOR_VERSION 1
48#define KFD_IOCTL_MINOR_VERSION 17
49
50struct kfd_ioctl_get_version_args {
51 __u32 major_version; /* from KFD */
52 __u32 minor_version; /* from KFD */
53};
54
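/*
 * Illustrative user-space sketch (not part of the UAPI): query the interface
 * version by opening the KFD device node and issuing AMDKFD_IOC_GET_VERSION,
 * defined near the end of this header. The /dev/kfd path is the conventional
 * KFD device node; the #if 0 guard keeps this header unaffected when built.
 */
#if 0 /* usage sketch only */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
	struct kfd_ioctl_get_version_args args;
	int fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return 1;

	memset(&args, 0, sizeof(args));
	if (ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) == 0)
		printf("KFD ioctl interface %u.%u\n",
		       args.major_version, args.minor_version);

	close(fd);
	return 0;
}
#endif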
55/* For kfd_ioctl_create_queue_args.queue_type. */
56#define KFD_IOC_QUEUE_TYPE_COMPUTE 0x0
57#define KFD_IOC_QUEUE_TYPE_SDMA 0x1
58#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 0x2
59#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI 0x3
60#define KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID 0x4
61
62#define KFD_MAX_QUEUE_PERCENTAGE 100
63#define KFD_MAX_QUEUE_PRIORITY 15
64
65struct kfd_ioctl_create_queue_args {
66 __u64 ring_base_address; /* to KFD */
67 __u64 write_pointer_address; /* from KFD */
68 __u64 read_pointer_address; /* from KFD */
69 __u64 doorbell_offset; /* from KFD */
70
71 __u32 ring_size; /* to KFD */
72 __u32 gpu_id; /* to KFD */
73 __u32 queue_type; /* to KFD */
74 __u32 queue_percentage; /* to KFD */
75 __u32 queue_priority; /* to KFD */
76 __u32 queue_id; /* from KFD */
77
78 __u64 eop_buffer_address; /* to KFD */
79 __u64 eop_buffer_size; /* to KFD */
80 __u64 ctx_save_restore_address; /* to KFD */
81 __u32 ctx_save_restore_size; /* to KFD */
82 __u32 ctl_stack_size; /* to KFD */
83 __u32 sdma_engine_id; /* to KFD */
84 __u32 pad;
85};
86
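/*
 * Minimal sketch (not part of the UAPI) of how the "to KFD" fields above are
 * filled to create a plain compute queue. It assumes the caller already owns
 * an open /dev/kfd fd, has acquired a VM on the target GPU and allocated a
 * suitable ring buffer; real runtimes such as ROCr additionally set up an EOP
 * buffer, a context save/restore area and mmap the returned doorbell_offset.
 */
#if 0 /* usage sketch only */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int create_compute_queue(int kfd_fd, __u32 gpu_id,
				void *ring_buf, __u32 ring_bytes,
				__u32 *queue_id_out)
{
	struct kfd_ioctl_create_queue_args args;

	memset(&args, 0, sizeof(args));
	args.ring_base_address = (__u64)(uintptr_t)ring_buf; /* to KFD */
	args.ring_size = ring_bytes;
	args.gpu_id = gpu_id;
	args.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE;
	args.queue_percentage = KFD_MAX_QUEUE_PERCENTAGE;
	args.queue_priority = KFD_MAX_QUEUE_PRIORITY / 2;

	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &args))
		return -1;

	/* Outputs: queue_id is needed later for update/destroy. */
	*queue_id_out = args.queue_id;
	return 0;
}
#endif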
87struct kfd_ioctl_destroy_queue_args {
88 __u32 queue_id; /* to KFD */
89 __u32 pad;
90};
91
92struct kfd_ioctl_update_queue_args {
93 __u64 ring_base_address; /* to KFD */
94
95 __u32 queue_id; /* to KFD */
96 __u32 ring_size; /* to KFD */
97 __u32 queue_percentage; /* to KFD */
98 __u32 queue_priority; /* to KFD */
99};
100
101struct kfd_ioctl_set_cu_mask_args {
102 __u32 queue_id; /* to KFD */
103 __u32 num_cu_mask; /* to KFD */
104 __u64 cu_mask_ptr; /* to KFD */
105};
106
107struct kfd_ioctl_get_queue_wave_state_args {
108 __u64 ctl_stack_address; /* to KFD */
109 __u32 ctl_stack_used_size; /* from KFD */
110 __u32 save_area_used_size; /* from KFD */
111 __u32 queue_id; /* to KFD */
112 __u32 pad;
113};
114
115struct kfd_ioctl_get_available_memory_args {
116 __u64 available; /* from KFD */
117 __u32 gpu_id; /* to KFD */
118 __u32 pad;
119};
120
121struct kfd_dbg_device_info_entry {
122 __u64 exception_status;
123 __u64 lds_base;
124 __u64 lds_limit;
125 __u64 scratch_base;
126 __u64 scratch_limit;
127 __u64 gpuvm_base;
128 __u64 gpuvm_limit;
129 __u32 gpu_id;
130 __u32 location_id;
131 __u32 vendor_id;
132 __u32 device_id;
133 __u32 revision_id;
134 __u32 subsystem_vendor_id;
135 __u32 subsystem_device_id;
136 __u32 fw_version;
137 __u32 gfx_target_version;
138 __u32 simd_count;
139 __u32 max_waves_per_simd;
140 __u32 array_count;
141 __u32 simd_arrays_per_engine;
142 __u32 num_xcc;
143 __u32 capability;
144 __u32 debug_prop;
145};
146
147/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
148#define KFD_IOC_CACHE_POLICY_COHERENT 0
149#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
150
151struct kfd_ioctl_set_memory_policy_args {
152 __u64 alternate_aperture_base; /* to KFD */
153 __u64 alternate_aperture_size; /* to KFD */
154
155 __u32 gpu_id; /* to KFD */
156 __u32 default_policy; /* to KFD */
157 __u32 alternate_policy; /* to KFD */
158 __u32 pad;
159};
160
161/*
162 * All counters are monotonic. They are used for profiling of compute jobs.
163 * The profiling is done by userspace.
164 *
 165 * In case of GPU reset, the counters should not be affected.
166 */
167
168struct kfd_ioctl_get_clock_counters_args {
169 __u64 gpu_clock_counter; /* from KFD */
170 __u64 cpu_clock_counter; /* from KFD */
171 __u64 system_clock_counter; /* from KFD */
172 __u64 system_clock_freq; /* from KFD */
173
174 __u32 gpu_id; /* to KFD */
175 __u32 pad;
176};
177
178struct kfd_process_device_apertures {
179 __u64 lds_base; /* from KFD */
180 __u64 lds_limit; /* from KFD */
181 __u64 scratch_base; /* from KFD */
182 __u64 scratch_limit; /* from KFD */
183 __u64 gpuvm_base; /* from KFD */
184 __u64 gpuvm_limit; /* from KFD */
185 __u32 gpu_id; /* from KFD */
186 __u32 pad;
187};
188
189/*
190 * AMDKFD_IOC_GET_PROCESS_APERTURES is deprecated. Use
191 * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW instead, which supports an
192 * unlimited number of GPUs.
193 */
194#define NUM_OF_SUPPORTED_GPUS 7
195struct kfd_ioctl_get_process_apertures_args {
196 struct kfd_process_device_apertures
197 process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */
198
199 /* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
200 __u32 num_of_nodes;
201 __u32 pad;
202};
203
204struct kfd_ioctl_get_process_apertures_new_args {
205 /* User allocated. Pointer to struct kfd_process_device_apertures
206 * filled in by Kernel
207 */
208 __u64 kfd_process_device_apertures_ptr;
209 /* to KFD - indicates amount of memory present in
210 * kfd_process_device_apertures_ptr
211 * from KFD - Number of entries filled by KFD.
212 */
213 __u32 num_of_nodes;
214 __u32 pad;
215};
216
217#define MAX_ALLOWED_NUM_POINTS 100
218#define MAX_ALLOWED_AW_BUFF_SIZE 4096
219#define MAX_ALLOWED_WAC_BUFF_SIZE 128
220
221struct kfd_ioctl_dbg_register_args {
222 __u32 gpu_id; /* to KFD */
223 __u32 pad;
224};
225
226struct kfd_ioctl_dbg_unregister_args {
227 __u32 gpu_id; /* to KFD */
228 __u32 pad;
229};
230
231struct kfd_ioctl_dbg_address_watch_args {
232 __u64 content_ptr; /* a pointer to the actual content */
233 __u32 gpu_id; /* to KFD */
234 __u32 buf_size_in_bytes; /*including gpu_id and buf_size */
235};
236
237struct kfd_ioctl_dbg_wave_control_args {
238 __u64 content_ptr; /* a pointer to the actual content */
239 __u32 gpu_id; /* to KFD */
240 __u32 buf_size_in_bytes; /*including gpu_id and buf_size */
241};
242
243#define KFD_INVALID_FD 0xffffffff
244
245/* Matching HSA_EVENTTYPE */
246#define KFD_IOC_EVENT_SIGNAL 0
247#define KFD_IOC_EVENT_NODECHANGE 1
248#define KFD_IOC_EVENT_DEVICESTATECHANGE 2
249#define KFD_IOC_EVENT_HW_EXCEPTION 3
250#define KFD_IOC_EVENT_SYSTEM_EVENT 4
251#define KFD_IOC_EVENT_DEBUG_EVENT 5
252#define KFD_IOC_EVENT_PROFILE_EVENT 6
253#define KFD_IOC_EVENT_QUEUE_EVENT 7
254#define KFD_IOC_EVENT_MEMORY 8
255
256#define KFD_IOC_WAIT_RESULT_COMPLETE 0
257#define KFD_IOC_WAIT_RESULT_TIMEOUT 1
258#define KFD_IOC_WAIT_RESULT_FAIL 2
259
260#define KFD_SIGNAL_EVENT_LIMIT 4096
261
262/* For kfd_event_data.hw_exception_data.reset_type. */
263#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET 0
264#define KFD_HW_EXCEPTION_PER_ENGINE_RESET 1
265
266/* For kfd_event_data.hw_exception_data.reset_cause. */
267#define KFD_HW_EXCEPTION_GPU_HANG 0
268#define KFD_HW_EXCEPTION_ECC 1
269
270/* For kfd_hsa_memory_exception_data.ErrorType */
271#define KFD_MEM_ERR_NO_RAS 0
272#define KFD_MEM_ERR_SRAM_ECC 1
273#define KFD_MEM_ERR_POISON_CONSUMED 2
274#define KFD_MEM_ERR_GPU_HANG 3
275
276struct kfd_ioctl_create_event_args {
277 __u64 event_page_offset; /* from KFD */
278 __u32 event_trigger_data; /* from KFD - signal events only */
279 __u32 event_type; /* to KFD */
280 __u32 auto_reset; /* to KFD */
281 __u32 node_id; /* to KFD - only valid for certain
282 event types */
283 __u32 event_id; /* from KFD */
284 __u32 event_slot_index; /* from KFD */
285};
286
287struct kfd_ioctl_destroy_event_args {
288 __u32 event_id; /* to KFD */
289 __u32 pad;
290};
291
292struct kfd_ioctl_set_event_args {
293 __u32 event_id; /* to KFD */
294 __u32 pad;
295};
296
297struct kfd_ioctl_reset_event_args {
298 __u32 event_id; /* to KFD */
299 __u32 pad;
300};
301
302struct kfd_memory_exception_failure {
303 __u32 NotPresent; /* Page not present or supervisor privilege */
304 __u32 ReadOnly; /* Write access to a read-only page */
305 __u32 NoExecute; /* Execute access to a page marked NX */
306 __u32 imprecise; /* Can't determine the exact fault address */
307};
308
309/* memory exception data */
310struct kfd_hsa_memory_exception_data {
311 struct kfd_memory_exception_failure failure;
312 __u64 va;
313 __u32 gpu_id;
314 __u32 ErrorType; /* 0 = no RAS error,
315 * 1 = ECC_SRAM,
316 * 2 = Link_SYNFLOOD (poison),
317 * 3 = GPU hang (not attributable to a specific cause),
318 * other values reserved
319 */
320};
321
322/* hw exception data */
323struct kfd_hsa_hw_exception_data {
324 __u32 reset_type;
325 __u32 reset_cause;
326 __u32 memory_lost;
327 __u32 gpu_id;
328};
329
330/* hsa signal event data */
331struct kfd_hsa_signal_event_data {
332 __u64 last_event_age; /* to and from KFD */
333};
334
335/* Event data */
336struct kfd_event_data {
337 union {
338 /* From KFD */
339 struct kfd_hsa_memory_exception_data memory_exception_data;
340 struct kfd_hsa_hw_exception_data hw_exception_data;
341 /* To and From KFD */
342 struct kfd_hsa_signal_event_data signal_event_data;
343 };
344 __u64 kfd_event_data_ext; /* pointer to an extension structure
345 for future exception types */
346 __u32 event_id; /* to KFD */
347 __u32 pad;
348};
349
350struct kfd_ioctl_wait_events_args {
351 __u64 events_ptr; /* pointed to struct
352 kfd_event_data array, to KFD */
353 __u32 num_events; /* to KFD */
354 __u32 wait_for_all; /* to KFD */
355 __u32 timeout; /* to KFD */
356 __u32 wait_result; /* from KFD */
357};
358
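/*
 * Minimal sketch (not part of the UAPI): create one signal event and wait on
 * it. A real runtime also mmaps the returned event_page_offset so the GPU can
 * signal the event; here the wait simply completes when the event is set via
 * AMDKFD_IOC_SET_EVENT or when the timeout expires.
 */
#if 0 /* usage sketch only */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int wait_on_new_signal_event(int kfd_fd, __u32 timeout_ms)
{
	struct kfd_ioctl_create_event_args create = {0};
	struct kfd_event_data data = {0};
	struct kfd_ioctl_wait_events_args wait = {0};

	create.event_type = KFD_IOC_EVENT_SIGNAL;
	create.auto_reset = 1;
	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &create))
		return -1;

	data.event_id = create.event_id;		/* to KFD */
	wait.events_ptr = (__u64)(uintptr_t)&data;	/* array of one entry */
	wait.num_events = 1;
	wait.wait_for_all = 1;
	wait.timeout = timeout_ms;
	if (ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wait))
		return -1;

	/* KFD_IOC_WAIT_RESULT_COMPLETE, _TIMEOUT or _FAIL */
	return wait.wait_result;
}
#endif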
359struct kfd_ioctl_set_scratch_backing_va_args {
360 __u64 va_addr; /* to KFD */
361 __u32 gpu_id; /* to KFD */
362 __u32 pad;
363};
364
365struct kfd_ioctl_get_tile_config_args {
366 /* to KFD: pointer to tile array */
367 __u64 tile_config_ptr;
368 /* to KFD: pointer to macro tile array */
369 __u64 macro_tile_config_ptr;
370 /* to KFD: array size allocated by user mode
371 * from KFD: array size filled by kernel
372 */
373 __u32 num_tile_configs;
374 /* to KFD: array size allocated by user mode
375 * from KFD: array size filled by kernel
376 */
377 __u32 num_macro_tile_configs;
378
379 __u32 gpu_id; /* to KFD */
380 __u32 gb_addr_config; /* from KFD */
381 __u32 num_banks; /* from KFD */
382 __u32 num_ranks; /* from KFD */
383 /* struct size can be extended later if needed
384 * without breaking ABI compatibility
385 */
386};
387
388struct kfd_ioctl_set_trap_handler_args {
389 __u64 tba_addr; /* to KFD */
390 __u64 tma_addr; /* to KFD */
391 __u32 gpu_id; /* to KFD */
392 __u32 pad;
393};
394
395struct kfd_ioctl_acquire_vm_args {
396 __u32 drm_fd; /* to KFD */
397 __u32 gpu_id; /* to KFD */
398};
399
400/* Allocation flags: memory types */
401#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM (1 << 0)
402#define KFD_IOC_ALLOC_MEM_FLAGS_GTT (1 << 1)
403#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR (1 << 2)
404#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
405#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4)
406/* Allocation flags: attributes/access options */
407#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
408#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
409#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
410#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28)
411#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
412#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1 << 26)
413#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED (1 << 25)
414#define KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT (1 << 24)
415#define KFD_IOC_ALLOC_MEM_FLAGS_CONTIGUOUS (1 << 23)
416
417/* Allocate memory for later SVM (shared virtual memory) mapping.
418 *
419 * @va_addr: virtual address of the memory to be allocated
420 * all later mappings on all GPUs will use this address
421 * @size: size in bytes
422 * @handle: buffer handle returned to user mode, used to refer to
423 * this allocation for mapping, unmapping and freeing
 424 * @mmap_offset: for CPU-mapping the allocation by mmapping a render node;
 425 * for userptrs this is overloaded to specify the CPU address
426 * @gpu_id: device identifier
427 * @flags: memory type and attributes. See KFD_IOC_ALLOC_MEM_FLAGS above
428 */
429struct kfd_ioctl_alloc_memory_of_gpu_args {
430 __u64 va_addr; /* to KFD */
431 __u64 size; /* to KFD */
432 __u64 handle; /* from KFD */
433 __u64 mmap_offset; /* to KFD (userptr), from KFD (mmap offset) */
434 __u32 gpu_id; /* to KFD */
435 __u32 flags;
436};
437
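/*
 * Minimal sketch (not part of the UAPI): allocate writable VRAM on one GPU.
 * It assumes @va is a valid, suitably aligned GPU virtual address reserved by
 * the caller (real runtimes reserve the range through the DRM render node)
 * and that a VM was acquired with AMDKFD_IOC_ACQUIRE_VM beforehand.
 */
#if 0 /* usage sketch only */
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int alloc_vram(int kfd_fd, __u32 gpu_id, __u64 va, __u64 size,
		      __u64 *handle, __u64 *mmap_offset)
{
	struct kfd_ioctl_alloc_memory_of_gpu_args args = {0};

	args.va_addr = va;	/* VA used by all later GPU mappings */
	args.size = size;
	args.gpu_id = gpu_id;
	args.flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
		     KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE;

	if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &args))
		return -1;

	*handle = args.handle;		 /* for map/unmap/free */
	*mmap_offset = args.mmap_offset; /* for CPU mapping via render node */
	return 0;
}
#endif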
438/* Free memory allocated with kfd_ioctl_alloc_memory_of_gpu
439 *
440 * @handle: memory handle returned by alloc
441 */
442struct kfd_ioctl_free_memory_of_gpu_args {
443 __u64 handle; /* to KFD */
444};
445
446/* Map memory to one or more GPUs
447 *
448 * @handle: memory handle returned by alloc
449 * @device_ids_array_ptr: array of gpu_ids (__u32 per device)
450 * @n_devices: number of devices in the array
451 * @n_success: number of devices mapped successfully
452 *
 453 * @n_success returns information to the caller about how many devices from
454 * the start of the array have mapped the buffer successfully. It can
455 * be passed into a subsequent retry call to skip those devices. For
456 * the first call the caller should initialize it to 0.
457 *
458 * If the ioctl completes with return code 0 (success), n_success ==
459 * n_devices.
460 */
461struct kfd_ioctl_map_memory_to_gpu_args {
462 __u64 handle; /* to KFD */
463 __u64 device_ids_array_ptr; /* to KFD */
464 __u32 n_devices; /* to KFD */
465 __u32 n_success; /* to/from KFD */
466};
467
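/*
 * Minimal sketch (not part of the UAPI) of the n_success retry protocol
 * described above: n_success starts at 0 and, after a partial failure, tells
 * a retry how many devices at the start of the array can be skipped. The
 * retry limit is an assumption of the sketch.
 */
#if 0 /* usage sketch only */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int map_on_gpus(int kfd_fd, __u64 handle,
		       __u32 *gpu_ids, __u32 n_devices)
{
	struct kfd_ioctl_map_memory_to_gpu_args args = {0};
	int tries = 3, ret;

	args.handle = handle;
	args.device_ids_array_ptr = (__u64)(uintptr_t)gpu_ids;
	args.n_devices = n_devices;
	args.n_success = 0;		/* must be 0 on the first attempt */

	do {
		ret = ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &args);
	} while (ret && args.n_success < n_devices && --tries);

	return ret;	/* 0 means n_success == n_devices */
}
#endif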
468/* Unmap memory from one or more GPUs
469 *
470 * same arguments as for mapping
471 */
472struct kfd_ioctl_unmap_memory_from_gpu_args {
473 __u64 handle; /* to KFD */
474 __u64 device_ids_array_ptr; /* to KFD */
475 __u32 n_devices; /* to KFD */
476 __u32 n_success; /* to/from KFD */
477};
478
479/* Allocate GWS for specific queue
480 *
481 * @queue_id: queue's id that GWS is allocated for
482 * @num_gws: how many GWS to allocate
483 * @first_gws: index of the first GWS allocated.
 484 * only contiguous GWS allocation is supported
485 */
486struct kfd_ioctl_alloc_queue_gws_args {
487 __u32 queue_id; /* to KFD */
488 __u32 num_gws; /* to KFD */
489 __u32 first_gws; /* from KFD */
490 __u32 pad;
491};
492
493struct kfd_ioctl_get_dmabuf_info_args {
494 __u64 size; /* from KFD */
495 __u64 metadata_ptr; /* to KFD */
496 __u32 metadata_size; /* to KFD (space allocated by user)
497 * from KFD (actual metadata size)
498 */
499 __u32 gpu_id; /* from KFD */
500 __u32 flags; /* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
501 __u32 dmabuf_fd; /* to KFD */
502};
503
504struct kfd_ioctl_import_dmabuf_args {
505 __u64 va_addr; /* to KFD */
506 __u64 handle; /* from KFD */
507 __u32 gpu_id; /* to KFD */
508 __u32 dmabuf_fd; /* to KFD */
509};
510
511struct kfd_ioctl_export_dmabuf_args {
512 __u64 handle; /* to KFD */
513 __u32 flags; /* to KFD */
514 __u32 dmabuf_fd; /* from KFD */
515};
516
517/*
518 * KFD SMI(System Management Interface) events
519 */
520enum kfd_smi_event {
521 KFD_SMI_EVENT_NONE = 0, /* not used */
522 KFD_SMI_EVENT_VMFAULT = 1, /* event start counting at 1 */
523 KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
524 KFD_SMI_EVENT_GPU_PRE_RESET = 3,
525 KFD_SMI_EVENT_GPU_POST_RESET = 4,
526 KFD_SMI_EVENT_MIGRATE_START = 5,
527 KFD_SMI_EVENT_MIGRATE_END = 6,
528 KFD_SMI_EVENT_PAGE_FAULT_START = 7,
529 KFD_SMI_EVENT_PAGE_FAULT_END = 8,
530 KFD_SMI_EVENT_QUEUE_EVICTION = 9,
531 KFD_SMI_EVENT_QUEUE_RESTORE = 10,
532 KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,
533
534 /*
 535 * Max event number, also used as a flag bit to request events from all
 536 * processes; this requires super user permission, otherwise events will
 537 * not be received from any process. Without this flag, only events from
 538 * the same process are received.
539 */
540 KFD_SMI_EVENT_ALL_PROCESS = 64
541};
542
543/* The reason for the page migration event */
544enum KFD_MIGRATE_TRIGGERS {
545 KFD_MIGRATE_TRIGGER_PREFETCH, /* Prefetch to GPU VRAM or system memory */
546 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, /* GPU page fault recover */
547 KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, /* CPU page fault recover */
548 KFD_MIGRATE_TRIGGER_TTM_EVICTION /* TTM eviction */
549};
550
551/* The reason for the user queue eviction event */
552enum KFD_QUEUE_EVICTION_TRIGGERS {
553 KFD_QUEUE_EVICTION_TRIGGER_SVM, /* SVM buffer migration */
554 KFD_QUEUE_EVICTION_TRIGGER_USERPTR, /* userptr movement */
555 KFD_QUEUE_EVICTION_TRIGGER_TTM, /* TTM move buffer */
556 KFD_QUEUE_EVICTION_TRIGGER_SUSPEND, /* GPU suspend */
557 KFD_QUEUE_EVICTION_CRIU_CHECKPOINT, /* CRIU checkpoint */
558 KFD_QUEUE_EVICTION_CRIU_RESTORE /* CRIU restore */
559};
560
561/* The reason for the unmap-buffer-from-GPU event */
562enum KFD_SVM_UNMAP_TRIGGERS {
563 KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY, /* MMU notifier CPU buffer movement */
564 KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,/* MMU notifier page migration */
565 KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU /* Unmap to free the buffer */
566};
567
568#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
569#define KFD_SMI_EVENT_MSG_SIZE 96
570
571struct kfd_ioctl_smi_events_args {
572 __u32 gpuid; /* to KFD */
573 __u32 anon_fd; /* from KFD */
574};
575
576/*
577 * SVM event tracing via SMI system management interface
578 *
579 * Open event file descriptor
 580 * use ioctl AMDKFD_IOC_SMI_EVENTS, passing in the gpuid; it returns an
 581 * anonymous file descriptor used to receive SMI events.
 582 * If called with sudo permission, the file descriptor can be used to receive
 583 * SVM events from all processes; otherwise it only receives SVM events of the
 584 * same process.
585 *
586 * To enable the SVM event
587 * Write event file descriptor with KFD_SMI_EVENT_MASK_FROM_INDEX(event) bitmap
 588 * mask to start recording the events to the kfifo; combine bitmap masks
 589 * for multiple events. A new event mask overwrites the previous event mask.
 590 * The KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS) bit requires sudo
 591 * permission to receive SVM events from all processes.
592 *
593 * To receive the event
 594 * The application can poll the file descriptor to wait for events, then read
 595 * events from the file into a buffer. Each event is a one-line string message,
 596 * starting with the event id, followed by the event-specific information.
597 *
598 * To decode event information
 599 * The following event format string macros can be used with sscanf to decode
 600 * the specific event information.
601 * event triggers: the reason to generate the event, defined as enum for unmap,
602 * eviction and migrate events.
603 * node, from, to, prefetch_loc, preferred_loc: GPU ID, or 0 for system memory.
604 * addr: user mode address, in pages
605 * size: in pages
 606 * pid: the ID of the process that generated the event
607 * ns: timestamp in nanosecond-resolution, starts at system boot time but
608 * stops during suspend
 609 * migrate_update: how the GPU page fault was recovered: 'M' for migrate, 'U' for update
610 * rw: 'W' for write page fault, 'R' for read page fault
611 * rescheduled: 'R' if the queue restore failed and rescheduled to try again
612 */
613#define KFD_EVENT_FMT_UPDATE_GPU_RESET(reset_seq_num, reset_cause)\
614 "%x %s\n", (reset_seq_num), (reset_cause)
615
616#define KFD_EVENT_FMT_THERMAL_THROTTLING(bitmask, counter)\
617 "%llx:%llx\n", (bitmask), (counter)
618
619#define KFD_EVENT_FMT_VMFAULT(pid, task_name)\
620 "%x:%s\n", (pid), (task_name)
621
622#define KFD_EVENT_FMT_PAGEFAULT_START(ns, pid, addr, node, rw)\
623 "%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (rw)
624
625#define KFD_EVENT_FMT_PAGEFAULT_END(ns, pid, addr, node, migrate_update)\
626 "%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (migrate_update)
627
628#define KFD_EVENT_FMT_MIGRATE_START(ns, pid, start, size, from, to, prefetch_loc,\
629 preferred_loc, migrate_trigger)\
630 "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", (ns), (pid), (start), (size),\
631 (from), (to), (prefetch_loc), (preferred_loc), (migrate_trigger)
632
633#define KFD_EVENT_FMT_MIGRATE_END(ns, pid, start, size, from, to, migrate_trigger)\
634 "%lld -%d @%lx(%lx) %x->%x %d\n", (ns), (pid), (start), (size),\
635 (from), (to), (migrate_trigger)
636
637#define KFD_EVENT_FMT_QUEUE_EVICTION(ns, pid, node, evict_trigger)\
638 "%lld -%d %x %d\n", (ns), (pid), (node), (evict_trigger)
639
640#define KFD_EVENT_FMT_QUEUE_RESTORE(ns, pid, node, rescheduled)\
641 "%lld -%d %x %c\n", (ns), (pid), (node), (rescheduled)
642
643#define KFD_EVENT_FMT_UNMAP_FROM_GPU(ns, pid, addr, size, node, unmap_trigger)\
644 "%lld -%d @%lx(%lx) %x %d\n", (ns), (pid), (addr), (size),\
645 (node), (unmap_trigger)
646
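/*
 * Minimal sketch (not part of the UAPI) of the SMI event flow described
 * above: obtain the anonymous fd, write an event mask to enable VM-fault
 * events, then poll and read one-line event messages. Writing the mask with
 * a "0x" prefix is an assumption so that the value is parsed as hexadecimal.
 */
#if 0 /* usage sketch only */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>

static void watch_vmfaults(int kfd_fd, __u32 gpu_id)
{
	struct kfd_ioctl_smi_events_args args = { .gpuid = gpu_id };
	char mask[32], buf[KFD_SMI_EVENT_MSG_SIZE];
	struct pollfd pfd;
	unsigned int event;

	if (ioctl(kfd_fd, AMDKFD_IOC_SMI_EVENTS, &args))
		return;

	/* Enable only KFD_SMI_EVENT_VMFAULT by writing its mask bit. */
	snprintf(mask, sizeof(mask), "0x%llx",
		 KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_VMFAULT));
	write(args.anon_fd, mask, strlen(mask));

	pfd.fd = args.anon_fd;
	pfd.events = POLLIN;
	while (poll(&pfd, 1, -1) > 0) {
		ssize_t n = read(args.anon_fd, buf, sizeof(buf) - 1);

		if (n <= 0)
			break;
		buf[n] = '\0';
		if (sscanf(buf, "%x", &event) == 1)
			printf("SMI event %u: %s", event, buf);
	}
	close(args.anon_fd);
}
#endif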
647/**************************************************************************************************
648 * CRIU IOCTLs (Checkpoint Restore In Userspace)
649 *
650 * When checkpointing a process, the userspace application will perform:
651 * 1. PROCESS_INFO op to determine current process information. This pauses execution and evicts
652 * all the queues.
653 * 2. CHECKPOINT op to checkpoint process contents (BOs, queues, events, svm-ranges)
654 * 3. UNPAUSE op to un-evict all the queues
655 *
656 * When restoring a process, the CRIU userspace application will perform:
657 *
658 * 1. RESTORE op to restore process contents
659 * 2. RESUME op to start the process
660 *
661 * Note: Queues are forced into an evicted state after a successful PROCESS_INFO. User
662 * application needs to perform an UNPAUSE operation after calling PROCESS_INFO.
663 */
664
665enum kfd_criu_op {
666 KFD_CRIU_OP_PROCESS_INFO,
667 KFD_CRIU_OP_CHECKPOINT,
668 KFD_CRIU_OP_UNPAUSE,
669 KFD_CRIU_OP_RESTORE,
670 KFD_CRIU_OP_RESUME,
671};
672
673/**
 674 * kfd_ioctl_criu_args - Arguments to perform a CRIU operation
675 * @devices: [in/out] User pointer to memory location for devices information.
676 * This is an array of type kfd_criu_device_bucket.
677 * @bos: [in/out] User pointer to memory location for BOs information
678 * This is an array of type kfd_criu_bo_bucket.
679 * @priv_data: [in/out] User pointer to memory location for private data
680 * @priv_data_size: [in/out] Size of priv_data in bytes
681 * @num_devices: [in/out] Number of GPUs used by process. Size of @devices array.
682 * @num_bos [in/out] Number of BOs used by process. Size of @bos array.
683 * @num_objects: [in/out] Number of objects used by process. Objects are opaque to
684 * user application.
685 * @pid: [in/out] PID of the process being checkpointed
686 * @op [in] Type of operation (kfd_criu_op)
687 *
688 * Return: 0 on success, -errno on failure
689 */
690struct kfd_ioctl_criu_args {
691 __u64 devices; /* Used during ops: CHECKPOINT, RESTORE */
692 __u64 bos; /* Used during ops: CHECKPOINT, RESTORE */
693 __u64 priv_data; /* Used during ops: CHECKPOINT, RESTORE */
694 __u64 priv_data_size; /* Used during ops: PROCESS_INFO, RESTORE */
695 __u32 num_devices; /* Used during ops: PROCESS_INFO, RESTORE */
696 __u32 num_bos; /* Used during ops: PROCESS_INFO, RESTORE */
697 __u32 num_objects; /* Used during ops: PROCESS_INFO, RESTORE */
698 __u32 pid; /* Used during ops: PROCESS_INFO, RESUME */
699 __u32 op;
700};
701
702struct kfd_criu_device_bucket {
703 __u32 user_gpu_id;
704 __u32 actual_gpu_id;
705 __u32 drm_fd;
706 __u32 pad;
707};
708
709struct kfd_criu_bo_bucket {
710 __u64 addr;
711 __u64 size;
712 __u64 offset;
713 __u64 restored_offset; /* During restore, updated offset for BO */
714 __u32 gpu_id; /* This is the user_gpu_id */
715 __u32 alloc_flags;
716 __u32 dmabuf_fd;
717 __u32 pad;
718};
719
720/* CRIU IOCTLs - END */
721/**************************************************************************************************/
722
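/*
 * Minimal sketch (not part of the UAPI) of the checkpoint-side ioctl ordering
 * described above: PROCESS_INFO, then CHECKPOINT, then UNPAUSE. A real
 * checkpointer (the CRIU amdgpu plugin) also serializes the returned buffers
 * and the BO contents; allocation checks and cleanup are omitted here.
 */
#if 0 /* usage sketch only */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int criu_checkpoint_sketch(int kfd_fd, __u32 target_pid)
{
	struct kfd_ioctl_criu_args args = {0};
	int ret;

	/* 1. PROCESS_INFO: pauses the process and returns buffer sizes. */
	args.op = KFD_CRIU_OP_PROCESS_INFO;
	args.pid = target_pid;
	ret = ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
	if (ret)
		return ret;

	/* 2. CHECKPOINT: caller provides buffers sized by step 1. */
	args.op = KFD_CRIU_OP_CHECKPOINT;
	args.devices = (__u64)(uintptr_t)calloc(args.num_devices,
				sizeof(struct kfd_criu_device_bucket));
	args.bos = (__u64)(uintptr_t)calloc(args.num_bos,
				sizeof(struct kfd_criu_bo_bucket));
	args.priv_data = (__u64)(uintptr_t)malloc(args.priv_data_size);
	ret = ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);

	/* 3. UNPAUSE: queues stay evicted until this is issued. */
	args.op = KFD_CRIU_OP_UNPAUSE;
	ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
	return ret;
}
#endif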
723/* Register offset inside the remapped mmio page
724 */
725enum kfd_mmio_remap {
726 KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
727 KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
728};
729
730/* Guarantee host access to memory */
731#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
732/* Fine grained coherency between all devices with access */
733#define KFD_IOCTL_SVM_FLAG_COHERENT 0x00000002
734/* Use any GPU in same hive as preferred device */
735#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004
736/* GPUs only read, allows replication */
737#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008
738/* Allow execution on GPU */
739#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010
740/* GPUs mostly read, may allow similar optimizations as RO, but writes fault */
741#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
742/* Keep GPU memory mapping always valid as if XNACK is disabled */
743#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040
744/* Fine grained coherency between all devices using device-scope atomics */
745#define KFD_IOCTL_SVM_FLAG_EXT_COHERENT 0x00000080
746
747/**
748 * kfd_ioctl_svm_op - SVM ioctl operations
749 *
750 * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes
751 * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes
752 */
753enum kfd_ioctl_svm_op {
754 KFD_IOCTL_SVM_OP_SET_ATTR,
755 KFD_IOCTL_SVM_OP_GET_ATTR
756};
757
758/** kfd_ioctl_svm_location - Enum for preferred and prefetch locations
759 *
760 * GPU IDs are used to specify GPUs as preferred and prefetch locations.
761 * Below definitions are used for system memory or for leaving the preferred
762 * location unspecified.
763 */
764enum kfd_ioctl_svm_location {
765 KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
766 KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
767};
768
769/**
770 * kfd_ioctl_svm_attr_type - SVM attribute types
771 *
772 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: gpuid of the preferred location, 0 for
773 * system memory
774 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: gpuid of the prefetch location, 0 for
775 * system memory. Setting this triggers an
776 * immediate prefetch (migration).
777 * @KFD_IOCTL_SVM_ATTR_ACCESS:
778 * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
779 * @KFD_IOCTL_SVM_ATTR_NO_ACCESS: specify memory access for the gpuid given
780 * by the attribute value
781 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS: bitmask of flags to set (see
782 * KFD_IOCTL_SVM_FLAG_...)
783 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS: bitmask of flags to clear
784 * @KFD_IOCTL_SVM_ATTR_GRANULARITY: migration granularity
785 * (log2 num pages)
786 */
787enum kfd_ioctl_svm_attr_type {
788 KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
789 KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
790 KFD_IOCTL_SVM_ATTR_ACCESS,
791 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
792 KFD_IOCTL_SVM_ATTR_NO_ACCESS,
793 KFD_IOCTL_SVM_ATTR_SET_FLAGS,
794 KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
795 KFD_IOCTL_SVM_ATTR_GRANULARITY
796};
797
798/**
799 * kfd_ioctl_svm_attribute - Attributes as pairs of type and value
800 *
801 * The meaning of the @value depends on the attribute type.
802 *
803 * @type: attribute type (see enum @kfd_ioctl_svm_attr_type)
804 * @value: attribute value
805 */
806struct kfd_ioctl_svm_attribute {
807 __u32 type;
808 __u32 value;
809};
810
811/**
812 * kfd_ioctl_svm_args - Arguments for SVM ioctl
813 *
814 * @op specifies the operation to perform (see enum
815 * @kfd_ioctl_svm_op). @start_addr and @size are common for all
816 * operations.
817 *
818 * A variable number of attributes can be given in @attrs.
819 * @nattr specifies the number of attributes. New attributes can be
820 * added in the future without breaking the ABI. If unknown attributes
821 * are given, the function returns -EINVAL.
822 *
823 * @KFD_IOCTL_SVM_OP_SET_ATTR sets attributes for a virtual address
824 * range. It may overlap existing virtual address ranges. If it does,
825 * the existing ranges will be split such that the attribute changes
826 * only apply to the specified address range.
827 *
828 * @KFD_IOCTL_SVM_OP_GET_ATTR returns the intersection of attributes
829 * over all memory in the given range and returns the result as the
830 * attribute value. If different pages have different preferred or
831 * prefetch locations, 0xffffffff will be returned for
832 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC or
 833 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC respectively. For
834 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS, flags of all pages will be
 835 * aggregated by bitwise AND. That means a flag will be set in the
 836 * output if that flag is set for all pages in the range. For
 837 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS, flags of all pages will be
 838 * aggregated by bitwise NOR. That means a flag will be set in the
 839 * output if that flag is clear for all pages in the range.
840 * The minimum migration granularity throughout the range will be
841 * returned for @KFD_IOCTL_SVM_ATTR_GRANULARITY.
842 *
843 * Querying of accessibility attributes works by initializing the
844 * attribute type to @KFD_IOCTL_SVM_ATTR_ACCESS and the value to the
845 * GPUID being queried. Multiple attributes can be given to allow
846 * querying multiple GPUIDs. The ioctl function overwrites the
847 * attribute type to indicate the access for the specified GPU.
848 */
849struct kfd_ioctl_svm_args {
850 __u64 start_addr;
851 __u64 size;
852 __u32 op;
853 __u32 nattr;
854 /* Variable length array of attributes */
855 struct kfd_ioctl_svm_attribute attrs[];
856};
857
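/*
 * Minimal sketch (not part of the UAPI): prefetch (migrate) an SVM range to
 * one GPU and grant it access, using the variable-length attribute array.
 * It assumes @addr and @size are page aligned and that XNACK mode has been
 * configured appropriately for SVM use.
 */
#if 0 /* usage sketch only */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int svm_prefetch_to_gpu(int kfd_fd, void *addr, __u64 size,
			       __u32 gpu_id)
{
	const __u32 nattr = 2;
	struct kfd_ioctl_svm_args *args;
	int ret;

	args = calloc(1, sizeof(*args) +
			 nattr * sizeof(struct kfd_ioctl_svm_attribute));
	if (!args)
		return -1;

	args->start_addr = (__u64)(uintptr_t)addr;
	args->size = size;
	args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
	args->nattr = nattr;
	args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
	args->attrs[0].value = gpu_id;	/* 0 would mean system memory */
	args->attrs[1].type = KFD_IOCTL_SVM_ATTR_ACCESS;
	args->attrs[1].value = gpu_id;

	ret = ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
	free(args);
	return ret;
}
#endif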
858/**
859 * kfd_ioctl_set_xnack_mode_args - Arguments for set_xnack_mode
860 *
861 * @xnack_enabled: [in/out] Whether to enable XNACK mode for this process
862 *
863 * @xnack_enabled indicates whether recoverable page faults should be
864 * enabled for the current process. 0 means disabled, positive means
865 * enabled, negative means leave unchanged. If enabled, virtual address
866 * translations on GFXv9 and later AMD GPUs can return XNACK and retry
867 * the access until a valid PTE is available. This is used to implement
868 * device page faults.
869 *
870 * On output, @xnack_enabled returns the (new) current mode (0 or
871 * positive). Therefore, a negative input value can be used to query
872 * the current mode without changing it.
873 *
874 * The XNACK mode fundamentally changes the way SVM managed memory works
875 * in the driver, with subtle effects on application performance and
876 * functionality.
877 *
878 * Enabling XNACK mode requires shader programs to be compiled
879 * differently. Furthermore, not all GPUs support changing the mode
880 * per-process. Therefore changing the mode is only allowed while no
 881 * user mode queues exist in the process. This ensures that no shader
 882 * code is running that may have been compiled for the wrong mode. GPUs
 883 * that cannot change to the requested mode will prevent the XNACK
 884 * mode change from taking effect. All GPUs used by the process must be in the
885 * same XNACK mode.
886 *
887 * GFXv8 or older GPUs do not support 48 bit virtual addresses or SVM.
888 * Therefore those GPUs are not considered for the XNACK mode switch.
889 *
890 * Return: 0 on success, -errno on failure
891 */
892struct kfd_ioctl_set_xnack_mode_args {
893 __s32 xnack_enabled;
894};
895
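/*
 * Minimal sketch (not part of the UAPI): query the current XNACK mode without
 * changing it by passing a negative value, as described above.
 */
#if 0 /* usage sketch only */
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int query_xnack_mode(int kfd_fd)
{
	struct kfd_ioctl_set_xnack_mode_args args = { .xnack_enabled = -1 };

	if (ioctl(kfd_fd, AMDKFD_IOC_SET_XNACK_MODE, &args))
		return -1;

	return args.xnack_enabled;	/* 0 = disabled, positive = enabled */
}
#endif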
896/* Wave launch override modes */
897enum kfd_dbg_trap_override_mode {
898 KFD_DBG_TRAP_OVERRIDE_OR = 0,
899 KFD_DBG_TRAP_OVERRIDE_REPLACE = 1
900};
901
902/* Wave launch overrides */
903enum kfd_dbg_trap_mask {
904 KFD_DBG_TRAP_MASK_FP_INVALID = 1,
905 KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2,
906 KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4,
907 KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8,
908 KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16,
909 KFD_DBG_TRAP_MASK_FP_INEXACT = 32,
910 KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64,
911 KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128,
912 KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256,
913 KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30),
914 KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31)
915};
916
917/* Wave launch modes */
918enum kfd_dbg_trap_wave_launch_mode {
919 KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0,
920 KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1,
921 KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3
922};
923
924/* Address watch modes */
925enum kfd_dbg_trap_address_watch_mode {
926 KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0,
927 KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1,
928 KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2,
929 KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3
930};
931
932/* Additional wave settings */
933enum kfd_dbg_trap_flags {
934 KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
935 KFD_DBG_TRAP_FLAG_SINGLE_ALU_OP = 2,
936};
937
938/* Trap exceptions */
939enum kfd_dbg_trap_exception_code {
940 EC_NONE = 0,
941 /* per queue */
942 EC_QUEUE_WAVE_ABORT = 1,
943 EC_QUEUE_WAVE_TRAP = 2,
944 EC_QUEUE_WAVE_MATH_ERROR = 3,
945 EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4,
946 EC_QUEUE_WAVE_MEMORY_VIOLATION = 5,
947 EC_QUEUE_WAVE_APERTURE_VIOLATION = 6,
948 EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16,
949 EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17,
950 EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18,
951 EC_QUEUE_PACKET_RESERVED = 19,
952 EC_QUEUE_PACKET_UNSUPPORTED = 20,
953 EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21,
954 EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22,
955 EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23,
956 EC_QUEUE_PREEMPTION_ERROR = 30,
957 EC_QUEUE_NEW = 31,
958 /* per device */
959 EC_DEVICE_QUEUE_DELETE = 32,
960 EC_DEVICE_MEMORY_VIOLATION = 33,
961 EC_DEVICE_RAS_ERROR = 34,
962 EC_DEVICE_FATAL_HALT = 35,
963 EC_DEVICE_NEW = 36,
964 /* per process */
965 EC_PROCESS_RUNTIME = 48,
966 EC_PROCESS_DEVICE_REMOVE = 49,
967 EC_MAX
968};
969
970/* Mask generated by ecode in kfd_dbg_trap_exception_code */
971#define KFD_EC_MASK(ecode) (1ULL << (ecode - 1))
972
973/* Masks for exception code type checks below */
974#define KFD_EC_MASK_QUEUE (KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) | \
975 KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) | \
976 KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) | \
977 KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) | \
978 KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) | \
979 KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) | \
980 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | \
981 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | \
982 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | \
983 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | \
984 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | \
985 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | \
986 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | \
987 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED) | \
988 KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR) | \
989 KFD_EC_MASK(EC_QUEUE_NEW))
990#define KFD_EC_MASK_DEVICE (KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) | \
991 KFD_EC_MASK(EC_DEVICE_RAS_ERROR) | \
992 KFD_EC_MASK(EC_DEVICE_FATAL_HALT) | \
993 KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) | \
994 KFD_EC_MASK(EC_DEVICE_NEW))
995#define KFD_EC_MASK_PROCESS (KFD_EC_MASK(EC_PROCESS_RUNTIME) | \
996 KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
997#define KFD_EC_MASK_PACKET (KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | \
998 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | \
999 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | \
1000 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | \
1001 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | \
1002 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | \
1003 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | \
1004 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED))
1005
1006/* Checks for exception code types for KFD search */
1007#define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX)
1008#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode) \
1009 (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
1010#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode) \
1011 (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
1012#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode) \
1013 (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
1014#define KFD_DBG_EC_TYPE_IS_PACKET(ecode) \
1015 (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET))
1016
1017
1018/* Runtime enable states */
1019enum kfd_dbg_runtime_state {
1020 DEBUG_RUNTIME_STATE_DISABLED = 0,
1021 DEBUG_RUNTIME_STATE_ENABLED = 1,
1022 DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
1023 DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
1024};
1025
1026/* Runtime enable status */
1027struct kfd_runtime_info {
1028 __u64 r_debug;
1029 __u32 runtime_state;
1030 __u32 ttmp_setup;
1031};
1032
1033/* Enable modes for runtime enable */
1034#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK 1
1035#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK 2
1036
1037/**
1038 * kfd_ioctl_runtime_enable_args - Arguments for runtime enable
1039 *
1040 * Coordinates debug exception signalling and debug device enablement with runtime.
1041 *
 1042 * @r_debug - pointer to user struct for sharing information between ROCr and the debugger
1043 * @mode_mask - mask to set mode
1044 * KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK - enable runtime for debugging, otherwise disable
1045 * KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK - enable trap temporary setup (ignore on disable)
1046 * @capabilities_mask - mask to notify runtime on what KFD supports
1047 *
1048 * Return - 0 on SUCCESS.
1049 * - EBUSY if runtime enable call already pending.
1050 * - EEXIST if user queues already active prior to call.
1051 * If process is debug enabled, runtime enable will enable debug devices and
1052 * wait for debugger process to send runtime exception EC_PROCESS_RUNTIME
1053 * to unblock - see kfd_ioctl_dbg_trap_args.
1054 *
1055 */
1056struct kfd_ioctl_runtime_enable_args {
1057 __u64 r_debug;
1058 __u32 mode_mask;
1059 __u32 capabilities_mask;
1060};
1061
1062/* Queue information */
1063struct kfd_queue_snapshot_entry {
1064 __u64 exception_status;
1065 __u64 ring_base_address;
1066 __u64 write_pointer_address;
1067 __u64 read_pointer_address;
1068 __u64 ctx_save_restore_address;
1069 __u32 queue_id;
1070 __u32 gpu_id;
1071 __u32 ring_size;
1072 __u32 queue_type;
1073 __u32 ctx_save_restore_area_size;
1074 __u32 reserved;
1075};
1076
1077/* Queue status return for suspend/resume */
1078#define KFD_DBG_QUEUE_ERROR_BIT 30
1079#define KFD_DBG_QUEUE_INVALID_BIT 31
1080#define KFD_DBG_QUEUE_ERROR_MASK (1 << KFD_DBG_QUEUE_ERROR_BIT)
1081#define KFD_DBG_QUEUE_INVALID_MASK (1 << KFD_DBG_QUEUE_INVALID_BIT)
1082
1083/* Context save area header information */
1084struct kfd_context_save_area_header {
1085 struct {
1086 __u32 control_stack_offset;
1087 __u32 control_stack_size;
1088 __u32 wave_state_offset;
1089 __u32 wave_state_size;
1090 } wave_state;
1091 __u32 debug_offset;
1092 __u32 debug_size;
1093 __u64 err_payload_addr;
1094 __u32 err_event_id;
1095 __u32 reserved1;
1096};
1097
1098/*
1099 * Debug operations
1100 *
1101 * For specifics on usage and return values, see documentation per operation
1102 * below. Otherwise, generic error returns apply:
1103 * - ESRCH if the process to debug does not exist.
1104 *
1105 * - EINVAL (with KFD_IOC_DBG_TRAP_ENABLE exempt) if operation
1106 * KFD_IOC_DBG_TRAP_ENABLE has not succeeded prior.
1107 * Also returns this error if GPU hardware scheduling is not supported.
1108 *
1109 * - EPERM (with KFD_IOC_DBG_TRAP_DISABLE exempt) if target process is not
1110 * PTRACE_ATTACHED. KFD_IOC_DBG_TRAP_DISABLE is exempt to allow
1111 * clean up of debug mode as long as process is debug enabled.
1112 *
1113 * - EACCES if any DBG_HW_OP (debug hardware operation) is requested when
1114 * AMDKFD_IOC_RUNTIME_ENABLE has not succeeded prior.
1115 *
1116 * - ENODEV if any GPU does not support debugging on a DBG_HW_OP call.
1117 *
1118 * - Other errors may be returned when a DBG_HW_OP occurs while the GPU
1119 * is in a fatal state.
1120 *
1121 */
1122enum kfd_dbg_trap_operations {
1123 KFD_IOC_DBG_TRAP_ENABLE = 0,
1124 KFD_IOC_DBG_TRAP_DISABLE = 1,
1125 KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
1126 KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
1127 KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4, /* DBG_HW_OP */
1128 KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5, /* DBG_HW_OP */
1129 KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6, /* DBG_HW_OP */
1130 KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7, /* DBG_HW_OP */
1131 KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8, /* DBG_HW_OP */
1132 KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9, /* DBG_HW_OP */
1133 KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
1134 KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
1135 KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
1136 KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
1137 KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
1138};
1139
1140/**
1141 * kfd_ioctl_dbg_trap_enable_args
1142 *
1143 * Arguments for KFD_IOC_DBG_TRAP_ENABLE.
1144 *
1145 * Enables debug session for target process. Call @op KFD_IOC_DBG_TRAP_DISABLE in
1146 * kfd_ioctl_dbg_trap_args to disable debug session.
1147 *
1148 * @exception_mask (IN) - exceptions to raise to the debugger
1149 * @rinfo_ptr (IN) - pointer to runtime info buffer (see kfd_runtime_info)
1150 * @rinfo_size (IN/OUT) - size of runtime info buffer in bytes
 1151 * @dbg_fd (IN) - fd that KFD will use to notify the debugger of raised
 1152 * exceptions set in exception_mask.
1153 *
1154 * Generic errors apply (see kfd_dbg_trap_operations).
1155 * Return - 0 on SUCCESS.
1156 * Copies KFD saved kfd_runtime_info to @rinfo_ptr on enable.
 1157 * The size of the kfd_runtime_info saved by KFD is returned in @rinfo_size.
1158 * - EBADF if KFD cannot get a reference to dbg_fd.
1159 * - EFAULT if KFD cannot copy runtime info to rinfo_ptr.
1160 * - EINVAL if target process is already debug enabled.
1161 *
1162 */
1163struct kfd_ioctl_dbg_trap_enable_args {
1164 __u64 exception_mask;
1165 __u64 rinfo_ptr;
1166 __u32 rinfo_size;
1167 __u32 dbg_fd;
1168};
1169
1170/**
1171 * kfd_ioctl_dbg_trap_send_runtime_event_args
1172 *
1173 *
1174 * Arguments for KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT.
1175 * Raises exceptions to runtime.
1176 *
1177 * @exception_mask (IN) - exceptions to raise to runtime
1178 * @gpu_id (IN) - target device id
1179 * @queue_id (IN) - target queue id
1180 *
1181 * Generic errors apply (see kfd_dbg_trap_operations).
1182 * Return - 0 on SUCCESS.
1183 * - ENODEV if gpu_id not found.
1184 * If exception_mask contains EC_PROCESS_RUNTIME, unblocks pending
1185 * AMDKFD_IOC_RUNTIME_ENABLE call - see kfd_ioctl_runtime_enable_args.
1186 * All other exceptions are raised to runtime through err_payload_addr.
1187 * See kfd_context_save_area_header.
1188 */
1189struct kfd_ioctl_dbg_trap_send_runtime_event_args {
1190 __u64 exception_mask;
1191 __u32 gpu_id;
1192 __u32 queue_id;
1193};
1194
1195/**
1196 * kfd_ioctl_dbg_trap_set_exceptions_enabled_args
1197 *
 1198 * Arguments for KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED
 1199 * Set new exceptions to be raised to the debugger.
 1200 *
 1201 * @exception_mask (IN) - new exceptions to raise to the debugger
1202 *
1203 * Generic errors apply (see kfd_dbg_trap_operations).
1204 * Return - 0 on SUCCESS.
1205 */
1206struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
1207 __u64 exception_mask;
1208};
1209
1210/**
1211 * kfd_ioctl_dbg_trap_set_wave_launch_override_args
1212 *
1213 * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE
1214 * Enable HW exceptions to raise trap.
1215 *
1216 * @override_mode (IN) - see kfd_dbg_trap_override_mode
1217 * @enable_mask (IN/OUT) - reference kfd_dbg_trap_mask.
1218 * IN is the override modes requested to be enabled.
1219 * OUT is referenced in Return below.
1220 * @support_request_mask (IN/OUT) - reference kfd_dbg_trap_mask.
1221 * IN is the override modes requested for support check.
1222 * OUT is referenced in Return below.
1223 *
1224 * Generic errors apply (see kfd_dbg_trap_operations).
1225 * Return - 0 on SUCCESS.
1226 * Previous enablement is returned in @enable_mask.
1227 * Actual override support is returned in @support_request_mask.
1228 * - EINVAL if override mode is not supported.
1229 * - EACCES if trap support requested is not actually supported.
1230 * i.e. enable_mask (IN) is not a subset of support_request_mask (OUT).
1231 * Otherwise it is considered a generic error (see kfd_dbg_trap_operations).
1232 */
1233struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
1234 __u32 override_mode;
1235 __u32 enable_mask;
1236 __u32 support_request_mask;
1237 __u32 pad;
1238};
1239
1240/**
1241 * kfd_ioctl_dbg_trap_set_wave_launch_mode_args
1242 *
1243 * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE
1244 * Set wave launch mode.
1245 *
1246 * @mode (IN) - see kfd_dbg_trap_wave_launch_mode
1247 *
1248 * Generic errors apply (see kfd_dbg_trap_operations).
1249 * Return - 0 on SUCCESS.
1250 */
1251struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
1252 __u32 launch_mode;
1253 __u32 pad;
1254};
1255
1256/**
 1257 * kfd_ioctl_dbg_trap_suspend_queues_args
1258 *
1259 * Arguments for KFD_IOC_DBG_TRAP_SUSPEND_QUEUES
1260 * Suspend queues.
1261 *
1262 * @exception_mask (IN) - raised exceptions to clear
1263 * @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
1264 * to suspend
1265 * @num_queues (IN) - number of queues to suspend in @queue_array_ptr
1266 * @grace_period (IN) - wave time allowance before preemption
1267 * per 1K GPU clock cycle unit
1268 *
1269 * Generic errors apply (see kfd_dbg_trap_operations).
1270 * Destruction of a suspended queue is blocked until the queue is
1271 * resumed. This allows the debugger to access queue information and
 1272 * its context save area without running into a race condition on
1273 * queue destruction.
1274 * Automatically copies per queue context save area header information
1275 * into the save area base
1276 * (see kfd_queue_snapshot_entry and kfd_context_save_area_header).
1277 *
1278 * Return - Number of queues suspended on SUCCESS.
 1279 * KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK masked
 1280 * into each queue id in the @queue_array_ptr array report the
 1281 * unsuccessful suspend reason.
1282 * KFD_DBG_QUEUE_ERROR_MASK = HW failure.
1283 * KFD_DBG_QUEUE_INVALID_MASK = queue does not exist, is new or
1284 * is being destroyed.
1285 */
1286struct kfd_ioctl_dbg_trap_suspend_queues_args {
1287 __u64 exception_mask;
1288 __u64 queue_array_ptr;
1289 __u32 num_queues;
1290 __u32 grace_period;
1291};
1292
1293/**
1294 * kfd_ioctl_dbg_trap_resume_queues_args
1295 *
1296 * Arguments for KFD_IOC_DBG_TRAP_RESUME_QUEUES
1297 * Resume queues.
1298 *
1299 * @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
1300 * to resume
1301 * @num_queues (IN) - number of queues to resume in @queue_array_ptr
1302 *
1303 * Generic errors apply (see kfd_dbg_trap_operations).
1304 * Return - Number of queues resumed on SUCCESS.
 1305 * KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK masked
 1306 * into each queue id in the @queue_array_ptr array report the
 1307 * unsuccessful resume reason.
1308 * KFD_DBG_QUEUE_ERROR_MASK = HW failure.
1309 * KFD_DBG_QUEUE_INVALID_MASK = queue does not exist.
1310 */
1311struct kfd_ioctl_dbg_trap_resume_queues_args {
1312 __u64 queue_array_ptr;
1313 __u32 num_queues;
1314 __u32 pad;
1315};
1316
1317/**
1318 * kfd_ioctl_dbg_trap_set_node_address_watch_args
1319 *
1320 * Arguments for KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH
1321 * Sets address watch for device.
1322 *
1323 * @address (IN) - watch address to set
1324 * @mode (IN) - see kfd_dbg_trap_address_watch_mode
1325 * @mask (IN) - watch address mask
1326 * @gpu_id (IN) - target gpu to set watch point
1327 * @id (OUT) - watch id allocated
1328 *
1329 * Generic errors apply (see kfd_dbg_trap_operations).
1330 * Return - 0 on SUCCESS.
1331 * Allocated watch ID returned to @id.
1332 * - ENODEV if gpu_id not found.
 1333 * - ENOMEM if no watch ID can be allocated
1334 */
1335struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
1336 __u64 address;
1337 __u32 mode;
1338 __u32 mask;
1339 __u32 gpu_id;
1340 __u32 id;
1341};
1342
1343/**
1344 * kfd_ioctl_dbg_trap_clear_node_address_watch_args
1345 *
1346 * Arguments for KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH
1347 * Clear address watch for device.
1348 *
1349 * @gpu_id (IN) - target device to clear watch point
1350 * @id (IN) - allocated watch id to clear
1351 *
1352 * Generic errors apply (see kfd_dbg_trap_operations).
1353 * Return - 0 on SUCCESS.
1354 * - ENODEV if gpu_id not found.
1355 * - EINVAL if watch ID has not been allocated.
1356 */
1357struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
1358 __u32 gpu_id;
1359 __u32 id;
1360};
1361
1362/**
1363 * kfd_ioctl_dbg_trap_set_flags_args
1364 *
1365 * Arguments for KFD_IOC_DBG_TRAP_SET_FLAGS
1366 * Sets flags for wave behaviour.
1367 *
1368 * @flags (IN/OUT) - IN = flags to enable, OUT = flags previously enabled
1369 *
1370 * Generic errors apply (see kfd_dbg_trap_operations).
1371 * Return - 0 on SUCCESS.
 1372 * - EACCES if any debug device does not allow flag options.
1373 */
1374struct kfd_ioctl_dbg_trap_set_flags_args {
1375 __u32 flags;
1376 __u32 pad;
1377};
1378
1379/**
1380 * kfd_ioctl_dbg_trap_query_debug_event_args
1381 *
1382 * Arguments for KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT
1383 *
1384 * Find one or more raised exceptions. This function can return multiple
1385 * exceptions from a single queue or a single device with one call. To find
1386 * all raised exceptions, this function must be called repeatedly until it
1387 * returns -EAGAIN. Returned exceptions can optionally be cleared by
1388 * setting the corresponding bit in the @exception_mask input parameter.
1389 * However, clearing an exception prevents retrieving further information
1390 * about it with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO.
1391 *
1392 * @exception_mask (IN/OUT) - exception to clear (IN) and raised (OUT)
1393 * @gpu_id (OUT) - gpu id of exceptions raised
1394 * @queue_id (OUT) - queue id of exceptions raised
1395 *
1396 * Generic errors apply (see kfd_dbg_trap_operations).
 1397 * Return - 0 if a raised exception is found
 1398 * Raised exceptions found are returned in @exception_mask
 1399 * with the reporting source id returned in @gpu_id or @queue_id.
1400 * - EAGAIN if no raised exception has been found
1401 */
1402struct kfd_ioctl_dbg_trap_query_debug_event_args {
1403 __u64 exception_mask;
1404 __u32 gpu_id;
1405 __u32 queue_id;
1406};
1407
1408/**
1409 * kfd_ioctl_dbg_trap_query_exception_info_args
1410 *
 1411 * Arguments for KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO
1412 * Get additional info on raised exception.
1413 *
1414 * @info_ptr (IN) - pointer to exception info buffer to copy to
1415 * @info_size (IN/OUT) - exception info buffer size (bytes)
1416 * @source_id (IN) - target gpu or queue id
1417 * @exception_code (IN) - target exception
1418 * @clear_exception (IN) - clear raised @exception_code exception
1419 * (0 = false, 1 = true)
1420 *
1421 * Generic errors apply (see kfd_dbg_trap_operations).
1422 * Return - 0 on SUCCESS.
1423 * If @exception_code is EC_DEVICE_MEMORY_VIOLATION, copy @info_size(OUT)
1424 * bytes of memory exception data to @info_ptr.
1425 * If @exception_code is EC_PROCESS_RUNTIME, copy saved
1426 * kfd_runtime_info to @info_ptr.
1427 * Actual required @info_ptr size (bytes) is returned in @info_size.
1428 */
1429struct kfd_ioctl_dbg_trap_query_exception_info_args {
1430 __u64 info_ptr;
1431 __u32 info_size;
1432 __u32 source_id;
1433 __u32 exception_code;
1434 __u32 clear_exception;
1435};
1436
1437/**
1438 * kfd_ioctl_dbg_trap_get_queue_snapshot_args
1439 *
 1440 * Arguments for KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT
1441 * Get queue information.
1442 *
1443 * @exception_mask (IN) - exceptions raised to clear
1444 * @snapshot_buf_ptr (IN) - queue snapshot entry buffer (see kfd_queue_snapshot_entry)
1445 * @num_queues (IN/OUT) - number of queue snapshot entries
1446 * The debugger specifies the size of the array allocated in @num_queues.
1447 * KFD returns the number of queues that actually existed. If this is
1448 * larger than the size specified by the debugger, KFD will not overflow
1449 * the array allocated by the debugger.
1450 *
1451 * @entry_size (IN/OUT) - size per entry in bytes
1452 * The debugger specifies sizeof(struct kfd_queue_snapshot_entry) in
1453 * @entry_size. KFD returns the number of bytes actually populated per
 1454 * entry. The debugger should use the KFD_IOCTL_MINOR_VERSION to determine
 1455 * which fields in struct kfd_queue_snapshot_entry are valid. This allows
1456 * growing the ABI in a backwards compatible manner.
1457 * Note that entry_size(IN) should still be used to stride the snapshot buffer in the
1458 * event that it's larger than actual kfd_queue_snapshot_entry.
1459 *
1460 * Generic errors apply (see kfd_dbg_trap_operations).
1461 * Return - 0 on SUCCESS.
1462 * Copies @num_queues(IN) queue snapshot entries of size @entry_size(IN)
1463 * into @snapshot_buf_ptr if @num_queues(IN) > 0.
1464 * Otherwise return @num_queues(OUT) queue snapshot entries that exist.
1465 */
1466struct kfd_ioctl_dbg_trap_queue_snapshot_args {
1467 __u64 exception_mask;
1468 __u64 snapshot_buf_ptr;
1469 __u32 num_queues;
1470 __u32 entry_size;
1471};
1472
1473/**
1474 * kfd_ioctl_dbg_trap_get_device_snapshot_args
1475 *
1476 * Arguments for KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT
1477 * Get device information.
1478 *
1479 * @exception_mask (IN) - exceptions raised to clear
1480 * @snapshot_buf_ptr (IN) - pointer to snapshot buffer (see kfd_dbg_device_info_entry)
1481 * @num_devices (IN/OUT) - number of debug devices to snapshot
1482 * The debugger specifies the size of the array allocated in @num_devices.
1483 * KFD returns the number of devices that actually existed. If this is
1484 * larger than the size specified by the debugger, KFD will not overflow
1485 * the array allocated by the debugger.
1486 *
1487 * @entry_size (IN/OUT) - size per entry in bytes
1488 * The debugger specifies sizeof(struct kfd_dbg_device_info_entry) in
1489 * @entry_size. KFD returns the number of bytes actually populated. The
 1490 * debugger should use KFD_IOCTL_MINOR_VERSION to determine which fields
1491 * in struct kfd_dbg_device_info_entry are valid. This allows growing the
1492 * ABI in a backwards compatible manner.
1493 * Note that entry_size(IN) should still be used to stride the snapshot buffer in the
1494 * event that it's larger than actual kfd_dbg_device_info_entry.
1495 *
1496 * Generic errors apply (see kfd_dbg_trap_operations).
1497 * Return - 0 on SUCCESS.
1498 * Copies @num_devices(IN) device snapshot entries of size @entry_size(IN)
1499 * into @snapshot_buf_ptr if @num_devices(IN) > 0.
 1500 * Otherwise return @num_devices(OUT) device snapshot entries that exist.
1501 */
1502struct kfd_ioctl_dbg_trap_device_snapshot_args {
1503 __u64 exception_mask;
1504 __u64 snapshot_buf_ptr;
1505 __u32 num_devices;
1506 __u32 entry_size;
1507};
1508
1509/**
1510 * kfd_ioctl_dbg_trap_args
1511 *
1512 * Arguments to debug target process.
1513 *
1514 * @pid - target process to debug
1515 * @op - debug operation (see kfd_dbg_trap_operations)
1516 *
1517 * @op determines which union struct args to use.
1518 * Refer to kern docs for each kfd_ioctl_dbg_trap_*_args struct.
1519 */
1520struct kfd_ioctl_dbg_trap_args {
1521 __u32 pid;
1522 __u32 op;
1523
1524 union {
1525 struct kfd_ioctl_dbg_trap_enable_args enable;
1526 struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event;
1527 struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled;
1528 struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override;
1529 struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode;
1530 struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues;
1531 struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues;
1532 struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch;
1533 struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch;
1534 struct kfd_ioctl_dbg_trap_set_flags_args set_flags;
1535 struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event;
1536 struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info;
1537 struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot;
1538 struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot;
1539 };
1540};
1541
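/*
 * Minimal sketch (not part of the UAPI): enable a debug session on a target
 * process via KFD_IOC_DBG_TRAP_ENABLE. It assumes the caller is already
 * ptrace-attached to @target_pid and that @poll_fd is a descriptor KFD can
 * use to notify the debugger of raised exceptions.
 */
#if 0 /* usage sketch only */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int dbg_enable(int kfd_fd, __u32 target_pid, int poll_fd,
		      __u64 exception_mask, struct kfd_runtime_info *rinfo)
{
	struct kfd_ioctl_dbg_trap_args args;

	memset(&args, 0, sizeof(args));
	args.pid = target_pid;
	args.op = KFD_IOC_DBG_TRAP_ENABLE;
	args.enable.exception_mask = exception_mask;
	args.enable.rinfo_ptr = (__u64)(uintptr_t)rinfo;
	args.enable.rinfo_size = sizeof(*rinfo);
	args.enable.dbg_fd = poll_fd;

	return ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);
}
#endif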
1542#define AMDKFD_IOCTL_BASE 'K'
1543#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
1544#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
1545#define AMDKFD_IOW(nr, type) _IOW(AMDKFD_IOCTL_BASE, nr, type)
1546#define AMDKFD_IOWR(nr, type) _IOWR(AMDKFD_IOCTL_BASE, nr, type)
1547
1548#define AMDKFD_IOC_GET_VERSION \
1549 AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
1550
1551#define AMDKFD_IOC_CREATE_QUEUE \
1552 AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
1553
1554#define AMDKFD_IOC_DESTROY_QUEUE \
1555 AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
1556
1557#define AMDKFD_IOC_SET_MEMORY_POLICY \
1558 AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
1559
1560#define AMDKFD_IOC_GET_CLOCK_COUNTERS \
1561 AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
1562
1563#define AMDKFD_IOC_GET_PROCESS_APERTURES \
1564 AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
1565
1566#define AMDKFD_IOC_UPDATE_QUEUE \
1567 AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
1568
1569#define AMDKFD_IOC_CREATE_EVENT \
1570 AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)
1571
1572#define AMDKFD_IOC_DESTROY_EVENT \
1573 AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)
1574
1575#define AMDKFD_IOC_SET_EVENT \
1576 AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)
1577
1578#define AMDKFD_IOC_RESET_EVENT \
1579 AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)
1580
1581#define AMDKFD_IOC_WAIT_EVENTS \
1582 AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)
1583
1584#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED \
1585 AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)
1586
1587#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED \
1588 AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)
1589
1590#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED \
1591 AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)
1592
1593#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED \
1594 AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
1595
1596#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA \
1597 AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)
1598
1599#define AMDKFD_IOC_GET_TILE_CONFIG \
1600 AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)
1601
1602#define AMDKFD_IOC_SET_TRAP_HANDLER \
1603 AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)
1604
1605#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW \
1606 AMDKFD_IOWR(0x14, \
1607 struct kfd_ioctl_get_process_apertures_new_args)
1608
1609#define AMDKFD_IOC_ACQUIRE_VM \
1610 AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)
1611
1612#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU \
1613 AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)
1614
1615#define AMDKFD_IOC_FREE_MEMORY_OF_GPU \
1616 AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)
1617
1618#define AMDKFD_IOC_MAP_MEMORY_TO_GPU \
1619 AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)
1620
1621#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU \
1622 AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
1623
1624#define AMDKFD_IOC_SET_CU_MASK \
1625 AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
1626
1627#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE \
1628 AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
1629
1630#define AMDKFD_IOC_GET_DMABUF_INFO \
1631 AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)
1632
1633#define AMDKFD_IOC_IMPORT_DMABUF \
1634 AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
1635
1636#define AMDKFD_IOC_ALLOC_QUEUE_GWS \
1637 AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)
1638
1639#define AMDKFD_IOC_SMI_EVENTS \
1640 AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)
1641
1642#define AMDKFD_IOC_SVM AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)
1643
1644#define AMDKFD_IOC_SET_XNACK_MODE \
1645 AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)
1646
1647#define AMDKFD_IOC_CRIU_OP \
1648 AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)
1649
1650#define AMDKFD_IOC_AVAILABLE_MEMORY \
1651 AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)
1652
1653#define AMDKFD_IOC_EXPORT_DMABUF \
1654 AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args)
1655
1656#define AMDKFD_IOC_RUNTIME_ENABLE \
1657 AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args)
1658
1659#define AMDKFD_IOC_DBG_TRAP \
1660 AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)
1661
1662#define AMDKFD_COMMAND_START 0x01
1663#define AMDKFD_COMMAND_END 0x27
1664
1665#endif