/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _UAPI_XE_DRM_H_
#define _UAPI_XE_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/*
 * Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 * Sections in this file are organized as follows:
 * 1. IOCTL definitions
 * 2. Extension definition and helper structs
 * 3. IOCTL query structs, in the order of the query entries
 * 4. The rest of the IOCTL structs, in the order of IOCTL declaration
 */

/**
 * DOC: Xe Device Block Diagram
 *
 * The diagram below represents a high-level simplification of a discrete
 * GPU supported by the Xe driver. It shows some device components which
 * are necessary to understand this API, as well as how they relate to
 * each other. This diagram does not represent real hardware::
 *
 * ┌──────────────────────────────────────────────────────────────────┐
 * │ ┌──────────────────────────────────────────────────┐ ┌─────────┐ │
 * │ │        ┌───────────────────────┐   ┌─────┐       │ │ ┌─────┐ │ │
 * │ │        │         VRAM0         ├───┤ ... │       │ │ │VRAM1│ │ │
 * │ │        └───────────┬───────────┘   └─GT1─┘       │ │ └──┬──┘ │ │
 * │ │ ┌──────────────────┴───────────────────────────┐ │ │ ┌──┴──┐ │ │
 * │ │ │ ┌─────────────────────┐  ┌─────────────────┐ │ │ │ │     │ │ │
 * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │RCS0 │ │BCS0 │ │ │ │ │ │     │ │ │
 * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │VCS0 │ │VCS1 │ │ │ │ │ │     │ │ │
 * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │VECS0│ │VECS1│ │ │ │ │ │ ... │ │ │
 * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │  │ │CCS0 │ │CCS1 │ │ │ │ │ │     │ │ │
 * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 * │ │ │ └─────────DSS─────────┘  │ ┌─────┐ ┌─────┐ │ │ │ │ │     │ │ │
 * │ │ │                          │ │CCS2 │ │CCS3 │ │ │ │ │ │     │ │ │
 * │ │ │ ┌─────┐ ┌─────┐ ┌─────┐  │ └─────┘ └─────┘ │ │ │ │ │     │ │ │
 * │ │ │ │ ... │ │ ... │ │ ... │  │                 │ │ │ │ │     │ │ │
 * │ │ │ └─DSS─┘ └─DSS─┘ └─DSS─┘  └─────Engines─────┘ │ │ │ │     │ │ │
 * │ │ └───────────────────────────GT0────────────────┘ │ │ └─GT2─┘ │ │
 * │ └────────────────────────────Tile0─────────────────┘ └─ Tile1──┘ │
 * └─────────────────────────────Device0───────┬──────────────────────┘
 *                                             │
 *                      ───────────────────────┴────────── PCI bus
 */

/**
 * DOC: Xe uAPI Overview
 *
 * This section aims to describe the Xe driver's IOCTL entries, their structs,
 * and other Xe-related uAPI such as uevents and PMU (Platform Monitoring
 * Unit) entries and usage.
 *
 * List of supported IOCTLs:
 * - &DRM_IOCTL_XE_DEVICE_QUERY
 * - &DRM_IOCTL_XE_GEM_CREATE
 * - &DRM_IOCTL_XE_GEM_MMAP_OFFSET
 * - &DRM_IOCTL_XE_VM_CREATE
 * - &DRM_IOCTL_XE_VM_DESTROY
 * - &DRM_IOCTL_XE_VM_BIND
 * - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
 * - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
 * - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
 * - &DRM_IOCTL_XE_EXEC
 * - &DRM_IOCTL_XE_WAIT_USER_FENCE
 */

/*
 * Xe specific IOCTLs.
 *
 * The device specific IOCTL range is [DRM_COMMAND_BASE, DRM_COMMAND_END),
 * i.e. [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as
 * offsets against DRM_COMMAND_BASE and should be in [0x0, 0x60).
 */
#define DRM_XE_DEVICE_QUERY                     0x00
#define DRM_XE_GEM_CREATE                       0x01
#define DRM_XE_GEM_MMAP_OFFSET                  0x02
#define DRM_XE_VM_CREATE                        0x03
#define DRM_XE_VM_DESTROY                       0x04
#define DRM_XE_VM_BIND                          0x05
#define DRM_XE_EXEC_QUEUE_CREATE                0x06
#define DRM_XE_EXEC_QUEUE_DESTROY               0x07
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY          0x08
#define DRM_XE_EXEC                             0x09
#define DRM_XE_WAIT_USER_FENCE                  0x0a
/* Must be kept compact -- no holes */

#define DRM_IOCTL_XE_DEVICE_QUERY               DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
#define DRM_IOCTL_XE_GEM_CREATE                 DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
#define DRM_IOCTL_XE_GEM_MMAP_OFFSET            DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
#define DRM_IOCTL_XE_VM_CREATE                  DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
#define DRM_IOCTL_XE_VM_DESTROY                 DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
#define DRM_IOCTL_XE_VM_BIND                    DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE          DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY         DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY    DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
#define DRM_IOCTL_XE_EXEC                       DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE            DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)

/**
 * DOC: Xe IOCTL Extensions
 *
 * Before detailing the IOCTLs and their structs, it is important to highlight
 * that every IOCTL in Xe is extensible.
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever-growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the __user boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_user_extension ext3 = {
 *         .next_extension = 0, // end
 *         .name = ...,
 *     };
 *     struct drm_xe_user_extension ext2 = {
 *         .next_extension = (uintptr_t)&ext3,
 *         .name = ...,
 *     };
 *     struct drm_xe_user_extension ext1 = {
 *         .next_extension = (uintptr_t)&ext2,
 *         .name = ...,
 *     };
 *
 * Typically the struct drm_xe_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
 * which would then apply all of the above extensions.
 */

/**
 * struct drm_xe_user_extension - Base class for defining a chain of extensions
 */
struct drm_xe_user_extension {
        /**
         * @next_extension:
         *
         * Pointer to the next struct drm_xe_user_extension, or zero if the end.
         */
        __u64 next_extension;

        /**
         * @name: Name of the extension.
         *
         * Note that the name here is just some integer.
         *
         * Also note that the name space for this is not global for the whole
         * driver, but rather its scope/meaning is limited to the specific piece
         * of uAPI which has embedded the struct drm_xe_user_extension.
         */
        __u32 name;

        /**
         * @pad: MBZ
         *
         * All undefined bits must be zero.
         */
        __u32 pad;
};

/**
 * struct drm_xe_ext_set_property - Generic set property extension
 *
 * A generic struct that allows any of the Xe IOCTLs to be extended
 * with a set_property operation.
 */
struct drm_xe_ext_set_property {
        /** @base: base user extension */
        struct drm_xe_user_extension base;

        /** @property: property to set */
        __u32 property;

        /** @pad: MBZ */
        __u32 pad;

        /** @value: property value */
        __u64 value;

        /** @reserved: Reserved */
        __u64 reserved[2];
};
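
/*
 * Example: chaining a set_property extension onto an IOCTL. This is an
 * illustrative sketch only; the property/value pair shown
 * (DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY set to 0) reuses names defined
 * later in this file, and `exec_queue_create` is assumed to be a struct
 * drm_xe_exec_queue_create that is about to be submitted.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_ext_set_property ext = {
 *         .base.next_extension = 0, // end of the chain
 *         .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *         .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
 *         .value = 0,
 *     };
 *
 *     exec_queue_create.extensions = (uintptr_t)&ext;
 */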

/**
 * struct drm_xe_engine_class_instance - instance of an engine class
 *
 * It is returned as part of the @drm_xe_engine, but it is also used as
 * the input of engine selection for both @drm_xe_exec_queue_create and
 * @drm_xe_query_engine_cycles.
 *
 * The @engine_class can be:
 * - %DRM_XE_ENGINE_CLASS_RENDER
 * - %DRM_XE_ENGINE_CLASS_COPY
 * - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE
 * - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE
 * - %DRM_XE_ENGINE_CLASS_COMPUTE
 * - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel-only class (not an actual
 *   hardware engine class). Used for creating ordered queues of VM
 *   bind operations.
 */
struct drm_xe_engine_class_instance {
#define DRM_XE_ENGINE_CLASS_RENDER              0
#define DRM_XE_ENGINE_CLASS_COPY                1
#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE        2
#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE       3
#define DRM_XE_ENGINE_CLASS_COMPUTE             4
#define DRM_XE_ENGINE_CLASS_VM_BIND             5
        /** @engine_class: engine class id */
        __u16 engine_class;
        /** @engine_instance: engine instance id */
        __u16 engine_instance;
        /** @gt_id: Unique ID of this GT within the PCI Device */
        __u16 gt_id;
        /** @pad: MBZ */
        __u16 pad;
};

/**
 * struct drm_xe_engine - describe hardware engine
 */
struct drm_xe_engine {
        /** @instance: The @drm_xe_engine_class_instance */
        struct drm_xe_engine_class_instance instance;

        /** @reserved: Reserved */
        __u64 reserved[3];
};

/**
 * struct drm_xe_query_engines - describe engines
 *
 * If a query is made with a struct @drm_xe_device_query where .query
 * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses a
 * struct @drm_xe_query_engines in .data.
 */
struct drm_xe_query_engines {
        /** @num_engines: number of engines returned in @engines */
        __u32 num_engines;
        /** @pad: MBZ */
        __u32 pad;
        /** @engines: The returned engines for this device */
        struct drm_xe_engine engines[];
};

/**
 * enum drm_xe_memory_class - Supported memory classes.
 */
enum drm_xe_memory_class {
        /** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
        DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
        /**
         * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
         * represents the memory that is local to the device, which we
         * call VRAM. Not valid on integrated platforms.
         */
        DRM_XE_MEM_REGION_CLASS_VRAM
};

/**
 * struct drm_xe_mem_region - Describes some region as known to
 * the driver.
 */
struct drm_xe_mem_region {
        /**
         * @mem_class: The memory class describing this region.
         *
         * See enum drm_xe_memory_class for supported values.
         */
        __u16 mem_class;
        /**
         * @instance: The unique ID for this region, which serves as the
         * index in the placement bitmask used as argument for
         * &DRM_IOCTL_XE_GEM_CREATE
         */
        __u16 instance;
        /**
         * @min_page_size: Min page-size in bytes for this region.
         *
         * When the kernel allocates memory for this region, the
         * underlying pages will be at least @min_page_size in size.
         * Buffer objects with an allowable placement in this region must be
         * created with a size aligned to this value.
         * GPU virtual address mappings of (parts of) buffer objects that
         * may be placed in this region must also have their GPU virtual
         * address and range aligned to this value.
         * Affected IOCTLs will return %-EINVAL if alignment restrictions are
         * not met.
         */
        __u32 min_page_size;
        /**
         * @total_size: The usable size in bytes for this region.
         */
        __u64 total_size;
        /**
         * @used: Estimate of the memory used in bytes for this region.
         *
         * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
         * accounting. Without this the value here will always equal
         * zero.
         */
        __u64 used;
        /**
         * @cpu_visible_size: How much of this region can be CPU
         * accessed, in bytes.
         *
         * This will always be <= @total_size, and the remainder (if
         * any) will not be CPU accessible. If the CPU accessible part
         * is smaller than @total_size then this is referred to as a
         * small BAR system.
         *
         * On systems without small BAR (full BAR), the @cpu_visible_size
         * will always equal the @total_size, since all of it will be CPU
         * accessible.
         *
         * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
         * regions (for other types the value here will always equal
         * zero).
         */
        __u64 cpu_visible_size;
        /**
         * @cpu_visible_used: Estimate of CPU visible memory used, in
         * bytes.
         *
         * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
         * accounting. Without this the value here will always equal
         * zero. Note this is only currently tracked for
         * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
         * here will always be zero).
         */
        __u64 cpu_visible_used;
        /** @reserved: Reserved */
        __u64 reserved[6];
};

/**
 * struct drm_xe_query_mem_regions - describe memory regions
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses
 * struct drm_xe_query_mem_regions in .data.
 */
struct drm_xe_query_mem_regions {
        /** @num_mem_regions: number of memory regions returned in @mem_regions */
        __u32 num_mem_regions;
        /** @pad: MBZ */
        __u32 pad;
        /** @mem_regions: The returned memory regions for this device */
        struct drm_xe_mem_region mem_regions[];
};
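
/*
 * Example: querying memory regions, following the same two-call pattern
 * shown for DRM_XE_DEVICE_QUERY_ENGINES further below. A minimal sketch;
 * `fd` is assumed to be an open Xe device file descriptor and error
 * handling is omitted.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_query_mem_regions *regions;
 *     struct drm_xe_device_query query = {
 *         .query = DRM_XE_DEVICE_QUERY_MEM_REGIONS,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *     regions = malloc(query.size);
 *     query.data = (uintptr_t)regions;
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *     for (__u32 i = 0; i < regions->num_mem_regions; i++)
 *         printf("instance %u: class %u, %llu bytes\n",
 *                regions->mem_regions[i].instance,
 *                regions->mem_regions[i].mem_class,
 *                (unsigned long long)regions->mem_regions[i].total_size);
 *     free(regions);
 */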

/**
 * struct drm_xe_query_config - describe the device configuration
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses
 * struct drm_xe_query_config in .data.
 *
 * The index in @info can be:
 * - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits)
 *   and the device revision (next 8 bits)
 * - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device
 *   configuration, see list below
 *
 *   - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
 *     has usable VRAM
 * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
 *   required by this device, typically SZ_4K or SZ_64K
 * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
 * - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest
 *   available exec queue priority
 */
struct drm_xe_query_config {
        /** @num_params: number of parameters returned in @info */
        __u32 num_params;

        /** @pad: MBZ */
        __u32 pad;

#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID   0
#define DRM_XE_QUERY_CONFIG_FLAGS               1
        #define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM       (1 << 0)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT       2
#define DRM_XE_QUERY_CONFIG_VA_BITS             3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY     4
        /** @info: array of elements containing the config info */
        __u64 info[];
};
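
/*
 * Example: decoding values from the config query. A minimal sketch; `fd`
 * is assumed to be an open Xe device file descriptor, and the bit layout
 * used for the device ID and revision follows the
 * DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID description above.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_query_config *config;
 *     struct drm_xe_device_query query = {
 *         .query = DRM_XE_DEVICE_QUERY_CONFIG,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *     config = malloc(query.size);
 *     query.data = (uintptr_t)config;
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *     __u16 dev_id = config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff;
 *     __u8 rev = (config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xff;
 *     int has_vram = !!(config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
 *                       DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM);
 *     free(config);
 */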

/**
 * struct drm_xe_gt - describe an individual GT.
 *
 * To be used with drm_xe_query_gt_list, which will return a list with all
 * the existing GT individual descriptions.
 * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for
 * implementing graphics and/or media operations.
 *
 * The @type can be:
 * - %DRM_XE_QUERY_GT_TYPE_MAIN
 * - %DRM_XE_QUERY_GT_TYPE_MEDIA
 */
struct drm_xe_gt {
#define DRM_XE_QUERY_GT_TYPE_MAIN       0
#define DRM_XE_QUERY_GT_TYPE_MEDIA      1
        /** @type: GT type: Main or Media */
        __u16 type;
        /** @tile_id: Tile ID where this GT lives (informational only) */
        __u16 tile_id;
        /** @gt_id: Unique ID of this GT within the PCI Device */
        __u16 gt_id;
        /** @pad: MBZ */
        __u16 pad[3];
        /** @reference_clock: A clock frequency for timestamps */
        __u32 reference_clock;
        /**
         * @near_mem_regions: Bit mask of instances from
         * drm_xe_query_mem_regions that are nearest to the current engines
         * of this GT.
         * Each index in this mask refers directly to the struct
         * drm_xe_query_mem_regions' instance; no assumptions should
         * be made about order. The type of each region is described
         * by struct drm_xe_query_mem_regions' mem_class.
         */
        __u64 near_mem_regions;
        /**
         * @far_mem_regions: Bit mask of instances from
         * drm_xe_query_mem_regions that are far from the engines of this GT.
         * In general, they have extra indirections when compared to the
         * @near_mem_regions. For a discrete device this could mean system
         * memory and memory living in a different tile.
         * Each index in this mask refers directly to the struct
         * drm_xe_query_mem_regions' instance; no assumptions should
         * be made about order. The type of each region is described
         * by struct drm_xe_query_mem_regions' mem_class.
         */
        __u64 far_mem_regions;
        /** @reserved: Reserved */
        __u64 reserved[8];
};

/**
 * struct drm_xe_query_gt_list - A list with GT description items.
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct
 * drm_xe_query_gt_list in .data.
 */
struct drm_xe_query_gt_list {
        /** @num_gt: number of GT items returned in gt_list */
        __u32 num_gt;
        /** @pad: MBZ */
        __u32 pad;
        /** @gt_list: The GT list returned for this device */
        struct drm_xe_gt gt_list[];
};

/**
 * struct drm_xe_query_topology_mask - describe the topology mask of a GT
 *
 * This is the hardware topology which reflects the internal physical
 * structure of the GPU.
 *
 * If a query is made with a struct drm_xe_device_query where .query
 * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses
 * struct drm_xe_query_topology_mask in .data.
 *
 * The @type can be:
 * - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices
 *   (DSS) available for geometry operations. For example a query response
 *   containing the following in mask:
 *   ``DSS_GEOMETRY    ff ff ff ff 00 00 00 00``
 *   means 32 DSS are available for geometry.
 * - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices
 *   (DSS) available for compute operations. For example a query response
 *   containing the following in mask:
 *   ``DSS_COMPUTE    ff ff ff ff 00 00 00 00``
 *   means 32 DSS are available for compute.
 * - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU)
 *   available per Dual Sub Slice (DSS). For example a query response
 *   containing the following in mask:
 *   ``EU_PER_DSS    ff ff 00 00 00 00 00 00``
 *   means each DSS has 16 EU.
 */
struct drm_xe_query_topology_mask {
        /** @gt_id: GT ID the mask is associated with */
        __u16 gt_id;

#define DRM_XE_TOPO_DSS_GEOMETRY        (1 << 0)
#define DRM_XE_TOPO_DSS_COMPUTE         (1 << 1)
#define DRM_XE_TOPO_EU_PER_DSS          (1 << 2)
        /** @type: type of mask */
        __u16 type;

        /** @num_bytes: number of bytes in requested mask */
        __u32 num_bytes;

        /** @mask: little-endian mask of @num_bytes */
        __u8 mask[];
};
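
/*
 * Example: walking the returned topology masks. This sketch assumes the
 * reply for DRM_XE_DEVICE_QUERY_GT_TOPOLOGY is a sequence of
 * variable-size struct drm_xe_query_topology_mask records filling the
 * returned size; `fd` is an open Xe device file descriptor.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_device_query query = {
 *         .query = DRM_XE_DEVICE_QUERY_GT_TOPOLOGY,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *     char *data = malloc(query.size);
 *     query.data = (uintptr_t)data;
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *     for (__u32 pos = 0; pos < query.size; ) {
 *         struct drm_xe_query_topology_mask *topo =
 *             (struct drm_xe_query_topology_mask *)(data + pos);
 *
 *         printf("gt %u, type %u, %u mask bytes\n",
 *                topo->gt_id, topo->type, topo->num_bytes);
 *         pos += sizeof(*topo) + topo->num_bytes;
 *     }
 *     free(data);
 */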

/**
 * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
 *
 * If a query is made with a struct drm_xe_device_query where .query is equal to
 * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles
 * in .data. struct drm_xe_query_engine_cycles is allocated by the user and
 * .data points to this allocated structure.
 *
 * The query returns the engine cycles, which, along with the GT's
 * @reference_clock, can be used to calculate the engine timestamp. In
 * addition, the query returns a set of CPU timestamps that indicate when
 * the command streamer cycle count was captured.
 */
struct drm_xe_query_engine_cycles {
        /**
         * @eci: This is input by the user and is the engine for which the
         * command streamer cycle count is queried.
         */
        struct drm_xe_engine_class_instance eci;

        /**
         * @clockid: This is input by the user and is the reference clock id for
         * CPU timestamps. For definition, see clock_gettime(2) and
         * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
         * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
         */
        __s32 clockid;

        /** @width: Width of the engine cycle counter in bits. */
        __u32 width;

        /**
         * @engine_cycles: Engine cycles as read from its register
         * at offset 0x358.
         */
        __u64 engine_cycles;

        /**
         * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
         * reading the engine_cycles register using the reference clockid set by
         * the user.
         */
        __u64 cpu_timestamp;

        /**
         * @cpu_delta: Time delta in ns captured around reading the lower dword
         * of the engine_cycles register.
         */
        __u64 cpu_delta;
};
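
/*
 * Example: correlating an engine timestamp with a CPU timestamp. A
 * minimal sketch; `fd` is an open Xe device file descriptor, `eci` is a
 * struct drm_xe_engine_class_instance chosen by the caller, and
 * `gt_reference_clock` is assumed to come from the matching struct
 * drm_xe_gt returned by DRM_XE_DEVICE_QUERY_GT_LIST.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_query_engine_cycles cycles = {
 *         .eci = eci,
 *         .clockid = CLOCK_MONOTONIC,
 *     };
 *     struct drm_xe_device_query query = {
 *         .query = DRM_XE_DEVICE_QUERY_ENGINE_CYCLES,
 *         .size = sizeof(cycles),
 *         .data = (uintptr_t)&cycles,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *     // Engine time in ns, sampled at roughly cycles.cpu_timestamp
 *     // (within cycles.cpu_delta ns):
 *     __u64 engine_ns = cycles.engine_cycles * 1000000000ull /
 *                       gt_reference_clock;
 */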

/**
 * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
 * structure to query device information
 *
 * The user selects the type of data to query among DRM_XE_DEVICE_QUERY_*
 * and sets the value in the query member. This determines the type of
 * the structure provided by the driver in data, among struct drm_xe_query_*.
 *
 * The @query can be:
 * - %DRM_XE_DEVICE_QUERY_ENGINES
 * - %DRM_XE_DEVICE_QUERY_MEM_REGIONS
 * - %DRM_XE_DEVICE_QUERY_CONFIG
 * - %DRM_XE_DEVICE_QUERY_GT_LIST
 * - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware
 *   configuration of the device such as information on slices, memory,
 *   caches, and so on. It is provided as a table of key / value
 *   attributes.
 * - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
 * - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
 *
 * If size is set to 0, the driver fills it with the required size for
 * the requested type of data to query. If size is equal to the required
 * size, the queried information is copied into data. If size is set to
 * a value different from 0 and different from the required size, the
 * IOCTL call returns -EINVAL.
 *
 * For example, the following code snippet allows retrieving and printing
 * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_query_engines *engines;
 *     struct drm_xe_device_query query = {
 *         .extensions = 0,
 *         .query = DRM_XE_DEVICE_QUERY_ENGINES,
 *         .size = 0,
 *         .data = 0,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *     engines = malloc(query.size);
 *     query.data = (uintptr_t)engines;
 *     ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
 *     for (int i = 0; i < engines->num_engines; i++) {
 *         printf("Engine %d: %s\n", i,
 *                engines->engines[i].instance.engine_class ==
 *                        DRM_XE_ENGINE_CLASS_RENDER ? "RENDER" :
 *                engines->engines[i].instance.engine_class ==
 *                        DRM_XE_ENGINE_CLASS_COPY ? "COPY" :
 *                engines->engines[i].instance.engine_class ==
 *                        DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE" :
 *                engines->engines[i].instance.engine_class ==
 *                        DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE" :
 *                engines->engines[i].instance.engine_class ==
 *                        DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE" :
 *                "UNKNOWN");
 *     }
 *     free(engines);
 */
struct drm_xe_device_query {
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;

#define DRM_XE_DEVICE_QUERY_ENGINES             0
#define DRM_XE_DEVICE_QUERY_MEM_REGIONS         1
#define DRM_XE_DEVICE_QUERY_CONFIG              2
#define DRM_XE_DEVICE_QUERY_GT_LIST             3
#define DRM_XE_DEVICE_QUERY_HWCONFIG            4
#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY         5
#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES       6
        /** @query: The type of data to query */
        __u32 query;

        /** @size: Size of the queried data */
        __u32 size;

        /** @data: Queried data is placed here */
        __u64 data;

        /** @reserved: Reserved */
        __u64 reserved[2];
};

/**
 * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for
 * gem creation
 *
 * The @flags can be:
 * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
 * - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
 * - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
 *   possible placement, ensure that the corresponding VRAM allocation
 *   will always use the CPU accessible part of VRAM. This is important
 *   for small-bar systems (on full-bar systems this gets turned into a
 *   noop).
 *   Note 1: System memory can be used as an extra placement if the kernel
 *   should spill the allocation to system memory, if space can't be made
 *   available in the CPU accessible part of VRAM (giving the same
 *   behaviour as the i915 interface, see
 *   I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS).
 *   Note 2: For clear-color CCS surfaces the kernel needs to read the
 *   clear-color value stored in the buffer, and on discrete platforms we
 *   need to use VRAM for display surfaces, therefore the kernel requires
 *   setting this flag for such objects, otherwise an error is returned on
 *   small-bar systems.
 *
 * @cpu_caching supports the following values:
 * - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
 *   caching. On iGPU this can't be used for scanout surfaces. Currently
 *   not allowed for objects placed in VRAM.
 * - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
 *   is uncached. Scanout surfaces should likely use this. All objects
 *   that can be placed in VRAM must use this.
 */
struct drm_xe_gem_create {
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;

        /**
         * @size: Size of the object to be created, must match region
         * (system or vram) minimum alignment (&min_page_size).
         */
        __u64 size;

        /**
         * @placement: A mask of memory instances of where BO can be placed.
         * Each index in this mask refers directly to the struct
         * drm_xe_query_mem_regions' instance; no assumptions should
         * be made about order. The type of each region is described
         * by struct drm_xe_query_mem_regions' mem_class.
         */
        __u32 placement;

#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING            (1 << 0)
#define DRM_XE_GEM_CREATE_FLAG_SCANOUT                  (1 << 1)
#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM       (1 << 2)
        /**
         * @flags: Flags for this object, see the
         * DRM_XE_GEM_CREATE_FLAG_* values above
         */
        __u32 flags;

        /**
         * @vm_id: Attached VM, if any
         *
         * If a VM is specified, this BO must:
         *
         *  1. Only ever be bound to that VM.
         *  2. Never be exported as a PRIME fd.
         */
        __u32 vm_id;

        /**
         * @handle: Returned handle for the object.
         *
         * Object handles are nonzero.
         */
        __u32 handle;

#define DRM_XE_GEM_CPU_CACHING_WB       1
#define DRM_XE_GEM_CPU_CACHING_WC       2
        /**
         * @cpu_caching: The CPU caching mode to select for this object. If
         * mmapping the object, the mode selected here will also be used.
         */
        __u16 cpu_caching;
        /** @pad: MBZ */
        __u16 pad[3];

        /** @reserved: Reserved */
        __u64 reserved[2];
};
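
/*
 * Example: creating a 64 KiB buffer object placed in VRAM. A minimal
 * sketch; `fd` is an open Xe device file descriptor and `vram_instance`
 * is assumed to be the instance of a DRM_XE_MEM_REGION_CLASS_VRAM region
 * from a prior DRM_XE_DEVICE_QUERY_MEM_REGIONS query. WC caching is used
 * since the object may be placed in VRAM.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_gem_create create = {
 *         .size = 0x10000,
 *         .placement = 1 << vram_instance,
 *         .cpu_caching = DRM_XE_GEM_CPU_CACHING_WC,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
 *     // create.handle now contains the (nonzero) object handle.
 */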

/**
 * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
 */
struct drm_xe_gem_mmap_offset {
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;

        /** @handle: Handle for the object being mapped. */
        __u32 handle;

        /** @flags: Must be zero */
        __u32 flags;

        /** @offset: The fake offset to use for subsequent mmap calls */
        __u64 offset;

        /** @reserved: Reserved */
        __u64 reserved[2];
};
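
/*
 * Example: mapping a buffer object into the CPU address space. A minimal
 * sketch; `handle` and `size` are assumed to come from a prior
 * DRM_IOCTL_XE_GEM_CREATE call on `fd`.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_gem_mmap_offset mmo = {
 *         .handle = handle,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
 *     void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                      MAP_SHARED, fd, mmo.offset);
 */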

/**
 * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
 *
 * The @flags can be:
 * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
 * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - A long-running (LR) VM accepts
 *   exec submissions to its exec_queues that don't have an upper time
 *   limit on the job execution time. But exec submissions to these
 *   don't allow the sync types DRM_XE_SYNC_TYPE_SYNCOBJ and
 *   DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ to be used as out-syncobjs, that
 *   is, together with DRM_XE_SYNC_FLAG_SIGNAL.
 *   LR VMs can be created in recoverable page-fault mode using
 *   DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
 *   If that flag is omitted, the UMD cannot rely on the slightly
 *   different per-VM overcommit semantics that are enabled by
 *   DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but the KMD may
 *   still enable recoverable pagefaults if supported by the device.
 * - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also
 *   DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated on
 *   demand when accessed, and also allows per-VM overcommit of memory.
 *   The xe driver internally uses recoverable pagefaults to implement
 *   this.
 */
struct drm_xe_vm_create {
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;

#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE      (1 << 0)
#define DRM_XE_VM_CREATE_FLAG_LR_MODE           (1 << 1)
#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE        (1 << 2)
        /** @flags: Flags */
        __u32 flags;

        /** @vm_id: Returned VM ID */
        __u32 vm_id;

        /** @reserved: Reserved */
        __u64 reserved[2];
};
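
/*
 * Example: creating a VM with scratch pages enabled, so that accesses to
 * unmapped ranges hit a scratch page instead of faulting. A minimal
 * sketch; `fd` is an open Xe device file descriptor.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_vm_create create = {
 *         .flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create);
 *     __u32 vm = create.vm_id;
 */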

/**
 * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY
 */
struct drm_xe_vm_destroy {
        /** @vm_id: VM ID */
        __u32 vm_id;

        /** @pad: MBZ */
        __u32 pad;

        /** @reserved: Reserved */
        __u64 reserved[2];
};

/**
 * struct drm_xe_vm_bind_op - run bind operations
 *
 * The @op can be:
 * - %DRM_XE_VM_BIND_OP_MAP
 * - %DRM_XE_VM_BIND_OP_UNMAP
 * - %DRM_XE_VM_BIND_OP_MAP_USERPTR
 * - %DRM_XE_VM_BIND_OP_UNMAP_ALL
 * - %DRM_XE_VM_BIND_OP_PREFETCH
 *
 * and the @flags can be:
 * - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
 *   tables are set up with a special bit which indicates writes are
 *   dropped and all reads return zero. In the future, the NULL flag
 *   will only be valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
 *   handle MBZ, and the BO offset MBZ. This flag is intended to
 *   implement VK sparse bindings.
 */
struct drm_xe_vm_bind_op {
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;

        /**
         * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
         */
        __u32 obj;

        /**
         * @pat_index: The platform defined @pat_index to use for this mapping.
         * The index basically maps to some predefined memory attributes,
         * including things like caching, coherency, compression etc. The exact
         * meaning of the pat_index is platform specific and defined in the
         * Bspec and PRMs. When the KMD sets up the binding, the index here is
         * encoded into the ppGTT PTE.
         *
         * For coherency, the @pat_index needs to be at least 1-way coherent
         * when drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The
         * KMD will extract the coherency mode from the @pat_index and reject
         * if there is a mismatch (see note below for pre-MTL platforms).
         *
         * Note: On pre-MTL platforms there is only a caching mode and no
         * explicit coherency mode, but on such hardware there is always a
         * shared-LLC (or it is a dgpu) so all GT memory accesses are coherent
         * with CPU caches even with the caching mode set as uncached. It's
         * only the display engine that is incoherent (on dgpu it must be in
         * VRAM which is always mapped as WC on the CPU). However, to keep the
         * uapi somewhat consistent with newer platforms the KMD groups the
         * different cache levels into the following coherency buckets on all
         * pre-MTL platforms:
         *
         *      ppGTT UC -> COH_NONE
         *      ppGTT WC -> COH_NONE
         *      ppGTT WT -> COH_NONE
         *      ppGTT WB -> COH_AT_LEAST_1WAY
         *
         * In practice UC/WC/WT should only ever be used for scanout surfaces
         * on such platforms (or perhaps in general for dma-buf if shared with
         * another device) since it is only the display engine that is actually
         * incoherent. Everything else should typically use WB given that we
         * have a shared-LLC. On MTL+ this completely changes and the HW
         * defines the coherency mode as part of the @pat_index, where
         * incoherent GT access is possible.
         *
         * Note: For userptr and externally imported dma-buf the kernel expects
         * either 1WAY or 2WAY for the @pat_index.
         *
         * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
         * on the @pat_index. For such mappings there is no actual memory being
         * mapped (the address in the PTE is invalid), so the various PAT memory
         * attributes likely do not apply. Simply leaving as zero is one
         * option (still a valid pat_index).
         */
        __u16 pat_index;

        /** @pad: MBZ */
        __u16 pad;

        union {
                /**
                 * @obj_offset: Offset into the object, MBZ for CLEAR_RANGE,
                 * ignored for unbind
                 */
                __u64 obj_offset;

                /** @userptr: user pointer to bind on */
                __u64 userptr;
        };

        /**
         * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL
         */
        __u64 range;

        /** @addr: Address to operate on, MBZ for UNMAP_ALL */
        __u64 addr;

#define DRM_XE_VM_BIND_OP_MAP           0x0
#define DRM_XE_VM_BIND_OP_UNMAP         0x1
#define DRM_XE_VM_BIND_OP_MAP_USERPTR   0x2
#define DRM_XE_VM_BIND_OP_UNMAP_ALL     0x3
#define DRM_XE_VM_BIND_OP_PREFETCH      0x4
        /** @op: Bind operation to perform */
        __u32 op;

#define DRM_XE_VM_BIND_FLAG_NULL        (1 << 2)
#define DRM_XE_VM_BIND_FLAG_DUMPABLE    (1 << 3)
        /** @flags: Bind flags */
        __u32 flags;

        /**
         * @prefetch_mem_region_instance: Memory region to prefetch VMA to.
         * It is a region instance, not a mask.
         * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation.
         */
        __u32 prefetch_mem_region_instance;

        /** @pad2: MBZ */
        __u32 pad2;

        /** @reserved: Reserved */
        __u64 reserved[3];
};

/**
 * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND
 *
 * Below is an example of a minimal use of @drm_xe_vm_bind to
 * asynchronously bind the buffer `data` at address `BIND_ADDRESS`,
 * illustrating `userptr` binding. It can be synchronized by using the
 * example provided for @drm_xe_sync.
 *
 * .. code-block:: C
 *
 *     data = aligned_alloc(ALIGNMENT, BO_SIZE);
 *     struct drm_xe_vm_bind bind = {
 *         .vm_id = vm,
 *         .num_binds = 1,
 *         .bind.obj = 0,
 *         .bind.obj_offset = to_user_pointer(data),
 *         .bind.range = BO_SIZE,
 *         .bind.addr = BIND_ADDRESS,
 *         .bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR,
 *         .bind.flags = 0,
 *         .num_syncs = 1,
 *         .syncs = (uintptr_t)&sync,
 *         .exec_queue_id = 0,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
 *
 */
struct drm_xe_vm_bind {
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;

        /** @vm_id: The ID of the VM to bind to */
        __u32 vm_id;

        /**
         * @exec_queue_id: exec_queue_id, must be of class
         * DRM_XE_ENGINE_CLASS_VM_BIND, and the exec queue must have the same
         * vm_id. If zero, the default VM bind engine is used.
         */
        __u32 exec_queue_id;

        /** @pad: MBZ */
        __u32 pad;

        /** @num_binds: number of binds in this IOCTL */
        __u32 num_binds;

        union {
                /** @bind: used if num_binds == 1 */
                struct drm_xe_vm_bind_op bind;

                /**
                 * @vector_of_binds: userptr to array of struct
                 * drm_xe_vm_bind_op if num_binds > 1
                 */
                __u64 vector_of_binds;
        };

        /** @pad2: MBZ */
        __u32 pad2;

        /** @num_syncs: amount of syncs to wait on */
        __u32 num_syncs;

        /** @syncs: pointer to struct drm_xe_sync array */
        __u64 syncs;

        /** @reserved: Reserved */
        __u64 reserved[2];
};

/**
 * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
 *
 * The example below shows how to use @drm_xe_exec_queue_create to create
 * a simple exec_queue (no parallel submission) of class
 * &DRM_XE_ENGINE_CLASS_RENDER.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_engine_class_instance instance = {
 *         .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *     };
 *     struct drm_xe_exec_queue_create exec_queue_create = {
 *         .extensions = 0,
 *         .vm_id = vm,
 *         .width = 1,
 *         .num_placements = 1,
 *         .instances = to_user_pointer(&instance),
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
 *
 */
struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY        0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY         0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE        1

        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;

        /** @width: submission width (number of BBs per exec) for this exec queue */
        __u16 width;

        /** @num_placements: number of valid placements for this exec queue */
        __u16 num_placements;

        /** @vm_id: VM to use for this exec queue */
        __u32 vm_id;

        /** @flags: MBZ */
        __u32 flags;

        /** @exec_queue_id: Returned exec queue ID */
        __u32 exec_queue_id;

        /**
         * @instances: user pointer to a 2-d array of struct
         * drm_xe_engine_class_instance
         *
         * length = width (i) * num_placements (j)
         * index = j + i * width
         */
        __u64 instances;

        /** @reserved: Reserved */
        __u64 reserved[2];
};

/**
 * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
 */
struct drm_xe_exec_queue_destroy {
        /** @exec_queue_id: Exec queue ID */
        __u32 exec_queue_id;

        /** @pad: MBZ */
        __u32 pad;

        /** @reserved: Reserved */
        __u64 reserved[2];
};

/**
 * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
 *
 * The @property can be:
 * - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN
 */
struct drm_xe_exec_queue_get_property {
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;

        /** @exec_queue_id: Exec queue ID */
        __u32 exec_queue_id;

#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN      0
        /** @property: property to get */
        __u32 property;

        /** @value: property value */
        __u64 value;

        /** @reserved: Reserved */
        __u64 reserved[2];
};
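
/*
 * Example: checking whether an exec queue has been banned. A minimal
 * sketch; `exec_queue` is assumed to come from a prior
 * DRM_IOCTL_XE_EXEC_QUEUE_CREATE call, and @value is assumed to read
 * back nonzero for a banned queue.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_exec_queue_get_property prop = {
 *         .exec_queue_id = exec_queue,
 *         .property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY, &prop);
 *     if (prop.value)
 *         fprintf(stderr, "exec queue %u was banned\n", exec_queue);
 */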

/**
 * struct drm_xe_sync - sync object
 *
 * The @type can be:
 * - %DRM_XE_SYNC_TYPE_SYNCOBJ
 * - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ
 * - %DRM_XE_SYNC_TYPE_USER_FENCE
 *
 * and the @flags can be:
 * - %DRM_XE_SYNC_FLAG_SIGNAL
 *
 * A minimal use of @drm_xe_sync looks like this:
 *
 * .. code-block:: C
 *
 *     struct drm_xe_sync sync = {
 *         .flags = DRM_XE_SYNC_FLAG_SIGNAL,
 *         .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
 *     };
 *     struct drm_syncobj_create syncobj_create = { 0 };
 *     ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create);
 *     sync.handle = syncobj_create.handle;
 *     ...
 *     use of &sync in drm_xe_exec or drm_xe_vm_bind
 *     ...
 *     struct drm_syncobj_wait wait = {
 *         .handles = (uintptr_t)&sync.handle,
 *         .timeout_nsec = INT64_MAX,
 *         .count_handles = 1,
 *         .flags = 0,
 *         .first_signaled = 0,
 *         .pad = 0,
 *     };
 *     ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 */
struct drm_xe_sync {
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;

#define DRM_XE_SYNC_TYPE_SYNCOBJ                0x0
#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ       0x1
#define DRM_XE_SYNC_TYPE_USER_FENCE             0x2
        /** @type: Type of this sync object */
        __u32 type;

#define DRM_XE_SYNC_FLAG_SIGNAL (1 << 0)
        /** @flags: Sync Flags */
        __u32 flags;

        union {
                /** @handle: Handle for the object */
                __u32 handle;

                /**
                 * @addr: Address of user fence. When the sync is passed in via
                 * the exec IOCTL this is a GPU address in the VM. When the sync
                 * is passed in via the VM bind IOCTL this is a user pointer. In
                 * either case, it is the user's responsibility that this
                 * address is present and mapped when the user fence is
                 * signalled. Must be qword aligned.
                 */
                __u64 addr;
        };

        /**
         * @timeline_value: Input for the timeline sync object. Needs to be
         * different from 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
         */
        __u64 timeline_value;

        /** @reserved: Reserved */
        __u64 reserved[2];
};

/**
 * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC
 *
 * This is an example of using @drm_xe_exec to execute the object
 * at BIND_ADDRESS (see example in @drm_xe_vm_bind) by an exec_queue
 * (see example in @drm_xe_exec_queue_create). It can be synchronized
 * by using the example provided for @drm_xe_sync.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_exec exec = {
 *         .exec_queue_id = exec_queue,
 *         .syncs = (uintptr_t)&sync,
 *         .num_syncs = 1,
 *         .address = BIND_ADDRESS,
 *         .num_batch_buffer = 1,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
 *
 */
struct drm_xe_exec {
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;

        /** @exec_queue_id: Exec queue ID for the batch buffer */
        __u32 exec_queue_id;

        /** @num_syncs: Amount of struct drm_xe_sync in array. */
        __u32 num_syncs;

        /** @syncs: Pointer to struct drm_xe_sync array. */
        __u64 syncs;

        /**
         * @address: address of batch buffer if num_batch_buffer == 1 or an
         * array of batch buffer addresses
         */
        __u64 address;

        /**
         * @num_batch_buffer: number of batch buffers in this exec, must match
         * the width of the engine
         */
        __u16 num_batch_buffer;

        /** @pad: MBZ */
        __u16 pad[3];

        /** @reserved: Reserved */
        __u64 reserved[2];
};

/**
 * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE
 *
 * Wait on user fence. XE will wake up on every HW engine interrupt in the
 * instances list and check if the user fence is complete::
 *
 *     (*addr & MASK) OP (VALUE & MASK)
 *
 * Returns to user on user fence completion or timeout.
 *
 * The @op can be:
 * - %DRM_XE_UFENCE_WAIT_OP_EQ
 * - %DRM_XE_UFENCE_WAIT_OP_NEQ
 * - %DRM_XE_UFENCE_WAIT_OP_GT
 * - %DRM_XE_UFENCE_WAIT_OP_GTE
 * - %DRM_XE_UFENCE_WAIT_OP_LT
 * - %DRM_XE_UFENCE_WAIT_OP_LTE
 *
 * and the @flags can be:
 * - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
 *
 * The @mask values can be for example:
 * - 0xffu for u8
 * - 0xffffu for u16
 * - 0xffffffffu for u32
 * - 0xffffffffffffffffu for u64
 */
struct drm_xe_wait_user_fence {
        /** @extensions: Pointer to the first extension struct, if any */
        __u64 extensions;

        /**
         * @addr: user pointer address to wait on, must be qword aligned
         */
        __u64 addr;

#define DRM_XE_UFENCE_WAIT_OP_EQ        0x0
#define DRM_XE_UFENCE_WAIT_OP_NEQ       0x1
#define DRM_XE_UFENCE_WAIT_OP_GT        0x2
#define DRM_XE_UFENCE_WAIT_OP_GTE       0x3
#define DRM_XE_UFENCE_WAIT_OP_LT        0x4
#define DRM_XE_UFENCE_WAIT_OP_LTE       0x5
        /** @op: wait operation (type of comparison) */
        __u16 op;

#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 0)
        /** @flags: wait flags */
        __u16 flags;

        /** @pad: MBZ */
        __u32 pad;

        /** @value: compare value */
        __u64 value;

        /** @mask: comparison mask */
        __u64 mask;

        /**
         * @timeout: how long to wait before bailing, value in nanoseconds.
         * Without the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative
         * timeout) it contains the timeout expressed in nanoseconds to wait
         * (the fence will expire at now() + timeout).
         * When the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute
         * timeout) the wait will end at timeout (uses the system
         * MONOTONIC_CLOCK).
         * Passing a negative timeout leads to a never-ending wait.
         *
         * On a relative timeout this value is updated with the timeout left
         * (for restarting the call in case of signal delivery).
         * On an absolute timeout this value stays intact (a restarted call
         * will still expire at the same point in time).
         */
        __s64 timeout;

        /** @exec_queue_id: exec_queue_id returned from xe_exec_queue_create_ioctl */
        __u32 exec_queue_id;

        /** @pad2: MBZ */
        __u32 pad2;

        /** @reserved: Reserved */
        __u64 reserved[2];
};
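
/*
 * Example: waiting until a user fence reaches an expected value. A
 * minimal sketch; `fence` is assumed to be a qword-aligned __u64 that a
 * previous submission signals through a DRM_XE_SYNC_TYPE_USER_FENCE sync
 * whose addr points at it, `USER_FENCE_VALUE` is the value assumed to be
 * written once it is signalled, and `exec_queue` comes from a prior
 * DRM_IOCTL_XE_EXEC_QUEUE_CREATE call.
 *
 * .. code-block:: C
 *
 *     struct drm_xe_wait_user_fence wait = {
 *         .addr = (uintptr_t)&fence,
 *         .op = DRM_XE_UFENCE_WAIT_OP_EQ,
 *         .value = USER_FENCE_VALUE,
 *         .mask = 0xffffffffffffffffu,
 *         .timeout = -1, // negative timeout: never-ending wait
 *         .exec_queue_id = exec_queue,
 *     };
 *     ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
 */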

#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_XE_DRM_H_ */