/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _UAPI_I915_DRM_H_
#define _UAPI_I915_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

/**
 * DOC: uevents generated by i915 on its device node
 *
 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
 * event from the GPU L3 cache. Additional information supplied is ROW,
 * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 * track of these events, and if a specific cache-line seems to have a
 * persistent error, remap it with the L3 remapping tool supplied in
 * intel-gpu-tools. The value supplied with the event is always 1.
 *
 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
 * hangcheck. The error detection event is a good indicator of when things
 * began to go badly. The value supplied with the event is a 1 upon error
 * detection, and a 0 upon reset completion, signifying no more error
 * exists. NOTE: Disabling hangcheck or reset via module parameter will
 * cause the related events not to be seen.
 *
 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
 * GPU. The value supplied with the event is always 1. NOTE: Disabling
 * reset via module parameter will cause this event not to be seen.
 */
#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR"
#define I915_ERROR_UEVENT "ERROR"
#define I915_RESET_UEVENT "RESET"

/**
 * struct i915_user_extension - Base class for defining a chain of extensions
 *
 * Many interfaces need to grow over time. In most cases we can simply
 * extend the struct and have userspace pass in more data. Another option,
 * as demonstrated by Vulkan's approach to providing extensions for forward
 * and backward compatibility, is to use a list of optional structs to
 * provide those extra details.
 *
 * The key advantage to using an extension chain is that it allows us to
 * redefine the interface more easily than an ever growing struct of
 * increasing complexity, and for large parts of that interface to be
 * entirely optional. The downside is more pointer chasing; chasing across
 * the __user boundary with pointers encapsulated inside u64.
 *
 * Example chaining:
 *
 * .. code-block:: C
 *
 *	struct i915_user_extension ext3 = {
 *		.next_extension = 0, // end
 *		.name = ...,
 *	};
 *	struct i915_user_extension ext2 = {
 *		.next_extension = (uintptr_t)&ext3,
 *		.name = ...,
 *	};
 *	struct i915_user_extension ext1 = {
 *		.next_extension = (uintptr_t)&ext2,
 *		.name = ...,
 *	};
 *
 * Typically the struct i915_user_extension would be embedded in some uAPI
 * struct, and in this case we would feed it the head of the chain (i.e. ext1),
 * which would then apply all of the above extensions.
 *
 */
struct i915_user_extension {
	/**
	 * @next_extension:
	 *
	 * Pointer to the next struct i915_user_extension, or zero if the end.
	 */
	__u64 next_extension;
	/**
	 * @name: Name of the extension.
	 *
	 * Note that the name here is just some integer.
	 *
	 * Also note that the name space for this is not global for the whole
	 * driver, but rather its scope/meaning is limited to the specific piece
	 * of uAPI which has embedded the struct i915_user_extension.
	 */
	__u32 name;
	/**
	 * @flags: MBZ
	 *
	 * All undefined bits must be zero.
	 */
	__u32 flags;
	/**
	 * @rsvd: MBZ
	 *
	 * Reserved for future use; must be zero.
	 */
	__u32 rsvd[4];
};
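
/*
 * Example: embedding the extension base in a larger extension struct and
 * handing the chain head to a uAPI struct. An illustrative sketch only --
 * struct hypothetical_ext, HYPOTHETICAL_EXT_NAME and the @extensions field
 * of uapi_struct are made-up names standing in for whichever piece of uAPI
 * actually embeds the chain.
 *
 * .. code-block:: C
 *
 *	struct hypothetical_ext {
 *		struct i915_user_extension base;
 *		__u64 value;
 *	};
 *
 *	struct hypothetical_ext ext = {
 *		.base = {
 *			.next_extension = 0, // end of chain
 *			.name = HYPOTHETICAL_EXT_NAME,
 *		},
 *		.value = 42,
 *	};
 *
 *	uapi_struct.extensions = (uintptr_t)&ext.base;
 */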

/*
 * MOCS indexes used for GPU surfaces, defining the cacheability of the
 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
 */
enum i915_mocs_table_index {
	/*
	 * Not cached anywhere, coherency between CPU and GPU accesses is
	 * guaranteed.
	 */
	I915_MOCS_UNCACHED,
	/*
	 * Cacheability and coherency controlled by the kernel automatically
	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
	 * usage of the surface (used for display scanout or not).
	 */
	I915_MOCS_PTE,
	/*
	 * Cached in all GPU caches available on the platform.
	 * Coherency between CPU and GPU accesses to the surface is not
	 * guaranteed without extra synchronization.
	 */
	I915_MOCS_CACHED,
};

/*
 * Different engines serve different roles, and there may be more than one
 * engine serving each role. enum drm_i915_gem_engine_class provides a
 * classification of the role of the engine, which may be used when requesting
 * operations to be performed on a certain subset of engines, or for providing
 * information about that group.
 */
enum drm_i915_gem_engine_class {
	I915_ENGINE_CLASS_RENDER = 0,
	I915_ENGINE_CLASS_COPY = 1,
	I915_ENGINE_CLASS_VIDEO = 2,
	I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,

	/* should be kept compact */

	I915_ENGINE_CLASS_INVALID = -1
};

/*
 * There may be more than one engine fulfilling any role within the system.
 * Each engine of a class is given a unique instance number and therefore
 * any engine can be specified by its class:instance tuple. APIs that allow
 * access to any engine in the system will use struct i915_engine_class_instance
 * for this identification.
 */
struct i915_engine_class_instance {
	__u16 engine_class; /* see enum drm_i915_gem_engine_class */
	__u16 engine_instance;
#define I915_ENGINE_CLASS_INVALID_NONE -1
#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
};

/**
 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
 *
 */

enum drm_i915_pmu_engine_sample {
	I915_SAMPLE_BUSY = 0,
	I915_SAMPLE_WAIT = 1,
	I915_SAMPLE_SEMA = 2
};

#define I915_PMU_SAMPLE_BITS (4)
#define I915_PMU_SAMPLE_MASK (0xf)
#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
#define I915_PMU_CLASS_SHIFT \
	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)

#define __I915_PMU_ENGINE(class, instance, sample) \
	((class) << I915_PMU_CLASS_SHIFT | \
	(instance) << I915_PMU_SAMPLE_BITS | \
	(sample))

#define I915_PMU_ENGINE_BUSY(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)

#define I915_PMU_ENGINE_WAIT(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)

#define I915_PMU_ENGINE_SEMA(class, instance) \
	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)

#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))

#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
#define I915_PMU_SOFTWARE_GT_AWAKE_TIME __I915_PMU_OTHER(4)

#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
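
/*
 * Example: sampling render engine busyness through the perf interface. A
 * minimal sketch, assuming the i915 PMU type has already been read from
 * sysfs into i915_pmu_type; the sysfs path shown in the comment is an
 * assumption, and error handling is omitted. The busy counter is
 * cumulative, so real users sample it twice and take the difference.
 *
 * .. code-block:: C
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct perf_event_attr attr = {
 *		// e.g. from /sys/bus/event_source/devices/i915/type
 *		.type = i915_pmu_type,
 *		.size = sizeof(attr),
 *		.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0),
 *	};
 *	// Uncore PMU: pid = -1, cpu = 0, no group, no flags.
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *	__u64 busy;
 *	read(fd, &busy, sizeof(busy));
 */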

/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14

typedef struct _drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
} drm_i915_init_t;

typedef struct _drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;	/* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;		/* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int pipeA_x;
	int pipeA_y;
	int pipeA_w;
	int pipeA_h;
	int pipeB_x;
	int pipeB_y;
	int pipeB_w;
	int pipeB_h;

	/* fill out some space for old userspace triple buffer */
	drm_handle_t unused_handle;
	__u32 unused1, unused2, unused3;

	/* buffer object handles for static buffers. May change
	 * over the lifetime of the client.
	 */
	__u32 front_bo_handle;
	__u32 back_bo_handle;
	__u32 unused_bo_handle;
	__u32 depth_bo_handle;

} drm_i915_sarea_t;

/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY 0x1
#define I915_BOX_FLIP 0x2
#define I915_BOX_WAIT 0x4
#define I915_BOX_TEXTURE_LOAD 0x8
#define I915_BOX_LOST_CONTEXT 0x10

/*
 * i915 specific ioctls.
 *
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END), i.e.
 * [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as offsets
 * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
 */
#define DRM_I915_INIT 0x00
#define DRM_I915_FLUSH 0x01
#define DRM_I915_FLIP 0x02
#define DRM_I915_BATCHBUFFER 0x03
#define DRM_I915_IRQ_EMIT 0x04
#define DRM_I915_IRQ_WAIT 0x05
#define DRM_I915_GETPARAM 0x06
#define DRM_I915_SETPARAM 0x07
#define DRM_I915_ALLOC 0x08
#define DRM_I915_FREE 0x09
#define DRM_I915_INIT_HEAP 0x0a
#define DRM_I915_CMDBUFFER 0x0b
#define DRM_I915_DESTROY_HEAP 0x0c
#define DRM_I915_SET_VBLANK_PIPE 0x0d
#define DRM_I915_GET_VBLANK_PIPE 0x0e
#define DRM_I915_VBLANK_SWAP 0x0f
#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_GEM_INIT 0x13
#define DRM_I915_GEM_EXECBUFFER 0x14
#define DRM_I915_GEM_PIN 0x15
#define DRM_I915_GEM_UNPIN 0x16
#define DRM_I915_GEM_BUSY 0x17
#define DRM_I915_GEM_THROTTLE 0x18
#define DRM_I915_GEM_ENTERVT 0x19
#define DRM_I915_GEM_LEAVEVT 0x1a
#define DRM_I915_GEM_CREATE 0x1b
#define DRM_I915_GEM_PREAD 0x1c
#define DRM_I915_GEM_PWRITE 0x1d
#define DRM_I915_GEM_MMAP 0x1e
#define DRM_I915_GEM_SET_DOMAIN 0x1f
#define DRM_I915_GEM_SW_FINISH 0x20
#define DRM_I915_GEM_SET_TILING 0x21
#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT 0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
#define DRM_I915_GEM_MADVISE 0x26
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
#define DRM_I915_OVERLAY_ATTRS 0x28
#define DRM_I915_GEM_EXECBUFFER2 0x29
#define DRM_I915_GEM_EXECBUFFER2_WR DRM_I915_GEM_EXECBUFFER2
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
#define DRM_I915_GEM_WAIT 0x2c
#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
#define DRM_I915_GEM_SET_CACHING 0x2f
#define DRM_I915_GEM_GET_CACHING 0x30
#define DRM_I915_REG_READ 0x31
#define DRM_I915_GET_RESET_STATS 0x32
#define DRM_I915_GEM_USERPTR 0x33
#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
#define DRM_I915_PERF_OPEN 0x36
#define DRM_I915_PERF_ADD_CONFIG 0x37
#define DRM_I915_PERF_REMOVE_CONFIG 0x38
#define DRM_I915_QUERY 0x39
#define DRM_I915_GEM_VM_CREATE 0x3a
#define DRM_I915_GEM_VM_DESTROY 0x3b
#define DRM_I915_GEM_CREATE_EXT 0x3c
/* Must be kept compact -- no holes */

#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_CREATE_EXT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)

/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;	/* agp offset */
	int used;	/* nr bytes in use */
	int DR1;	/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;	/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
	char __user *buf;	/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on irq's:
 */
typedef struct drm_i915_irq_emit {
	int __user *irq_seq;
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;
} drm_i915_irq_wait_t;

/*
 * Different modes of per-process Graphics Translation Table,
 * see I915_PARAM_HAS_ALIASING_PPGTT
 */
#define I915_GEM_PPGTT_NONE 0
#define I915_GEM_PPGTT_ALIASING 1
#define I915_GEM_PPGTT_FULL 2

/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE 1
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
#define I915_PARAM_NUM_FENCES_AVAIL 6
#define I915_PARAM_HAS_OVERLAY 7
#define I915_PARAM_HAS_PAGEFLIPPING 8
#define I915_PARAM_HAS_EXECBUF2 9
#define I915_PARAM_HAS_BSD 10
#define I915_PARAM_HAS_BLT 11
#define I915_PARAM_HAS_RELAXED_FENCING 12
#define I915_PARAM_HAS_COHERENT_RINGS 13
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
#define I915_PARAM_HAS_RELAXED_DELTA 15
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
#define I915_PARAM_HAS_LLC 17
#define I915_PARAM_HAS_ALIASING_PPGTT 18
#define I915_PARAM_HAS_WAIT_TIMEOUT 19
#define I915_PARAM_HAS_SEMAPHORES 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
#define I915_PARAM_HAS_VEBOX 22
#define I915_PARAM_HAS_SECURE_BATCHES 23
#define I915_PARAM_HAS_PINNED_BATCHES 24
#define I915_PARAM_HAS_EXEC_NO_RELOC 25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
#define I915_PARAM_HAS_WT 27
#define I915_PARAM_CMD_PARSER_VERSION 28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
#define I915_PARAM_MMAP_VERSION 30
#define I915_PARAM_HAS_BSD2 31
#define I915_PARAM_REVISION 32
#define I915_PARAM_SUBSLICE_TOTAL 33
#define I915_PARAM_EU_TOTAL 34
#define I915_PARAM_HAS_GPU_RESET 35
#define I915_PARAM_HAS_RESOURCE_STREAMER 36
#define I915_PARAM_HAS_EXEC_SOFTPIN 37
#define I915_PARAM_HAS_POOLED_EU 38
#define I915_PARAM_MIN_EU_IN_POOL 39
#define I915_PARAM_MMAP_GTT_VERSION 40

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
 * priorities and the driver will attempt to execute batches in priority order.
 * The param returns a capability bitmask, nonzero implies that the scheduler
 * is enabled, with different features present according to the mask.
 *
 * The initial priority for each batch is supplied by the context and is
 * controlled via I915_CONTEXT_PARAM_PRIORITY.
 */
#define I915_PARAM_HAS_SCHEDULER 41
#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)
/*
 * Indicates the 2k user priority levels are statically mapped into 3 buckets as
 * follows:
 *
 * -1k to -1 Low priority
 * 0 Normal priority
 * 1 to 1k Highest priority
 */
#define I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP (1ul << 5)

#define I915_PARAM_HUC_STATUS 42

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
 * synchronisation with implicit fencing on individual objects.
 * See EXEC_OBJECT_ASYNC.
 */
#define I915_PARAM_HAS_EXEC_ASYNC 43

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
 * both being able to pass in a sync_file fd to wait upon before executing,
 * and being able to return a new sync_file fd that is signaled when the
 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
 */
#define I915_PARAM_HAS_EXEC_FENCE 44

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
 * user specified buffers for post-mortem debugging of GPU hangs. See
 * EXEC_OBJECT_CAPTURE.
 */
#define I915_PARAM_HAS_EXEC_CAPTURE 45

#define I915_PARAM_SLICE_MASK 46

/* Assuming it's uniform for each slice, this queries the mask of subslices
 * per-slice for this system.
 */
#define I915_PARAM_SUBSLICE_MASK 47

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
 */
#define I915_PARAM_HAS_EXEC_BATCH_FIRST 48

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
 */
#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49

/*
 * Query whether every context (both per-file default and user created) is
 * isolated (insofar as HW supports). If this parameter is not true, then
 * freshly created contexts may inherit values from an existing context,
 * rather than default HW values. If true, it also ensures (insofar as HW
 * supports) that all state set by this context will not leak to any other
 * context.
 *
 * As not every engine across every gen supports contexts, the returned
 * value reports the support of context isolation for individual engines by
 * returning a bitmask of each engine class set to true if that class supports
 * isolation.
 */
#define I915_PARAM_HAS_CONTEXT_ISOLATION 50

/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
 * registers. This used to be fixed per platform but from CNL onwards, this
 * might vary depending on the parts.
 */
#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51

/*
 * Once upon a time we supposed that writes through the GGTT would be
 * immediately in physical memory (once flushed out of the CPU path). However,
 * on a few different processors and chipsets, this is not necessarily the case
 * as the writes appear to be buffered internally. Thus a read of the backing
 * storage (physical memory) via a different path (with different physical tags
 * to the indirect write via the GGTT) will see stale values from before
 * the GGTT write. Inside the kernel, we can for the most part keep track of
 * the different read/write domains in use (e.g. set-domain), but the assumption
 * of coherency is baked into the ABI, hence reporting its true state in this
 * parameter.
 *
 * Reports true when writes via mmap_gtt are immediately visible following an
 * lfence to flush the WCB.
 *
 * Reports false when writes via mmap_gtt are indeterminately delayed in an
 * internal buffer and are _not_ immediately visible to third parties accessing
 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
 * communications channel when reporting false is strongly discouraged.
 */
#define I915_PARAM_MMAP_GTT_COHERENT 52

/*
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
 * execution through use of explicit fence support.
 * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
 */
#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53

/*
 * Revision of the i915-perf uAPI. The value returned helps determine what
 * i915-perf features are available. See drm_i915_perf_property_id.
 */
#define I915_PARAM_PERF_REVISION 54

/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
 * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
 * I915_EXEC_USE_EXTENSIONS.
 */
#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55

/* Query if the kernel supports the I915_USERPTR_PROBE flag. */
#define I915_PARAM_HAS_USERPTR_PROBE 56

/* Must be kept compact -- no holes and well documented */

typedef struct drm_i915_getparam {
	__s32 param;
	/*
	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
	 * compat32 code. Don't repeat this mistake.
	 */
	int __user *value;
} drm_i915_getparam_t;
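
/*
 * Example: reading a single parameter. A minimal sketch, assuming fd is an
 * open DRM device node; error handling is omitted.
 *
 * .. code-block:: C
 *
 *	#include <sys/ioctl.h>
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_CHIPSET_ID,
 *		.value = &value,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0) {
 *		// value now holds the PCI device ID
 *	}
 */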

/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
#define I915_SETPARAM_NUM_USED_FENCES 4
/* Must be kept compact -- no holes */

typedef struct drm_i915_setparam {
	int param;
	int value;
} drm_i915_setparam_t;

/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;
	int alignment;
	int size;
	int __user *region_offset; /* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;
	int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;

/* Allow the memory manager to be torn down and re-initialized (e.g. on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;

/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define DRM_I915_VBLANK_PIPE_A 1
#define DRM_I915_VBLANK_PIPE_B 2

typedef struct drm_i915_vblank_pipe {
	int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

typedef struct drm_i915_hws_addr {
	__u64 addr;
} drm_i915_hws_addr_t;

struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	__u64 gtt_end;
};

struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	__u32 pad;
};
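
/*
 * Example: creating a one-page object. A minimal sketch, assuming fd is an
 * open DRM device node; error handling is omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_create create = {
 *		.size = 4096,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0) {
 *		// create.handle is the new (nonzero) object handle and
 *		// create.size the page-aligned allocated size.
 *	}
 */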

struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to read from */
	__u64 offset;
	/** Length of data to read */
	__u64 size;
	/**
	 * Pointer to write the data into.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	__u32 handle;
	__u32 pad;
	/** Offset into the object to write to */
	__u64 offset;
	/** Length of data to write */
	__u64 size;
	/**
	 * Pointer to read the data from.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 data_ptr;
};

struct drm_i915_gem_mmap {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/** Offset in the object to map. */
	__u64 offset;
	/**
	 * Length of data to map.
	 *
	 * The value will be page-aligned.
	 */
	__u64 size;
	/**
	 * Returned pointer the data was mapped at.
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 addr_ptr;

	/**
	 * Flags for extended behaviour.
	 *
	 * Added in version 2.
	 */
	__u64 flags;
#define I915_MMAP_WC 0x1
};

struct drm_i915_gem_mmap_gtt {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;
};

/**
 * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object.
 *
 * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl,
 * and is used to retrieve the fake offset to mmap an object specified by &handle.
 *
 * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+.
 * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave
 * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`.
 */
struct drm_i915_gem_mmap_offset {
	/** @handle: Handle for the object being mapped. */
	__u32 handle;
	/** @pad: Must be zero */
	__u32 pad;
	/**
	 * @offset: The fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;

	/**
	 * @flags: Flags for extended behaviour.
	 *
	 * It is mandatory that one of the `MMAP_OFFSET` types
	 * is included:
	 *
	 * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined)
	 * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching.
	 * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching.
	 * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching.
	 *
	 * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid
	 * type. On devices without local memory, this caching mode is invalid.
	 *
	 * As the caching mode when specifying `I915_MMAP_OFFSET_FIXED`, WC or WB will
	 * be used, depending on the object placement on creation. WB will be used
	 * when the object can only exist in system memory, WC otherwise.
	 */
	__u64 flags;

#define I915_MMAP_OFFSET_GTT 0
#define I915_MMAP_OFFSET_WC 1
#define I915_MMAP_OFFSET_WB 2
#define I915_MMAP_OFFSET_UC 3
#define I915_MMAP_OFFSET_FIXED 4

	/**
	 * @extensions: Zero-terminated chain of extensions.
	 *
	 * No current extensions defined; mbz.
	 */
	__u64 extensions;
};
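
/*
 * Example: the two-step mapping flow -- fetch the fake offset, then mmap the
 * DRM fd at that offset. A minimal sketch, assuming handle/size come from an
 * earlier DRM_IOCTL_I915_GEM_CREATE and that WB is a valid mode on this
 * device (per the note above, devices with local memory only accept
 * I915_MMAP_OFFSET_FIXED); error handling is omitted.
 *
 * .. code-block:: C
 *
 *	#include <sys/mman.h>
 *
 *	struct drm_i915_gem_mmap_offset mo = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WB,
 *	};
 *	void *ptr = NULL;
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mo) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, mo.offset);
 */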

/**
 * struct drm_i915_gem_set_domain - Adjust the object's write or read domain, in
 * preparation for accessing the pages via some CPU domain.
 *
 * Specifying a new write or read domain will flush the object out of the
 * previous domain (if required), before then updating the object's domain
 * tracking with the new domain.
 *
 * Note this might involve waiting for the object first if it is still active on
 * the GPU.
 *
 * Supported values for @read_domains and @write_domain:
 *
 * - I915_GEM_DOMAIN_WC: Uncached write-combined domain
 * - I915_GEM_DOMAIN_CPU: CPU cache domain
 * - I915_GEM_DOMAIN_GTT: Mappable aperture domain
 *
 * All other domains are rejected.
 *
 * Note that for discrete, starting from DG1, this is no longer supported, and
 * is instead rejected. On such platforms the CPU domain is effectively static,
 * where we also only support a single &drm_i915_gem_mmap_offset cache mode,
 * which can't be set explicitly and instead depends on the object placements,
 * as per the below.
 *
 * Implicit caching rules, starting from DG1:
 *
 * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
 *   contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
 *   mapped as write-combined only.
 *
 * - Everything else is always allocated and mapped as write-back, with the
 *   guarantee that everything is also coherent with the GPU.
 *
 * Note that this is likely to change in the future again, where we might need
 * more flexibility on future devices, so making this all explicit as part of a
 * new &drm_i915_gem_create_ext extension is probable.
 */
struct drm_i915_gem_set_domain {
	/** @handle: Handle for the object. */
	__u32 handle;

	/** @read_domains: New read domains. */
	__u32 read_domains;

	/**
	 * @write_domain: New write domain.
	 *
	 * Note that having something in the write domain implies it's in the
	 * read domain, and only that read domain.
	 */
	__u32 write_domain;
};
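
/*
 * Example: moving an object to the CPU domain for reading and writing before
 * a CPU mmap access. A minimal sketch (pre-DG1 platforms only, per the note
 * above); error handling is omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	// May block while the GPU finishes with the object.
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */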

struct drm_i915_gem_sw_finish {
	/** Handle for the object */
	__u32 handle;
};

struct drm_i915_gem_relocation_entry {
	/**
	 * Handle of the buffer being pointed to by this relocation entry.
	 *
	 * It's appealing to make this be an index into the mm_validate_entry
	 * list to refer to the buffer, but this allows the driver to create
	 * a relocation list for state buffers and not re-write it per
	 * exec using the buffer.
	 */
	__u32 target_handle;

	/**
	 * Value to be added to the offset of the target buffer to make up
	 * the relocation entry.
	 */
	__u32 delta;

	/** Offset in the buffer the relocation entry will be written into */
	__u64 offset;

	/**
	 * Offset value of the target buffer that the relocation entry was last
	 * written as.
	 *
	 * If the buffer has the same offset as last time, we can skip syncing
	 * and writing the relocation. This value is written back out by
	 * the execbuffer ioctl when the relocation is written.
	 */
	__u64 presumed_offset;

	/**
	 * Target memory domains read by this operation.
	 */
	__u32 read_domains;

	/**
	 * Target memory domains written by this operation.
	 *
	 * Note that only one domain may be written by the whole
	 * execbuffer operation, so that where there are conflicts,
	 * the application will get -EINVAL back.
	 */
	__u32 write_domain;
};
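
/*
 * Example: one relocation that patches a pointer in a batch so it holds the
 * GTT address of a target buffer. A minimal sketch; the 128-byte offset into
 * the batch and the cached last_gtt_offset are illustrative values only.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle = target_bo,
 *		.delta = 0,		// point at the start of target_bo
 *		.offset = 128,		// where in the batch to write the address
 *		// lets the kernel skip the write if the target has not moved
 *		.presumed_offset = last_gtt_offset,
 *		.read_domains = I915_GEM_DOMAIN_RENDER,
 *		.write_domain = 0,
 *	};
 */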

/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU 0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER 0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER 0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND 0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX 0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT 0x00000040
/** WC domain - uncached access */
#define I915_GEM_DOMAIN_WC 0x00000080
/** @} */

struct drm_i915_gem_exec_object {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	__u64 offset;
};

/* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
struct drm_i915_gem_execbuffer {
	/**
	 * List of buffers to be validated with their relocations to be
	 * performed on them.
	 *
	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
	 *
	 * These buffers must be listed in an order such that all relocations
	 * a buffer is performing refer to buffers that have already appeared
	 * in the validate list.
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/** This is a struct drm_clip_rect *cliprects */
	__u64 cliprects_ptr;
};

struct drm_i915_gem_exec_object2 {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	__u32 handle;

	/** Number of relocations to be performed on this buffer */
	__u32 relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	__u64 relocs_ptr;

	/** Required alignment in graphics aperture */
	__u64 alignment;

	/**
	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
	 * the user with the GTT offset at which this object will be pinned.
	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
	 * presumed_offset of the object.
	 * During execbuffer2 the kernel populates it with the value of the
	 * current GTT offset of the object, for future presumed_offset writes.
	 */
	__u64 offset;

#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
#define EXEC_OBJECT_NEEDS_GTT (1<<1)
#define EXEC_OBJECT_WRITE (1<<2)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define EXEC_OBJECT_PINNED (1<<4)
#define EXEC_OBJECT_PAD_TO_SIZE (1<<5)
/* The kernel implicitly tracks GPU activity on all GEM objects, and
 * synchronises operations with outstanding rendering. This includes
 * rendering on other devices if exported via dma-buf. However, sometimes
 * this tracking is too coarse and the user knows better. For example,
 * if the object is split into non-overlapping ranges shared between different
 * clients or engines (i.e. suballocating objects), the implicit tracking
 * by the kernel assumes that each operation affects the whole object rather
 * than an individual range, causing needless synchronisation between clients.
 * The kernel will also forgo any CPU cache flushes prior to rendering from
 * the object as the client is expected to be also handling such domain
 * tracking.
 *
 * The kernel maintains the implicit tracking in order to manage resources
 * used by the GPU - this flag only disables the synchronisation prior to
 * rendering with this object in this execbuf.
 *
 * Opting out of implicit synchronisation requires the user to do its own
 * explicit tracking to avoid rendering corruption. See, for example,
 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
 */
#define EXEC_OBJECT_ASYNC (1<<6)
/* Request that the contents of this execobject be copied into the error
 * state upon a GPU hang involving this batch for post-mortem debugging.
 * These buffers are recorded in no particular order as "user" in
 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
 * if the kernel supports this flag.
 */
#define EXEC_OBJECT_CAPTURE (1<<7)
/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
	__u64 flags;

	union {
		__u64 rsvd1;
		__u64 pad_to_size;
	};
	__u64 rsvd2;
};

struct drm_i915_gem_exec_fence {
	/**
	 * User's handle for a drm_syncobj to wait on or signal.
	 */
	__u32 handle;

#define I915_EXEC_FENCE_WAIT (1<<0)
#define I915_EXEC_FENCE_SIGNAL (1<<1)
#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
	__u32 flags;
};

/*
 * See drm_i915_gem_execbuffer_ext_timeline_fences.
 */
#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0

/*
 * This structure describes an array of drm_syncobj and associated points for
 * timeline variants of drm_syncobj. It is invalid to append this structure to
 * the execbuf if I915_EXEC_FENCE_ARRAY is set.
 */
struct drm_i915_gem_execbuffer_ext_timeline_fences {
	struct i915_user_extension base;

	/**
	 * Number of elements in the handles_ptr & value_ptr arrays.
	 */
	__u64 fence_count;

	/**
	 * Pointer to an array of struct drm_i915_gem_exec_fence of length
	 * fence_count.
	 */
	__u64 handles_ptr;

	/**
	 * Pointer to an array of u64 values of length fence_count. Values
	 * must be 0 for a binary drm_syncobj. A value of 0 for a timeline
	 * drm_syncobj is invalid as it turns a drm_syncobj into a binary one.
	 */
	__u64 values_ptr;
};
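
/*
 * Example: attaching one timeline fence to an execbuf via the extension
 * chain. A minimal sketch, assuming the kernel reports
 * I915_PARAM_HAS_EXEC_TIMELINE_FENCES and that syncobj_handle was created
 * elsewhere; error handling is omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_exec_fence fence = {
 *		.handle = syncobj_handle,
 *		.flags = I915_EXEC_FENCE_SIGNAL,
 *	};
 *	__u64 point = 1; // timeline point to signal
 *	struct drm_i915_gem_execbuffer_ext_timeline_fences ext = {
 *		.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
 *		.fence_count = 1,
 *		.handles_ptr = (uintptr_t)&fence,
 *		.values_ptr = (uintptr_t)&point,
 *	};
 *
 *	execbuf.flags |= I915_EXEC_USE_EXTENSIONS;
 *	execbuf.num_cliprects = 0;
 *	execbuf.cliprects_ptr = (uintptr_t)&ext.base;
 */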

struct drm_i915_gem_execbuffer2 {
	/**
	 * List of gem_exec_object2 structs
	 */
	__u64 buffers_ptr;
	__u32 buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	__u32 batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	__u32 batch_len;
	__u32 DR1;
	__u32 DR4;
	__u32 num_cliprects;
	/**
	 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
	 * & I915_EXEC_USE_EXTENSIONS are not set.
	 *
	 * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
	 * of struct drm_i915_gem_exec_fence and num_cliprects is the length
	 * of the array.
	 *
	 * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
	 * single struct i915_user_extension and num_cliprects is 0.
	 */
	__u64 cliprects_ptr;
#define I915_EXEC_RING_MASK (0x3f)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (2<<0)
#define I915_EXEC_BLT (3<<0)
#define I915_EXEC_VEBOX (4<<0)

/* Used for switching the constants addressing mode on gen4+ RENDER ring.
 * Gen6+ only supports relative addressing to dynamic state (default) and
 * absolute addressing.
 *
 * These flags are ignored for the BSD and BLT rings.
 */
#define I915_EXEC_CONSTANTS_MASK (3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
	__u64 flags;
	__u64 rsvd1; /* now used for context info */
	__u64 rsvd2;
};

/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)

/** Request a privileged ("secure") batch buffer. Note only available for
 * DRM_ROOT_ONLY | DRM_MASTER processes.
 */
#define I915_EXEC_SECURE (1<<9)

/** Inform the kernel that the batch is and will always be pinned. This
 * negates the requirement for a workaround to be performed to avoid
 * an incoherent CS (such as can be found on 830/845). If this flag is
 * not passed, the kernel will endeavour to make sure the batch is
 * coherent with the CS before execution. If this flag is passed,
 * userspace assumes the responsibility for ensuring the same.
 */
#define I915_EXEC_IS_PINNED (1<<10)

/** Provide a hint to the kernel that the command stream and auxiliary
 * state buffers already hold the correct presumed addresses and so the
 * relocation process may be skipped if no buffers need to be moved in
 * preparation for the execbuffer.
 */
#define I915_EXEC_NO_RELOC (1<<11)

/** Use the reloc.handle as an index into the exec object array rather
 * than as the per-file handle.
 */
#define I915_EXEC_HANDLE_LUT (1<<12)

/** Used for switching BSD rings on the platforms with two BSD rings */
#define I915_EXEC_BSD_SHIFT (13)
#define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT)
/* default ping-pong mode */
#define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT)
#define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT)

/** Tell the kernel that the batchbuffer is processed by
 * the resource streamer.
 */
#define I915_EXEC_RESOURCE_STREAMER (1<<15)

/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
 * the batch.
 *
 * Returns -EINVAL if the sync_file fd cannot be found.
 */
#define I915_EXEC_FENCE_IN (1<<16)

/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
 * to the caller, and it should be closed after use. (The fd is a regular
 * file descriptor and will be cleaned up on process termination. It holds
 * a reference to the request, but nothing else.)
 *
 * The sync_file fd can be combined with other sync_file and passed either
 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
 * will only occur after this request completes), or to other devices.
 *
 * Using I915_EXEC_FENCE_OUT requires use of
 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
 * back to userspace. Failure to do so will cause the out-fence to always
 * be reported as zero, and the real fence fd to be leaked.
 */
#define I915_EXEC_FENCE_OUT (1<<17)

/*
 * Traditionally the execbuf ioctl has only considered the final element in
 * the execobject[] to be the executable batch. Often though, the client
 * will know the batch object prior to construction and being able to place
 * it into the execobject[] array first can simplify the relocation tracking.
 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
 * execobject[] as the batch instead (the default is to use the last
 * element).
 */
#define I915_EXEC_BATCH_FIRST (1<<18)

/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
 * define an array of i915_gem_exec_fence structures which specify a set of
 * dma fences to wait upon or signal.
 */
#define I915_EXEC_FENCE_ARRAY (1<<19)

/*
 * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
 * the batch.
 *
 * Returns -EINVAL if the sync_file fd cannot be found.
 */
#define I915_EXEC_FENCE_SUBMIT (1 << 20)

/*
 * Setting I915_EXEC_USE_EXTENSIONS implies that
 * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to a linked
 * list of i915_user_extension. Each i915_user_extension node is the base of a
 * larger structure. The list of supported structures is given in the
 * drm_i915_gem_execbuffer_ext enum.
 */
#define I915_EXEC_USE_EXTENSIONS (1 << 21)

#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))

#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
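
/*
 * Example: a minimal two-object submission on the render engine. A sketch
 * only, assuming batch_bo/target_bo hold a valid batch and its data, and
 * ctx_id names an existing context (0 selects the default context);
 * relocations and error handling are omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_exec_object2 obj[2] = {
 *		{ .handle = target_bo },
 *		{ .handle = batch_bo }, // batch goes last by default
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)obj,
 *		.buffer_count = 2,
 *		.batch_start_offset = 0,
 *		.batch_len = batch_bytes,
 *		.flags = I915_EXEC_RENDER,
 *	};
 *	i915_execbuffer2_set_context_id(execbuf, ctx_id);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */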

struct drm_i915_gem_pin {
	/** Handle of the buffer to be pinned. */
	__u32 handle;
	__u32 pad;

	/** alignment required within the aperture */
	__u64 alignment;

	/** Returned GTT offset of the buffer. */
	__u64 offset;
};

struct drm_i915_gem_unpin {
	/** Handle of the buffer to be unpinned. */
	__u32 handle;
	__u32 pad;
};

struct drm_i915_gem_busy {
	/** Handle of the buffer to check for busy */
	__u32 handle;

	/** Return busy status
	 *
	 * A return of 0 implies that the object is idle (after
	 * having flushed any pending activity), and a non-zero return that
	 * the object is still in-flight on the GPU. (The GPU has not yet
	 * signaled completion for all pending requests that reference the
	 * object.) An object is guaranteed to become idle eventually (so
	 * long as no new GPU commands are executed upon it). Due to the
	 * asynchronous nature of the hardware, an object reported
	 * as busy may become idle before the ioctl is completed.
	 *
	 * Furthermore, if the object is busy, which engine is busy is only
	 * provided as a guide and only indirectly by reporting its class
	 * (there may be more than one engine in each class). There are race
	 * conditions which prevent the report of which engines are busy from
	 * being always accurate. However, the converse is not true. If the
	 * object is idle, the result of the ioctl, that all engines are idle,
	 * is accurate.
	 *
	 * The returned dword is split into two fields to indicate both
	 * the engine classes on which the object is being read, and the
	 * engine class on which it is currently being written (if any).
	 *
	 * The low word (bits 0:15) indicates if the object is being written
	 * to by any engine (there can only be one, as the GEM implicit
	 * synchronisation rules force writes to be serialised). Only the
	 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
	 * 1 not 0 etc) for the last write is reported.
	 *
	 * The high word (bits 16:31) is a bitmask of which engine classes
	 * are currently reading from the object. Multiple engines may be
	 * reading from the object simultaneously.
	 *
	 * The value of each engine class is the same as specified in the
	 * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e.
	 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
	 * Some hardware may have parallel execution engines, e.g. multiple
	 * media engines, which are mapped to the same class identifier and so
	 * are not separately reported for busyness.
	 *
	 * Caveat emptor:
	 * Only the boolean result of this query is reliable; that is whether
	 * the object is idle or busy. The report of which engines are busy
	 * should be only used as a heuristic.
	 */
	__u32 busy;
};
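
/*
 * Example: decoding the @busy word as described above. A minimal sketch;
 * error handling is omitted.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0 && busy.busy) {
 *		// Engine class + 1 of the last writer, 0 means no writer.
 *		__u16 write_class = busy.busy & 0xffff;
 *		// Bitmask of engine classes currently reading the object.
 *		__u16 read_classes = busy.busy >> 16;
 *		int read_by_render =
 *			read_classes & (1 << I915_ENGINE_CLASS_RENDER);
 *	}
 */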
1444
1445/**
1446 * struct drm_i915_gem_caching - Set or get the caching for given object
1447 * handle.
1448 *
1449 * Allow userspace to control the GTT caching bits for a given object when the
1450 * object is later mapped through the ppGTT(or GGTT on older platforms lacking
1451 * ppGTT support, or if the object is used for scanout). Note that this might
1452 * require unbinding the object from the GTT first, if its current caching value
1453 * doesn't match.
1454 *
1455 * Note that this all changes on discrete platforms: starting from DG1, the
1456 * set/get caching is no longer supported and is now rejected. Instead the CPU
1457 * caching attributes (WB vs WC) will become an immutable creation time property
1458 * for the object, along with the GTT caching level. For now we don't expose any
1459 * new uAPI for this; instead on DG1 this is all implicit, although this largely
1460 * shouldn't matter since DG1 is coherent by default (without any way of
1461 * controlling it).
1462 *
1463 * Implicit caching rules, starting from DG1:
1464 *
1465 * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
1466 * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
1467 * mapped as write-combined only.
1468 *
1469 * - Everything else is always allocated and mapped as write-back, with the
1470 * guarantee that everything is also coherent with the GPU.
1471 *
1472 * Note that this is likely to change in the future again, where we might need
1473 * more flexibility on future devices, so making this all explicit as part of a
1474 * new &drm_i915_gem_create_ext extension is probable.
1475 *
1476 * Side note: Part of the reason for this is that changing the at-allocation-time
1477 * CPU caching attributes for the pages might be required (and is expensive) if
1478 * we then need to CPU map the pages later with different caching attributes.
1479 * inconsistent caching behaviour, while supported on x86, is not universally
1480 * supported on other architectures. So for simplicity we opt for setting
1481 * everything at creation time, whilst also making it immutable, on discrete
1482 * platforms.
1483 */
1484struct drm_i915_gem_caching {
1485 /**
1486 * @handle: Handle of the buffer to set/get the caching level.
1487 */
1488 __u32 handle;
1489
1490 /**
1491 * @caching: The GTT caching level to apply or possible return value.
1492 *
1493 * The supported @caching values:
1494 *
1495 * I915_CACHING_NONE:
1496 *
1497 * GPU access is not coherent with CPU caches. Default for machines
1498 * without an LLC. This means manual flushing might be needed, if we
1499 * want GPU access to be coherent.
1500 *
1501 * I915_CACHING_CACHED:
1502 *
1503 * GPU access is coherent with CPU caches and furthermore the data is
1504 * cached in last-level caches shared between CPU cores and the GPU GT.
1505 *
1506 * I915_CACHING_DISPLAY:
1507 *
1508 * Special GPU caching mode which is coherent with the scanout engines.
1509 * Transparently falls back to I915_CACHING_NONE on platforms where no
1510 * special cache mode (like write-through or gfdt flushing) is
1511 * available. The kernel automatically sets this mode when using a
1512 * buffer as a scanout target. Userspace can manually set this mode to
1513 * avoid a costly stall and clflush in the hotpath of drawing the first
1514 * frame.
1515 */
1516#define I915_CACHING_NONE 0
1517#define I915_CACHING_CACHED 1
1518#define I915_CACHING_DISPLAY 2
1519 __u32 caching;
1520};
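
/*
 * As a sketch, opting an object into the shared LLC on platforms that still
 * support these ioctls might look like the following
 * (DRM_IOCTL_I915_GEM_SET_CACHING is defined earlier in this header; error
 * handling elided):
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */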
1521
1522#define I915_TILING_NONE 0
1523#define I915_TILING_X 1
1524#define I915_TILING_Y 2
1525/*
1526 * Do not add new tiling types here. The I915_TILING_* values are for
1527 * de-tiling fence registers that no longer exist on modern platforms. Although
1528 * the hardware may support new types of tiling in general (e.g., Tile4), we
1529 * do not need to add them to the uapi that is specific to now-defunct ioctls.
1530 */
1531#define I915_TILING_LAST I915_TILING_Y
1532
1533#define I915_BIT_6_SWIZZLE_NONE 0
1534#define I915_BIT_6_SWIZZLE_9 1
1535#define I915_BIT_6_SWIZZLE_9_10 2
1536#define I915_BIT_6_SWIZZLE_9_11 3
1537#define I915_BIT_6_SWIZZLE_9_10_11 4
1538/* Not seen by userland */
1539#define I915_BIT_6_SWIZZLE_UNKNOWN 5
1540/* Seen by userland. */
1541#define I915_BIT_6_SWIZZLE_9_17 6
1542#define I915_BIT_6_SWIZZLE_9_10_17 7
1543
1544struct drm_i915_gem_set_tiling {
1545 /** Handle of the buffer to have its tiling state updated */
1546 __u32 handle;
1547
1548 /**
1549 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1550 * I915_TILING_Y).
1551 *
1552 * This value is to be set on request, and will be updated by the
1553 * kernel on successful return with the actual chosen tiling layout.
1554 *
1555 * The tiling mode may be demoted to I915_TILING_NONE when the system
1556 * has bit 6 swizzling that can't be managed correctly by GEM.
1557 *
1558 * Buffer contents become undefined when changing tiling_mode.
1559 */
1560 __u32 tiling_mode;
1561
1562 /**
1563 * Stride in bytes for the object when in I915_TILING_X or
1564 * I915_TILING_Y.
1565 */
1566 __u32 stride;
1567
1568 /**
1569 * Returned address bit 6 swizzling required for CPU access through
1570 * mmap mapping.
1571 */
1572 __u32 swizzle_mode;
1573};
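
/*
 * As a sketch, requesting X-major tiling with an illustrative 512 byte stride
 * (DRM_IOCTL_I915_GEM_SET_TILING is defined earlier in this header). Note
 * that on return the struct reports what the kernel actually chose:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_set_tiling arg = {
 *		.handle = handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = 512,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg);
 *	// arg.tiling_mode may have been demoted to I915_TILING_NONE, and
 *	// arg.swizzle_mode reports any bit 6 swizzling in effect.
 */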
1574
1575struct drm_i915_gem_get_tiling {
1576 /** Handle of the buffer to get tiling state for. */
1577 __u32 handle;
1578
1579 /**
1580 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1581 * I915_TILING_Y).
1582 */
1583 __u32 tiling_mode;
1584
1585 /**
1586 * Returned address bit 6 swizzling required for CPU access through
1587 * mmap mapping.
1588 */
1589 __u32 swizzle_mode;
1590
1591 /**
1592 * Returned address bit 6 swizzling required for CPU access through
1593 * mmap mapping whilst bound.
1594 */
1595 __u32 phys_swizzle_mode;
1596};
1597
1598struct drm_i915_gem_get_aperture {
1599 /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
1600 __u64 aper_size;
1601
1602 /**
1603 * Available space in the aperture used by i915_gem_execbuffer, in
1604 * bytes
1605 */
1606 __u64 aper_available_size;
1607};
1608
1609struct drm_i915_get_pipe_from_crtc_id {
1610 /** ID of CRTC being requested **/
1611 __u32 crtc_id;
1612
1613 /** pipe of requested CRTC **/
1614 __u32 pipe;
1615};
1616
1617#define I915_MADV_WILLNEED 0
1618#define I915_MADV_DONTNEED 1
1619#define __I915_MADV_PURGED 2 /* internal state */
1620
1621struct drm_i915_gem_madvise {
1622 /** Handle of the buffer to change the backing store advice */
1623 __u32 handle;
1624
1625 /* Advice: either the buffer will be needed again in the near future,
1626 * or won't be and could be discarded under memory pressure.
1627 */
1628 __u32 madv;
1629
1630 /** Whether the backing store still exists. */
1631 __u32 retained;
1632};
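
/*
 * A minimal sketch of marking a buffer as purgeable while cached, and later
 * checking whether its backing store survived before reusing it
 * (DRM_IOCTL_I915_GEM_MADVISE is defined earlier in this header):
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_madvise arg = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_MADVISE, &arg); // may now be purged
 *
 *	arg.madv = I915_MADV_WILLNEED; // reclaim before reuse
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *	if (!arg.retained)
 *		; // contents were discarded and must be repopulated
 */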
1633
1634/* flags */
1635#define I915_OVERLAY_TYPE_MASK 0xff
1636#define I915_OVERLAY_YUV_PLANAR 0x01
1637#define I915_OVERLAY_YUV_PACKED 0x02
1638#define I915_OVERLAY_RGB 0x03
1639
1640#define I915_OVERLAY_DEPTH_MASK 0xff00
1641#define I915_OVERLAY_RGB24 0x1000
1642#define I915_OVERLAY_RGB16 0x2000
1643#define I915_OVERLAY_RGB15 0x3000
1644#define I915_OVERLAY_YUV422 0x0100
1645#define I915_OVERLAY_YUV411 0x0200
1646#define I915_OVERLAY_YUV420 0x0300
1647#define I915_OVERLAY_YUV410 0x0400
1648
1649#define I915_OVERLAY_SWAP_MASK 0xff0000
1650#define I915_OVERLAY_NO_SWAP 0x000000
1651#define I915_OVERLAY_UV_SWAP 0x010000
1652#define I915_OVERLAY_Y_SWAP 0x020000
1653#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
1654
1655#define I915_OVERLAY_FLAGS_MASK 0xff000000
1656#define I915_OVERLAY_ENABLE 0x01000000
1657
1658struct drm_intel_overlay_put_image {
1659 /* various flags and src format description */
1660 __u32 flags;
1661 /* source picture description */
1662 __u32 bo_handle;
1663 /* stride values and offsets are in bytes, buffer relative */
1664 __u16 stride_Y; /* stride for packed formats */
1665 __u16 stride_UV;
1666 __u32 offset_Y; /* offset for packed formats */
1667 __u32 offset_U;
1668 __u32 offset_V;
1669 /* in pixels */
1670 __u16 src_width;
1671 __u16 src_height;
1672 /* to compensate the scaling factors for partially covered surfaces */
1673 __u16 src_scan_width;
1674 __u16 src_scan_height;
1675 /* output crtc description */
1676 __u32 crtc_id;
1677 __u16 dst_x;
1678 __u16 dst_y;
1679 __u16 dst_width;
1680 __u16 dst_height;
1681};
1682
1683/* flags */
1684#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
1685#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
1686#define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)
1687struct drm_intel_overlay_attrs {
1688 __u32 flags;
1689 __u32 color_key;
1690 __s32 brightness;
1691 __u32 contrast;
1692 __u32 saturation;
1693 __u32 gamma0;
1694 __u32 gamma1;
1695 __u32 gamma2;
1696 __u32 gamma3;
1697 __u32 gamma4;
1698 __u32 gamma5;
1699};
1700
1701/*
1702 * Intel sprite handling
1703 *
1704 * Color keying works with a min/mask/max tuple. Both source and destination
1705 * color keying is allowed.
1706 *
1707 * Source keying:
1708 * Sprite pixels within the min & max values, masked against the color channels
1709 * specified in the mask field, will be transparent. All other pixels will
1710 * be displayed on top of the primary plane. For RGB surfaces, only the min
1711 * and mask fields will be used; ranged compares are not allowed.
1712 *
1713 * Destination keying:
1714 * Primary plane pixels that match the min value, masked against the color
1715 * channels specified in the mask field, will be replaced by corresponding
1716 * pixels from the sprite plane.
1717 *
1718 * Note that source & destination keying are exclusive; only one can be
1719 * active on a given plane.
1720 */
1721
1722#define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set
1723 * flags==0 to disable colorkeying.
1724 */
1725#define I915_SET_COLORKEY_DESTINATION (1<<1)
1726#define I915_SET_COLORKEY_SOURCE (1<<2)
1727struct drm_intel_sprite_colorkey {
1728 __u32 plane_id;
1729 __u32 min_value;
1730 __u32 channel_mask;
1731 __u32 max_value;
1732 __u32 flags;
1733};
1734
1735struct drm_i915_gem_wait {
1736 /** Handle of BO we shall wait on */
1737 __u32 bo_handle;
1738 __u32 flags;
1739 /** Number of nanoseconds to wait. Returns time remaining. */
1740 __s64 timeout_ns;
1741};
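
/*
 * A sketch of waiting on a buffer, first with a 100ms budget and then, if
 * that expires with -ETIME, indefinitely (a negative @timeout_ns is treated
 * as an unbounded wait; DRM_IOCTL_I915_GEM_WAIT is defined earlier in this
 * header):
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 100 * 1000 * 1000, // 100ms
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &wait) && errno == ETIME) {
 *		wait.timeout_ns = -1; // wait until idle
 *		ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *	}
 */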
1742
1743struct drm_i915_gem_context_create {
1744 __u32 ctx_id; /* output: id of new context*/
1745 __u32 pad;
1746};
1747
1748struct drm_i915_gem_context_create_ext {
1749 __u32 ctx_id; /* output: id of new context*/
1750 __u32 flags;
1751#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
1752#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1)
1753#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
1754 (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
1755 __u64 extensions;
1756};
1757
1758struct drm_i915_gem_context_param {
1759 __u32 ctx_id;
1760 __u32 size;
1761 __u64 param;
1762#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
1763/* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed. On the off chance
1764 * someone somewhere has attempted to use it, never re-use this context
1765 * param number.
1766 */
1767#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
1768#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
1769#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
1770#define I915_CONTEXT_PARAM_BANNABLE 0x5
1771#define I915_CONTEXT_PARAM_PRIORITY 0x6
1772#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
1773#define I915_CONTEXT_DEFAULT_PRIORITY 0
1774#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
1775 /*
1776 * When using the following param, value should be a pointer to
1777 * drm_i915_gem_context_param_sseu.
1778 */
1779#define I915_CONTEXT_PARAM_SSEU 0x7
1780
1781/*
1782 * Not all clients may want to attempt automatic recovery of a context after
1783 * a hang (for example, some clients may only submit very small incremental
1784 * batches relying on known logical state of previous batches which will never
1785 * recover correctly and each attempt will hang), and so would prefer that
1786 * the context is forever banned instead.
1787 *
1788 * If set to false (0), after a reset, subsequent (and in flight) rendering
1789 * from this context is discarded, and the client will need to create a new
1790 * context to use instead.
1791 *
1792 * If set to true (1), the kernel will automatically attempt to recover the
1793 * context by skipping the hanging batch and executing the next batch starting
1794 * from the default context state (discarding the incomplete logical context
1795 * state lost due to the reset).
1796 *
1797 * On creation, all new contexts are marked as recoverable.
1798 */
1799#define I915_CONTEXT_PARAM_RECOVERABLE 0x8
1800
1801 /*
1802 * The id of the associated virtual memory address space (ppGTT) of
1803 * this context. Can be retrieved and passed to another context
1804 * (on the same fd) for both to use the same ppGTT and so share
1805 * address layouts, and avoid reloading the page tables on context
1806 * switches between themselves.
1807 *
1808 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
1809 */
1810#define I915_CONTEXT_PARAM_VM 0x9
1811
1812/*
1813 * I915_CONTEXT_PARAM_ENGINES:
1814 *
1815 * Bind this context to operate on this subset of available engines. Henceforth,
1816 * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
1817 * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
1818 * and upwards. Slots 0...N are filled in using the specified (class, instance).
1819 * Use
1820 * engine_class: I915_ENGINE_CLASS_INVALID,
1821 * engine_instance: I915_ENGINE_CLASS_INVALID_NONE
1822 * to specify a gap in the array that can be filled in later, e.g. by a
1823 * virtual engine used for load balancing.
1824 *
1825 * Setting the number of engines bound to the context to 0, by passing a
1826 * zero-sized argument, will revert to the default settings.
1827 *
1828 * See struct i915_context_param_engines.
1829 *
1830 * Extensions:
1831 * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
1832 * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
1833 * i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT)
1834 */
1835#define I915_CONTEXT_PARAM_ENGINES 0xa
1836
1837/*
1838 * I915_CONTEXT_PARAM_PERSISTENCE:
1839 *
1840 * Allow the context and active rendering to survive the process until
1841 * completion. Persistence allows fire-and-forget clients to queue up a
1842 * bunch of work, hand the output over to a display server and then quit.
1843 * If the context is marked as not persistent, upon closing (either via
1844 * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
1845 * or process termination), the context and any outstanding requests will be
1846 * cancelled (and exported fences for cancelled requests marked as -EIO).
1847 *
1848 * By default, new contexts allow persistence.
1849 */
1850#define I915_CONTEXT_PARAM_PERSISTENCE 0xb
1851
1852/* This API has been removed. On the off chance someone somewhere has
1853 * attempted to use it, never re-use this context param number.
1854 */
1855#define I915_CONTEXT_PARAM_RINGSIZE 0xc
1856
1857/*
1858 * I915_CONTEXT_PARAM_PROTECTED_CONTENT:
1859 *
1860 * Mark that the context makes use of protected content, which will result
1861 * in the context being invalidated when the protected content session is.
1862 * Given that the protected content session is killed on suspend, the device
1863 * is kept awake for the lifetime of a protected context, so the user should
1864 * make sure to dispose of such contexts once done with them.
1865 * This flag can only be set at context creation time and, when set to true,
1866 * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE
1867 * to false. This flag can't be set to true in conjunction with setting the
1868 * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example:
1869 *
1870 * .. code-block:: C
1871 *
1872 * struct drm_i915_gem_context_create_ext_setparam p_protected = {
1873 * .base = {
1874 * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
1875 * },
1876 * .param = {
1877 * .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
1878 * .value = 1,
1879 * }
1880 * };
1881 * struct drm_i915_gem_context_create_ext_setparam p_norecover = {
1882 * .base = {
1883 * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
1884 * .next_extension = to_user_pointer(&p_protected),
1885 * },
1886 * .param = {
1887 * .param = I915_CONTEXT_PARAM_RECOVERABLE,
1888 * .value = 0,
1889 * }
1890 * };
1891 * struct drm_i915_gem_context_create_ext create = {
1892 * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
1893 * .extensions = to_user_pointer(&p_norecover),
1894 * };
1895 *
1896 * ctx_id = gem_context_create_ext(drm_fd, &create);
1897 *
1898 * In addition to the normal failure cases, setting this flag during context
1899 * creation can result in the following errors:
1900 *
1901 * -ENODEV: feature not available
1902 * -EPERM: trying to mark a recoverable or not bannable context as protected
1903 */
1904#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
1905/* Must be kept compact -- no holes and well documented */
1906
1907 __u64 value;
1908};
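
/*
 * A sketch of bumping a context to the maximum user priority with
 * DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM (defined earlier in this header);
 * note that priorities above the default may require elevated privileges:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = I915_CONTEXT_MAX_USER_PRIORITY,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 */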
1909
1910/*
1911 * Context SSEU programming
1912 *
1913 * It may be necessary, for either functional or performance reasons, to
1914 * configure a context to run with a reduced number of SSEU (where SSEU stands
1915 * for Slice/Sub-slice/EU).
1916 *
1917 * This is done by programming the desired SSEU configuration, using the below
1918 * struct drm_i915_gem_context_param_sseu, for every supported engine which
1919 * userspace intends to use.
1920 *
1921 * Not all GPUs or engines support this functionality, in which case an error
1922 * code of -ENODEV will be returned.
1923 *
1924 * Also, the flexibility of possible SSEU configuration permutations varies
1925 * between GPU generations and software imposed limitations. Requesting an
1926 * unsupported combination will return an error code of -EINVAL.
1927 *
1928 * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
1929 * favour of a single global setting.
1930 */
1931struct drm_i915_gem_context_param_sseu {
1932 /*
1933 * Engine class & instance to be configured or queried.
1934 */
1935 struct i915_engine_class_instance engine;
1936
1937 /*
1938 * Unknown flags must be cleared to zero.
1939 */
1940 __u32 flags;
1941#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
1942
1943 /*
1944 * Mask of slices to enable for the context. Valid values are a subset
1945 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
1946 */
1947 __u64 slice_mask;
1948
1949 /*
1950 * Mask of subslices to enable for the context. Valid values are a
1951 * subset of the bitmask value returned by I915_PARAM_SUBSLICE_MASK.
1952 */
1953 __u64 subslice_mask;
1954
1955 /*
1956 * Minimum/Maximum number of EUs to enable per subslice for the
1957 * context. min_eus_per_subslice must be less than or equal to
1958 * max_eus_per_subslice.
1959 */
1960 __u16 min_eus_per_subslice;
1961 __u16 max_eus_per_subslice;
1962
1963 /*
1964 * Unused for now. Must be cleared to zero.
1965 */
1966 __u32 rsvd;
1967};
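
/*
 * A sketch of querying the render engine's current SSEU configuration, where
 * &drm_i915_gem_context_param.value points at the struct above and .size is
 * set to its length (DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM is defined earlier
 * in this header):
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = { I915_ENGINE_CLASS_RENDER, 0 },
 *	};
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
 *	// sseu.slice_mask, sseu.subslice_mask etc. now hold the current values
 */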
1968
1969/**
1970 * DOC: Virtual Engine uAPI
1971 *
1972 * Virtual engine is a concept where userspace is able to configure a set of
1973 * physical engines, submit a batch buffer, and let the driver execute it on any
1974 * engine from the set as it sees fit.
1975 *
1976 * This is primarily useful on parts which have multiple instances of the same
1977 * engine class, like for example GT3+ Skylake parts with their two VCS engines.
1978 *
1979 * For instance userspace can enumerate all engines of a certain class using the
1980 * previously described `Engine Discovery uAPI`_. After that userspace can
1981 * create a GEM context with a placeholder slot for the virtual engine (using
1982 * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class
1983 * and instance respectively) and finally using the
1984 * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in
1985 * the same reserved slot.
1986 *
1987 * Example of creating a virtual engine and submitting a batch buffer to it:
1988 *
1989 * .. code-block:: C
1990 *
1991 * I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = {
1992 * .base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
1993 * .engine_index = 0, // Place this virtual engine into engine map slot 0
1994 * .num_siblings = 2,
1995 * .engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
1996 * { I915_ENGINE_CLASS_VIDEO, 1 }, },
1997 * };
1998 * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
1999 * .engines = { { I915_ENGINE_CLASS_INVALID,
2000 * I915_ENGINE_CLASS_INVALID_NONE } },
2001 * .extensions = to_user_pointer(&virtual), // Chains after load_balance extension
2002 * };
2003 * struct drm_i915_gem_context_create_ext_setparam p_engines = {
2004 * .base = {
2005 * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
2006 * },
2007 * .param = {
2008 * .param = I915_CONTEXT_PARAM_ENGINES,
2009 * .value = to_user_pointer(&engines),
2010 * .size = sizeof(engines),
2011 * },
2012 * };
2013 * struct drm_i915_gem_context_create_ext create = {
2014 * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
2015 * .extensions = to_user_pointer(&p_engines),
2016 * };
2017 *
2018 * ctx_id = gem_context_create_ext(drm_fd, &create);
2019 *
2020 * // Now we have created a GEM context with its engine map containing a
2021 * // single virtual engine. Submissions to this slot can go either to
2022 * // vcs0 or vcs1, depending on the load balancing algorithm used inside
2023 * // the driver. The load balancing is dynamic from one batch buffer to
2024 * // another and transparent to userspace.
2025 *
2026 * ...
2027 * execbuf.rsvd1 = ctx_id;
2028 * execbuf.flags = 0; // Submits to index 0 which is the virtual engine
2029 * gem_execbuf(drm_fd, &execbuf);
2030 */
2031
2032/*
2033 * i915_context_engines_load_balance:
2034 *
2035 * Enable load balancing across this set of engines.
2036 *
2037 * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
2038 * used will proxy the execbuffer request onto one of the set of engines
2039 * in such a way as to distribute the load evenly across the set.
2040 *
2041 * The set of engines must be compatible (e.g. the same HW class) as they
2042 * will share the same logical GPU context and ring.
2043 *
2044 * To intermix rendering with the virtual engine and direct rendering onto
2045 * the backing engines (bypassing the load balancing proxy), the context must
2046 * be defined to use a single timeline for all engines.
2047 */
2048struct i915_context_engines_load_balance {
2049 struct i915_user_extension base;
2050
2051 __u16 engine_index;
2052 __u16 num_siblings;
2053 __u32 flags; /* all undefined flags must be zero */
2054
2055 __u64 mbz64; /* reserved for future use; must be zero */
2056
2057 struct i915_engine_class_instance engines[0];
2058} __attribute__((packed));
2059
2060#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
2061 struct i915_user_extension base; \
2062 __u16 engine_index; \
2063 __u16 num_siblings; \
2064 __u32 flags; \
2065 __u64 mbz64; \
2066 struct i915_engine_class_instance engines[N__]; \
2067} __attribute__((packed)) name__
2068
2069/*
2070 * i915_context_engines_bond:
2071 *
2072 * Construct bonded pairs for execution within a virtual engine.
2073 *
2074 * All engines are equal, but some are more equal than others. Given
2075 * the distribution of resources in the HW, it may be preferable to run
2076 * a request on a given subset of engines in parallel to a request on a
2077 * specific engine. We enable this selection of engines within a virtual
2078 * engine by specifying bonding pairs, for any given master engine we will
2079 * only execute on one of the corresponding siblings within the virtual engine.
2080 *
2081 * To execute a request in parallel on the master engine and a sibling requires
2082 * coordination with a I915_EXEC_FENCE_SUBMIT.
2083 */
2084struct i915_context_engines_bond {
2085 struct i915_user_extension base;
2086
2087 struct i915_engine_class_instance master;
2088
2089 __u16 virtual_index; /* index of virtual engine in ctx->engines[] */
2090 __u16 num_bonds;
2091
2092 __u64 flags; /* all undefined flags must be zero */
2093 __u64 mbz64[4]; /* reserved for future use; must be zero */
2094
2095 struct i915_engine_class_instance engines[0];
2096} __attribute__((packed));
2097
2098#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
2099 struct i915_user_extension base; \
2100 struct i915_engine_class_instance master; \
2101 __u16 virtual_index; \
2102 __u16 num_bonds; \
2103 __u64 flags; \
2104 __u64 mbz64[4]; \
2105 struct i915_engine_class_instance engines[N__]; \
2106} __attribute__((packed)) name__
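
/*
 * A sketch of describing one bond with the helper macro above: if the master
 * request runs on vcs0, a bonded request submitted with I915_EXEC_FENCE_SUBMIT
 * to the virtual engine in slot 0 may only be placed on vcs1:
 *
 * .. code-block:: C
 *
 *	I915_DEFINE_CONTEXT_ENGINES_BOND(bond, 1) = {
 *		.base.name = I915_CONTEXT_ENGINES_EXT_BOND,
 *		.master = { I915_ENGINE_CLASS_VIDEO, 0 },
 *		.virtual_index = 0,
 *		.num_bonds = 1,
 *		.engines = { { I915_ENGINE_CLASS_VIDEO, 1 } },
 *	};
 */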
2107
2108/**
2109 * struct i915_context_engines_parallel_submit - Configure engine for
2110 * parallel submission.
2111 *
2112 * Set up a slot in the context engine map to allow multiple BBs to be submitted
2113 * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
2114 * in parallel. Multiple hardware contexts are created internally in the i915 to
2115 * run these BBs. Once a slot is configured for N BBs, only N BBs can be
2116 * submitted in each execbuf IOCTL, and this is implicit behavior, i.e. the user
2117 * doesn't tell the execbuf IOCTL there are N BBs; the execbuf IOCTL knows how
2118 * many BBs there are based on the slot's configuration. The N BBs are the last
2119 * N buffer objects, or the first N if I915_EXEC_BATCH_FIRST is set.
2120 *
2121 * The default placement behavior is to create implicit bonds between each
2122 * context if each context maps to more than 1 physical engine (e.g. context is
2123 * a virtual engine). Also we only allow contexts of the same engine class, and
2124 * these contexts must be in logically contiguous order. Examples of the
2125 * placement behavior are described below. Lastly, the default is to not allow
2126 * BBs to be preempted mid-batch. Rather, coordinated preemption points are
2127 * inserted on all hardware contexts between each set of BBs. Flags could be
2128 * added in the future to change both of these default behaviors.
2129 *
2130 * Returns -EINVAL if hardware context placement configuration is invalid or if
2131 * the placement configuration isn't supported on the platform / submission
2132 * interface.
2133 * Returns -ENODEV if extension isn't supported on the platform / submission
2134 * interface.
2135 *
2136 * .. code-block:: none
2137 *
2138 * Example syntax:
2139 * CS[X] = generic engine of same class, logical instance X
2140 * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
2141 *
2142 * Example 1 pseudo code:
2143 * set_engines(INVALID)
2144 * set_parallel(engine_index=0, width=2, num_siblings=1,
2145 * engines=CS[0],CS[1])
2146 *
2147 * Results in the following valid placement:
2148 * CS[0], CS[1]
2149 *
2150 * Example 2 pseudo code:
2151 * set_engines(INVALID)
2152 * set_parallel(engine_index=0, width=2, num_siblings=2,
2153 * engines=CS[0],CS[2],CS[1],CS[3])
2154 *
2155 * Results in the following valid placements:
2156 * CS[0], CS[1]
2157 * CS[2], CS[3]
2158 *
2159 * This can be thought of as two virtual engines, each containing two
2160 * engines thereby making a 2D array. However, there are bonds tying the
2161 * entries together and placing restrictions on how they can be scheduled.
2162 * Specifically, the scheduler can choose only vertical columns from the 2D
2163 * array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the
2164 * scheduler wants to submit to CS[0], it must also choose CS[1] and vice
2165 * versa. Likewise, submitting to CS[2] also requires using CS[3].
2166 * VE[0] = CS[0], CS[2]
2167 * VE[1] = CS[1], CS[3]
2168 *
2169 * Example 3 pseudo code:
2170 * set_engines(INVALID)
2171 * set_parallel(engine_index=0, width=2, num_siblings=2,
2172 * engines=CS[0],CS[1],CS[1],CS[3])
2173 *
2174 * Results in the following valid and invalid placements:
2175 * CS[0], CS[1]
2176 * CS[1], CS[3] - Not logically contiguous, return -EINVAL
2177 */
2178struct i915_context_engines_parallel_submit {
2179 /**
2180 * @base: base user extension.
2181 */
2182 struct i915_user_extension base;
2183
2184 /**
2185 * @engine_index: slot for parallel engine
2186 */
2187 __u16 engine_index;
2188
2189 /**
2190 * @width: number of contexts per parallel engine or in other words the
2191 * number of batches in each submission
2192 */
2193 __u16 width;
2194
2195 /**
2196 * @num_siblings: number of siblings per context or in other words the
2197 * number of possible placements for each submission
2198 */
2199 __u16 num_siblings;
2200
2201 /**
2202 * @mbz16: reserved for future use; must be zero
2203 */
2204 __u16 mbz16;
2205
2206 /**
2207 * @flags: all undefined flags must be zero; currently no flags are defined
2208 */
2209 __u64 flags;
2210
2211 /**
2212 * @mbz64: reserved for future use; must be zero
2213 */
2214 __u64 mbz64[3];
2215
2216 /**
2217 * @engines: 2-d array of engine instances to configure parallel engine
2218 *
2219 * length = width (i) * num_siblings (j)
2220 * index = j + i * num_siblings
2221 */
2222 struct i915_engine_class_instance engines[0];
2223
2224} __packed;
2225
2226#define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \
2227 struct i915_user_extension base; \
2228 __u16 engine_index; \
2229 __u16 width; \
2230 __u16 num_siblings; \
2231 __u16 mbz16; \
2232 __u64 flags; \
2233 __u64 mbz64[3]; \
2234 struct i915_engine_class_instance engines[N__]; \
2235} __attribute__((packed)) name__
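
/*
 * A sketch of configuring slot 0 for 2-wide parallel submission with fixed
 * placement (Example 1 above), using the helper macro and chaining it into
 * I915_CONTEXT_PARAM_ENGINES in the same way as the earlier examples:
 *
 * .. code-block:: C
 *
 *	I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(parallel, 2) = {
 *		.base.name = I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT,
 *		.engine_index = 0,
 *		.width = 2,
 *		.num_siblings = 1,
 *		.engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
 *			     { I915_ENGINE_CLASS_VIDEO, 1 } },
 *	};
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
 *		.engines = { { I915_ENGINE_CLASS_INVALID,
 *			       I915_ENGINE_CLASS_INVALID_NONE } },
 *		.extensions = to_user_pointer(&parallel),
 *	};
 */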
2236
2237/**
2238 * DOC: Context Engine Map uAPI
2239 *
2240 * Context engine map is a new way of addressing engines when submitting batch-
2241 * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT`
2242 * inside the flags field of `struct drm_i915_gem_execbuffer2`.
2243 *
2244 * To use it, created GEM contexts need to be configured with a list of engines
2245 * the user is intending to submit to. This is accomplished using the
2246 * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct
2247 * i915_context_param_engines`.
2248 *
2249 * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the
2250 * configured map.
2251 *
2252 * Example of creating such context and submitting against it:
2253 *
2254 * .. code-block:: C
2255 *
2256 * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
2257 * .engines = { { I915_ENGINE_CLASS_RENDER, 0 },
2258 * { I915_ENGINE_CLASS_COPY, 0 } }
2259 * };
2260 * struct drm_i915_gem_context_create_ext_setparam p_engines = {
2261 * .base = {
2262 * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
2263 * },
2264 * .param = {
2265 * .param = I915_CONTEXT_PARAM_ENGINES,
2266 * .value = to_user_pointer(&engines),
2267 * .size = sizeof(engines),
2268 * },
2269 * };
2270 * struct drm_i915_gem_context_create_ext create = {
2271 * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
2272 * .extensions = to_user_pointer(&p_engines),
2273 * };
2274 *
2275 * ctx_id = gem_context_create_ext(drm_fd, &create);
2276 *
2277 * // We have now created a GEM context with two engines in the map:
2278 * // Index 0 points to rcs0 while index 1 points to bcs0. Other engines
2279 * // will not be accessible from this context.
2280 *
2281 * ...
2282 * execbuf.rsvd1 = ctx_id;
2283 * execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context
2284 * gem_execbuf(drm_fd, &execbuf);
2285 *
2286 * ...
2287 * execbuf.rsvd1 = ctx_id;
2288 * execbuf.flags = 1; // Submits to index 1, which is bcs0 for this context
2289 * gem_execbuf(drm_fd, &execbuf);
2290 */
2291
2292struct i915_context_param_engines {
2293 __u64 extensions; /* linked chain of extension blocks, 0 terminates */
2294#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
2295#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
2296#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
2297 struct i915_engine_class_instance engines[0];
2298} __attribute__((packed));
2299
2300#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
2301 __u64 extensions; \
2302 struct i915_engine_class_instance engines[N__]; \
2303} __attribute__((packed)) name__
2304
2305struct drm_i915_gem_context_create_ext_setparam {
2306#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
2307 struct i915_user_extension base;
2308 struct drm_i915_gem_context_param param;
2309};
2310
2311/* This API has been removed. On the off chance someone somewhere has
2312 * attempted to use it, never re-use this extension number.
2313 */
2314#define I915_CONTEXT_CREATE_EXT_CLONE 1
2315
2316struct drm_i915_gem_context_destroy {
2317 __u32 ctx_id;
2318 __u32 pad;
2319};
2320
2321/*
2322 * DRM_I915_GEM_VM_CREATE -
2323 *
2324 * Create a new virtual memory address space (ppGTT) for use within a context
2325 * on the same file. Extensions can be provided to configure exactly how the
2326 * address space is setup upon creation.
2327 *
2328 * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
2329 * returned in the outparam @id.
2330 *
2331 * No flags are currently defined; all bits are reserved and must be zero.
2332 *
2333 * An extension chain may be provided, starting with @extensions, and terminated
2334 * by the @next_extension being 0. Currently, no extensions are defined.
2335 *
2336 * DRM_I915_GEM_VM_DESTROY -
2337 *
2338 * Destroys a previously created VM id, specified in @id.
2339 *
2340 * No extensions or flags are allowed currently, and so must be zero.
2341 */
2342struct drm_i915_gem_vm_control {
2343 __u64 extensions;
2344 __u32 flags;
2345 __u32 vm_id;
2346};
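
/*
 * A sketch of creating a ppGTT and sharing it between two contexts on the
 * same fd via I915_CONTEXT_PARAM_VM (both ioctls are defined earlier in this
 * header; error handling elided):
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_vm_control vm = {};
 *	struct drm_i915_gem_context_param arg = {
 *		.param = I915_CONTEXT_PARAM_VM,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);
 *	arg.value = vm.vm_id;
 *
 *	arg.ctx_id = ctx_a;
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 *	arg.ctx_id = ctx_b;
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 */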
2347
2348struct drm_i915_reg_read {
2349 /*
2350 * Register offset.
2351 * For 64bit wide registers where the upper 32bits don't immediately
2352 * follow the lower 32bits, the offset of the lower 32bits must
2353 * be specified.
2354 */
2355 __u64 offset;
2356#define I915_REG_READ_8B_WA (1ul << 0)
2357
2358 __u64 val; /* Return value */
2359};
2360
2361/* Known registers:
2362 *
2363 * Render engine timestamp - 0x2358 + 64bit - gen7+
2364 * - Note this register returns an invalid value if read using the default
2365 * single-instruction 8-byte read; to work around that, pass the
2366 * I915_REG_READ_8B_WA flag in the offset field.
2367 *
2368 */
2369
2370struct drm_i915_reset_stats {
2371 __u32 ctx_id;
2372 __u32 flags;
2373
2374 /* All resets since boot/module reload, for all contexts */
2375 __u32 reset_count;
2376
2377 /* Number of batches lost when active in GPU, for this context */
2378 __u32 batch_active;
2379
2380 /* Number of batches lost pending for execution, for this context */
2381 __u32 batch_pending;
2382
2383 __u32 pad;
2384};
2385
2386/**
2387 * struct drm_i915_gem_userptr - Create GEM object from user allocated memory.
2388 *
2389 * Userptr objects have several restrictions on what ioctls can be used with the
2390 * object handle.
2391 */
2392struct drm_i915_gem_userptr {
2393 /**
2394 * @user_ptr: The pointer to the allocated memory.
2395 *
2396 * Needs to be aligned to PAGE_SIZE.
2397 */
2398 __u64 user_ptr;
2399
2400 /**
2401 * @user_size:
2402 *
2403 * The size in bytes for the allocated memory. This will also become the
2404 * object size.
2405 *
2406 * Needs to be aligned to PAGE_SIZE, and should be at least PAGE_SIZE
2407 * in size.
2408 */
2409 __u64 user_size;
2410
2411 /**
2412 * @flags:
2413 *
2414 * Supported flags:
2415 *
2416 * I915_USERPTR_READ_ONLY:
2417 *
2418 * Mark the object as readonly, this also means GPU access can only be
2419 * readonly. This is only supported on HW which supports readonly access
2420 * through the GTT. If the HW can't support readonly access, an error is
2421 * returned.
2422 *
2423 * I915_USERPTR_PROBE:
2424 *
2425 * Probe the provided @user_ptr range and validate that the @user_ptr is
2426 * indeed pointing to normal memory and that the range is also valid.
2427 * For example, if some garbage address is given to the kernel, then this
2428 * should complain.
2429 *
2430 * Returns -EFAULT if the probe failed.
2431 *
2432 * Note that this doesn't populate the backing pages, and also doesn't
2433 * guarantee that the object will remain valid when the object is
2434 * eventually used.
2435 *
2436 * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE
2437 * returns a non-zero value.
2438 *
2439 * I915_USERPTR_UNSYNCHRONIZED:
2440 *
2441 * NOT USED. Setting this flag will result in an error.
2442 */
2443 __u32 flags;
2444#define I915_USERPTR_READ_ONLY 0x1
2445#define I915_USERPTR_PROBE 0x2
2446#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
2447 /**
2448 * @handle: Returned handle for the object.
2449 *
2450 * Object handles are nonzero.
2451 */
2452 __u32 handle;
2453};
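
/*
 * A sketch of wrapping an existing page-aligned allocation in a GEM object,
 * probing the range up front (DRM_IOCTL_I915_GEM_USERPTR is defined earlier
 * in this header; the 4096 assumes a 4K PAGE_SIZE, and I915_USERPTR_PROBE
 * additionally requires I915_PARAM_HAS_USERPTR_PROBE support):
 *
 * .. code-block:: C
 *
 *	void *ptr = aligned_alloc(4096, 2 * 4096);
 *	struct drm_i915_gem_userptr userptr = {
 *		.user_ptr = (uintptr_t)ptr,
 *		.user_size = 2 * 4096,
 *		.flags = I915_USERPTR_PROBE,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr) == 0)
 *		; // userptr.handle now refers to the memory above
 */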
2454
2455enum drm_i915_oa_format {
2456 I915_OA_FORMAT_A13 = 1, /* HSW only */
2457 I915_OA_FORMAT_A29, /* HSW only */
2458 I915_OA_FORMAT_A13_B8_C8, /* HSW only */
2459 I915_OA_FORMAT_B4_C8, /* HSW only */
2460 I915_OA_FORMAT_A45_B8_C8, /* HSW only */
2461 I915_OA_FORMAT_B4_C8_A16, /* HSW only */
2462 I915_OA_FORMAT_C4_B8, /* HSW+ */
2463
2464 /* Gen8+ */
2465 I915_OA_FORMAT_A12,
2466 I915_OA_FORMAT_A12_B8_C8,
2467 I915_OA_FORMAT_A32u40_A4u32_B8_C8,
2468
2469 I915_OA_FORMAT_MAX /* non-ABI */
2470};
2471
2472enum drm_i915_perf_property_id {
2473 /**
2474 * Open the stream for a specific context handle (as used with
2475 * execbuffer2). A stream opened for a specific context this way
2476 * won't typically require root privileges.
2477 *
2478 * This property is available in perf revision 1.
2479 */
2480 DRM_I915_PERF_PROP_CTX_HANDLE = 1,
2481
2482 /**
2483 * A value of 1 requests the inclusion of raw OA unit reports as
2484 * part of stream samples.
2485 *
2486 * This property is available in perf revision 1.
2487 */
2488 DRM_I915_PERF_PROP_SAMPLE_OA,
2489
2490 /**
2491 * The value specifies which set of OA unit metrics should be
2492 * configured, defining the contents of any OA unit reports.
2493 *
2494 * This property is available in perf revision 1.
2495 */
2496 DRM_I915_PERF_PROP_OA_METRICS_SET,
2497
2498 /**
2499 * The value specifies the size and layout of OA unit reports.
2500 *
2501 * This property is available in perf revision 1.
2502 */
2503 DRM_I915_PERF_PROP_OA_FORMAT,
2504
2505 /**
2506 * Specifying this property implicitly requests periodic OA unit
2507 * sampling and (at least on Haswell) the sampling frequency is derived
2508 * from this exponent as follows:
2509 *
2510 * 80ns * 2^(period_exponent + 1)
2511 *
2512 * This property is available in perf revision 1.
2513 */
2514 DRM_I915_PERF_PROP_OA_EXPONENT,
2515
2516 /**
2517 * Specifying this property is only valid when specifying a context to
2518 * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
2519 * will hold preemption of the particular context we want to gather
2520 * performance data about. The execbuf2 submissions must include a
2521 * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
2522 *
2523 * This property is available in perf revision 3.
2524 */
2525 DRM_I915_PERF_PROP_HOLD_PREEMPTION,
2526
2527 /**
2528 * Specifying this pins all contexts to the specified SSEU power
2529 * configuration for the duration of the recording.
2530 *
2531 * This parameter's value is a pointer to a struct
2532 * drm_i915_gem_context_param_sseu.
2533 *
2534 * This property is available in perf revision 4.
2535 */
2536 DRM_I915_PERF_PROP_GLOBAL_SSEU,
2537
2538 /**
2539 * This optional parameter specifies the timer interval in nanoseconds
2540 * at which the i915 driver will check the OA buffer for available data.
2541 * Minimum allowed value is 100 microseconds. A default value is used by
2542 * the driver if this parameter is not specified. Note that larger timer
2543 * values will reduce CPU consumption during OA perf captures. However,
2544 * excessively large values would potentially result in OA buffer
2545 * overwrites as captures reach the end of the OA buffer.
2546 *
2547 * This property is available in perf revision 5.
2548 */
2549 DRM_I915_PERF_PROP_POLL_OA_PERIOD,
2550
2551 DRM_I915_PERF_PROP_MAX /* non-ABI */
2552};
2553
2554struct drm_i915_perf_open_param {
2555 __u32 flags;
2556#define I915_PERF_FLAG_FD_CLOEXEC (1<<0)
2557#define I915_PERF_FLAG_FD_NONBLOCK (1<<1)
2558#define I915_PERF_FLAG_DISABLED (1<<2)
2559
2560 /** The number of u64 (id, value) pairs */
2561 __u32 num_properties;
2562
2563 /**
2564 * Pointer to array of u64 (id, value) pairs configuring the stream
2565 * to open.
2566 */
2567 __u64 properties_ptr;
2568};
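
/*
 * A sketch of opening an OA stream: the properties are passed as tightly
 * packed (id, value) u64 pairs. Here metrics_set stands for a configuration
 * id obtained elsewhere (e.g. via DRM_IOCTL_I915_PERF_ADD_CONFIG or sysfs),
 * and the chosen exponent is purely illustrative:
 *
 * .. code-block:: C
 *
 *	__u64 properties[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 13,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(properties) / (2 * sizeof(__u64)),
 *		.properties_ptr = (uintptr_t)properties,
 *	};
 *
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */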
2569
2570/*
2571 * Enable data capture for a stream that was either opened in a disabled state
2572 * via I915_PERF_FLAG_DISABLED or was later disabled via
2573 * I915_PERF_IOCTL_DISABLE.
2574 *
2575 * It is intended to be cheaper to disable and enable a stream than it may be
2576 * to close and re-open a stream with the same configuration.
2577 *
2578 * It's undefined whether any pending data for the stream will be lost.
2579 *
2580 * This ioctl is available in perf revision 1.
2581 */
2582#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
2583
2584/*
2585 * Disable data capture for a stream.
2586 *
2587 * It is an error to try and read a stream that is disabled.
2588 *
2589 * This ioctl is available in perf revision 1.
2590 */
2591#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
2592
2593/*
2594 * Change metrics_set captured by a stream.
2595 *
2596 * If the stream is bound to a specific context, the configuration change
2597 * will be performed inline with that context such that it takes effect before
2598 * the next execbuf submission.
2599 *
2600 * Returns the previously bound metrics set id, or a negative error code.
2601 *
2602 * This ioctl is available in perf revision 2.
2603 */
2604#define I915_PERF_IOCTL_CONFIG _IO('i', 0x2)
2605
2606/*
2607 * Common to all i915 perf records
2608 */
2609struct drm_i915_perf_record_header {
2610 __u32 type;
2611 __u16 pad;
2612 __u16 size;
2613};
2614
2615enum drm_i915_perf_record_type {
2616
2617 /**
2618 * Samples are the workhorse record type whose contents are extensible
2619 * and defined when opening an i915 perf stream based on the given
2620 * properties.
2621 *
2622 * Boolean properties following the naming convention
2623 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
2624 * every sample.
2625 *
2626 * The order of these sample properties given by userspace has no
2627 * effect on the ordering of data within a sample. The order is
2628 * documented here.
2629 *
2630 * struct {
2631 * struct drm_i915_perf_record_header header;
2632 *
2633 * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
2634 * };
2635 */
2636 DRM_I915_PERF_RECORD_SAMPLE = 1,
2637
2638 /*
2639 * Indicates that one or more OA reports were not written by the
2640 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
2641 * command collides with periodic sampling - which would be more likely
2642 * at higher sampling frequencies.
2643 */
2644 DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
2645
2646 /**
2647 * An error occurred that resulted in all pending OA reports being lost.
2648 */
2649 DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
2650
2651 DRM_I915_PERF_RECORD_MAX /* non-ABI */
2652};
2653
2654/*
2655 * Structure to upload perf dynamic configuration into the kernel.
2656 */
2657struct drm_i915_perf_oa_config {
2658 /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
2659 char uuid[36];
2660
2661 __u32 n_mux_regs;
2662 __u32 n_boolean_regs;
2663 __u32 n_flex_regs;
2664
2665 /*
2666 * These fields are pointers to tuples of u32 values (register address,
2667 * value). For example the expected length of the buffer pointed by
2668 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
2669 */
2670 __u64 mux_regs_ptr;
2671 __u64 boolean_regs_ptr;
2672 __u64 flex_regs_ptr;
2673};
2674
2675/**
2676 * struct drm_i915_query_item - An individual query for the kernel to process.
2677 *
2678 * The behaviour is determined by the @query_id. Note that exactly what
2679 * @data_ptr points to also depends on the specific @query_id.
2680 */
2681struct drm_i915_query_item {
2682 /** @query_id: The id for this query */
2683 __u64 query_id;
2684#define DRM_I915_QUERY_TOPOLOGY_INFO 1
2685#define DRM_I915_QUERY_ENGINE_INFO 2
2686#define DRM_I915_QUERY_PERF_CONFIG 3
2687#define DRM_I915_QUERY_MEMORY_REGIONS 4
2688/* Must be kept compact -- no holes and well documented */
2689
2690 /**
2691 * @length:
2692 *
2693 * When set to zero by userspace, this is filled with the size of the
2694 * data to be written at the @data_ptr pointer. The kernel sets this
2695 * value to a negative value to signal an error on a particular query
2696 * item.
2697 */
2698 __s32 length;
2699
2700 /**
2701 * @flags:
2702 *
2703 * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
2704 *
2705 * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
2706 * following:
2707 *
2708 * - DRM_I915_QUERY_PERF_CONFIG_LIST
2709 * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
2710 * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
2711 */
2712 __u32 flags;
2713#define DRM_I915_QUERY_PERF_CONFIG_LIST 1
2714#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
2715#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3
2716
2717 /**
2718 * @data_ptr:
2719 *
2720 * Data will be written at the location pointed by @data_ptr when the
2721 * value of @length matches the length of the data to be written by the
2722 * kernel.
2723 */
2724 __u64 data_ptr;
2725};
2726
2727/**
2728 * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the
2729 * kernel to fill out.
2730 *
2731 * Note that this is generally a two-step process for each struct
2732 * drm_i915_query_item in the array:
2733 *
2734 * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct
2735 * drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
2736 * kernel will then fill in the size, in bytes, which tells userspace how
2737 * memory it needs to allocate for the blob(say for an array of properties).
2738 *
2739 * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
2740 * &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that
2741 * the &drm_i915_query_item.length should still be the same as what the
2742 * kernel previously set. At this point the kernel can fill in the blob.
2743 *
2744 * Note that for some query items it can make sense for userspace to just pass
2745 * in a buffer/blob equal to or larger than the required size. In this case only
2746 * a single ioctl call is needed. For some smaller query items this can work
2747 * quite well.
2748 *
2749 */
2750struct drm_i915_query {
2751 /** @num_items: The number of elements in the @items_ptr array */
2752 __u32 num_items;
2753
2754 /**
2755 * @flags: Unused for now. Must be cleared to zero.
2756 */
2757 __u32 flags;
2758
2759 /**
2760 * @items_ptr:
2761 *
2762 * Pointer to an array of struct drm_i915_query_item. The number of
2763 * array elements is @num_items.
2764 */
2765 __u64 items_ptr;
2766};
2767
2768/*
2769 * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
2770 *
2771 * data: contains the 3 pieces of information:
2772 *
2773 * - the slice mask with one bit per slice telling whether a slice is
2774 * available. The availability of slice X can be queried with the following
2775 * formula:
2776 *
2777 * (data[X / 8] >> (X % 8)) & 1
2778 *
2779 * - the subslice mask for each slice with one bit per subslice telling
2780 * whether a subslice is available. Gen12 has dual-subslices, which are
2781 * similar to two gen11 subslices. For gen12, this array represents dual-
2782 * subslices. The availability of subslice Y in slice X can be queried
2783 * with the following formula:
2784 *
2785 * (data[subslice_offset +
2786 * X * subslice_stride +
2787 * Y / 8] >> (Y % 8)) & 1
2788 *
2789 * - the EU mask for each subslice in each slice with one bit per EU telling
2790 * whether an EU is available. The availability of EU Z in subslice Y in
2791 * slice X can be queried with the following formula:
2792 *
2793 * (data[eu_offset +
2794 * (X * max_subslices + Y) * eu_stride +
2795 * Z / 8] >> (Z % 8)) & 1
2796 */
2797struct drm_i915_query_topology_info {
2798 /*
2799 * Unused for now. Must be cleared to zero.
2800 */
2801 __u16 flags;
2802
2803 __u16 max_slices;
2804 __u16 max_subslices;
2805 __u16 max_eus_per_subslice;
2806
2807 /*
2808 * Offset in data[] at which the subslice masks are stored.
2809 */
2810 __u16 subslice_offset;
2811
2812 /*
2813 * Stride at which each of the subslice masks for each slice are
2814 * stored.
2815 */
2816 __u16 subslice_stride;
2817
2818 /*
2819 * Offset in data[] at which the EU masks are stored.
2820 */
2821 __u16 eu_offset;
2822
2823 /*
2824 * Stride at which each of the EU masks for each subslice are stored.
2825 */
2826 __u16 eu_stride;
2827
2828 __u8 data[];
2829};
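
/*
 * A sketch of evaluating the first two formulas from the comment above, once
 * info points at the kernel-filled blob (the EU check follows the same
 * pattern using eu_offset and eu_stride):
 *
 * .. code-block:: C
 *
 *	static bool has_slice(const struct drm_i915_query_topology_info *info,
 *			      int s)
 *	{
 *		return (info->data[s / 8] >> (s % 8)) & 1;
 *	}
 *
 *	static bool has_subslice(const struct drm_i915_query_topology_info *info,
 *				 int s, int ss)
 *	{
 *		return (info->data[info->subslice_offset +
 *				   s * info->subslice_stride +
 *				   ss / 8] >> (ss % 8)) & 1;
 *	}
 */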
2830
2831/**
2832 * DOC: Engine Discovery uAPI
2833 *
2834 * Engine discovery uAPI is a way of enumerating physical engines present in a
2835 * GPU associated with an open i915 DRM file descriptor. This supersedes the old
2836 * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like
2837 * `I915_PARAM_HAS_BLT`.
2838 *
2839 * The need for this interface arose starting with Icelake and newer GPUs, which
2840 * started to establish a pattern of having multiple engines of the same class,
2841 * where not all instances were always completely functionally equivalent.
2842 *
2843 * Entry point for this uAPI is `DRM_IOCTL_I915_QUERY` with the
2844 * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id.
2845 *
2846 * Example for getting the list of engines:
2847 *
2848 * .. code-block:: C
2849 *
2850 * struct drm_i915_query_engine_info *info;
2851 * struct drm_i915_query_item item = {
2852 * .query_id = DRM_I915_QUERY_ENGINE_INFO,
2853 * };
2854 * struct drm_i915_query query = {
2855 * .num_items = 1,
2856 * .items_ptr = (uintptr_t)&item,
2857 * };
2858 * int err, i;
2859 *
2860 * // First query the size of the blob we need, this needs to be large
2861 * // enough to hold our array of engines. The kernel will fill out the
2862 * // item.length for us, which is the number of bytes we need.
2863 * //
2864 * // Alternatively a large buffer can be allocated straight away enabling
2865 * // querying in one pass, in which case item.length should contain the
2866 * // length of the provided buffer.
2867 * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
2868 * if (err) ...
2869 *
2870 * info = calloc(1, item.length);
2871 * // Now that we allocated the required number of bytes, we call the ioctl
2872 * // again, this time with the data_ptr pointing to our newly allocated
2873 * // blob, which the kernel can then populate with info on all engines.
2874 * item.data_ptr = (uintptr_t)info;
2875 *
2876 * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
2877 * if (err) ...
2878 *
2879 * // We can now access each engine in the array
2880 * for (i = 0; i < info->num_engines; i++) {
2881 * struct drm_i915_engine_info einfo = info->engines[i];
2882 * u16 class = einfo.engine.class;
2883 * u16 instance = einfo.engine.instance;
2884 * ....
2885 * }
2886 *
2887 * free(info);
2888 *
2889 * Each of the enumerated engines, apart from being defined by its class and
2890 * instance (see `struct i915_engine_class_instance`), also can have flags and
2891 * capabilities defined as documented in i915_drm.h.
2892 *
2893 * For instance video engines which support HEVC encoding will have the
2894 * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set.
2895 *
2896 * Engine discovery only fully comes into its own when combined with the new way
2897 * of addressing engines when submitting batch buffers using contexts with
2898 * engine maps configured.
2899 */
2900
2901/**
2902 * struct drm_i915_engine_info
2903 *
2904 * Describes one engine and its capabilities as known to the driver.
2905 */
2906struct drm_i915_engine_info {
2907 /** @engine: Engine class and instance. */
2908 struct i915_engine_class_instance engine;
2909
2910 /** @rsvd0: Reserved field. */
2911 __u32 rsvd0;
2912
2913 /** @flags: Engine flags. */
2914 __u64 flags;
2915#define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE (1 << 0)
2916
2917 /** @capabilities: Capabilities of this engine. */
2918 __u64 capabilities;
2919#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
2920#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
2921
2922 /** @logical_instance: Logical instance of engine */
2923 __u16 logical_instance;
2924
2925 /** @rsvd1: Reserved fields. */
2926 __u16 rsvd1[3];
2927 /** @rsvd2: Reserved fields. */
2928 __u64 rsvd2[3];
2929};
2930
2931/**
2932 * struct drm_i915_query_engine_info
2933 *
2934 * Engine info query enumerates all engines known to the driver by filling in
2935 * an array of struct drm_i915_engine_info structures.
2936 */
2937struct drm_i915_query_engine_info {
2938 /** @num_engines: Number of struct drm_i915_engine_info structs following. */
2939 __u32 num_engines;
2940
2941 /** @rsvd: MBZ */
2942 __u32 rsvd[3];
2943
2944 /** @engines: Marker for drm_i915_engine_info structures. */
2945 struct drm_i915_engine_info engines[];
2946};
2947
2948/*
2949 * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
2950 */
2951struct drm_i915_query_perf_config {
2952 union {
2953 /*
2954 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
2955 * this field to the number of configurations available.
2956 */
2957 __u64 n_configs;
2958
2959 /*
2960 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
2961 * i915 will use the value in this field as configuration
2962 * identifier to decide what data to write into config_ptr.
2963 */
2964 __u64 config;
2965
2966 /*
2967 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
2968 * i915 will use the value in this field as configuration
2969 * identifier to decide what data to write into config_ptr.
2970 *
2971 * String formatted like "%08x-%04x-%04x-%04x-%012x"
2972 */
2973 char uuid[36];
2974 };
2975
2976 /*
2977 * Unused for now. Must be cleared to zero.
2978 */
2979 __u32 flags;
2980
2981 /*
2982 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
2983 * write an array of __u64 of configuration identifiers.
2984 *
2985 * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID (or
2986 * _FOR_ID), i915 will write a struct drm_i915_perf_oa_config. If the
2987 * following fields of drm_i915_perf_oa_config are not set to 0, i915 will
2988 * write into the associated pointers the values that were submitted when
2989 * the configuration was created:
2990 *
2991 * - n_mux_regs
2992 * - n_boolean_regs
2993 * - n_flex_regs
2994 */
2995 __u8 data[];
2996};
2997
2998/**
2999 * enum drm_i915_gem_memory_class - Supported memory classes
3000 */
3001enum drm_i915_gem_memory_class {
3002 /** @I915_MEMORY_CLASS_SYSTEM: System memory */
3003 I915_MEMORY_CLASS_SYSTEM = 0,
3004 /** @I915_MEMORY_CLASS_DEVICE: Device local-memory */
3005 I915_MEMORY_CLASS_DEVICE,
3006};
3007
3008/**
3009 * struct drm_i915_gem_memory_class_instance - Identify particular memory region
3010 */
3011struct drm_i915_gem_memory_class_instance {
3012 /** @memory_class: See enum drm_i915_gem_memory_class */
3013 __u16 memory_class;
3014
3015 /** @memory_instance: Which instance */
3016 __u16 memory_instance;
3017};
3018
3019/**
3020 * struct drm_i915_memory_region_info - Describes one region as known to the
3021 * driver.
3022 *
3023 * Note that we reserve some stuff here for potential future work. As an example
3024 * we might want to expose the capabilities for a given region, which could
3025 * include things like whether the region is CPU mappable/accessible, what the
3026 * supported mapping types are, etc.
3027 *
3028 * Note that to extend struct drm_i915_memory_region_info and struct
3029 * drm_i915_query_memory_regions in the future the plan is to do the following:
3030 *
3031 * .. code-block:: C
3032 *
3033 * struct drm_i915_memory_region_info {
3034 * struct drm_i915_gem_memory_class_instance region;
3035 * union {
3036 * __u32 rsvd0;
3037 * __u32 new_thing1;
3038 * };
3039 * ...
3040 * union {
3041 * __u64 rsvd1[8];
3042 * struct {
3043 * __u64 new_thing2;
3044 * __u64 new_thing3;
3045 * ...
3046 * };
3047 * };
3048 * };
3049 *
3050 * With this, things should remain source compatible between versions for
3051 * userspace, even as we add new fields.
3052 *
3053 * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
3054 * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
3055 * at &drm_i915_query_item.query_id.
3056 */
3057struct drm_i915_memory_region_info {
3058 /** @region: The class:instance pair encoding */
3059 struct drm_i915_gem_memory_class_instance region;
3060
3061 /** @rsvd0: MBZ */
3062 __u32 rsvd0;
3063
3064 /** @probed_size: Memory probed by the driver (-1 = unknown) */
3065 __u64 probed_size;
3066
3067 /** @unallocated_size: Estimate of memory remaining (-1 = unknown) */
3068 __u64 unallocated_size;
3069
3070 /** @rsvd1: MBZ */
3071 __u64 rsvd1[8];
3072};
3073
3074/**
3075 * struct drm_i915_query_memory_regions
3076 *
3077 * The region info query enumerates all regions known to the driver by filling
3078 * in an array of struct drm_i915_memory_region_info structures.
3079 *
3080 * Example for getting the list of supported regions:
3081 *
3082 * .. code-block:: C
3083 *
3084 * struct drm_i915_query_memory_regions *info;
3085 * struct drm_i915_query_item item = {
3086 * .query_id = DRM_I915_QUERY_MEMORY_REGIONS,
3087 * };
3088 * struct drm_i915_query query = {
3089 * .num_items = 1,
3090 * .items_ptr = (uintptr_t)&item,
3091 * };
3092 * int err, i;
3093 *
3094 * // First query the size of the blob we need, this needs to be large
3095 * // enough to hold our array of regions. The kernel will fill out the
3096 * // item.length for us, which is the number of bytes we need.
3097 * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
3098 * if (err) ...
3099 *
3100 * info = calloc(1, item.length);
3101 * // Now that we allocated the required number of bytes, we call the ioctl
3102 * // again, this time with the data_ptr pointing to our newly allocated
3103 * // blob, which the kernel can then populate with the all the region info.
3104 * item.data_ptr = (uintptr_t)&info,
3105 *
3106 * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
3107 * if (err) ...
3108 *
3109 * // We can now access each region in the array
3110 * for (i = 0; i < info->num_regions; i++) {
3111 * struct drm_i915_memory_region_info mr = info->regions[i];
3112 * u16 class = mr.region.class;
3113 * u16 instance = mr.region.instance;
3114 *
3115 * ....
3116 * }
3117 *
3118 * free(info);
3119 */
struct drm_i915_query_memory_regions {
	/** @num_regions: Number of supported regions */
	__u32 num_regions;

	/** @rsvd: MBZ */
	__u32 rsvd[3];

	/** @regions: Info about each supported region */
	struct drm_i915_memory_region_info regions[];
};
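
/*
 * Example (illustrative sketch, not part of the uAPI contract): scanning the
 * query results for a device local-memory region, which can then be fed to
 * the I915_GEM_CREATE_EXT_MEMORY_REGIONS extension below. This assumes info
 * was filled in as shown in the example above; names are hypothetical.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_memory_class_instance lmem;
 *	bool found = false;
 *
 *	for (i = 0; i < info->num_regions; i++) {
 *		if (info->regions[i].region.memory_class ==
 *		    I915_MEMORY_CLASS_DEVICE) {
 *			lmem = info->regions[i].region;
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	if (!found)
 *		// fall back to I915_MEMORY_CLASS_SYSTEM ...
 */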

/**
 * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
 * extension support using struct i915_user_extension.
 *
 * Note that in the future we want to have our buffer flags here, at least for
 * the stuff that is immutable. Previously we would have two ioctls, one to
 * create the object with gem_create, and another to apply various parameters;
 * however, this creates some ambiguity for the params which are considered
 * immutable. Also, in general we're phasing out the various SET/GET ioctls.
 */
struct drm_i915_gem_create_ext {
	/**
	 * @size: Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 *
	 * Note that for some devices we might have further minimum page-size
	 * restrictions (larger than 4K), like for device local-memory.
	 * However, in general the final size here should always reflect any
	 * rounding up, if for example using the
	 * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension to place the object in
	 * device local-memory.
	 */
	__u64 size;
	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;
	/** @flags: MBZ */
	__u32 flags;
	/**
	 * @extensions: The chain of extensions to apply to this object.
	 *
	 * This will be useful in the future when we need to support several
	 * different extensions, and we need to apply more than one when
	 * creating the object. See struct i915_user_extension.
	 *
	 * If we don't supply any extensions then we get the same old gem_create
	 * behaviour.
	 *
	 * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
	 * struct drm_i915_gem_create_ext_memory_regions.
	 *
	 * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
	 * struct drm_i915_gem_create_ext_protected_content.
	 */
#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
	__u64 extensions;
};
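
/*
 * Example (illustrative sketch, not part of the uAPI contract): creating a
 * plain object with an empty extension chain, which gives the same old
 * gem_create behaviour as described above. Names are hypothetical.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_create_ext create_ext = {
 *		.size = 4096,
 *		.extensions = 0, // empty chain, plain gem_create behaviour
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *	if (err) ...
 *
 *	// create_ext.handle now holds the (nonzero) object handle, and
 *	// create_ext.size the final page-aligned size.
 */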

/**
 * struct drm_i915_gem_create_ext_memory_regions - The
 * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension.
 *
 * Set the object with the desired set of placements/regions in priority
 * order. Each entry must be unique and supported by the device.
 *
 * This is provided as an array of struct drm_i915_gem_memory_class_instance, or
 * an equivalent layout of class:instance pair encodings. See struct
 * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to
 * query the supported regions for a device.
 *
 * As an example, on discrete devices, if we wish to set the placement as
 * device local-memory we can do something like:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_memory_class_instance region_lmem = {
 *		.memory_class = I915_MEMORY_CLASS_DEVICE,
 *		.memory_instance = 0,
 *	};
 *	struct drm_i915_gem_create_ext_memory_regions regions = {
 *		.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
 *		.regions = (uintptr_t)&region_lmem,
 *		.num_regions = 1,
 *	};
 *	struct drm_i915_gem_create_ext create_ext = {
 *		.size = 16 * PAGE_SIZE,
 *		.extensions = (uintptr_t)&regions,
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *	if (err) ...
 *
 * At which point we get the object handle in &drm_i915_gem_create_ext.handle,
 * along with the final object size in &drm_i915_gem_create_ext.size, which
 * should account for any rounding up, if required.
 */
struct drm_i915_gem_create_ext_memory_regions {
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;

	/** @pad: MBZ */
	__u32 pad;
	/** @num_regions: Number of elements in the @regions array. */
	__u32 num_regions;
	/**
	 * @regions: The regions/placements array.
	 *
	 * An array of struct drm_i915_gem_memory_class_instance.
	 */
	__u64 regions;
};
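
/*
 * Example (illustrative sketch, not part of the uAPI contract): providing two
 * placements in priority order, device local-memory first with system memory
 * as the fallback. Names are hypothetical.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_memory_class_instance placements[] = {
 *		{ .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
 *		{ .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
 *	};
 *	struct drm_i915_gem_create_ext_memory_regions regions = {
 *		.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
 *		.regions = (uintptr_t)placements,
 *		.num_regions = 2,
 *	};
 */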

/**
 * struct drm_i915_gem_create_ext_protected_content - The
 * I915_GEM_CREATE_EXT_PROTECTED_CONTENT extension.
 *
 * If this extension is provided, buffer contents are expected to be protected
 * by PXP encryption and require decryption for scan out and processing. This
 * is only possible on platforms that have PXP enabled; in all other scenarios
 * using this extension will cause the ioctl to fail and return -ENODEV. The
 * flags parameter is reserved for future expansion and must currently be set
 * to zero.
 *
 * The buffer contents are considered invalid after a PXP session teardown.
 *
 * The encryption is guaranteed to be processed correctly only if the object
 * is submitted with a context created using the
 * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks
 * at submission time on the validity of the objects involved.
 *
 * Below is an example of how to create a protected object:
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_create_ext_protected_content protected_ext = {
 *		.base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
 *		.flags = 0,
 *	};
 *	struct drm_i915_gem_create_ext create_ext = {
 *		.size = PAGE_SIZE,
 *		.extensions = (uintptr_t)&protected_ext,
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *	if (err) ...
 */
struct drm_i915_gem_create_ext_protected_content {
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;
	/** @flags: reserved for future usage, currently MBZ */
	__u32 flags;
};
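
/*
 * Example (illustrative sketch, not part of the uAPI contract): chaining two
 * extensions on one object creation, combining the memory regions and
 * protected content extensions above. This assumes the usual
 * i915_user_extension linking, where &i915_user_extension.next_extension
 * points at the next entry in the chain; region_lmem is as in the memory
 * regions example, and other names are hypothetical.
 *
 * .. code-block:: C
 *
 *	struct drm_i915_gem_create_ext_protected_content protected_ext = {
 *		.base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
 *	};
 *	struct drm_i915_gem_create_ext_memory_regions regions = {
 *		.base = {
 *			.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
 *			.next_extension = (uintptr_t)&protected_ext,
 *		},
 *		.regions = (uintptr_t)&region_lmem,
 *		.num_regions = 1,
 *	};
 *	struct drm_i915_gem_create_ext create_ext = {
 *		.size = PAGE_SIZE,
 *		.extensions = (uintptr_t)&regions, // head of the chain
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *	if (err) ...
 */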

/* ID of the protected content session managed by i915 when PXP is active */
#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf

#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_I915_DRM_H_ */