/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24


#define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1
#define DRM_VMW_ALLOC_BO 1
#define DRM_VMW_UNREF_DMABUF 2
#define DRM_VMW_HANDLE_CLOSE 2
#define DRM_VMW_CURSOR_BYPASS 3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM 4
#define DRM_VMW_CLAIM_STREAM 5
#define DRM_VMW_UNREF_STREAM 6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT 7
#define DRM_VMW_UNREF_CONTEXT 8
#define DRM_VMW_CREATE_SURFACE 9
#define DRM_VMW_UNREF_SURFACE 10
#define DRM_VMW_REF_SURFACE 11
#define DRM_VMW_EXECBUF 12
#define DRM_VMW_GET_3D_CAP 13
#define DRM_VMW_FENCE_WAIT 14
#define DRM_VMW_FENCE_SIGNALED 15
#define DRM_VMW_FENCE_UNREF 16
#define DRM_VMW_FENCE_EVENT 17
#define DRM_VMW_PRESENT 18
#define DRM_VMW_PRESENT_READBACK 19
#define DRM_VMW_UPDATE_LAYOUT 20
#define DRM_VMW_CREATE_SHADER 21
#define DRM_VMW_UNREF_SHADER 22
#define DRM_VMW_GB_SURFACE_CREATE 23
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
#define DRM_VMW_GB_SURFACE_REF_EXT 28
#define DRM_VMW_MSG 29
#define DRM_VMW_MKSSTAT_RESET 30
#define DRM_VMW_MKSSTAT_ADD 31
#define DRM_VMW_MKSSTAT_REMOVE 32

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Whether the driver supports the overlay ioctl.
 *
 * DRM_VMW_PARAM_SM4_1:
 * SM4_1 support is enabled.
 *
 * DRM_VMW_PARAM_SM5:
 * SM5 support is enabled.
 */

#define DRM_VMW_PARAM_NUM_STREAMS 0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D 2
#define DRM_VMW_PARAM_HW_CAPS 3
#define DRM_VMW_PARAM_FIFO_CAPS 4
#define DRM_VMW_PARAM_MAX_FB_SIZE 5
#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
#define DRM_VMW_PARAM_SCREEN_TARGET 11
#define DRM_VMW_PARAM_DX 12
#define DRM_VMW_PARAM_HW_CAPS2 13
#define DRM_VMW_PARAM_SM4_1 14
#define DRM_VMW_PARAM_SM5 15
#define DRM_VMW_PARAM_GL43 16

/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 *
 */
enum drm_vmw_handle_type {
	DRM_VMW_HANDLE_LEGACY = 0,
	DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
	__u64 value;
	__u32 param;
	__u32 pad64;
};
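
/*
 * Example: querying whether the device accepts 3D commands. This is a
 * minimal sketch, not part of the uapi itself; it assumes libdrm's
 * drmCommandWriteRead() helper and an already opened vmwgfx device
 * file descriptor "fd".
 *
 *	struct drm_vmw_getparam_arg arg = {
 *		.param = DRM_VMW_PARAM_3D,
 *	};
 *	int has_3d = 0;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
 *				&arg, sizeof(arg)) == 0)
 *		has_3d = (arg.value != 0);	// guards the 3D ioctls below
 */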

/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
	__s32 cid;
	__u32 pad64;
};
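
/*
 * Example: creating and later destroying a context. A minimal sketch,
 * assuming libdrm's drmCommandRead()/drmCommandWrite() helpers; the
 * argument is output-only on create and input-only on unref, hence the
 * different data directions.
 *
 *	struct drm_vmw_context_arg ctx_arg = { 0 };
 *
 *	if (drmCommandRead(fd, DRM_VMW_CREATE_CONTEXT,
 *			   &ctx_arg, sizeof(ctx_arg)) == 0) {
 *		// submit commands against ctx_arg.cid via DRM_VMW_EXECBUF
 *		drmCommandWrite(fd, DRM_VMW_UNREF_CONTEXT,
 *				&ctx_arg, sizeof(ctx_arg));
 *	}
 */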

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an __u64 for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
	__u32 flags;
	__u32 format;
	__u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	__u64 size_addr;
	__s32 shareable;
	__s32 scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
	__s32 sid;
	enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width: mip level width
 * @height: mip level height
 * @depth: mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
	__u32 width;
	__u32 height;
	__u32 depth;
	__u32 pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};
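
/*
 * Example: creating a simple 2D surface with one face and one mip level.
 * A minimal sketch, assuming libdrm's drmCommandWriteRead(); "format"
 * stands for a device-defined SVGA3D format value taken from the device
 * headers, not from this file.
 *
 *	struct drm_vmw_size size = { .width = 64, .height = 64, .depth = 1 };
 *	union drm_vmw_surface_create_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.format = format;		// device-specific SVGA3D format
 *	arg.req.mip_levels[0] = 1;		// one face, one mip level
 *	arg.req.size_addr = (__u64)(unsigned long)&size;
 *	arg.req.scanout = 1;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE,
 *				&arg, sizeof(arg)) == 0) {
 *		// arg.rep.sid can now be used in the command stream
 *	}
 */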

/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an __u64.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * __u64.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags.
 * @context_handle: Handle of the DX context to submit against, when an
 * extended context is used.
 * @imported_fence_fd: FD for a fence imported from another device.
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)

struct drm_vmw_execbuf_arg {
	__u64 commands;
	__u32 command_size;
	__u32 throttle_us;
	__u64 fence_rep;
	__u32 version;
	__u32 flags;
	__u32 context_handle;
	__s32 imported_fence_fd;
};

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @fd: FD associated with the fence, -1 if not exported.
 * @error: This member should be set to -EFAULT by user-space on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the @seqno member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the @seqno member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
	__u32 handle;
	__u32 mask;
	__u32 seqno;
	__u32 passed_seqno;
	__s32 fd;
	__s32 error;
};
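
/*
 * Example: submitting a raw command buffer and picking up the returned
 * fence. A minimal sketch, assuming libdrm's drmCommandWrite(); "cmd"
 * and "cmd_size" stand for an already encoded SVGA command stream.
 *
 *	struct drm_vmw_fence_rep fence_rep = { .error = -EFAULT };
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (__u64)(unsigned long)cmd,
 *		.command_size = cmd_size,
 *		.fence_rep = (__u64)(unsigned long)&fence_rep,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *		.imported_fence_fd = -1,	// no imported fence
 *	};
 *
 *	if (drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)) == 0 &&
 *	    fence_rep.error == 0) {
 *		// fence_rep.handle identifies the fence object; pass it to
 *		// DRM_VMW_FENCE_WAIT / DRM_VMW_FENCE_SIGNALED below.
 *	}
 */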

/*************************************************************************/
/**
 * DRM_VMW_ALLOC_BO
 *
 * Allocate a buffer object that is visible also to the host.
 * NOTE: The buffer is identified by a handle and an offset, which are
 * private to the guest, but usable in the command stream. The guest kernel
 * may translate these and patch up the command stream accordingly. In the
 * future, the offset may be zero at all times, or it may disappear from the
 * interface before it is fixed.
 *
 * The buffer object may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * Buffer objects are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_bo_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_alloc_bo_req {
	__u32 size;
	__u32 pad64;
};
#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req

/**
 * struct drm_vmw_bo_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_bo_rep {
	__u64 map_handle;
	__u32 handle;
	__u32 cur_gmr_id;
	__u32 cur_gmr_offset;
	__u32 pad64;
};
#define drm_vmw_dmabuf_rep drm_vmw_bo_rep

/**
 * union drm_vmw_alloc_bo_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_BO Ioctl.
 */

union drm_vmw_alloc_bo_arg {
	struct drm_vmw_alloc_bo_req req;
	struct drm_vmw_bo_rep rep;
};
#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
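
/*
 * Example: allocating a buffer object and mapping it into user space.
 * A minimal sketch, assuming libdrm's drmCommandWriteRead() and mmap(2)
 * on the same vmwgfx file descriptor.
 *
 *	union drm_vmw_alloc_bo_arg arg;
 *	void *ptr = MAP_FAILED;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.size = 65536;
 *	if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_BO,
 *				&arg, sizeof(arg)) == 0)
 *		ptr = mmap(NULL, 65536, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   fd, arg.rep.map_handle);
 *
 *	// arg.rep.handle is later passed to DRM_VMW_HANDLE_CLOSE to release
 *	// the buffer; arg.rep.cur_gmr_id / cur_gmr_offset are what the
 *	// command stream references.
 */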

/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
	__s32 x;
	__s32 y;
	__u32 w;
	__u32 h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control.
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the two last are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
	__u32 stream_id;
	__u32 enabled;

	__u32 flags;
	__u32 color_key;

	__u32 handle;
	__u32 offset;
	__s32 format;
	__u32 size;
	__u32 width;
	__u32 height;
	__u32 pitch[3];

	__u32 pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 *
 */

#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
	__u32 flags;
	__u32 crtc_id;
	__s32 xpos;
	__s32 ypos;
	__s32 xhot;
	__s32 yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
	__u32 stream_id;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO
 *
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an __u64
 * @max_size: Max size to copy
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
	__u64 buffer;
	__u32 max_size;
	__u32 pad64;
};
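
/*
 * Example: reading the raw 3D capability block. A minimal sketch, assuming
 * libdrm's drmCommandWrite()/drmCommandWriteRead(); the required buffer
 * size is first queried through DRM_VMW_PARAM_3D_CAPS_SIZE.
 *
 *	struct drm_vmw_getparam_arg gp = {
 *		.param = DRM_VMW_PARAM_3D_CAPS_SIZE,
 *	};
 *	struct drm_vmw_get_3d_cap_arg cap = { 0 };
 *	void *caps;
 *
 *	drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &gp, sizeof(gp));
 *	caps = calloc(1, gp.value);		// capability buffer
 *	cap.buffer = (__u64)(unsigned long)caps;
 *	cap.max_size = gp.value;
 *	drmCommandWrite(fd, DRM_VMW_GET_3D_CAP, &cap, sizeof(cap));
 */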

/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the interrupt. The wait may timeout,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is restarting without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
	__u32 handle;
	__s32 cookie_valid;
	__u64 kernel_cookie;
	__u64 timeout_us;
	__s32 lazy;
	__s32 flags;
	__s32 wait_options;
	__s32 pad64;
};
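
/*
 * Example: waiting for a fence returned by DRM_VMW_EXECBUF to signal
 * command execution and letting the kernel drop the reference afterwards.
 * A minimal sketch, assuming libdrm's drmCommandWriteRead(); "fence_handle"
 * stands for the handle from struct drm_vmw_fence_rep.
 *
 *	struct drm_vmw_fence_wait_arg arg = {
 *		.handle = fence_handle,
 *		.timeout_us = 0,		// wait indefinitely
 *		.lazy = 1,			// timing is not critical
 *		.flags = DRM_VMW_FENCE_FLAG_EXEC,
 *		.wait_options = DRM_VMW_WAIT_OPTION_UNREF,
 *	};
 *
 *	// On restart (e.g. after a signal), re-issue the ioctl without
 *	// clearing cookie_valid so the original timeout still applies.
 *	drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT, &arg, sizeof(arg));
 */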

/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl.
 * @signaled: Out: Whether the fence has signaled.
 * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 * @signaled_flags: Out: Flags signaled.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED ioctl.
 */

struct drm_vmw_fence_signaled_arg {
	__u32 handle;
	__u32 flags;
	__s32 signaled;
	__u32 passed_seqno;
	__u32 signaled_flags;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 *
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
	__u32 handle;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: Info to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
	__u64 fence_rep;
	__u64 user_data;
	__u32 handle;
	__u32 flags;
};
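
/*
 * Example: queueing a fence event and picking it up from the DRM file
 * descriptor. A minimal sketch, assuming libdrm's drmCommandWriteRead()
 * and the usual DRM event read loop (events arrive as struct drm_event
 * records on read(2)); "fence_handle" and "my_cookie" are assumed to be
 * provided by the caller.
 *
 *	struct drm_vmw_fence_event_arg arg = {
 *		.handle = fence_handle,
 *		.user_data = my_cookie,
 *		.flags = DRM_VMW_FE_FLAG_REQ_TIME,
 *	};
 *	char buf[1024];
 *	ssize_t len;
 *
 *	drmCommandWriteRead(fd, DRM_VMW_FENCE_EVENT, &arg, sizeof(arg));
 *
 *	len = read(fd, buf, sizeof(buf));	// typically after poll(2)
 *	for (ssize_t off = 0; off < len; ) {
 *		struct drm_event *e = (struct drm_event *)&buf[off];
 *
 *		if (e->type == DRM_VMW_EVENT_FENCE_SIGNALED) {
 *			struct drm_vmw_event_fence *fe =
 *				(struct drm_vmw_event_fence *)e;
 *			// fe->user_data carries my_cookie; fe->tv_sec and
 *			// fe->tv_usec give the approximate signal time.
 *		}
 *		off += e->length;
 *	}
 */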

/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 *
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
	__u32 fb_id;
	__u32 sid;
	__s32 dest_x;
	__s32 dest_y;
	__u64 clips_ptr;
	__u32 num_clips;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 *
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
 * If this member is NULL, then the ioctl should not return a fence.
 */

struct drm_vmw_present_readback_arg {
	__u32 fb_id;
	__u32 num_clips;
	__u64 clips_ptr;
	__u64 fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an __u64
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
	__u32 num_outputs;
	__u32 pad64;
	__u64 rects;
};
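
/*
 * Example: telling the kernel about a preferred two-monitor layout.
 * A minimal sketch, assuming libdrm's drmCommandWrite(); this ioctl is
 * normally issued by the display server that holds DRM master.
 *
 *	struct drm_vmw_rect rects[2] = {
 *		{ .x = 0,    .y = 0, .w = 1920, .h = 1080 },
 *		{ .x = 1920, .y = 0, .w = 1280, .h = 1024 },
 *	};
 *	struct drm_vmw_update_layout_arg arg = {
 *		.num_outputs = 2,
 *		.rects = (__u64)(unsigned long)rects,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg));
 */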


/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code.
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	enum drm_vmw_shader_type shader_type;
	__u32 size;
	__u32 buffer_handle;
	__u32 shader_handle;
	__u64 offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * it.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	__u32 handle;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable.
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 * @drm_vmw_surface_flag_coherent: Back surface with coherent memory.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2),
	drm_vmw_surface_flag_coherent = (1 << 3),
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_levels: Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter: Future use. Set to 0.
 * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @base_size: Size of the base mip level for all faces.
 * @array_size: Must be zero for non-DX hardware, and if non-zero
 * svga3d_flags must have proper bind flags setup.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	__u32 svga3d_flags;
	__u32 format;
	__u32 mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	__u32 multisample_count;
	__u32 autogen_filter;
	__u32 buffer_handle;
	__u32 array_size;
	struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle.
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	__u32 handle;
	__u32 backup_size;
	__u32 buffer_handle;
	__u32 buffer_size;
	__u64 buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};
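
/*
 * Example: creating a guest-backed surface together with its backup
 * buffer. A minimal sketch, assuming libdrm's drmCommandWriteRead();
 * "svga3d_flags", "format" and SVGA3D_INVALID_ID stand for device-defined
 * SVGA3D values taken from the device headers, not from this file.
 *
 *	union drm_vmw_gb_surface_create_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.svga3d_flags = svga3d_flags;
 *	arg.req.format = format;
 *	arg.req.mip_levels = 1;
 *	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
 *	arg.req.buffer_handle = SVGA3D_INVALID_ID;	// kernel allocates one
 *	arg.req.base_size.width = 256;
 *	arg.req.base_size.height = 256;
 *	arg.req.base_size.depth = 1;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
 *				&arg, sizeof(arg)) == 0) {
 *		// arg.rep.handle names the surface; arg.rep.buffer_handle and
 *		// arg.rep.buffer_map_handle describe the backup buffer object.
 *	}
 */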

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};


/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * for read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations.
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags as described above.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	__u32 handle;
	__u32 pad64;
};
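
/*
 * Example: bracketing direct CPU writes to a previously mmap()ed buffer
 * object. A minimal sketch, assuming libdrm's drmCommandWrite(); "bo_handle",
 * "ptr" and "size" stand for a handle and mapping obtained earlier from
 * DRM_VMW_ALLOC_BO and mmap(2).
 *
 *	struct drm_vmw_synccpu_arg arg = {
 *		.op = drm_vmw_synccpu_grab,
 *		.flags = drm_vmw_synccpu_write,
 *		.handle = bo_handle,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *	memset(ptr, 0, size);			// CPU access to the mapping
 *	arg.op = drm_vmw_synccpu_release;
 *	drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 */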

/*************************************************************************/
/**
 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
enum drm_vmw_extended_context {
	drm_vmw_context_legacy,
	drm_vmw_context_dx
};

/**
 * union drm_vmw_extended_context_arg
 *
 * @req: Context type.
 * @rep: Context identifier.
 *
 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
 */
union drm_vmw_extended_context_arg {
	enum drm_vmw_extended_context req;
	struct drm_vmw_context_arg rep;
};

/*************************************************************************/
/*
 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
 * underlying resource.
 *
 * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
 * Ioctl.
 */

/**
 * struct drm_vmw_handle_close_arg
 *
 * @handle: Handle to close.
 *
 * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
 */
struct drm_vmw_handle_close_arg {
	__u32 handle;
	__u32 pad64;
};
#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 *
 * This new command extends DRM_VMW_GB_SURFACE_CREATE by adding a version
 * parameter and 64-bit svga flags.
 */

/**
 * enum drm_vmw_surface_version
 *
 * @drm_vmw_gb_surface_v1: Corresponds to current gb surface format with
 * svga3d surface flags split into 2, upper half and lower half.
 */
enum drm_vmw_surface_version {
	drm_vmw_gb_surface_v1,
};

/**
 * struct drm_vmw_gb_surface_create_ext_req
 *
 * @base: Surface create parameters.
 * @version: Version of surface create ioctl.
 * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
 * @multisample_pattern: Multisampling pattern when msaa is supported.
 * @quality_level: Precision settings for each sample.
 * @buffer_byte_stride: Buffer byte stride.
 * @must_be_zero: Reserved for future usage.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
struct drm_vmw_gb_surface_create_ext_req {
	struct drm_vmw_gb_surface_create_req base;
	enum drm_vmw_surface_version version;
	__u32 svga3d_flags_upper_32_bits;
	__u32 multisample_pattern;
	__u32 quality_level;
	__u32 buffer_byte_stride;
	__u32 must_be_zero;
};

/**
 * union drm_vmw_gb_surface_create_ext_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */
union drm_vmw_gb_surface_create_ext_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_ext_req req;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_ext_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_ext_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
 */
struct drm_vmw_gb_surface_ref_ext_rep {
	struct drm_vmw_gb_surface_create_ext_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_ext_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at
 * "struct drm_vmw_gb_surface_ref_ext_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
union drm_vmw_gb_surface_reference_ext_arg {
	struct drm_vmw_gb_surface_ref_ext_rep rep;
	struct drm_vmw_surface_arg req;
};

/**
 * struct drm_vmw_msg_arg
 *
 * @send: Pointer to user-space msg string (null terminated).
 * @receive: Pointer to user-space receive buffer.
 * @send_only: Boolean whether this is only sending or receiving too.
 *
 * Argument to the DRM_VMW_MSG ioctl.
 */
struct drm_vmw_msg_arg {
	__u64 send;
	__u64 receive;
	__s32 send_only;
	__u32 receive_len;
};

/**
 * struct drm_vmw_mksstat_add_arg
 *
 * @stat: Pointer to user-space stat-counters array, page-aligned.
 * @info: Pointer to user-space counter-infos array, page-aligned.
 * @strs: Pointer to user-space stat strings, page-aligned.
 * @stat_len: Length in bytes of stat-counters array.
 * @info_len: Length in bytes of counter-infos array.
 * @strs_len: Length in bytes of the stat strings, terminators included.
 * @description: Pointer to instance descriptor string; will be truncated
 * to MKS_GUEST_STAT_INSTANCE_DESC_LENGTH chars.
 * @id: Output identifier of the produced record; -1 if error.
 *
 * Argument to the DRM_VMW_MKSSTAT_ADD ioctl.
 */
struct drm_vmw_mksstat_add_arg {
	__u64 stat;
	__u64 info;
	__u64 strs;
	__u64 stat_len;
	__u64 info_len;
	__u64 strs_len;
	__u64 description;
	__u64 id;
};

/**
 * struct drm_vmw_mksstat_remove_arg
 *
 * @id: Identifier of the record being disposed, originally obtained through
 * DRM_VMW_MKSSTAT_ADD ioctl.
 *
 * Argument to the DRM_VMW_MKSSTAT_REMOVE ioctl.
 */
struct drm_vmw_mksstat_remove_arg {
	__u64 id;
};

#if defined(__cplusplus)
}
#endif

#endif