/* SPDX-License-Identifier: MIT */
/* Copyright (C) 2023 Collabora ltd. */
#ifndef _PANTHOR_DRM_H_
#define _PANTHOR_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/**
 * DOC: Introduction
 *
 * This documentation describes the Panthor IOCTLs.
 *
 * Just a few generic rules about the data passed to the Panthor IOCTLs:
 *
 * - Structures must be aligned on 64-bit/8-byte. If the object is not
 *   naturally aligned, a padding field must be added.
 * - Fields must be explicitly aligned to their natural type alignment with
 *   pad[0..N] fields.
 * - All padding fields will be checked by the driver to make sure they are
 *   zeroed.
 * - Flags can be added, but not removed/replaced.
 * - New fields can be added to the main structures (the structures
 *   directly passed to the ioctl). Those fields can be added at the end of
 *   the structure, or replace existing padding fields. Any new field being
 *   added must preserve the behavior that existed before those fields were
 *   added when a value of zero is passed.
 * - New fields can be added to indirect objects (objects pointed by the
 *   main structure), iff those objects are passed a size to reflect the
 *   size known by the userspace driver (see drm_panthor_obj_array::stride
 *   or drm_panthor_dev_query::size).
 * - If the kernel driver is too old to know some fields, those will be
 *   ignored if zero, and otherwise rejected (and so will be zero on output).
 * - If userspace is too old to know some fields, those will be zeroed
 *   (input) before the structure is parsed by the kernel driver.
 * - Each new flag/field addition must come with a driver version update so
 *   the userspace driver doesn't have to trial and error to know which
 *   flags are supported.
 * - Structures should not contain unions, as this would defeat the
 *   extensibility of such structures.
 * - IOCTLs can't be removed or replaced. New IOCTL IDs should be placed
 *   at the end of the drm_panthor_ioctl_id enum.
 */

/**
 * DOC: MMIO regions exposed to userspace.
 *
 * .. c:macro:: DRM_PANTHOR_USER_MMIO_OFFSET
 *
 * File offset for all MMIO regions being exposed to userspace. Don't use
 * this value directly, use DRM_PANTHOR_USER_<name>_OFFSET values instead.
 * pgoffset passed to mmap2() is an unsigned long, which forces us to use a
 * different offset on 32-bit and 64-bit systems.
 *
 * .. c:macro:: DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET
 *
 * File offset for the LATEST_FLUSH_ID register. The userspace driver controls
 * GPU cache flushing through CS instructions, but the flush reduction
 * mechanism requires a flush_id. This flush_id could be queried with an
 * ioctl, but Arm provides a well-isolated register page containing only this
 * read-only register, so let's expose this page through a static mmap offset
 * and allow direct mapping of this MMIO region so we can avoid the
 * user <-> kernel round-trip.
 */
#define DRM_PANTHOR_USER_MMIO_OFFSET_32BIT	(1ull << 43)
#define DRM_PANTHOR_USER_MMIO_OFFSET_64BIT	(1ull << 56)
#define DRM_PANTHOR_USER_MMIO_OFFSET		(sizeof(unsigned long) < 8 ? \
						 DRM_PANTHOR_USER_MMIO_OFFSET_32BIT : \
						 DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
#define DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET	(DRM_PANTHOR_USER_MMIO_OFFSET | 0)

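/*
 * Illustrative example (not part of the UAPI itself): mapping the flush ID
 * page and reading LATEST_FLUSH_ID directly from userspace. "fd" is assumed
 * to be an open Panthor DRM file descriptor, a 4K page size is assumed, and
 * error handling is omitted.
 *
 *    #include <stdint.h>
 *    #include <sys/mman.h>
 *
 *    volatile uint32_t *flush_id_page =
 *            mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd,
 *                 DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET);
 *    uint32_t latest_flush = flush_id_page[0];
 */
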
/**
 * DOC: IOCTL IDs
 *
 * enum drm_panthor_ioctl_id - IOCTL IDs
 *
 * Place new ioctls at the end, don't re-order, don't replace or remove entries.
 *
 * These IDs are not meant to be used directly. Use the DRM_IOCTL_PANTHOR_xxx
 * definitions instead.
 */
enum drm_panthor_ioctl_id {
	/** @DRM_PANTHOR_DEV_QUERY: Query device information. */
	DRM_PANTHOR_DEV_QUERY = 0,

	/** @DRM_PANTHOR_VM_CREATE: Create a VM. */
	DRM_PANTHOR_VM_CREATE,

	/** @DRM_PANTHOR_VM_DESTROY: Destroy a VM. */
	DRM_PANTHOR_VM_DESTROY,

	/** @DRM_PANTHOR_VM_BIND: Bind/unbind memory to a VM. */
	DRM_PANTHOR_VM_BIND,

	/** @DRM_PANTHOR_VM_GET_STATE: Get VM state. */
	DRM_PANTHOR_VM_GET_STATE,

	/** @DRM_PANTHOR_BO_CREATE: Create a buffer object. */
	DRM_PANTHOR_BO_CREATE,

	/**
	 * @DRM_PANTHOR_BO_MMAP_OFFSET: Get the file offset to pass to
	 * mmap to map a GEM object.
	 */
	DRM_PANTHOR_BO_MMAP_OFFSET,

	/** @DRM_PANTHOR_GROUP_CREATE: Create a scheduling group. */
	DRM_PANTHOR_GROUP_CREATE,

	/** @DRM_PANTHOR_GROUP_DESTROY: Destroy a scheduling group. */
	DRM_PANTHOR_GROUP_DESTROY,

	/**
	 * @DRM_PANTHOR_GROUP_SUBMIT: Submit jobs to queues belonging
	 * to a specific scheduling group.
	 */
	DRM_PANTHOR_GROUP_SUBMIT,

	/** @DRM_PANTHOR_GROUP_GET_STATE: Get the state of a scheduling group. */
	DRM_PANTHOR_GROUP_GET_STATE,

	/** @DRM_PANTHOR_TILER_HEAP_CREATE: Create a tiler heap. */
	DRM_PANTHOR_TILER_HEAP_CREATE,

	/** @DRM_PANTHOR_TILER_HEAP_DESTROY: Destroy a tiler heap. */
	DRM_PANTHOR_TILER_HEAP_DESTROY,

	/** @DRM_PANTHOR_BO_SET_LABEL: Label a BO. */
	DRM_PANTHOR_BO_SET_LABEL,

	/**
	 * @DRM_PANTHOR_SET_USER_MMIO_OFFSET: Set the offset to use as the user MMIO offset.
	 *
	 * The default behavior is to pick the MMIO offset based on the size of the pgoff_t
	 * type seen by the process that manipulates the FD, such that a 32-bit process can
	 * always map the user MMIO ranges. But this approach doesn't work well for emulators
	 * like FEX, where the emulator is a 64-bit binary which might be executing 32-bit
	 * code. In that case, the kernel thinks it is dealing with a 64-bit process and
	 * assumes DRM_PANTHOR_USER_MMIO_OFFSET_64BIT is in use, but the UMD library expects
	 * DRM_PANTHOR_USER_MMIO_OFFSET_32BIT, because it can't mmap() anything beyond what
	 * its pgoff_t can address.
	 */
	DRM_PANTHOR_SET_USER_MMIO_OFFSET,
};

/**
 * DOC: IOCTL arguments
 */

/**
 * struct drm_panthor_obj_array - Object array.
 *
 * This object is used to pass an array of objects whose size is subject to change in
 * future versions of the driver. In order to support this mutability, we pass a stride
 * describing the size of the object as known by userspace.
 *
 * You shouldn't fill drm_panthor_obj_array fields directly. You should instead use
 * the DRM_PANTHOR_OBJ_ARRAY() macro that takes care of initializing the stride to
 * the object size.
 */
struct drm_panthor_obj_array {
	/** @stride: Stride of object struct. Used for versioning. */
	__u32 stride;

	/** @count: Number of objects in the array. */
	__u32 count;

	/** @array: User pointer to an array of objects. */
	__u64 array;
};

/**
 * DRM_PANTHOR_OBJ_ARRAY() - Initialize a drm_panthor_obj_array field.
 * @cnt: Number of elements in the array.
 * @ptr: Pointer to the array to pass to the kernel.
 *
 * Macro initializing a drm_panthor_obj_array based on the object size as known
 * by userspace.
 */
#define DRM_PANTHOR_OBJ_ARRAY(cnt, ptr) \
	{ .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }

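/*
 * Illustrative example (not part of the UAPI itself): using
 * DRM_PANTHOR_OBJ_ARRAY() to describe an array of two struct
 * drm_panthor_sync_op elements. "wait_syncobj" and "signal_syncobj" are
 * assumed to be syncobj handles created beforehand with
 * DRM_IOCTL_SYNCOBJ_CREATE.
 *
 *    struct drm_panthor_sync_op syncs[2] = {
 *            {
 *                    .flags = DRM_PANTHOR_SYNC_OP_WAIT |
 *                             DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ,
 *                    .handle = wait_syncobj,
 *            },
 *            {
 *                    .flags = DRM_PANTHOR_SYNC_OP_SIGNAL |
 *                             DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ,
 *                    .handle = signal_syncobj,
 *            },
 *    };
 *    struct drm_panthor_obj_array sync_array = DRM_PANTHOR_OBJ_ARRAY(2, syncs);
 */
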
/**
 * enum drm_panthor_sync_op_flags - Synchronization operation flags.
 */
enum drm_panthor_sync_op_flags {
	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK: Synchronization handle type mask. */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK = 0xff,

	/** @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ: Synchronization object type. */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ = 0,

	/**
	 * @DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ: Timeline synchronization
	 * object type.
	 */
	DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ = 1,

	/** @DRM_PANTHOR_SYNC_OP_WAIT: Wait operation. */
	DRM_PANTHOR_SYNC_OP_WAIT = 0 << 31,

	/** @DRM_PANTHOR_SYNC_OP_SIGNAL: Signal operation. */
	DRM_PANTHOR_SYNC_OP_SIGNAL = (int)(1u << 31),
};

/**
 * struct drm_panthor_sync_op - Synchronization operation.
 */
struct drm_panthor_sync_op {
	/** @flags: Synchronization operation flags. Combination of DRM_PANTHOR_SYNC_OP values. */
	__u32 flags;

	/** @handle: Sync handle. */
	__u32 handle;

	/**
	 * @timeline_value: MBZ if
	 * (flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK) !=
	 * DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ.
	 */
	__u64 timeline_value;
};

/**
 * enum drm_panthor_dev_query_type - Query type
 *
 * Place new types at the end, don't re-order, don't remove or replace.
 */
enum drm_panthor_dev_query_type {
	/** @DRM_PANTHOR_DEV_QUERY_GPU_INFO: Query GPU information. */
	DRM_PANTHOR_DEV_QUERY_GPU_INFO = 0,

	/** @DRM_PANTHOR_DEV_QUERY_CSIF_INFO: Query command-stream interface information. */
	DRM_PANTHOR_DEV_QUERY_CSIF_INFO,

	/** @DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO: Query timestamp information. */
	DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO,

	/**
	 * @DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO: Query allowed group priorities information.
	 */
	DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO,
};

/**
 * struct drm_panthor_gpu_info - GPU information
 *
 * Structure grouping all queryable information relating to the GPU.
 */
struct drm_panthor_gpu_info {
	/** @gpu_id: GPU ID. */
	__u32 gpu_id;
#define DRM_PANTHOR_ARCH_MAJOR(x)		((x) >> 28)
#define DRM_PANTHOR_ARCH_MINOR(x)		(((x) >> 24) & 0xf)
#define DRM_PANTHOR_ARCH_REV(x)			(((x) >> 20) & 0xf)
#define DRM_PANTHOR_PRODUCT_MAJOR(x)		(((x) >> 16) & 0xf)
#define DRM_PANTHOR_VERSION_MAJOR(x)		(((x) >> 12) & 0xf)
#define DRM_PANTHOR_VERSION_MINOR(x)		(((x) >> 4) & 0xff)
#define DRM_PANTHOR_VERSION_STATUS(x)		((x) & 0xf)

	/** @gpu_rev: GPU revision. */
	__u32 gpu_rev;

	/** @csf_id: Command stream frontend ID. */
	__u32 csf_id;
#define DRM_PANTHOR_CSHW_MAJOR(x)		(((x) >> 26) & 0x3f)
#define DRM_PANTHOR_CSHW_MINOR(x)		(((x) >> 20) & 0x3f)
#define DRM_PANTHOR_CSHW_REV(x)			(((x) >> 16) & 0xf)
#define DRM_PANTHOR_MCU_MAJOR(x)		(((x) >> 10) & 0x3f)
#define DRM_PANTHOR_MCU_MINOR(x)		(((x) >> 4) & 0x3f)
#define DRM_PANTHOR_MCU_REV(x)			((x) & 0xf)

	/** @l2_features: L2-cache features. */
	__u32 l2_features;

	/** @tiler_features: Tiler features. */
	__u32 tiler_features;

	/** @mem_features: Memory features. */
	__u32 mem_features;

	/** @mmu_features: MMU features. */
	__u32 mmu_features;
#define DRM_PANTHOR_MMU_VA_BITS(x)		((x) & 0xff)

	/** @thread_features: Thread features. */
	__u32 thread_features;

	/** @max_threads: Maximum number of threads. */
	__u32 max_threads;

	/** @thread_max_workgroup_size: Maximum workgroup size. */
	__u32 thread_max_workgroup_size;

	/**
	 * @thread_max_barrier_size: Maximum number of threads that can wait
	 * simultaneously on a barrier.
	 */
	__u32 thread_max_barrier_size;

	/** @coherency_features: Coherency features. */
	__u32 coherency_features;

	/** @texture_features: Texture features. */
	__u32 texture_features[4];

	/** @as_present: Bitmask encoding the number of address spaces exposed by the MMU. */
	__u32 as_present;

	/** @pad0: MBZ. */
	__u32 pad0;

	/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
	__u64 shader_present;

	/** @l2_present: Bitmask encoding the L2 caches exposed by the GPU. */
	__u64 l2_present;

	/** @tiler_present: Bitmask encoding the tiler units exposed by the GPU. */
	__u64 tiler_present;

	/** @core_features: Used to discriminate core variants when they exist. */
	__u32 core_features;

	/** @pad: MBZ. */
	__u32 pad;

	/** @gpu_features: Bitmask describing supported GPU-wide features. */
	__u64 gpu_features;
};

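/*
 * Illustrative example (not part of the UAPI itself): decoding the GPU ID and
 * MMU virtual-address width from a previously queried struct
 * drm_panthor_gpu_info named "gpu_info". printf() is only used for the sake
 * of the example.
 *
 *    unsigned int arch_major = DRM_PANTHOR_ARCH_MAJOR(gpu_info.gpu_id);
 *    unsigned int product_major = DRM_PANTHOR_PRODUCT_MAJOR(gpu_info.gpu_id);
 *    unsigned int va_bits = DRM_PANTHOR_MMU_VA_BITS(gpu_info.mmu_features);
 *
 *    printf("arch %u, product %u, %u VA bits\n",
 *           arch_major, product_major, va_bits);
 */
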
/**
 * struct drm_panthor_csif_info - Command stream interface information
 *
 * Structure grouping all queryable information relating to the command stream interface.
 */
struct drm_panthor_csif_info {
	/** @csg_slot_count: Number of command stream group slots exposed by the firmware. */
	__u32 csg_slot_count;

	/** @cs_slot_count: Number of command stream slots per group. */
	__u32 cs_slot_count;

	/** @cs_reg_count: Number of command stream registers. */
	__u32 cs_reg_count;

	/** @scoreboard_slot_count: Number of scoreboard slots. */
	__u32 scoreboard_slot_count;

	/**
	 * @unpreserved_cs_reg_count: Number of command stream registers reserved by
	 * the kernel driver to call a userspace command stream.
	 *
	 * All registers can be used by a userspace command stream, but the
	 * [cs_reg_count - unpreserved_cs_reg_count .. cs_reg_count] registers are
	 * used by the kernel when DRM_IOCTL_PANTHOR_GROUP_SUBMIT is called.
	 */
	__u32 unpreserved_cs_reg_count;

	/**
	 * @pad: Padding field, set to zero.
	 */
	__u32 pad;
};

/**
 * struct drm_panthor_timestamp_info - Timestamp information
 *
 * Structure grouping all queryable information relating to the GPU timestamp.
 */
struct drm_panthor_timestamp_info {
	/**
	 * @timestamp_frequency: The frequency of the timestamp timer or 0 if
	 * unknown.
	 */
	__u64 timestamp_frequency;

	/** @current_timestamp: The current timestamp. */
	__u64 current_timestamp;

	/** @timestamp_offset: The offset of the timestamp timer. */
	__u64 timestamp_offset;
};

/**
 * struct drm_panthor_group_priorities_info - Group priorities information
 *
 * Structure grouping all queryable information relating to the allowed group priorities.
 */
struct drm_panthor_group_priorities_info {
	/**
	 * @allowed_mask: Bitmask of the allowed group priorities.
	 *
	 * Each bit represents a variant of the enum drm_panthor_group_priority.
	 */
	__u8 allowed_mask;

	/** @pad: Padding fields, MBZ. */
	__u8 pad[3];
};

/**
 * struct drm_panthor_dev_query - Arguments passed to DRM_IOCTL_PANTHOR_DEV_QUERY
 */
struct drm_panthor_dev_query {
	/** @type: the query type (see drm_panthor_dev_query_type). */
	__u32 type;

	/**
	 * @size: size of the type being queried.
	 *
	 * If pointer is NULL, size is updated by the driver to provide the
	 * output structure size. If pointer is not NULL, the driver will
	 * only copy min(size, actual_structure_size) bytes to the pointer,
	 * and update the size accordingly. This allows us to extend query
	 * types without breaking userspace.
	 */
	__u32 size;

	/**
	 * @pointer: user pointer to a query type struct.
	 *
	 * Pointer can be NULL, in which case, nothing is copied, but the
	 * actual structure size is returned. If not NULL, it must point to
	 * a location that's large enough to hold size bytes.
	 */
	__u64 pointer;
};

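/*
 * Illustrative example (not part of the UAPI itself): the two-step query
 * pattern described above, first asking the kernel for the structure size it
 * knows about, then fetching the data. "fd" is assumed to be an open Panthor
 * DRM file descriptor and error handling is omitted.
 *
 *    struct drm_panthor_gpu_info gpu_info = {0};
 *    struct drm_panthor_dev_query query = {
 *            .type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
 *    // query.size now holds the structure size known by the kernel.
 *
 *    query.size = sizeof(gpu_info);
 *    query.pointer = (__u64)(uintptr_t)&gpu_info;
 *    ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
 */
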
/**
 * struct drm_panthor_vm_create - Arguments passed to DRM_IOCTL_PANTHOR_VM_CREATE
 */
struct drm_panthor_vm_create {
	/** @flags: VM flags, MBZ. */
	__u32 flags;

	/** @id: Returned VM ID. */
	__u32 id;

	/**
	 * @user_va_range: Size of the VA space reserved for user objects.
	 *
	 * The kernel will pick the remaining space to map kernel-only objects to the
	 * VM (heap chunks, heap context, ring buffers, kernel synchronization objects,
	 * ...). If the space left for kernel objects is too small, kernel object
	 * allocation will fail further down the road. One can use
	 * drm_panthor_gpu_info::mmu_features to extract the total virtual address
	 * range, and choose a user_va_range that leaves some space to the kernel.
	 *
	 * If user_va_range is zero, the kernel will pick a sensible value based on
	 * TASK_SIZE and the virtual range supported by the GPU MMU (the kernel/user
	 * split should leave enough VA space for userspace processes to support SVM,
	 * while still allowing the kernel to map some amount of kernel objects in
	 * the kernel VA range). The value chosen by the driver will be returned in
	 * @user_va_range.
	 *
	 * User VA space always starts at 0x0, kernel VA space is always placed after
	 * the user VA range.
	 */
	__u64 user_va_range;
};

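/*
 * Illustrative example (not part of the UAPI itself): creating a VM and giving
 * half of the GPU VA range to userspace, based on the MMU features queried
 * earlier into "gpu_info". "fd" is assumed to be an open Panthor DRM file
 * descriptor and error handling is omitted.
 *
 *    __u32 va_bits = DRM_PANTHOR_MMU_VA_BITS(gpu_info.mmu_features);
 *    struct drm_panthor_vm_create vm_create = {
 *            .user_va_range = 1ull << (va_bits - 1),
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_VM_CREATE, &vm_create);
 *    // vm_create.id now holds the VM ID to use in subsequent ioctls.
 */
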
/**
 * struct drm_panthor_vm_destroy - Arguments passed to DRM_IOCTL_PANTHOR_VM_DESTROY
 */
struct drm_panthor_vm_destroy {
	/** @id: ID of the VM to destroy. */
	__u32 id;

	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * enum drm_panthor_vm_bind_op_flags - VM bind operation flags
 */
enum drm_panthor_vm_bind_op_flags {
	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_READONLY: Map the memory read-only.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_READONLY = 1 << 0,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC: Map the memory not-executable.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC = 1 << 1,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED: Map the memory uncached.
	 *
	 * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
	 */
	DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_MASK: Mask used to determine the type of operation.
	 */
	DRM_PANTHOR_VM_BIND_OP_TYPE_MASK = (int)(0xfu << 28),

	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: Map operation. */
	DRM_PANTHOR_VM_BIND_OP_TYPE_MAP = 0 << 28,

	/** @DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: Unmap operation. */
	DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP = 1 << 28,

	/**
	 * @DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY: No VM operation.
	 *
	 * Just serves as a synchronization point on a VM queue.
	 *
	 * Only valid if %DRM_PANTHOR_VM_BIND_ASYNC is set in drm_panthor_vm_bind::flags,
	 * and drm_panthor_vm_bind_op::syncs contains at least one element.
	 */
	DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY = 2 << 28,
};

/**
 * struct drm_panthor_vm_bind_op - VM bind operation
 */
struct drm_panthor_vm_bind_op {
	/** @flags: Combination of drm_panthor_vm_bind_op_flags flags. */
	__u32 flags;

	/**
	 * @bo_handle: Handle of the buffer object to map.
	 * MBZ for unmap or sync-only operations.
	 */
	__u32 bo_handle;

	/**
	 * @bo_offset: Buffer object offset.
	 * MBZ for unmap or sync-only operations.
	 */
	__u64 bo_offset;

	/**
	 * @va: Virtual address to map/unmap.
	 * MBZ for sync-only operations.
	 */
	__u64 va;

	/**
	 * @size: Size to map/unmap.
	 * MBZ for sync-only operations.
	 */
	__u64 size;

	/**
	 * @syncs: Array of struct drm_panthor_sync_op synchronization
	 * operations.
	 *
	 * This array must be empty if %DRM_PANTHOR_VM_BIND_ASYNC is not set on
	 * the drm_panthor_vm_bind object containing this VM bind operation.
	 *
	 * This array shall not be empty for sync-only operations.
	 */
	struct drm_panthor_obj_array syncs;
};

569 * enum drm_panthor_vm_bind_flags - VM bind flags
570 */
571enum drm_panthor_vm_bind_flags {
572 /**
573 * @DRM_PANTHOR_VM_BIND_ASYNC: VM bind operations are queued to the VM
574 * queue instead of being executed synchronously.
575 */
576 DRM_PANTHOR_VM_BIND_ASYNC = 1 << 0,
577};
578
579/**
580 * struct drm_panthor_vm_bind - Arguments passed to DRM_IOCTL_PANTHOR_VM_BIND
581 */
582struct drm_panthor_vm_bind {
583 /** @vm_id: VM targeted by the bind request. */
584 __u32 vm_id;
585
586 /** @flags: Combination of drm_panthor_vm_bind_flags flags. */
587 __u32 flags;
588
589 /** @ops: Array of struct drm_panthor_vm_bind_op bind operations. */
590 struct drm_panthor_obj_array ops;
591};
592
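/*
 * Illustrative example (not part of the UAPI itself): synchronously mapping a
 * buffer object at GPU VA 0x400000. "vm_id", "bo_handle" and "bo_size" are
 * assumed to come from earlier VM_CREATE/BO_CREATE calls, "fd" is an open
 * Panthor DRM file descriptor, and error handling is omitted.
 *
 *    struct drm_panthor_vm_bind_op map_op = {
 *            .flags = DRM_PANTHOR_VM_BIND_OP_TYPE_MAP,
 *            .bo_handle = bo_handle,
 *            .bo_offset = 0,
 *            .va = 0x400000,
 *            .size = bo_size,
 *            // .syncs is left empty: this is a synchronous bind.
 *    };
 *    struct drm_panthor_vm_bind bind = {
 *            .vm_id = vm_id,
 *            .flags = 0, // DRM_PANTHOR_VM_BIND_ASYNC not set
 *            .ops = DRM_PANTHOR_OBJ_ARRAY(1, &map_op),
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_VM_BIND, &bind);
 */
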
/**
 * enum drm_panthor_vm_state - VM states.
 */
enum drm_panthor_vm_state {
	/**
	 * @DRM_PANTHOR_VM_STATE_USABLE: VM is usable.
	 *
	 * New VM operations will be accepted on this VM.
	 */
	DRM_PANTHOR_VM_STATE_USABLE,

	/**
	 * @DRM_PANTHOR_VM_STATE_UNUSABLE: VM is unusable.
	 *
	 * Something put the VM in an unusable state (like an asynchronous
	 * VM_BIND request failing for any reason).
	 *
	 * Once the VM is in this state, all new MAP operations will be
	 * rejected, and any GPU job targeting this VM will fail.
	 * UNMAP operations are still accepted.
	 *
	 * The only way to recover from an unusable VM is to create a new
	 * VM, and destroy the old one.
	 */
	DRM_PANTHOR_VM_STATE_UNUSABLE,
};

/**
 * struct drm_panthor_vm_get_state - Get VM state.
 */
struct drm_panthor_vm_get_state {
	/** @vm_id: VM targeted by the get_state request. */
	__u32 vm_id;

	/**
	 * @state: state returned by the driver.
	 *
	 * Must be one of the enum drm_panthor_vm_state values.
	 */
	__u32 state;
};

/**
 * enum drm_panthor_bo_flags - Buffer object flags, passed at creation time.
 */
enum drm_panthor_bo_flags {
	/** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */
	DRM_PANTHOR_BO_NO_MMAP = (1 << 0),
};

/**
 * struct drm_panthor_bo_create - Arguments passed to DRM_IOCTL_PANTHOR_BO_CREATE.
 */
struct drm_panthor_bo_create {
	/**
	 * @size: Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	__u64 size;

	/**
	 * @flags: Flags. Must be a combination of drm_panthor_bo_flags flags.
	 */
	__u32 flags;

	/**
	 * @exclusive_vm_id: Exclusive VM this buffer object will be mapped to.
	 *
	 * If not zero, the field must refer to a valid VM ID, and implies that:
	 *  - the buffer object will only ever be bound to that VM
	 *  - it cannot be exported as a PRIME fd
	 */
	__u32 exclusive_vm_id;

	/**
	 * @handle: Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_bo_mmap_offset - Arguments passed to DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET.
 */
struct drm_panthor_bo_mmap_offset {
	/** @handle: Handle of the object we want an mmap offset for. */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;

	/** @offset: The fake offset to use for subsequent mmap calls. */
	__u64 offset;
};

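/*
 * Illustrative example (not part of the UAPI itself): allocating a 64 KiB
 * buffer object and mapping it in the CPU address space through the fake
 * mmap offset. "fd" is assumed to be an open Panthor DRM file descriptor and
 * error handling is omitted.
 *
 *    #include <sys/mman.h>
 *
 *    struct drm_panthor_bo_create bo_create = { .size = 64 * 1024 };
 *    ioctl(fd, DRM_IOCTL_PANTHOR_BO_CREATE, &bo_create);
 *
 *    struct drm_panthor_bo_mmap_offset mmap_offset = {
 *            .handle = bo_create.handle,
 *    };
 *    ioctl(fd, DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET, &mmap_offset);
 *
 *    void *cpu_ptr = mmap(NULL, bo_create.size, PROT_READ | PROT_WRITE,
 *                         MAP_SHARED, fd, mmap_offset.offset);
 */
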
/**
 * struct drm_panthor_queue_create - Queue creation arguments.
 */
struct drm_panthor_queue_create {
	/**
	 * @priority: Defines the priority of queues inside a group. Goes from 0 to 15,
	 * 15 being the highest priority.
	 */
	__u8 priority;

	/** @pad: Padding fields, MBZ. */
	__u8 pad[3];

	/** @ringbuf_size: Size of the ring buffer to allocate to this queue. */
	__u32 ringbuf_size;
};

/**
 * enum drm_panthor_group_priority - Scheduling group priority
 */
enum drm_panthor_group_priority {
	/** @PANTHOR_GROUP_PRIORITY_LOW: Low priority group. */
	PANTHOR_GROUP_PRIORITY_LOW = 0,

	/** @PANTHOR_GROUP_PRIORITY_MEDIUM: Medium priority group. */
	PANTHOR_GROUP_PRIORITY_MEDIUM,

	/**
	 * @PANTHOR_GROUP_PRIORITY_HIGH: High priority group.
	 *
	 * Requires CAP_SYS_NICE or DRM_MASTER.
	 */
	PANTHOR_GROUP_PRIORITY_HIGH,

	/**
	 * @PANTHOR_GROUP_PRIORITY_REALTIME: Realtime priority group.
	 *
	 * Requires CAP_SYS_NICE or DRM_MASTER.
	 */
	PANTHOR_GROUP_PRIORITY_REALTIME,
};

/**
 * struct drm_panthor_group_create - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_CREATE
 */
struct drm_panthor_group_create {
	/** @queues: Array of drm_panthor_queue_create elements. */
	struct drm_panthor_obj_array queues;

	/**
	 * @max_compute_cores: Maximum number of cores that can be used by compute
	 * jobs across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @compute_core_mask.
	 */
	__u8 max_compute_cores;

	/**
	 * @max_fragment_cores: Maximum number of cores that can be used by fragment
	 * jobs across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @fragment_core_mask.
	 */
	__u8 max_fragment_cores;

	/**
	 * @max_tiler_cores: Maximum number of tilers that can be used by tiler jobs
	 * across CS queues bound to this group.
	 *
	 * Must be less than or equal to the number of bits set in @tiler_core_mask.
	 */
	__u8 max_tiler_cores;

	/** @priority: Group priority (see enum drm_panthor_group_priority). */
	__u8 priority;

	/** @pad: Padding field, MBZ. */
	__u32 pad;

	/**
	 * @compute_core_mask: Mask encoding cores that can be used for compute jobs.
	 *
	 * This field must have at least @max_compute_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
	 */
	__u64 compute_core_mask;

	/**
	 * @fragment_core_mask: Mask encoding cores that can be used for fragment jobs.
	 *
	 * This field must have at least @max_fragment_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::shader_present.
	 */
	__u64 fragment_core_mask;

	/**
	 * @tiler_core_mask: Mask encoding cores that can be used for tiler jobs.
	 *
	 * This field must have at least @max_tiler_cores bits set.
	 *
	 * The bits set here should also be set in drm_panthor_gpu_info::tiler_present.
	 */
	__u64 tiler_core_mask;

	/**
	 * @vm_id: VM ID to bind this group to.
	 *
	 * All submission to queues bound to this group will use this VM.
	 */
	__u32 vm_id;

	/**
	 * @group_handle: Returned group handle. Passed back when submitting jobs or
	 * destroying a group.
	 */
	__u32 group_handle;
};

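/*
 * Illustrative example (not part of the UAPI itself): creating a group with a
 * single medium-priority queue that is allowed to use every shader core and
 * tiler reported in "gpu_info". "vm_id" comes from an earlier VM_CREATE call,
 * "fd" is an open Panthor DRM file descriptor, and error handling is omitted.
 *
 *    struct drm_panthor_queue_create queue = {
 *            .priority = 0,
 *            .ringbuf_size = 64 * 1024,
 *    };
 *    struct drm_panthor_group_create group_create = {
 *            .queues = DRM_PANTHOR_OBJ_ARRAY(1, &queue),
 *            .max_compute_cores = __builtin_popcountll(gpu_info.shader_present),
 *            .max_fragment_cores = __builtin_popcountll(gpu_info.shader_present),
 *            .max_tiler_cores = __builtin_popcountll(gpu_info.tiler_present),
 *            .priority = PANTHOR_GROUP_PRIORITY_MEDIUM,
 *            .compute_core_mask = gpu_info.shader_present,
 *            .fragment_core_mask = gpu_info.shader_present,
 *            .tiler_core_mask = gpu_info.tiler_present,
 *            .vm_id = vm_id,
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_CREATE, &group_create);
 *    // group_create.group_handle now identifies the group.
 */
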
/**
 * struct drm_panthor_group_destroy - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_DESTROY
 */
struct drm_panthor_group_destroy {
	/** @group_handle: Group to destroy */
	__u32 group_handle;

	/** @pad: Padding field, MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_queue_submit - Job submission arguments.
 *
 * This describes the userspace command stream to call from the kernel
 * command stream ring-buffer. Queue submission is always part of a group
 * submission, taking one or more jobs to submit to the underlying queues.
 */
struct drm_panthor_queue_submit {
	/** @queue_index: Index of the queue inside a group. */
	__u32 queue_index;

	/**
	 * @stream_size: Size of the command stream to execute.
	 *
	 * Must be 64-bit/8-byte aligned (the size of a CS instruction).
	 *
	 * Can be zero if stream_addr is zero too.
	 *
	 * When the stream size is zero, the queue submit serves as a
	 * synchronization point.
	 */
	__u32 stream_size;

	/**
	 * @stream_addr: GPU address of the command stream to execute.
	 *
	 * Must be 64-byte aligned.
	 *
	 * Can be zero if stream_size is zero too.
	 */
	__u64 stream_addr;

	/**
	 * @latest_flush: FLUSH_ID read at the time the stream was built.
	 *
	 * This allows cache flush elimination for the automatic
	 * flush+invalidate(all) done at submission time, which is needed to
	 * ensure the GPU doesn't get garbage when reading the indirect command
	 * stream buffers. If you want the cache flush to happen
	 * unconditionally, pass a zero here.
	 *
	 * Ignored when stream_size is zero.
	 */
	__u32 latest_flush;

	/** @pad: MBZ. */
	__u32 pad;

	/** @syncs: Array of struct drm_panthor_sync_op sync operations. */
	struct drm_panthor_obj_array syncs;
};

/**
 * struct drm_panthor_group_submit - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_SUBMIT
 */
struct drm_panthor_group_submit {
	/** @group_handle: Handle of the group to queue jobs to. */
	__u32 group_handle;

	/** @pad: MBZ. */
	__u32 pad;

	/** @queue_submits: Array of drm_panthor_queue_submit objects. */
	struct drm_panthor_obj_array queue_submits;
};

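/*
 * Illustrative example (not part of the UAPI itself): submitting a command
 * stream on queue 0 of a group and signaling a syncobj on completion.
 * "group_handle", "cs_gpu_va" (64-byte aligned), "cs_size" (multiple of 8),
 * "signal_syncobj" and "flush_id_page" are assumed to exist already, "fd" is
 * an open Panthor DRM file descriptor, and error handling is omitted.
 *
 *    struct drm_panthor_sync_op signal_op = {
 *            .flags = DRM_PANTHOR_SYNC_OP_SIGNAL |
 *                     DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ,
 *            .handle = signal_syncobj,
 *    };
 *    struct drm_panthor_queue_submit queue_submit = {
 *            .queue_index = 0,
 *            .stream_addr = cs_gpu_va,
 *            .stream_size = cs_size,
 *            .latest_flush = flush_id_page[0], // or 0 to flush unconditionally
 *            .syncs = DRM_PANTHOR_OBJ_ARRAY(1, &signal_op),
 *    };
 *    struct drm_panthor_group_submit group_submit = {
 *            .group_handle = group_handle,
 *            .queue_submits = DRM_PANTHOR_OBJ_ARRAY(1, &queue_submit),
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_GROUP_SUBMIT, &group_submit);
 */
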
/**
 * enum drm_panthor_group_state_flags - Group state flags
 */
enum drm_panthor_group_state_flags {
	/**
	 * @DRM_PANTHOR_GROUP_STATE_TIMEDOUT: Group had unfinished jobs.
	 *
	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
	 */
	DRM_PANTHOR_GROUP_STATE_TIMEDOUT = 1 << 0,

	/**
	 * @DRM_PANTHOR_GROUP_STATE_FATAL_FAULT: Group had fatal faults.
	 *
	 * When a group ends up with this flag set, no jobs can be submitted to its queues.
	 */
	DRM_PANTHOR_GROUP_STATE_FATAL_FAULT = 1 << 1,

	/**
	 * @DRM_PANTHOR_GROUP_STATE_INNOCENT: Group was killed during a reset caused by other
	 * groups.
	 *
	 * This flag can only be set if DRM_PANTHOR_GROUP_STATE_TIMEDOUT is set and
	 * DRM_PANTHOR_GROUP_STATE_FATAL_FAULT is not.
	 */
	DRM_PANTHOR_GROUP_STATE_INNOCENT = 1 << 2,
};

/**
 * struct drm_panthor_group_get_state - Arguments passed to DRM_IOCTL_PANTHOR_GROUP_GET_STATE
 *
 * Used to query the state of a group and decide whether a new group should be created to
 * replace it.
 */
struct drm_panthor_group_get_state {
	/** @group_handle: Handle of the group to query state on */
	__u32 group_handle;

	/**
	 * @state: Combination of DRM_PANTHOR_GROUP_STATE_* flags encoding the
	 * group state.
	 */
	__u32 state;

	/** @fatal_queues: Bitmask of queues that faced fatal faults. */
	__u32 fatal_queues;

	/** @pad: MBZ */
	__u32 pad;
};

/**
 * struct drm_panthor_tiler_heap_create - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE
 */
struct drm_panthor_tiler_heap_create {
	/** @vm_id: VM ID the tiler heap should be mapped to */
	__u32 vm_id;

	/** @initial_chunk_count: Initial number of chunks to allocate. Must be at least one. */
	__u32 initial_chunk_count;

	/**
	 * @chunk_size: Chunk size.
	 *
	 * Must be page-aligned and lie in the [128k:8M] range.
	 */
	__u32 chunk_size;

	/**
	 * @max_chunks: Maximum number of chunks that can be allocated.
	 *
	 * Must be at least @initial_chunk_count.
	 */
	__u32 max_chunks;

	/**
	 * @target_in_flight: Maximum number of in-flight render passes.
	 *
	 * If the heap has more than @target_in_flight tiler jobs in-flight, the FW will
	 * wait for render passes to finish before queuing new tiler jobs.
	 */
	__u32 target_in_flight;

	/** @handle: Returned heap handle. Passed back to DESTROY_TILER_HEAP. */
	__u32 handle;

	/** @tiler_heap_ctx_gpu_va: Returned GPU virtual address of the heap context. */
	__u64 tiler_heap_ctx_gpu_va;

	/**
	 * @first_heap_chunk_gpu_va: First heap chunk.
	 *
	 * The tiler heap is formed of heap chunks forming a singly-linked list. This
	 * is the first element in the list.
	 */
	__u64 first_heap_chunk_gpu_va;
};

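/*
 * Illustrative example (not part of the UAPI itself): creating a tiler heap
 * with 2 MiB chunks. "vm_id" comes from an earlier VM_CREATE call, "fd" is an
 * open Panthor DRM file descriptor, and error handling is omitted.
 *
 *    struct drm_panthor_tiler_heap_create heap_create = {
 *            .vm_id = vm_id,
 *            .initial_chunk_count = 1,
 *            .chunk_size = 2 * 1024 * 1024,
 *            .max_chunks = 64,
 *            .target_in_flight = 65535,
 *    };
 *
 *    ioctl(fd, DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE, &heap_create);
 *    // heap_create.handle, heap_create.tiler_heap_ctx_gpu_va and
 *    // heap_create.first_heap_chunk_gpu_va are now valid.
 */
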
/**
 * struct drm_panthor_tiler_heap_destroy - Arguments passed to DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY
 */
struct drm_panthor_tiler_heap_destroy {
	/**
	 * @handle: Handle of the tiler heap to destroy.
	 *
	 * Must be a valid heap handle returned by DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE.
	 */
	__u32 handle;

	/** @pad: Padding field, MBZ. */
	__u32 pad;
};

/**
 * struct drm_panthor_bo_set_label - Arguments passed to DRM_IOCTL_PANTHOR_BO_SET_LABEL
 */
struct drm_panthor_bo_set_label {
	/** @handle: Handle of the buffer object to label. */
	__u32 handle;

	/** @pad: MBZ. */
	__u32 pad;

	/**
	 * @label: User pointer to a NUL-terminated string.
	 *
	 * Length cannot be greater than 4096.
	 */
	__u64 label;
};

/**
 * struct drm_panthor_set_user_mmio_offset - Arguments passed to
 * DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET
 *
 * This ioctl is only really useful if you want to support userspace
 * CPU emulation environments where the size of an unsigned long differs
 * between the host and the guest architectures.
 */
struct drm_panthor_set_user_mmio_offset {
	/**
	 * @offset: User MMIO offset to use.
	 *
	 * Must be either DRM_PANTHOR_USER_MMIO_OFFSET_32BIT or
	 * DRM_PANTHOR_USER_MMIO_OFFSET_64BIT.
	 *
	 * Use DRM_PANTHOR_USER_MMIO_OFFSET (which selects OFFSET_32BIT or
	 * OFFSET_64BIT based on the size of an unsigned long) unless you
	 * have a very good reason to overrule this decision.
	 */
	__u64 offset;
};

/**
 * DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
 * @__access: Access type. Must be R, W or RW.
 * @__id: One of the DRM_PANTHOR_xxx IDs.
 * @__type: Suffix of the type being passed to the IOCTL.
 *
 * Don't use this macro directly, use the DRM_IOCTL_PANTHOR_xxx
 * values instead.
 *
 * Return: An IOCTL number to be passed to ioctl() from userspace.
 */
#define DRM_IOCTL_PANTHOR(__access, __id, __type) \
	DRM_IO ## __access(DRM_COMMAND_BASE + DRM_PANTHOR_ ## __id, \
			   struct drm_panthor_ ## __type)

enum {
	DRM_IOCTL_PANTHOR_DEV_QUERY =
		DRM_IOCTL_PANTHOR(WR, DEV_QUERY, dev_query),
	DRM_IOCTL_PANTHOR_VM_CREATE =
		DRM_IOCTL_PANTHOR(WR, VM_CREATE, vm_create),
	DRM_IOCTL_PANTHOR_VM_DESTROY =
		DRM_IOCTL_PANTHOR(WR, VM_DESTROY, vm_destroy),
	DRM_IOCTL_PANTHOR_VM_BIND =
		DRM_IOCTL_PANTHOR(WR, VM_BIND, vm_bind),
	DRM_IOCTL_PANTHOR_VM_GET_STATE =
		DRM_IOCTL_PANTHOR(WR, VM_GET_STATE, vm_get_state),
	DRM_IOCTL_PANTHOR_BO_CREATE =
		DRM_IOCTL_PANTHOR(WR, BO_CREATE, bo_create),
	DRM_IOCTL_PANTHOR_BO_MMAP_OFFSET =
		DRM_IOCTL_PANTHOR(WR, BO_MMAP_OFFSET, bo_mmap_offset),
	DRM_IOCTL_PANTHOR_GROUP_CREATE =
		DRM_IOCTL_PANTHOR(WR, GROUP_CREATE, group_create),
	DRM_IOCTL_PANTHOR_GROUP_DESTROY =
		DRM_IOCTL_PANTHOR(WR, GROUP_DESTROY, group_destroy),
	DRM_IOCTL_PANTHOR_GROUP_SUBMIT =
		DRM_IOCTL_PANTHOR(WR, GROUP_SUBMIT, group_submit),
	DRM_IOCTL_PANTHOR_GROUP_GET_STATE =
		DRM_IOCTL_PANTHOR(WR, GROUP_GET_STATE, group_get_state),
	DRM_IOCTL_PANTHOR_TILER_HEAP_CREATE =
		DRM_IOCTL_PANTHOR(WR, TILER_HEAP_CREATE, tiler_heap_create),
	DRM_IOCTL_PANTHOR_TILER_HEAP_DESTROY =
		DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy),
	DRM_IOCTL_PANTHOR_BO_SET_LABEL =
		DRM_IOCTL_PANTHOR(WR, BO_SET_LABEL, bo_set_label),
	DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET =
		DRM_IOCTL_PANTHOR(WR, SET_USER_MMIO_OFFSET, set_user_mmio_offset),
};

#if defined(__cplusplus)
}
#endif

#endif /* _PANTHOR_DRM_H_ */