/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_EXEC_QUEUE_H_
#define _XE_EXEC_QUEUE_H_

#include "xe_exec_queue_types.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_file;
struct xe_device;
struct xe_file;

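/* Iterate __i over the TLB invalidation targets, from the primary GT through the media GT */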
#define for_each_tlb_inval(__i) \
	for (__i = XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT; \
	     __i <= XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT; ++__i)

struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hw_engine, u32 flags,
					   u64 extensions);
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class,
						 u32 flags, u64 extensions);
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
						struct xe_tile *tile,
						struct xe_vm *user_vm,
						u32 flags, u64 extensions);

void xe_exec_queue_fini(struct xe_exec_queue *q);
void xe_exec_queue_destroy(struct kref *ref);
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance);

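/*
 * Take a reference only if the refcount has not already dropped to zero;
 * returns @q on success, or NULL if the exec queue is already being destroyed.
 */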
static inline struct xe_exec_queue *
xe_exec_queue_get_unless_zero(struct xe_exec_queue *q)
{
	if (kref_get_unless_zero(&q->refcount))
		return q;

	return NULL;
}

struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id);

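/* Take an additional reference; the caller must already hold a reference to @q */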
static inline struct xe_exec_queue *xe_exec_queue_get(struct xe_exec_queue *q)
{
	kref_get(&q->refcount);
	return q;
}

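/* Drop a reference; the last put releases the exec queue via xe_exec_queue_destroy() */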
static inline void xe_exec_queue_put(struct xe_exec_queue *q)
{
	kref_put(&q->refcount, xe_exec_queue_destroy);
}

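/* An exec queue is parallel when it spans more than one engine instance (width > 1) */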
static inline bool xe_exec_queue_is_parallel(struct xe_exec_queue *q)
{
	return q->width > 1;
}

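/* True when the exec queue has a PXP (protected content) type set */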
static inline bool xe_exec_queue_uses_pxp(struct xe_exec_queue *q)
{
	return q->pxp.type;
}

bool xe_exec_queue_is_lr(struct xe_exec_queue *q);

bool xe_exec_queue_is_idle(struct xe_exec_queue *q);

void xe_exec_queue_kill(struct xe_exec_queue *q);

int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file);
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file);
enum xe_exec_queue_priority xe_exec_queue_device_get_max_priority(struct xe_device *xe);

void xe_exec_queue_last_fence_put(struct xe_exec_queue *e, struct xe_vm *vm);
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *e);
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e,
					       struct xe_vm *vm);
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *e,
							   struct xe_vm *vm);
void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
				  struct dma_fence *fence);

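/*
 * Per-invalidation-type variants of the last-fence helpers above; @type is one
 * of the XE_EXEC_QUEUE_TLB_INVAL_* values iterated by for_each_tlb_inval().
 */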
void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
					    struct xe_vm *vm,
					    unsigned int type);

void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q,
						     unsigned int type);

struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q,
							  struct xe_vm *vm,
							  unsigned int type);

void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q,
					    struct xe_vm *vm,
					    struct dma_fence *fence,
					    unsigned int type);

void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);

int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch);

struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q);

#endif