// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kref.h>
#include <linux/uaccess.h>

#include "msm_gpu.h"

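/*
 * Update the context's sysprof state: 0 disables profiling, 1 takes a
 * reference on gpu->sysprof_active, and 2 additionally holds a pm_runtime
 * reference so the GPU stays powered while profiling.
 */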
int msm_file_private_set_sysprof(struct msm_file_private *ctx,
				 struct msm_gpu *gpu, int sysprof)
{
	/*
	 * Since pm_runtime and sysprof_active are both refcounts, we
	 * apply the new value first, and then unwind the previous
	 * value
	 */

	switch (sysprof) {
	default:
		return -EINVAL;
	case 2:
		pm_runtime_get_sync(&gpu->pdev->dev);
		fallthrough;
	case 1:
		refcount_inc(&gpu->sysprof_active);
		fallthrough;
	case 0:
		break;
	}

	/* unwind old value: */
	switch (ctx->sysprof) {
	case 2:
		pm_runtime_put_autosuspend(&gpu->pdev->dev);
		fallthrough;
	case 1:
		refcount_dec(&gpu->sysprof_active);
		fallthrough;
	case 0:
		break;
	}

	ctx->sysprof = sysprof;

	return 0;
}

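/*
 * kref release callback for the file context: destroys any scheduler
 * entities that were lazily created, drops the address space reference,
 * and frees the context.
 */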
void __msm_file_private_destroy(struct kref *kref)
{
	struct msm_file_private *ctx = container_of(kref,
		struct msm_file_private, ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
		if (!ctx->entities[i])
			continue;

		drm_sched_entity_destroy(ctx->entities[i]);
		kfree(ctx->entities[i]);
	}

	msm_gem_address_space_put(ctx->aspace);
	kfree(ctx);
}

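/*
 * kref release callback for a submitqueue: tears down the fence idr and
 * drops the queue's reference on its owning file context.
 */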
void msm_submitqueue_destroy(struct kref *kref)
{
	struct msm_gpu_submitqueue *queue = container_of(kref,
		struct msm_gpu_submitqueue, ref);

	idr_destroy(&queue->fence_idr);

	msm_file_private_put(queue->ctx);

	kfree(queue);
}

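/*
 * Look up a submitqueue by id and return it with a reference held, or
 * NULL if no queue with that id exists.
 */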
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return NULL;

	read_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			kref_get(&entry->ref);
			read_unlock(&ctx->queuelock);

			return entry;
		}
	}

	read_unlock(&ctx->queuelock);
	return NULL;
}

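/*
 * Unlink and drop all remaining submitqueues when the file is closed.
 */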
void msm_submitqueue_close(struct msm_file_private *ctx)
{
	struct msm_gpu_submitqueue *entry, *tmp;

	if (!ctx)
		return;

	/*
	 * No lock needed in close and there won't
	 * be any more user ioctls coming our way
	 */
	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
		list_del(&entry->node);
		msm_submitqueue_put(entry);
	}
}

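/*
 * Return the scheduler entity for the given ring/priority pair, creating
 * it on first use.  Entities are cached in ctx->entities[] so that all
 * submitqueues created with the same priority share one entity.
 */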
static struct drm_sched_entity *
get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
		 unsigned ring_nr, enum drm_sched_priority sched_prio)
{
	static DEFINE_MUTEX(entity_lock);
	unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;

	/* We should have already validated that the requested priority is
	 * valid by the time we get here.
	 */
	if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
		return ERR_PTR(-EINVAL);

	mutex_lock(&entity_lock);

	if (!ctx->entities[idx]) {
		struct drm_sched_entity *entity;
		struct drm_gpu_scheduler *sched = &ring->sched;
		int ret;

		entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
		if (!entity) {
			mutex_unlock(&entity_lock);
			return ERR_PTR(-ENOMEM);
		}

		ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
		if (ret) {
			mutex_unlock(&entity_lock);
			kfree(entity);
			return ERR_PTR(ret);
		}

		ctx->entities[idx] = entity;
	}

	mutex_unlock(&entity_lock);

	return ctx->entities[idx];
}

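/*
 * Create a new submitqueue: the userspace priority is mapped to a
 * (ring, scheduler-priority) pair, the matching scheduler entity is
 * looked up (or created), and the queue is added to the context's list
 * under the write lock.
 */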
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id)
{
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;
	enum drm_sched_priority sched_prio;
	unsigned ring_nr;
	int ret;

	if (!ctx)
		return -ENODEV;

	if (!priv->gpu)
		return -ENODEV;

	ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
	if (ret)
		return ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);

	if (!queue)
		return -ENOMEM;

	kref_init(&queue->ref);
	queue->flags = flags;
	queue->ring_nr = ring_nr;

	queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
					 ring_nr, sched_prio);
	if (IS_ERR(queue->entity)) {
		ret = PTR_ERR(queue->entity);
		kfree(queue);
		return ret;
	}

	write_lock(&ctx->queuelock);

	queue->ctx = msm_file_private_get(ctx);
	queue->id = ctx->queueid++;

	if (id)
		*id = queue->id;

	idr_init(&queue->fence_idr);
	mutex_init(&queue->lock);

	list_add_tail(&queue->node, &ctx->submitqueues);

	write_unlock(&ctx->queuelock);

	return 0;
}

/*
 * Create the default submit-queue (id==0), used for backwards compatibility
 * for userspace that pre-dates the introduction of submitqueues.
 */
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
	struct msm_drm_private *priv = drm->dev_private;
	int default_prio, max_priority;

	if (!priv->gpu)
		return -ENODEV;

	max_priority = (priv->gpu->nr_rings * NR_SCHED_PRIORITIES) - 1;

	/*
	 * Pick a medium priority level as default.  Lower numeric value is
	 * higher priority, so round-up to pick a priority that is not higher
	 * than the middle priority level.
	 */
	default_prio = DIV_ROUND_UP(max_priority, 2);

	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}

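/*
 * Copy the queue's fault counter out to userspace.  A zero args->len is
 * a size probe: it reports the expected size without copying any data.
 */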
static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
		struct drm_msm_submitqueue_query *args)
{
	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
	int ret;

	/* If a zero length was passed in, return the data size we expect */
	if (!args->len) {
		args->len = sizeof(queue->faults);
		return 0;
	}

	/* Set the length to the actual size of the data */
	args->len = size;

	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

	return ret ? -EFAULT : 0;
}

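/*
 * Backend for the submitqueue-query ioctl.  Only the
 * MSM_SUBMITQUEUE_PARAM_FAULTS parameter is currently handled here.
 */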
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args)
{
	struct msm_gpu_submitqueue *queue;
	int ret = -EINVAL;

	if (args->pad)
		return -EINVAL;

	queue = msm_submitqueue_get(ctx, args->id);
	if (!queue)
		return -ENOENT;

	if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
		ret = msm_submitqueue_query_faults(queue, args);

	msm_submitqueue_put(queue);

	return ret;
}

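/*
 * Remove the submitqueue with the given id from the context's list and
 * drop the list's reference to it.
 */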
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return 0;

	/*
	 * id 0 is the "default" queue and can't be destroyed
	 * by the user
	 */
	if (!id)
		return -ENOENT;

	write_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			list_del(&entry->node);
			write_unlock(&ctx->queuelock);

			msm_submitqueue_put(entry);
			return 0;
		}
	}

	write_unlock(&ctx->queuelock);
	return -ENOENT;
}
304