/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

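/* How many scheduler entities (rings) a context exposes per hardware IP type. */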
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	= 1,
	[AMDGPU_HW_IP_COMPUTE]	= 4,
	[AMDGPU_HW_IP_DMA]	= 2,
	[AMDGPU_HW_IP_UVD]	= 1,
	[AMDGPU_HW_IP_VCE]	= 1,
	[AMDGPU_HW_IP_UVD_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_DEC]	= 1,
	[AMDGPU_HW_IP_VCN_ENC]	= 1,
	[AMDGPU_HW_IP_VCN_JPEG]	= 1,
};

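/*
 * Check whether the calling process may create a context with the requested
 * scheduler priority.  Priorities above NORMAL are restricted to processes
 * with CAP_SYS_NICE or to the current DRM master.
 */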
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

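/*
 * Lazily allocate the scheduler entity for one ring of a hardware IP and
 * attach it to the scheduler(s) that drive that IP.  The entity also embeds
 * the per-context fence ring with amdgpu_sched_jobs slots.
 */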
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
{
	struct amdgpu_device *adev = ctx->adev;
	struct amdgpu_ctx_entity *entity;
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	unsigned num_scheds = 0;
	enum drm_sched_priority priority;
	int r;

	entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
			 GFP_KERNEL);
	if (!entity)
		return -ENOMEM;

	entity->sequence = 1;
	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	switch (hw_ip) {
	case AMDGPU_HW_IP_GFX:
		sched = &adev->gfx.gfx_ring[0].sched;
		scheds = &sched;
		num_scheds = 1;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		scheds = adev->gfx.compute_sched;
		num_scheds = adev->gfx.num_compute_sched;
		break;
	case AMDGPU_HW_IP_DMA:
		scheds = adev->sdma.sdma_sched;
		num_scheds = adev->sdma.num_sdma_sched;
		break;
	case AMDGPU_HW_IP_UVD:
		sched = &adev->uvd.inst[0].ring.sched;
		scheds = &sched;
		num_scheds = 1;
		break;
	case AMDGPU_HW_IP_VCE:
		sched = &adev->vce.ring[0].sched;
		scheds = &sched;
		num_scheds = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		sched = &adev->uvd.inst[0].ring_enc[0].sched;
		scheds = &sched;
		num_scheds = 1;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		scheds = adev->vcn.vcn_dec_sched;
		num_scheds = adev->vcn.num_vcn_dec_sched;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		scheds = adev->vcn.vcn_enc_sched;
		num_scheds = adev->vcn.num_vcn_enc_sched;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		scheds = adev->jpeg.jpeg_sched;
		num_scheds = adev->jpeg.num_jpeg_sched;
		break;
	}

	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
				  &ctx->guilty);
	if (r)
		goto error_free_entity;

	ctx->entities[hw_ip][ring] = entity;
	return 0;

error_free_entity:
	kfree(entity);

	return r;
}

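/*
 * Common context initialization: validate the requested priority, then set up
 * the locks, the reference count and the reset/VRAM-lost counter snapshots
 * used later by the query ioctls.  Entities are created on first use, not here.
 */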
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	ctx->adev = adev;

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	return 0;
}

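/* Drop the fences still referenced by a context entity and free it. */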
static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
{
	int i;

	if (!entity)
		return;

	for (i = 0; i < amdgpu_sched_jobs; ++i)
		dma_fence_put(entity->fences[i]);

	kfree(entity);
}

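/*
 * Release the context memory once the last reference is gone.  The scheduler
 * entities themselves are expected to have been destroyed or finalized
 * beforehand; only their bookkeeping (fence slots) is freed here.
 */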
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
			amdgpu_ctx_fini_entity(ctx->entities[i][j]);
			ctx->entities[i][j] = NULL;
		}
	}

	mutex_destroy(&ctx->lock);
	kfree(ctx);
}

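/*
 * Look up (and, if necessary, create) the scheduler entity for the given
 * hardware IP, instance and ring of a context.  Returns -EINVAL for
 * out-of-range arguments.
 */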
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	int r;

	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	if (ctx->entities[hw_ip][ring] == NULL) {
		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
		if (r)
			return r;
	}

	*entity = &ctx->entities[hw_ip][ring]->entity;
	return 0;
}

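/*
 * Allocate a new context for a file descriptor: reserve an IDR handle under
 * the manager lock and initialize the context.  On failure the handle is
 * released again so no context is leaked.
 */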
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

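/*
 * Final kref release callback: destroy all scheduler entities that were
 * created for the context, then free the context itself.
 */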
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
		}
	}

	amdgpu_ctx_fini(ref);
}

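/* Remove a context handle from the IDR and drop the reference it held. */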
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

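/*
 * Legacy AMDGPU_CTX_OP_QUERY_STATE implementation: report whether a GPU reset
 * has happened since the last query on this context.
 */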
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

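/*
 * AMDGPU_CTX_OP_QUERY_STATE2 implementation: report reset, VRAM loss, guilt
 * and RAS error state as flag bits instead of a single reset status value.
 */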
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned long ras_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	/* query the uncorrectable (UE) error count */
	ras_counter = amdgpu_ras_query_error_count(adev, false);
	/* the RAS counter is monotonically increasing */
	if (ras_counter != ctx->ras_counter_ue) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		ctx->ras_counter_ue = ras_counter;
	}

	/* query the correctable (CE) error count */
	ras_counter = amdgpu_ras_query_error_count(adev, true);
	if (ras_counter != ctx->ras_counter_ce) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		ctx->ras_counter_ce = ras_counter;
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

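/*
 * Entry point for the context ioctl: dispatch the allocate, free and query
 * operations requested by userspace.
 */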
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

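/* Look up a context by handle and take an extra reference on it. */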
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

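/* Drop a reference obtained with amdgpu_ctx_get(). */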
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

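/*
 * Store the fence of a submission in the entity's fence ring and hand back
 * the sequence number used as the submission handle.  The slot being recycled
 * must already be signaled; otherwise the ring would wrap onto an unfinished
 * job.
 */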
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

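/*
 * Look up the fence for a previously returned sequence number.  A sequence of
 * ~0 means the most recent submission.  Returns -EINVAL for a sequence number
 * from the future, or NULL when the fence is so old that it has already been
 * recycled out of the ring.
 */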
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

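/*
 * Apply a priority override to every entity of the context.  The override
 * takes precedence over the priority the context was created with until it
 * is set back to DRM_SCHED_PRIORITY_UNSET.
 */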
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	enum drm_sched_priority ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			struct drm_sched_entity *entity;

			if (!ctx->entities[i][j])
				continue;

			entity = &ctx->entities[i][j]->entity;
			drm_sched_entity_set_priority(entity, ctx_prio);
		}
	}
}

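/*
 * Wait for the fence in the slot that the next submission would overwrite,
 * effectively limiting the number of jobs in flight per entity to
 * amdgpu_sched_jobs.
 */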
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

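/* Initialize the per-file context manager (handle IDR and its lock). */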
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

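/*
 * Flush every entity of every context owned by this manager, letting queued
 * jobs be pushed to the hardware within the given timeout.  Returns the
 * remaining timeout.
 */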
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				timeout = drm_sched_entity_flush(entity, timeout);
			}
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

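/*
 * Finalize the scheduler entities of all contexts that are no longer in use.
 * Contexts that still hold extra references are reported and skipped.
 */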
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				drm_sched_entity_fini(entity);
			}
		}
	}
}

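/*
 * Tear down the context manager on file close: finalize all entities, drop
 * the final reference of each remaining context and destroy the IDR.
 */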
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}

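/*
 * Collect the scheduler of every usable ring into the per-IP scheduler arrays
 * that amdgpu_ctx_init_entity() later hands to the DRM scheduler.  Harvested
 * VCN/JPEG instances are skipped.
 */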
void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
		adev->gfx.num_gfx_sched++;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
		adev->gfx.num_compute_sched++;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
		adev->sdma.num_sdma_sched++;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
			&adev->vcn.inst[i].ring_dec.sched;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		for (j = 0; j < adev->vcn.num_enc_rings; ++j)
			adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
				&adev->vcn.inst[i].ring_enc[j].sched;
	}

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;
		adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
			&adev->jpeg.inst[i].ring_dec.sched;
	}
}