Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2022 Intel Corporation
4 */
5
6#include "xe_guc_submit.h"
7
8#include <linux/bitfield.h>
9#include <linux/bitmap.h>
10#include <linux/circ_buf.h>
11#include <linux/delay.h>
12#include <linux/dma-fence-array.h>
13#include <linux/math64.h>
14
15#include <drm/drm_managed.h>
16
17#include "abi/guc_actions_abi.h"
18#include "abi/guc_klvs_abi.h"
19#include "regs/xe_lrc_layout.h"
20#include "xe_assert.h"
21#include "xe_devcoredump.h"
22#include "xe_device.h"
23#include "xe_exec_queue.h"
24#include "xe_force_wake.h"
25#include "xe_gpu_scheduler.h"
26#include "xe_gt.h"
27#include "xe_gt_clock.h"
28#include "xe_gt_printk.h"
29#include "xe_guc.h"
30#include "xe_guc_ct.h"
31#include "xe_guc_exec_queue_types.h"
32#include "xe_guc_id_mgr.h"
33#include "xe_guc_submit_types.h"
34#include "xe_hw_engine.h"
35#include "xe_hw_fence.h"
36#include "xe_lrc.h"
37#include "xe_macros.h"
38#include "xe_map.h"
39#include "xe_mocs.h"
40#include "xe_pm.h"
41#include "xe_ring_ops_types.h"
42#include "xe_sched_job.h"
43#include "xe_trace.h"
44#include "xe_vm.h"
45
46static struct xe_guc *
47exec_queue_to_guc(struct xe_exec_queue *q)
48{
49 return &q->gt->uc.guc;
50}
51
52/*
53 * Helpers for engine state, using an atomic as some of the bits can transition
54 * as the same time (e.g. a suspend can be happning at the same time as schedule
55 * engine done being processed).
56 */
57#define EXEC_QUEUE_STATE_REGISTERED (1 << 0)
58#define EXEC_QUEUE_STATE_ENABLED (1 << 1)
59#define EXEC_QUEUE_STATE_PENDING_ENABLE (1 << 2)
60#define EXEC_QUEUE_STATE_PENDING_DISABLE (1 << 3)
61#define EXEC_QUEUE_STATE_DESTROYED (1 << 4)
62#define EXEC_QUEUE_STATE_SUSPENDED (1 << 5)
63#define EXEC_QUEUE_STATE_RESET (1 << 6)
64#define EXEC_QUEUE_STATE_KILLED (1 << 7)
65#define EXEC_QUEUE_STATE_WEDGED (1 << 8)
66#define EXEC_QUEUE_STATE_BANNED (1 << 9)
67#define EXEC_QUEUE_STATE_CHECK_TIMEOUT (1 << 10)
68#define EXEC_QUEUE_STATE_EXTRA_REF (1 << 11)
69
70static bool exec_queue_registered(struct xe_exec_queue *q)
71{
72 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED;
73}
74
75static void set_exec_queue_registered(struct xe_exec_queue *q)
76{
77 atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
78}
79
80static void clear_exec_queue_registered(struct xe_exec_queue *q)
81{
82 atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
83}
84
85static bool exec_queue_enabled(struct xe_exec_queue *q)
86{
87 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED;
88}
89
90static void set_exec_queue_enabled(struct xe_exec_queue *q)
91{
92 atomic_or(EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
93}
94
95static void clear_exec_queue_enabled(struct xe_exec_queue *q)
96{
97 atomic_and(~EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
98}
99
100static bool exec_queue_pending_enable(struct xe_exec_queue *q)
101{
102 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE;
103}
104
105static void set_exec_queue_pending_enable(struct xe_exec_queue *q)
106{
107 atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
108}
109
110static void clear_exec_queue_pending_enable(struct xe_exec_queue *q)
111{
112 atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
113}
114
115static bool exec_queue_pending_disable(struct xe_exec_queue *q)
116{
117 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_DISABLE;
118}
119
120static void set_exec_queue_pending_disable(struct xe_exec_queue *q)
121{
122 atomic_or(EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
123}
124
125static void clear_exec_queue_pending_disable(struct xe_exec_queue *q)
126{
127 atomic_and(~EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
128}
129
130static bool exec_queue_destroyed(struct xe_exec_queue *q)
131{
132 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_DESTROYED;
133}
134
135static void set_exec_queue_destroyed(struct xe_exec_queue *q)
136{
137 atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
138}
139
140static bool exec_queue_banned(struct xe_exec_queue *q)
141{
142 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED;
143}
144
145static void set_exec_queue_banned(struct xe_exec_queue *q)
146{
147 atomic_or(EXEC_QUEUE_STATE_BANNED, &q->guc->state);
148}
149
150static bool exec_queue_suspended(struct xe_exec_queue *q)
151{
152 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_SUSPENDED;
153}
154
155static void set_exec_queue_suspended(struct xe_exec_queue *q)
156{
157 atomic_or(EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
158}
159
160static void clear_exec_queue_suspended(struct xe_exec_queue *q)
161{
162 atomic_and(~EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
163}
164
165static bool exec_queue_reset(struct xe_exec_queue *q)
166{
167 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_RESET;
168}
169
170static void set_exec_queue_reset(struct xe_exec_queue *q)
171{
172 atomic_or(EXEC_QUEUE_STATE_RESET, &q->guc->state);
173}
174
175static bool exec_queue_killed(struct xe_exec_queue *q)
176{
177 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_KILLED;
178}
179
180static void set_exec_queue_killed(struct xe_exec_queue *q)
181{
182 atomic_or(EXEC_QUEUE_STATE_KILLED, &q->guc->state);
183}
184
185static bool exec_queue_wedged(struct xe_exec_queue *q)
186{
187 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_WEDGED;
188}
189
190static void set_exec_queue_wedged(struct xe_exec_queue *q)
191{
192 atomic_or(EXEC_QUEUE_STATE_WEDGED, &q->guc->state);
193}
194
195static bool exec_queue_check_timeout(struct xe_exec_queue *q)
196{
197 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_CHECK_TIMEOUT;
198}
199
200static void set_exec_queue_check_timeout(struct xe_exec_queue *q)
201{
202 atomic_or(EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state);
203}
204
205static void clear_exec_queue_check_timeout(struct xe_exec_queue *q)
206{
207 atomic_and(~EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state);
208}
209
210static bool exec_queue_extra_ref(struct xe_exec_queue *q)
211{
212 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_EXTRA_REF;
213}
214
215static void set_exec_queue_extra_ref(struct xe_exec_queue *q)
216{
217 atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state);
218}
219
220static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
221{
222 return (atomic_read(&q->guc->state) &
223 (EXEC_QUEUE_STATE_WEDGED | EXEC_QUEUE_STATE_KILLED |
224 EXEC_QUEUE_STATE_BANNED));
225}
226
227#ifdef CONFIG_PROVE_LOCKING
228static int alloc_submit_wq(struct xe_guc *guc)
229{
230 int i;
231
232 for (i = 0; i < NUM_SUBMIT_WQ; ++i) {
233 guc->submission_state.submit_wq_pool[i] =
234 alloc_ordered_workqueue("submit_wq", 0);
235 if (!guc->submission_state.submit_wq_pool[i])
236 goto err_free;
237 }
238
239 return 0;
240
241err_free:
242 while (i)
243 destroy_workqueue(guc->submission_state.submit_wq_pool[--i]);
244
245 return -ENOMEM;
246}
247
248static void free_submit_wq(struct xe_guc *guc)
249{
250 int i;
251
252 for (i = 0; i < NUM_SUBMIT_WQ; ++i)
253 destroy_workqueue(guc->submission_state.submit_wq_pool[i]);
254}
255
256static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
257{
258 int idx = guc->submission_state.submit_wq_idx++ % NUM_SUBMIT_WQ;
259
260 return guc->submission_state.submit_wq_pool[idx];
261}
262#else
263static int alloc_submit_wq(struct xe_guc *guc)
264{
265 return 0;
266}
267
268static void free_submit_wq(struct xe_guc *guc)
269{
270
271}
272
273static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
274{
275 return NULL;
276}
277#endif
278
279static void guc_submit_fini(struct drm_device *drm, void *arg)
280{
281 struct xe_guc *guc = arg;
282
283 xa_destroy(&guc->submission_state.exec_queue_lookup);
284 free_submit_wq(guc);
285}
286
287static void guc_submit_wedged_fini(struct drm_device *drm, void *arg)
288{
289 struct xe_guc *guc = arg;
290 struct xe_exec_queue *q;
291 unsigned long index;
292
293 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
294 if (exec_queue_wedged(q))
295 xe_exec_queue_put(q);
296}
297
298static const struct xe_exec_queue_ops guc_exec_queue_ops;
299
300static void primelockdep(struct xe_guc *guc)
301{
302 if (!IS_ENABLED(CONFIG_LOCKDEP))
303 return;
304
305 fs_reclaim_acquire(GFP_KERNEL);
306
307 mutex_lock(&guc->submission_state.lock);
308 mutex_unlock(&guc->submission_state.lock);
309
310 fs_reclaim_release(GFP_KERNEL);
311}
312
313/**
314 * xe_guc_submit_init() - Initialize GuC submission.
315 * @guc: the &xe_guc to initialize
316 * @num_ids: number of GuC context IDs to use
317 *
318 * The bare-metal or PF driver can pass ~0 as &num_ids to indicate that all
319 * GuC context IDs supported by the GuC firmware should be used for submission.
320 *
321 * Only VF drivers will have to provide explicit number of GuC context IDs
322 * that they can use for submission.
323 *
324 * Return: 0 on success or a negative error code on failure.
325 */
326int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
327{
328 struct xe_device *xe = guc_to_xe(guc);
329 struct xe_gt *gt = guc_to_gt(guc);
330 int err;
331
332 err = drmm_mutex_init(&xe->drm, &guc->submission_state.lock);
333 if (err)
334 return err;
335
336 err = xe_guc_id_mgr_init(&guc->submission_state.idm, num_ids);
337 if (err)
338 return err;
339
340 err = alloc_submit_wq(guc);
341 if (err)
342 return err;
343
344 gt->exec_queue_ops = &guc_exec_queue_ops;
345
346 xa_init(&guc->submission_state.exec_queue_lookup);
347
348 primelockdep(guc);
349
350 return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
351}
352
353static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
354{
355 int i;
356
357 lockdep_assert_held(&guc->submission_state.lock);
358
359 for (i = 0; i < xa_count; ++i)
360 xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i);
361
362 xe_guc_id_mgr_release_locked(&guc->submission_state.idm,
363 q->guc->id, q->width);
364}
365
366static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
367{
368 int ret;
369 void *ptr;
370 int i;
371
372 /*
373 * Must use GFP_NOWAIT as this lock is in the dma fence signalling path,
374 * worse case user gets -ENOMEM on engine create and has to try again.
375 *
376 * FIXME: Have caller pre-alloc or post-alloc /w GFP_KERNEL to prevent
377 * failure.
378 */
379 lockdep_assert_held(&guc->submission_state.lock);
380
381 ret = xe_guc_id_mgr_reserve_locked(&guc->submission_state.idm,
382 q->width);
383 if (ret < 0)
384 return ret;
385
386 q->guc->id = ret;
387
388 for (i = 0; i < q->width; ++i) {
389 ptr = xa_store(&guc->submission_state.exec_queue_lookup,
390 q->guc->id + i, q, GFP_NOWAIT);
391 if (IS_ERR(ptr)) {
392 ret = PTR_ERR(ptr);
393 goto err_release;
394 }
395 }
396
397 return 0;
398
399err_release:
400 __release_guc_id(guc, q, i);
401
402 return ret;
403}
404
405static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
406{
407 mutex_lock(&guc->submission_state.lock);
408 __release_guc_id(guc, q, q->width);
409 mutex_unlock(&guc->submission_state.lock);
410}
411
412struct exec_queue_policy {
413 u32 count;
414 struct guc_update_exec_queue_policy h2g;
415};
416
417static u32 __guc_exec_queue_policy_action_size(struct exec_queue_policy *policy)
418{
419 size_t bytes = sizeof(policy->h2g.header) +
420 (sizeof(policy->h2g.klv[0]) * policy->count);
421
422 return bytes / sizeof(u32);
423}
424
425static void __guc_exec_queue_policy_start_klv(struct exec_queue_policy *policy,
426 u16 guc_id)
427{
428 policy->h2g.header.action =
429 XE_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES;
430 policy->h2g.header.guc_id = guc_id;
431 policy->count = 0;
432}
433
434#define MAKE_EXEC_QUEUE_POLICY_ADD(func, id) \
435static void __guc_exec_queue_policy_add_##func(struct exec_queue_policy *policy, \
436 u32 data) \
437{ \
438 XE_WARN_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
439\
440 policy->h2g.klv[policy->count].kl = \
441 FIELD_PREP(GUC_KLV_0_KEY, \
442 GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
443 FIELD_PREP(GUC_KLV_0_LEN, 1); \
444 policy->h2g.klv[policy->count].value = data; \
445 policy->count++; \
446}
447
448MAKE_EXEC_QUEUE_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
449MAKE_EXEC_QUEUE_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
450MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
451#undef MAKE_EXEC_QUEUE_POLICY_ADD
452
453static const int xe_exec_queue_prio_to_guc[] = {
454 [XE_EXEC_QUEUE_PRIORITY_LOW] = GUC_CLIENT_PRIORITY_NORMAL,
455 [XE_EXEC_QUEUE_PRIORITY_NORMAL] = GUC_CLIENT_PRIORITY_KMD_NORMAL,
456 [XE_EXEC_QUEUE_PRIORITY_HIGH] = GUC_CLIENT_PRIORITY_HIGH,
457 [XE_EXEC_QUEUE_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
458};
459
460static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
461{
462 struct exec_queue_policy policy;
463 struct xe_device *xe = guc_to_xe(guc);
464 enum xe_exec_queue_priority prio = q->sched_props.priority;
465 u32 timeslice_us = q->sched_props.timeslice_us;
466 u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
467
468 xe_assert(xe, exec_queue_registered(q));
469
470 __guc_exec_queue_policy_start_klv(&policy, q->guc->id);
471 __guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
472 __guc_exec_queue_policy_add_execution_quantum(&policy, timeslice_us);
473 __guc_exec_queue_policy_add_preemption_timeout(&policy, preempt_timeout_us);
474
475 xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
476 __guc_exec_queue_policy_action_size(&policy), 0, 0);
477}
478
479static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q)
480{
481 struct exec_queue_policy policy;
482
483 __guc_exec_queue_policy_start_klv(&policy, q->guc->id);
484 __guc_exec_queue_policy_add_preemption_timeout(&policy, 1);
485
486 xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
487 __guc_exec_queue_policy_action_size(&policy), 0, 0);
488}
489
490#define parallel_read(xe_, map_, field_) \
491 xe_map_rd_field(xe_, &map_, 0, struct guc_submit_parallel_scratch, \
492 field_)
493#define parallel_write(xe_, map_, field_, val_) \
494 xe_map_wr_field(xe_, &map_, 0, struct guc_submit_parallel_scratch, \
495 field_, val_)
496
497static void __register_mlrc_exec_queue(struct xe_guc *guc,
498 struct xe_exec_queue *q,
499 struct guc_ctxt_registration_info *info)
500{
501#define MAX_MLRC_REG_SIZE (13 + XE_HW_ENGINE_MAX_INSTANCE * 2)
502 struct xe_device *xe = guc_to_xe(guc);
503 u32 action[MAX_MLRC_REG_SIZE];
504 int len = 0;
505 int i;
506
507 xe_assert(xe, xe_exec_queue_is_parallel(q));
508
509 action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
510 action[len++] = info->flags;
511 action[len++] = info->context_idx;
512 action[len++] = info->engine_class;
513 action[len++] = info->engine_submit_mask;
514 action[len++] = info->wq_desc_lo;
515 action[len++] = info->wq_desc_hi;
516 action[len++] = info->wq_base_lo;
517 action[len++] = info->wq_base_hi;
518 action[len++] = info->wq_size;
519 action[len++] = q->width;
520 action[len++] = info->hwlrca_lo;
521 action[len++] = info->hwlrca_hi;
522
523 for (i = 1; i < q->width; ++i) {
524 struct xe_lrc *lrc = q->lrc[i];
525
526 action[len++] = lower_32_bits(xe_lrc_descriptor(lrc));
527 action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
528 }
529
530 xe_assert(xe, len <= MAX_MLRC_REG_SIZE);
531#undef MAX_MLRC_REG_SIZE
532
533 xe_guc_ct_send(&guc->ct, action, len, 0, 0);
534}
535
536static void __register_exec_queue(struct xe_guc *guc,
537 struct guc_ctxt_registration_info *info)
538{
539 u32 action[] = {
540 XE_GUC_ACTION_REGISTER_CONTEXT,
541 info->flags,
542 info->context_idx,
543 info->engine_class,
544 info->engine_submit_mask,
545 info->wq_desc_lo,
546 info->wq_desc_hi,
547 info->wq_base_lo,
548 info->wq_base_hi,
549 info->wq_size,
550 info->hwlrca_lo,
551 info->hwlrca_hi,
552 };
553
554 xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
555}
556
557static void register_exec_queue(struct xe_exec_queue *q)
558{
559 struct xe_guc *guc = exec_queue_to_guc(q);
560 struct xe_device *xe = guc_to_xe(guc);
561 struct xe_lrc *lrc = q->lrc[0];
562 struct guc_ctxt_registration_info info;
563
564 xe_assert(xe, !exec_queue_registered(q));
565
566 memset(&info, 0, sizeof(info));
567 info.context_idx = q->guc->id;
568 info.engine_class = xe_engine_class_to_guc_class(q->class);
569 info.engine_submit_mask = q->logical_mask;
570 info.hwlrca_lo = lower_32_bits(xe_lrc_descriptor(lrc));
571 info.hwlrca_hi = upper_32_bits(xe_lrc_descriptor(lrc));
572 info.flags = CONTEXT_REGISTRATION_FLAG_KMD;
573
574 if (xe_exec_queue_is_parallel(q)) {
575 u64 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc);
576 struct iosys_map map = xe_lrc_parallel_map(lrc);
577
578 info.wq_desc_lo = lower_32_bits(ggtt_addr +
579 offsetof(struct guc_submit_parallel_scratch, wq_desc));
580 info.wq_desc_hi = upper_32_bits(ggtt_addr +
581 offsetof(struct guc_submit_parallel_scratch, wq_desc));
582 info.wq_base_lo = lower_32_bits(ggtt_addr +
583 offsetof(struct guc_submit_parallel_scratch, wq[0]));
584 info.wq_base_hi = upper_32_bits(ggtt_addr +
585 offsetof(struct guc_submit_parallel_scratch, wq[0]));
586 info.wq_size = WQ_SIZE;
587
588 q->guc->wqi_head = 0;
589 q->guc->wqi_tail = 0;
590 xe_map_memset(xe, &map, 0, 0, PARALLEL_SCRATCH_SIZE - WQ_SIZE);
591 parallel_write(xe, map, wq_desc.wq_status, WQ_STATUS_ACTIVE);
592 }
593
594 /*
595 * We must keep a reference for LR engines if engine is registered with
596 * the GuC as jobs signal immediately and can't destroy an engine if the
597 * GuC has a reference to it.
598 */
599 if (xe_exec_queue_is_lr(q))
600 xe_exec_queue_get(q);
601
602 set_exec_queue_registered(q);
603 trace_xe_exec_queue_register(q);
604 if (xe_exec_queue_is_parallel(q))
605 __register_mlrc_exec_queue(guc, q, &info);
606 else
607 __register_exec_queue(guc, &info);
608 init_policies(guc, q);
609}
610
611static u32 wq_space_until_wrap(struct xe_exec_queue *q)
612{
613 return (WQ_SIZE - q->guc->wqi_tail);
614}
615
616static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
617{
618 struct xe_guc *guc = exec_queue_to_guc(q);
619 struct xe_device *xe = guc_to_xe(guc);
620 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
621 unsigned int sleep_period_ms = 1;
622
623#define AVAILABLE_SPACE \
624 CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
625 if (wqi_size > AVAILABLE_SPACE) {
626try_again:
627 q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
628 if (wqi_size > AVAILABLE_SPACE) {
629 if (sleep_period_ms == 1024) {
630 xe_gt_reset_async(q->gt);
631 return -ENODEV;
632 }
633
634 msleep(sleep_period_ms);
635 sleep_period_ms <<= 1;
636 goto try_again;
637 }
638 }
639#undef AVAILABLE_SPACE
640
641 return 0;
642}
643
644static int wq_noop_append(struct xe_exec_queue *q)
645{
646 struct xe_guc *guc = exec_queue_to_guc(q);
647 struct xe_device *xe = guc_to_xe(guc);
648 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
649 u32 len_dw = wq_space_until_wrap(q) / sizeof(u32) - 1;
650
651 if (wq_wait_for_space(q, wq_space_until_wrap(q)))
652 return -ENODEV;
653
654 xe_assert(xe, FIELD_FIT(WQ_LEN_MASK, len_dw));
655
656 parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)],
657 FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
658 FIELD_PREP(WQ_LEN_MASK, len_dw));
659 q->guc->wqi_tail = 0;
660
661 return 0;
662}
663
664static void wq_item_append(struct xe_exec_queue *q)
665{
666 struct xe_guc *guc = exec_queue_to_guc(q);
667 struct xe_device *xe = guc_to_xe(guc);
668 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
669#define WQ_HEADER_SIZE 4 /* Includes 1 LRC address too */
670 u32 wqi[XE_HW_ENGINE_MAX_INSTANCE + (WQ_HEADER_SIZE - 1)];
671 u32 wqi_size = (q->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32);
672 u32 len_dw = (wqi_size / sizeof(u32)) - 1;
673 int i = 0, j;
674
675 if (wqi_size > wq_space_until_wrap(q)) {
676 if (wq_noop_append(q))
677 return;
678 }
679 if (wq_wait_for_space(q, wqi_size))
680 return;
681
682 wqi[i++] = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
683 FIELD_PREP(WQ_LEN_MASK, len_dw);
684 wqi[i++] = xe_lrc_descriptor(q->lrc[0]);
685 wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) |
686 FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc[0]->ring.tail / sizeof(u64));
687 wqi[i++] = 0;
688 for (j = 1; j < q->width; ++j) {
689 struct xe_lrc *lrc = q->lrc[j];
690
691 wqi[i++] = lrc->ring.tail / sizeof(u64);
692 }
693
694 xe_assert(xe, i == wqi_size / sizeof(u32));
695
696 iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch,
697 wq[q->guc->wqi_tail / sizeof(u32)]));
698 xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size);
699 q->guc->wqi_tail += wqi_size;
700 xe_assert(xe, q->guc->wqi_tail <= WQ_SIZE);
701
702 xe_device_wmb(xe);
703
704 map = xe_lrc_parallel_map(q->lrc[0]);
705 parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail);
706}
707
708#define RESUME_PENDING ~0x0ull
709static void submit_exec_queue(struct xe_exec_queue *q)
710{
711 struct xe_guc *guc = exec_queue_to_guc(q);
712 struct xe_device *xe = guc_to_xe(guc);
713 struct xe_lrc *lrc = q->lrc[0];
714 u32 action[3];
715 u32 g2h_len = 0;
716 u32 num_g2h = 0;
717 int len = 0;
718 bool extra_submit = false;
719
720 xe_assert(xe, exec_queue_registered(q));
721
722 if (xe_exec_queue_is_parallel(q))
723 wq_item_append(q);
724 else
725 xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
726
727 if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q))
728 return;
729
730 if (!exec_queue_enabled(q) && !exec_queue_suspended(q)) {
731 action[len++] = XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
732 action[len++] = q->guc->id;
733 action[len++] = GUC_CONTEXT_ENABLE;
734 g2h_len = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
735 num_g2h = 1;
736 if (xe_exec_queue_is_parallel(q))
737 extra_submit = true;
738
739 q->guc->resume_time = RESUME_PENDING;
740 set_exec_queue_pending_enable(q);
741 set_exec_queue_enabled(q);
742 trace_xe_exec_queue_scheduling_enable(q);
743 } else {
744 action[len++] = XE_GUC_ACTION_SCHED_CONTEXT;
745 action[len++] = q->guc->id;
746 trace_xe_exec_queue_submit(q);
747 }
748
749 xe_guc_ct_send(&guc->ct, action, len, g2h_len, num_g2h);
750
751 if (extra_submit) {
752 len = 0;
753 action[len++] = XE_GUC_ACTION_SCHED_CONTEXT;
754 action[len++] = q->guc->id;
755 trace_xe_exec_queue_submit(q);
756
757 xe_guc_ct_send(&guc->ct, action, len, 0, 0);
758 }
759}
760
761static struct dma_fence *
762guc_exec_queue_run_job(struct drm_sched_job *drm_job)
763{
764 struct xe_sched_job *job = to_xe_sched_job(drm_job);
765 struct xe_exec_queue *q = job->q;
766 struct xe_guc *guc = exec_queue_to_guc(q);
767 struct xe_device *xe = guc_to_xe(guc);
768 bool lr = xe_exec_queue_is_lr(q);
769
770 xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
771 exec_queue_banned(q) || exec_queue_suspended(q));
772
773 trace_xe_sched_job_run(job);
774
775 if (!exec_queue_killed_or_banned_or_wedged(q) && !xe_sched_job_is_error(job)) {
776 if (!exec_queue_registered(q))
777 register_exec_queue(q);
778 if (!lr) /* LR jobs are emitted in the exec IOCTL */
779 q->ring_ops->emit_job(job);
780 submit_exec_queue(q);
781 }
782
783 if (lr) {
784 xe_sched_job_set_error(job, -EOPNOTSUPP);
785 return NULL;
786 } else if (test_and_set_bit(JOB_FLAG_SUBMIT, &job->fence->flags)) {
787 return job->fence;
788 } else {
789 return dma_fence_get(job->fence);
790 }
791}
792
793static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
794{
795 struct xe_sched_job *job = to_xe_sched_job(drm_job);
796
797 xe_exec_queue_update_run_ticks(job->q);
798
799 trace_xe_sched_job_free(job);
800 xe_sched_job_put(job);
801}
802
803static int guc_read_stopped(struct xe_guc *guc)
804{
805 return atomic_read(&guc->submission_state.stopped);
806}
807
808#define MAKE_SCHED_CONTEXT_ACTION(q, enable_disable) \
809 u32 action[] = { \
810 XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET, \
811 q->guc->id, \
812 GUC_CONTEXT_##enable_disable, \
813 }
814
815static void disable_scheduling_deregister(struct xe_guc *guc,
816 struct xe_exec_queue *q)
817{
818 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
819 struct xe_device *xe = guc_to_xe(guc);
820 int ret;
821
822 set_min_preemption_timeout(guc, q);
823 smp_rmb();
824 ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) ||
825 guc_read_stopped(guc), HZ * 5);
826 if (!ret) {
827 struct xe_gpu_scheduler *sched = &q->guc->sched;
828
829 drm_warn(&xe->drm, "Pending enable failed to respond");
830 xe_sched_submission_start(sched);
831 xe_gt_reset_async(q->gt);
832 xe_sched_tdr_queue_imm(sched);
833 return;
834 }
835
836 clear_exec_queue_enabled(q);
837 set_exec_queue_pending_disable(q);
838 set_exec_queue_destroyed(q);
839 trace_xe_exec_queue_scheduling_disable(q);
840
841 /*
842 * Reserve space for both G2H here as the 2nd G2H is sent from a G2H
843 * handler and we are not allowed to reserved G2H space in handlers.
844 */
845 xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
846 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET +
847 G2H_LEN_DW_DEREGISTER_CONTEXT, 2);
848}
849
850static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
851{
852 struct xe_guc *guc = exec_queue_to_guc(q);
853 struct xe_device *xe = guc_to_xe(guc);
854
855 /** to wakeup xe_wait_user_fence ioctl if exec queue is reset */
856 wake_up_all(&xe->ufence_wq);
857
858 if (xe_exec_queue_is_lr(q))
859 queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr);
860 else
861 xe_sched_tdr_queue_imm(&q->guc->sched);
862}
863
864/**
865 * xe_guc_submit_wedge() - Wedge GuC submission
866 * @guc: the GuC object
867 *
868 * Save exec queue's registered with GuC state by taking a ref to each queue.
869 * Register a DRMM handler to drop refs upon driver unload.
870 */
871void xe_guc_submit_wedge(struct xe_guc *guc)
872{
873 struct xe_device *xe = guc_to_xe(guc);
874 struct xe_exec_queue *q;
875 unsigned long index;
876 int err;
877
878 xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
879
880 err = drmm_add_action_or_reset(&guc_to_xe(guc)->drm,
881 guc_submit_wedged_fini, guc);
882 if (err) {
883 drm_err(&xe->drm, "Failed to register xe_guc_submit clean-up on wedged.mode=2. Although device is wedged.\n");
884 return;
885 }
886
887 mutex_lock(&guc->submission_state.lock);
888 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
889 if (xe_exec_queue_get_unless_zero(q))
890 set_exec_queue_wedged(q);
891 mutex_unlock(&guc->submission_state.lock);
892}
893
894static bool guc_submit_hint_wedged(struct xe_guc *guc)
895{
896 struct xe_device *xe = guc_to_xe(guc);
897
898 if (xe->wedged.mode != 2)
899 return false;
900
901 if (xe_device_wedged(xe))
902 return true;
903
904 xe_device_declare_wedged(xe);
905
906 return true;
907}
908
909static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
910{
911 struct xe_guc_exec_queue *ge =
912 container_of(w, struct xe_guc_exec_queue, lr_tdr);
913 struct xe_exec_queue *q = ge->q;
914 struct xe_guc *guc = exec_queue_to_guc(q);
915 struct xe_device *xe = guc_to_xe(guc);
916 struct xe_gpu_scheduler *sched = &ge->sched;
917 bool wedged;
918
919 xe_assert(xe, xe_exec_queue_is_lr(q));
920 trace_xe_exec_queue_lr_cleanup(q);
921
922 wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
923
924 /* Kill the run_job / process_msg entry points */
925 xe_sched_submission_stop(sched);
926
927 /*
928 * Engine state now mostly stable, disable scheduling / deregister if
929 * needed. This cleanup routine might be called multiple times, where
930 * the actual async engine deregister drops the final engine ref.
931 * Calling disable_scheduling_deregister will mark the engine as
932 * destroyed and fire off the CT requests to disable scheduling /
933 * deregister, which we only want to do once. We also don't want to mark
934 * the engine as pending_disable again as this may race with the
935 * xe_guc_deregister_done_handler() which treats it as an unexpected
936 * state.
937 */
938 if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) {
939 struct xe_guc *guc = exec_queue_to_guc(q);
940 int ret;
941
942 set_exec_queue_banned(q);
943 disable_scheduling_deregister(guc, q);
944
945 /*
946 * Must wait for scheduling to be disabled before signalling
947 * any fences, if GT broken the GT reset code should signal us.
948 */
949 ret = wait_event_timeout(guc->ct.wq,
950 !exec_queue_pending_disable(q) ||
951 guc_read_stopped(guc), HZ * 5);
952 if (!ret) {
953 drm_warn(&xe->drm, "Schedule disable failed to respond");
954 xe_sched_submission_start(sched);
955 xe_gt_reset_async(q->gt);
956 return;
957 }
958 }
959
960 xe_sched_submission_start(sched);
961}
962
963#define ADJUST_FIVE_PERCENT(__t) mul_u64_u32_div(__t, 105, 100)
964
965static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
966{
967 struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q));
968 u32 ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]);
969 u32 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
970 u32 timeout_ms = q->sched_props.job_timeout_ms;
971 u32 diff;
972 u64 running_time_ms;
973
974 /*
975 * Counter wraps at ~223s at the usual 19.2MHz, be paranoid catch
976 * possible overflows with a high timeout.
977 */
978 xe_gt_assert(gt, timeout_ms < 100 * MSEC_PER_SEC);
979
980 if (ctx_timestamp < ctx_job_timestamp)
981 diff = ctx_timestamp + U32_MAX - ctx_job_timestamp;
982 else
983 diff = ctx_timestamp - ctx_job_timestamp;
984
985 /*
986 * Ensure timeout is within 5% to account for an GuC scheduling latency
987 */
988 running_time_ms =
989 ADJUST_FIVE_PERCENT(xe_gt_clock_interval_to_ms(gt, diff));
990
991 xe_gt_dbg(gt,
992 "Check job timeout: seqno=%u, lrc_seqno=%u, guc_id=%d, running_time_ms=%llu, timeout_ms=%u, diff=0x%08x",
993 xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
994 q->guc->id, running_time_ms, timeout_ms, diff);
995
996 return running_time_ms >= timeout_ms;
997}
998
999static void enable_scheduling(struct xe_exec_queue *q)
1000{
1001 MAKE_SCHED_CONTEXT_ACTION(q, ENABLE);
1002 struct xe_guc *guc = exec_queue_to_guc(q);
1003 int ret;
1004
1005 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
1006 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
1007 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
1008 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
1009
1010 set_exec_queue_pending_enable(q);
1011 set_exec_queue_enabled(q);
1012 trace_xe_exec_queue_scheduling_enable(q);
1013
1014 xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
1015 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
1016
1017 ret = wait_event_timeout(guc->ct.wq,
1018 !exec_queue_pending_enable(q) ||
1019 guc_read_stopped(guc), HZ * 5);
1020 if (!ret || guc_read_stopped(guc)) {
1021 xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to respond");
1022 set_exec_queue_banned(q);
1023 xe_gt_reset_async(q->gt);
1024 xe_sched_tdr_queue_imm(&q->guc->sched);
1025 }
1026}
1027
1028static void disable_scheduling(struct xe_exec_queue *q, bool immediate)
1029{
1030 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
1031 struct xe_guc *guc = exec_queue_to_guc(q);
1032
1033 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
1034 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
1035 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
1036
1037 if (immediate)
1038 set_min_preemption_timeout(guc, q);
1039 clear_exec_queue_enabled(q);
1040 set_exec_queue_pending_disable(q);
1041 trace_xe_exec_queue_scheduling_disable(q);
1042
1043 xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
1044 G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
1045}
1046
1047static void __deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
1048{
1049 u32 action[] = {
1050 XE_GUC_ACTION_DEREGISTER_CONTEXT,
1051 q->guc->id,
1052 };
1053
1054 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
1055 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
1056 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
1057 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
1058
1059 set_exec_queue_destroyed(q);
1060 trace_xe_exec_queue_deregister(q);
1061
1062 xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
1063 G2H_LEN_DW_DEREGISTER_CONTEXT, 1);
1064}
1065
1066static enum drm_gpu_sched_stat
1067guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
1068{
1069 struct xe_sched_job *job = to_xe_sched_job(drm_job);
1070 struct xe_sched_job *tmp_job;
1071 struct xe_exec_queue *q = job->q;
1072 struct xe_gpu_scheduler *sched = &q->guc->sched;
1073 struct xe_guc *guc = exec_queue_to_guc(q);
1074 int err = -ETIME;
1075 int i = 0;
1076 bool wedged, skip_timeout_check;
1077
1078 /*
1079 * TDR has fired before free job worker. Common if exec queue
1080 * immediately closed after last fence signaled.
1081 */
1082 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
1083 guc_exec_queue_free_job(drm_job);
1084
1085 return DRM_GPU_SCHED_STAT_NOMINAL;
1086 }
1087
1088 /* Kill the run_job entry point */
1089 xe_sched_submission_stop(sched);
1090
1091 /* Must check all state after stopping scheduler */
1092 skip_timeout_check = exec_queue_reset(q) ||
1093 exec_queue_killed_or_banned_or_wedged(q) ||
1094 exec_queue_destroyed(q);
1095
1096 /* Job hasn't started, can't be timed out */
1097 if (!skip_timeout_check && !xe_sched_job_started(job))
1098 goto rearm;
1099
1100 /*
1101 * XXX: Sampling timeout doesn't work in wedged mode as we have to
1102 * modify scheduling state to read timestamp. We could read the
1103 * timestamp from a register to accumulate current running time but this
1104 * doesn't work for SRIOV. For now assuming timeouts in wedged mode are
1105 * genuine timeouts.
1106 */
1107 wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
1108
1109 /* Engine state now stable, disable scheduling to check timestamp */
1110 if (!wedged && exec_queue_registered(q)) {
1111 int ret;
1112
1113 if (exec_queue_reset(q))
1114 err = -EIO;
1115
1116 if (!exec_queue_destroyed(q)) {
1117 /*
1118 * Wait for any pending G2H to flush out before
1119 * modifying state
1120 */
1121 ret = wait_event_timeout(guc->ct.wq,
1122 !exec_queue_pending_enable(q) ||
1123 guc_read_stopped(guc), HZ * 5);
1124 if (!ret || guc_read_stopped(guc))
1125 goto trigger_reset;
1126
1127 /*
1128 * Flag communicates to G2H handler that schedule
1129 * disable originated from a timeout check. The G2H then
1130 * avoid triggering cleanup or deregistering the exec
1131 * queue.
1132 */
1133 set_exec_queue_check_timeout(q);
1134 disable_scheduling(q, skip_timeout_check);
1135 }
1136
1137 /*
1138 * Must wait for scheduling to be disabled before signalling
1139 * any fences, if GT broken the GT reset code should signal us.
1140 *
1141 * FIXME: Tests can generate a ton of 0x6000 (IOMMU CAT fault
1142 * error) messages which can cause the schedule disable to get
1143 * lost. If this occurs, trigger a GT reset to recover.
1144 */
1145 smp_rmb();
1146 ret = wait_event_timeout(guc->ct.wq,
1147 !exec_queue_pending_disable(q) ||
1148 guc_read_stopped(guc), HZ * 5);
1149 if (!ret || guc_read_stopped(guc)) {
1150trigger_reset:
1151 if (!ret)
1152 xe_gt_warn(guc_to_gt(guc), "Schedule disable failed to respond");
1153 set_exec_queue_extra_ref(q);
1154 xe_exec_queue_get(q); /* GT reset owns this */
1155 set_exec_queue_banned(q);
1156 xe_gt_reset_async(q->gt);
1157 xe_sched_tdr_queue_imm(sched);
1158 goto rearm;
1159 }
1160 }
1161
1162 /*
1163 * Check if job is actually timed out, if so restart job execution and TDR
1164 */
1165 if (!wedged && !skip_timeout_check && !check_timeout(q, job) &&
1166 !exec_queue_reset(q) && exec_queue_registered(q)) {
1167 clear_exec_queue_check_timeout(q);
1168 goto sched_enable;
1169 }
1170
1171 xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx",
1172 xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
1173 q->guc->id, q->flags);
1174 trace_xe_sched_job_timedout(job);
1175
1176 if (!exec_queue_killed(q))
1177 xe_devcoredump(job);
1178
1179 /*
1180 * Kernel jobs should never fail, nor should VM jobs if they do
1181 * somethings has gone wrong and the GT needs a reset
1182 */
1183 xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL,
1184 "Kernel-submitted job timed out\n");
1185 xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q),
1186 "VM job timed out on non-killed execqueue\n");
1187 if (!wedged && (q->flags & EXEC_QUEUE_FLAG_KERNEL ||
1188 (q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)))) {
1189 if (!xe_sched_invalidate_job(job, 2)) {
1190 clear_exec_queue_check_timeout(q);
1191 xe_gt_reset_async(q->gt);
1192 goto rearm;
1193 }
1194 }
1195
1196 /* Finish cleaning up exec queue via deregister */
1197 set_exec_queue_banned(q);
1198 if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) {
1199 set_exec_queue_extra_ref(q);
1200 xe_exec_queue_get(q);
1201 __deregister_exec_queue(guc, q);
1202 }
1203
1204 /* Stop fence signaling */
1205 xe_hw_fence_irq_stop(q->fence_irq);
1206
1207 /*
1208 * Fence state now stable, stop / start scheduler which cleans up any
1209 * fences that are complete
1210 */
1211 xe_sched_add_pending_job(sched, job);
1212 xe_sched_submission_start(sched);
1213
1214 xe_guc_exec_queue_trigger_cleanup(q);
1215
1216 /* Mark all outstanding jobs as bad, thus completing them */
1217 spin_lock(&sched->base.job_list_lock);
1218 list_for_each_entry(tmp_job, &sched->base.pending_list, drm.list)
1219 xe_sched_job_set_error(tmp_job, !i++ ? err : -ECANCELED);
1220 spin_unlock(&sched->base.job_list_lock);
1221
1222 /* Start fence signaling */
1223 xe_hw_fence_irq_start(q->fence_irq);
1224
1225 return DRM_GPU_SCHED_STAT_NOMINAL;
1226
1227sched_enable:
1228 enable_scheduling(q);
1229rearm:
1230 /*
1231 * XXX: Ideally want to adjust timeout based on current exection time
1232 * but there is not currently an easy way to do in DRM scheduler. With
1233 * some thought, do this in a follow up.
1234 */
1235 xe_sched_add_pending_job(sched, job);
1236 xe_sched_submission_start(sched);
1237
1238 return DRM_GPU_SCHED_STAT_NOMINAL;
1239}
1240
1241static void __guc_exec_queue_fini_async(struct work_struct *w)
1242{
1243 struct xe_guc_exec_queue *ge =
1244 container_of(w, struct xe_guc_exec_queue, fini_async);
1245 struct xe_exec_queue *q = ge->q;
1246 struct xe_guc *guc = exec_queue_to_guc(q);
1247
1248 xe_pm_runtime_get(guc_to_xe(guc));
1249 trace_xe_exec_queue_destroy(q);
1250
1251 if (xe_exec_queue_is_lr(q))
1252 cancel_work_sync(&ge->lr_tdr);
1253 release_guc_id(guc, q);
1254 xe_sched_entity_fini(&ge->entity);
1255 xe_sched_fini(&ge->sched);
1256
1257 kfree(ge);
1258 xe_exec_queue_fini(q);
1259 xe_pm_runtime_put(guc_to_xe(guc));
1260}
1261
1262static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
1263{
1264 INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
1265
1266 /* We must block on kernel engines so slabs are empty on driver unload */
1267 if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
1268 __guc_exec_queue_fini_async(&q->guc->fini_async);
1269 else
1270 queue_work(system_wq, &q->guc->fini_async);
1271}
1272
1273static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
1274{
1275 /*
1276 * Might be done from within the GPU scheduler, need to do async as we
1277 * fini the scheduler when the engine is fini'd, the scheduler can't
1278 * complete fini within itself (circular dependency). Async resolves
1279 * this we and don't really care when everything is fini'd, just that it
1280 * is.
1281 */
1282 guc_exec_queue_fini_async(q);
1283}
1284
1285static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
1286{
1287 struct xe_exec_queue *q = msg->private_data;
1288 struct xe_guc *guc = exec_queue_to_guc(q);
1289 struct xe_device *xe = guc_to_xe(guc);
1290
1291 xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
1292 trace_xe_exec_queue_cleanup_entity(q);
1293
1294 if (exec_queue_registered(q))
1295 disable_scheduling_deregister(guc, q);
1296 else
1297 __guc_exec_queue_fini(guc, q);
1298}
1299
1300static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
1301{
1302 return !exec_queue_killed_or_banned_or_wedged(q) && exec_queue_registered(q);
1303}
1304
1305static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *msg)
1306{
1307 struct xe_exec_queue *q = msg->private_data;
1308 struct xe_guc *guc = exec_queue_to_guc(q);
1309
1310 if (guc_exec_queue_allowed_to_change_state(q))
1311 init_policies(guc, q);
1312 kfree(msg);
1313}
1314
1315static void suspend_fence_signal(struct xe_exec_queue *q)
1316{
1317 struct xe_guc *guc = exec_queue_to_guc(q);
1318 struct xe_device *xe = guc_to_xe(guc);
1319
1320 xe_assert(xe, exec_queue_suspended(q) || exec_queue_killed(q) ||
1321 guc_read_stopped(guc));
1322 xe_assert(xe, q->guc->suspend_pending);
1323
1324 q->guc->suspend_pending = false;
1325 smp_wmb();
1326 wake_up(&q->guc->suspend_wait);
1327}
1328
1329static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
1330{
1331 struct xe_exec_queue *q = msg->private_data;
1332 struct xe_guc *guc = exec_queue_to_guc(q);
1333
1334 if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
1335 exec_queue_enabled(q)) {
1336 wait_event(guc->ct.wq, q->guc->resume_time != RESUME_PENDING ||
1337 guc_read_stopped(guc));
1338
1339 if (!guc_read_stopped(guc)) {
1340 s64 since_resume_ms =
1341 ktime_ms_delta(ktime_get(),
1342 q->guc->resume_time);
1343 s64 wait_ms = q->vm->preempt.min_run_period_ms -
1344 since_resume_ms;
1345
1346 if (wait_ms > 0 && q->guc->resume_time)
1347 msleep(wait_ms);
1348
1349 set_exec_queue_suspended(q);
1350 disable_scheduling(q, false);
1351 }
1352 } else if (q->guc->suspend_pending) {
1353 set_exec_queue_suspended(q);
1354 suspend_fence_signal(q);
1355 }
1356}
1357
1358static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
1359{
1360 struct xe_exec_queue *q = msg->private_data;
1361
1362 if (guc_exec_queue_allowed_to_change_state(q)) {
1363 q->guc->resume_time = RESUME_PENDING;
1364 clear_exec_queue_suspended(q);
1365 enable_scheduling(q);
1366 } else {
1367 clear_exec_queue_suspended(q);
1368 }
1369}
1370
1371#define CLEANUP 1 /* Non-zero values to catch uninitialized msg */
1372#define SET_SCHED_PROPS 2
1373#define SUSPEND 3
1374#define RESUME 4
1375
1376static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
1377{
1378 trace_xe_sched_msg_recv(msg);
1379
1380 switch (msg->opcode) {
1381 case CLEANUP:
1382 __guc_exec_queue_process_msg_cleanup(msg);
1383 break;
1384 case SET_SCHED_PROPS:
1385 __guc_exec_queue_process_msg_set_sched_props(msg);
1386 break;
1387 case SUSPEND:
1388 __guc_exec_queue_process_msg_suspend(msg);
1389 break;
1390 case RESUME:
1391 __guc_exec_queue_process_msg_resume(msg);
1392 break;
1393 default:
1394 XE_WARN_ON("Unknown message type");
1395 }
1396}
1397
1398static const struct drm_sched_backend_ops drm_sched_ops = {
1399 .run_job = guc_exec_queue_run_job,
1400 .free_job = guc_exec_queue_free_job,
1401 .timedout_job = guc_exec_queue_timedout_job,
1402};
1403
1404static const struct xe_sched_backend_ops xe_sched_ops = {
1405 .process_msg = guc_exec_queue_process_msg,
1406};
1407
1408static int guc_exec_queue_init(struct xe_exec_queue *q)
1409{
1410 struct xe_gpu_scheduler *sched;
1411 struct xe_guc *guc = exec_queue_to_guc(q);
1412 struct xe_device *xe = guc_to_xe(guc);
1413 struct xe_guc_exec_queue *ge;
1414 long timeout;
1415 int err;
1416
1417 xe_assert(xe, xe_device_uc_enabled(guc_to_xe(guc)));
1418
1419 ge = kzalloc(sizeof(*ge), GFP_KERNEL);
1420 if (!ge)
1421 return -ENOMEM;
1422
1423 q->guc = ge;
1424 ge->q = q;
1425 init_waitqueue_head(&ge->suspend_wait);
1426
1427 timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
1428 msecs_to_jiffies(q->sched_props.job_timeout_ms);
1429 err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
1430 get_submit_wq(guc),
1431 q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
1432 timeout, guc_to_gt(guc)->ordered_wq, NULL,
1433 q->name, gt_to_xe(q->gt)->drm.dev);
1434 if (err)
1435 goto err_free;
1436
1437 sched = &ge->sched;
1438 err = xe_sched_entity_init(&ge->entity, sched);
1439 if (err)
1440 goto err_sched;
1441
1442 if (xe_exec_queue_is_lr(q))
1443 INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);
1444
1445 mutex_lock(&guc->submission_state.lock);
1446
1447 err = alloc_guc_id(guc, q);
1448 if (err)
1449 goto err_entity;
1450
1451 q->entity = &ge->entity;
1452
1453 if (guc_read_stopped(guc))
1454 xe_sched_stop(sched);
1455
1456 mutex_unlock(&guc->submission_state.lock);
1457
1458 xe_exec_queue_assign_name(q, q->guc->id);
1459
1460 trace_xe_exec_queue_create(q);
1461
1462 return 0;
1463
1464err_entity:
1465 mutex_unlock(&guc->submission_state.lock);
1466 xe_sched_entity_fini(&ge->entity);
1467err_sched:
1468 xe_sched_fini(&ge->sched);
1469err_free:
1470 kfree(ge);
1471
1472 return err;
1473}
1474
1475static void guc_exec_queue_kill(struct xe_exec_queue *q)
1476{
1477 trace_xe_exec_queue_kill(q);
1478 set_exec_queue_killed(q);
1479 xe_guc_exec_queue_trigger_cleanup(q);
1480}
1481
1482static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg,
1483 u32 opcode)
1484{
1485 INIT_LIST_HEAD(&msg->link);
1486 msg->opcode = opcode;
1487 msg->private_data = q;
1488
1489 trace_xe_sched_msg_add(msg);
1490 xe_sched_add_msg(&q->guc->sched, msg);
1491}
1492
1493#define STATIC_MSG_CLEANUP 0
1494#define STATIC_MSG_SUSPEND 1
1495#define STATIC_MSG_RESUME 2
1496static void guc_exec_queue_fini(struct xe_exec_queue *q)
1497{
1498 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
1499
1500 if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q))
1501 guc_exec_queue_add_msg(q, msg, CLEANUP);
1502 else
1503 __guc_exec_queue_fini(exec_queue_to_guc(q), q);
1504}
1505
1506static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
1507 enum xe_exec_queue_priority priority)
1508{
1509 struct xe_sched_msg *msg;
1510
1511 if (q->sched_props.priority == priority ||
1512 exec_queue_killed_or_banned_or_wedged(q))
1513 return 0;
1514
1515 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
1516 if (!msg)
1517 return -ENOMEM;
1518
1519 q->sched_props.priority = priority;
1520 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
1521
1522 return 0;
1523}
1524
1525static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
1526{
1527 struct xe_sched_msg *msg;
1528
1529 if (q->sched_props.timeslice_us == timeslice_us ||
1530 exec_queue_killed_or_banned_or_wedged(q))
1531 return 0;
1532
1533 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
1534 if (!msg)
1535 return -ENOMEM;
1536
1537 q->sched_props.timeslice_us = timeslice_us;
1538 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
1539
1540 return 0;
1541}
1542
1543static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
1544 u32 preempt_timeout_us)
1545{
1546 struct xe_sched_msg *msg;
1547
1548 if (q->sched_props.preempt_timeout_us == preempt_timeout_us ||
1549 exec_queue_killed_or_banned_or_wedged(q))
1550 return 0;
1551
1552 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
1553 if (!msg)
1554 return -ENOMEM;
1555
1556 q->sched_props.preempt_timeout_us = preempt_timeout_us;
1557 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
1558
1559 return 0;
1560}
1561
1562static int guc_exec_queue_suspend(struct xe_exec_queue *q)
1563{
1564 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
1565
1566 if (exec_queue_killed_or_banned_or_wedged(q) || q->guc->suspend_pending)
1567 return -EINVAL;
1568
1569 q->guc->suspend_pending = true;
1570 guc_exec_queue_add_msg(q, msg, SUSPEND);
1571
1572 return 0;
1573}
1574
1575static void guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
1576{
1577 struct xe_guc *guc = exec_queue_to_guc(q);
1578
1579 wait_event(q->guc->suspend_wait, !q->guc->suspend_pending ||
1580 guc_read_stopped(guc));
1581}
1582
1583static void guc_exec_queue_resume(struct xe_exec_queue *q)
1584{
1585 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
1586 struct xe_guc *guc = exec_queue_to_guc(q);
1587 struct xe_device *xe = guc_to_xe(guc);
1588
1589 xe_assert(xe, !q->guc->suspend_pending);
1590
1591 guc_exec_queue_add_msg(q, msg, RESUME);
1592}
1593
1594static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
1595{
1596 return exec_queue_reset(q) || exec_queue_killed_or_banned_or_wedged(q);
1597}
1598
1599/*
1600 * All of these functions are an abstraction layer which other parts of XE can
1601 * use to trap into the GuC backend. All of these functions, aside from init,
1602 * really shouldn't do much other than trap into the DRM scheduler which
1603 * synchronizes these operations.
1604 */
1605static const struct xe_exec_queue_ops guc_exec_queue_ops = {
1606 .init = guc_exec_queue_init,
1607 .kill = guc_exec_queue_kill,
1608 .fini = guc_exec_queue_fini,
1609 .set_priority = guc_exec_queue_set_priority,
1610 .set_timeslice = guc_exec_queue_set_timeslice,
1611 .set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
1612 .suspend = guc_exec_queue_suspend,
1613 .suspend_wait = guc_exec_queue_suspend_wait,
1614 .resume = guc_exec_queue_resume,
1615 .reset_status = guc_exec_queue_reset_status,
1616};
1617
1618static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
1619{
1620 struct xe_gpu_scheduler *sched = &q->guc->sched;
1621
1622 /* Stop scheduling + flush any DRM scheduler operations */
1623 xe_sched_submission_stop(sched);
1624
1625 /* Clean up lost G2H + reset engine state */
1626 if (exec_queue_registered(q)) {
1627 if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
1628 xe_exec_queue_put(q);
1629 else if (exec_queue_destroyed(q))
1630 __guc_exec_queue_fini(guc, q);
1631 }
1632 if (q->guc->suspend_pending) {
1633 set_exec_queue_suspended(q);
1634 suspend_fence_signal(q);
1635 }
1636 atomic_and(EXEC_QUEUE_STATE_WEDGED | EXEC_QUEUE_STATE_BANNED |
1637 EXEC_QUEUE_STATE_KILLED | EXEC_QUEUE_STATE_DESTROYED |
1638 EXEC_QUEUE_STATE_SUSPENDED,
1639 &q->guc->state);
1640 q->guc->resume_time = 0;
1641 trace_xe_exec_queue_stop(q);
1642
1643 /*
1644 * Ban any engine (aside from kernel and engines used for VM ops) with a
1645 * started but not complete job or if a job has gone through a GT reset
1646 * more than twice.
1647 */
1648 if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
1649 struct xe_sched_job *job = xe_sched_first_pending_job(sched);
1650 bool ban = false;
1651
1652 if (job) {
1653 if ((xe_sched_job_started(job) &&
1654 !xe_sched_job_completed(job)) ||
1655 xe_sched_invalidate_job(job, 2)) {
1656 trace_xe_sched_job_ban(job);
1657 ban = true;
1658 }
1659 } else if (xe_exec_queue_is_lr(q) &&
1660 (xe_lrc_ring_head(q->lrc[0]) != xe_lrc_ring_tail(q->lrc[0]))) {
1661 ban = true;
1662 }
1663
1664 if (ban) {
1665 set_exec_queue_banned(q);
1666 xe_guc_exec_queue_trigger_cleanup(q);
1667 }
1668 }
1669}
1670
1671int xe_guc_submit_reset_prepare(struct xe_guc *guc)
1672{
1673 int ret;
1674
1675 /*
1676 * Using an atomic here rather than submission_state.lock as this
1677 * function can be called while holding the CT lock (engine reset
1678 * failure). submission_state.lock needs the CT lock to resubmit jobs.
1679 * Atomic is not ideal, but it works to prevent against concurrent reset
1680 * and releasing any TDRs waiting on guc->submission_state.stopped.
1681 */
1682 ret = atomic_fetch_or(1, &guc->submission_state.stopped);
1683 smp_wmb();
1684 wake_up_all(&guc->ct.wq);
1685
1686 return ret;
1687}
1688
1689void xe_guc_submit_reset_wait(struct xe_guc *guc)
1690{
1691 wait_event(guc->ct.wq, xe_device_wedged(guc_to_xe(guc)) ||
1692 !guc_read_stopped(guc));
1693}
1694
1695void xe_guc_submit_stop(struct xe_guc *guc)
1696{
1697 struct xe_exec_queue *q;
1698 unsigned long index;
1699 struct xe_device *xe = guc_to_xe(guc);
1700
1701 xe_assert(xe, guc_read_stopped(guc) == 1);
1702
1703 mutex_lock(&guc->submission_state.lock);
1704
1705 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
1706 guc_exec_queue_stop(guc, q);
1707
1708 mutex_unlock(&guc->submission_state.lock);
1709
1710 /*
1711 * No one can enter the backend at this point, aside from new engine
1712 * creation which is protected by guc->submission_state.lock.
1713 */
1714
1715}
1716
1717static void guc_exec_queue_start(struct xe_exec_queue *q)
1718{
1719 struct xe_gpu_scheduler *sched = &q->guc->sched;
1720
1721 if (!exec_queue_killed_or_banned_or_wedged(q)) {
1722 int i;
1723
1724 trace_xe_exec_queue_resubmit(q);
1725 for (i = 0; i < q->width; ++i)
1726 xe_lrc_set_ring_head(q->lrc[i], q->lrc[i]->ring.tail);
1727 xe_sched_resubmit_jobs(sched);
1728 }
1729
1730 xe_sched_submission_start(sched);
1731}
1732
1733int xe_guc_submit_start(struct xe_guc *guc)
1734{
1735 struct xe_exec_queue *q;
1736 unsigned long index;
1737 struct xe_device *xe = guc_to_xe(guc);
1738
1739 xe_assert(xe, guc_read_stopped(guc) == 1);
1740
1741 mutex_lock(&guc->submission_state.lock);
1742 atomic_dec(&guc->submission_state.stopped);
1743 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
1744 guc_exec_queue_start(q);
1745 mutex_unlock(&guc->submission_state.lock);
1746
1747 wake_up_all(&guc->ct.wq);
1748
1749 return 0;
1750}
1751
1752static struct xe_exec_queue *
1753g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
1754{
1755 struct xe_device *xe = guc_to_xe(guc);
1756 struct xe_exec_queue *q;
1757
1758 if (unlikely(guc_id >= GUC_ID_MAX)) {
1759 drm_err(&xe->drm, "Invalid guc_id %u", guc_id);
1760 return NULL;
1761 }
1762
1763 q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
1764 if (unlikely(!q)) {
1765 drm_err(&xe->drm, "Not engine present for guc_id %u", guc_id);
1766 return NULL;
1767 }
1768
1769 xe_assert(xe, guc_id >= q->guc->id);
1770 xe_assert(xe, guc_id < (q->guc->id + q->width));
1771
1772 return q;
1773}
1774
1775static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
1776{
1777 u32 action[] = {
1778 XE_GUC_ACTION_DEREGISTER_CONTEXT,
1779 q->guc->id,
1780 };
1781
1782 xe_gt_assert(guc_to_gt(guc), exec_queue_destroyed(q));
1783 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
1784 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
1785 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
1786
1787 trace_xe_exec_queue_deregister(q);
1788
1789 xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action));
1790}
1791
1792static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q,
1793 u32 runnable_state)
1794{
1795 trace_xe_exec_queue_scheduling_done(q);
1796
1797 if (runnable_state == 1) {
1798 xe_gt_assert(guc_to_gt(guc), exec_queue_pending_enable(q));
1799
1800 q->guc->resume_time = ktime_get();
1801 clear_exec_queue_pending_enable(q);
1802 smp_wmb();
1803 wake_up_all(&guc->ct.wq);
1804 } else {
1805 bool check_timeout = exec_queue_check_timeout(q);
1806
1807 xe_gt_assert(guc_to_gt(guc), runnable_state == 0);
1808 xe_gt_assert(guc_to_gt(guc), exec_queue_pending_disable(q));
1809
1810 clear_exec_queue_pending_disable(q);
1811 if (q->guc->suspend_pending) {
1812 suspend_fence_signal(q);
1813 } else {
1814 if (exec_queue_banned(q) || check_timeout) {
1815 smp_wmb();
1816 wake_up_all(&guc->ct.wq);
1817 }
1818 if (!check_timeout)
1819 deregister_exec_queue(guc, q);
1820 }
1821 }
1822}
1823
1824int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
1825{
1826 struct xe_device *xe = guc_to_xe(guc);
1827 struct xe_exec_queue *q;
1828 u32 guc_id = msg[0];
1829 u32 runnable_state = msg[1];
1830
1831 if (unlikely(len < 2)) {
1832 drm_err(&xe->drm, "Invalid length %u", len);
1833 return -EPROTO;
1834 }
1835
1836 q = g2h_exec_queue_lookup(guc, guc_id);
1837 if (unlikely(!q))
1838 return -EPROTO;
1839
1840 if (unlikely(!exec_queue_pending_enable(q) &&
1841 !exec_queue_pending_disable(q))) {
1842 xe_gt_err(guc_to_gt(guc),
1843 "SCHED_DONE: Unexpected engine state 0x%04x, guc_id=%d, runnable_state=%u",
1844 atomic_read(&q->guc->state), q->guc->id,
1845 runnable_state);
1846 return -EPROTO;
1847 }
1848
1849 handle_sched_done(guc, q, runnable_state);
1850
1851 return 0;
1852}
1853
1854static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
1855{
1856 trace_xe_exec_queue_deregister_done(q);
1857
1858 clear_exec_queue_registered(q);
1859
1860 if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
1861 xe_exec_queue_put(q);
1862 else
1863 __guc_exec_queue_fini(guc, q);
1864}
1865
1866int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
1867{
1868 struct xe_device *xe = guc_to_xe(guc);
1869 struct xe_exec_queue *q;
1870 u32 guc_id = msg[0];
1871
1872 if (unlikely(len < 1)) {
1873 drm_err(&xe->drm, "Invalid length %u", len);
1874 return -EPROTO;
1875 }
1876
1877 q = g2h_exec_queue_lookup(guc, guc_id);
1878 if (unlikely(!q))
1879 return -EPROTO;
1880
1881 if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) ||
1882 exec_queue_pending_enable(q) || exec_queue_enabled(q)) {
1883 xe_gt_err(guc_to_gt(guc),
1884 "DEREGISTER_DONE: Unexpected engine state 0x%04x, guc_id=%d",
1885 atomic_read(&q->guc->state), q->guc->id);
1886 return -EPROTO;
1887 }
1888
1889 handle_deregister_done(guc, q);
1890
1891 return 0;
1892}
1893
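/**
 * xe_guc_exec_queue_reset_handler - Handle an engine-reset G2H notification
 * @guc: GuC object
 * @msg: G2H message payload: (guc_id)
 * @len: length of @msg in dwords, expected to be at least 1
 *
 * Marks the exec queue as reset and, unless it is already banned or being
 * checked for a timeout, triggers cleanup of its jobs.
 *
 * Returns: 0 on success, -EPROTO on a malformed message or unknown guc_id.
 */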
1894int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
1895{
1896 struct xe_gt *gt = guc_to_gt(guc);
1897 struct xe_device *xe = guc_to_xe(guc);
1898 struct xe_exec_queue *q;
1899 u32 guc_id = msg[0];
1900
1901 if (unlikely(len < 1)) {
1902 drm_err(&xe->drm, "Invalid length %u", len);
1903 return -EPROTO;
1904 }
1905
1906 q = g2h_exec_queue_lookup(guc, guc_id);
1907 if (unlikely(!q))
1908 return -EPROTO;
1909
1910 xe_gt_info(gt, "Engine reset: engine_class=%s, logical_mask: 0x%x, guc_id=%d",
1911 xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
1912
1913 /* FIXME: Do error capture, most likely async */
1914
1915 trace_xe_exec_queue_reset(q);
1916
1917	/*
1918	 * A banned engine is a NOP at this point (came from
1919	 * guc_exec_queue_timedout_job). Otherwise, kick the drm scheduler to
1920	 * cancel jobs by setting the job timeout to the minimum value, which
1921	 * in turn kicks guc_exec_queue_timedout_job.
1922	 */
1923 set_exec_queue_reset(q);
1924 if (!exec_queue_banned(q) && !exec_queue_check_timeout(q))
1925 xe_guc_exec_queue_trigger_cleanup(q);
1926
1927 return 0;
1928}
1929
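/**
 * xe_guc_exec_queue_memory_cat_error_handler - Handle a memory catastrophic
 * error G2H notification
 * @guc: GuC object
 * @msg: G2H message payload: (guc_id)
 * @len: length of @msg in dwords, expected to be at least 1
 *
 * Treated the same way as an engine reset.
 *
 * Returns: 0 on success, -EPROTO on a malformed message or unknown guc_id.
 */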
1930int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
1931 u32 len)
1932{
1933 struct xe_gt *gt = guc_to_gt(guc);
1934 struct xe_device *xe = guc_to_xe(guc);
1935 struct xe_exec_queue *q;
1936 u32 guc_id = msg[0];
1937
1938 if (unlikely(len < 1)) {
1939 drm_err(&xe->drm, "Invalid length %u", len);
1940 return -EPROTO;
1941 }
1942
1943 q = g2h_exec_queue_lookup(guc, guc_id);
1944 if (unlikely(!q))
1945 return -EPROTO;
1946
1947 xe_gt_dbg(gt, "Engine memory cat error: engine_class=%s, logical_mask: 0x%x, guc_id=%d",
1948 xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
1949
1950 trace_xe_exec_queue_memory_cat_error(q);
1951
1952 /* Treat the same as engine reset */
1953 set_exec_queue_reset(q);
1954 if (!exec_queue_banned(q) && !exec_queue_check_timeout(q))
1955 xe_guc_exec_queue_trigger_cleanup(q);
1956
1957 return 0;
1958}
1959
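/**
 * xe_guc_exec_queue_reset_failure_handler - Handle an engine-reset failure
 * G2H notification
 * @guc: GuC object
 * @msg: G2H message payload: (guc_class, instance, reason)
 * @len: length of @msg in dwords, expected to be exactly 3
 *
 * A failed engine reset leaves the hardware in an unknown state, so escalate
 * to an asynchronous full GT reset.
 *
 * Returns: 0 on success, -EPROTO on a malformed message.
 */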
1960int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
1961{
1962 struct xe_device *xe = guc_to_xe(guc);
1963 u8 guc_class, instance;
1964 u32 reason;
1965
1966 if (unlikely(len != 3)) {
1967 drm_err(&xe->drm, "Invalid length %u", len);
1968 return -EPROTO;
1969 }
1970
1971 guc_class = msg[0];
1972 instance = msg[1];
1973 reason = msg[2];
1974
1975 /* Unexpected failure of a hardware feature, log an actual error */
1976 drm_err(&xe->drm, "GuC engine reset request failed on %d:%d because 0x%08X",
1977 guc_class, instance, reason);
1978
1979 xe_gt_reset_async(guc_to_gt(guc));
1980
1981 return 0;
1982}
1983
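/*
 * Copy the parallel submission work queue descriptor, and any work queue
 * items between head and tail, out of the LRC-backed parallel map into the
 * snapshot.
 */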
1984static void
1985guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q,
1986 struct xe_guc_submit_exec_queue_snapshot *snapshot)
1987{
1988 struct xe_guc *guc = exec_queue_to_guc(q);
1989 struct xe_device *xe = guc_to_xe(guc);
1990 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
1991 int i;
1992
1993 snapshot->guc.wqi_head = q->guc->wqi_head;
1994 snapshot->guc.wqi_tail = q->guc->wqi_tail;
1995 snapshot->parallel.wq_desc.head = parallel_read(xe, map, wq_desc.head);
1996 snapshot->parallel.wq_desc.tail = parallel_read(xe, map, wq_desc.tail);
1997 snapshot->parallel.wq_desc.status = parallel_read(xe, map,
1998 wq_desc.wq_status);
1999
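	/*
	 * Walk the work queue ring from head to tail, one dword at a time,
	 * wrapping at WQ_SIZE.
	 */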
2000 if (snapshot->parallel.wq_desc.head !=
2001 snapshot->parallel.wq_desc.tail) {
2002 for (i = snapshot->parallel.wq_desc.head;
2003 i != snapshot->parallel.wq_desc.tail;
2004 i = (i + sizeof(u32)) % WQ_SIZE)
2005 snapshot->parallel.wq[i / sizeof(u32)] =
2006 parallel_read(xe, map, wq[i / sizeof(u32)]);
2007 }
2008}
2009
2010static void
2011guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
2012 struct drm_printer *p)
2013{
2014 int i;
2015
2016	drm_printf(p, "\tWQ head: %u (internal), %u (memory)\n",
2017		   snapshot->guc.wqi_head, snapshot->parallel.wq_desc.head);
2018	drm_printf(p, "\tWQ tail: %u (internal), %u (memory)\n",
2019		   snapshot->guc.wqi_tail, snapshot->parallel.wq_desc.tail);
2020 drm_printf(p, "\tWQ status: %u\n", snapshot->parallel.wq_desc.status);
2021
2022 if (snapshot->parallel.wq_desc.head !=
2023 snapshot->parallel.wq_desc.tail) {
2024 for (i = snapshot->parallel.wq_desc.head;
2025 i != snapshot->parallel.wq_desc.tail;
2026 i = (i + sizeof(u32)) % WQ_SIZE)
2027 drm_printf(p, "\tWQ[%zu]: 0x%08x\n", i / sizeof(u32),
2028 snapshot->parallel.wq[i / sizeof(u32)]);
2029 }
2030}
2031
2032/**
2033 * xe_guc_exec_queue_snapshot_capture - Take a quick snapshot of the GuC Engine.
2034 * @q: faulty exec queue
2035 *
2036 * This can be printed out at a later stage, e.g. during dev_coredump
2037 * analysis.
2038 *
2039 * Returns: a GuC Submit Engine snapshot object that must be freed by the
2040 * caller, using xe_guc_exec_queue_snapshot_free().
2041 */
2042struct xe_guc_submit_exec_queue_snapshot *
2043xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
2044{
2045 struct xe_gpu_scheduler *sched = &q->guc->sched;
2046 struct xe_guc_submit_exec_queue_snapshot *snapshot;
2047 int i;
2048
2049 snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
2050
2051 if (!snapshot)
2052 return NULL;
2053
2054 snapshot->guc.id = q->guc->id;
2055 memcpy(&snapshot->name, &q->name, sizeof(snapshot->name));
2056 snapshot->class = q->class;
2057 snapshot->logical_mask = q->logical_mask;
2058 snapshot->width = q->width;
2059 snapshot->refcount = kref_read(&q->refcount);
2060 snapshot->sched_timeout = sched->base.timeout;
2061 snapshot->sched_props.timeslice_us = q->sched_props.timeslice_us;
2062 snapshot->sched_props.preempt_timeout_us =
2063 q->sched_props.preempt_timeout_us;
2064
2065 snapshot->lrc = kmalloc_array(q->width, sizeof(struct xe_lrc_snapshot *),
2066 GFP_ATOMIC);
2067
2068 if (snapshot->lrc) {
2069 for (i = 0; i < q->width; ++i) {
2070 struct xe_lrc *lrc = q->lrc[i];
2071
2072 snapshot->lrc[i] = xe_lrc_snapshot_capture(lrc);
2073 }
2074 }
2075
2076 snapshot->schedule_state = atomic_read(&q->guc->state);
2077 snapshot->exec_queue_flags = q->flags;
2078
2079 snapshot->parallel_execution = xe_exec_queue_is_parallel(q);
2080 if (snapshot->parallel_execution)
2081 guc_exec_queue_wq_snapshot_capture(q, snapshot);
2082
2083 spin_lock(&sched->base.job_list_lock);
2084 snapshot->pending_list_size = list_count_nodes(&sched->base.pending_list);
2085 snapshot->pending_list = kmalloc_array(snapshot->pending_list_size,
2086 sizeof(struct pending_list_snapshot),
2087 GFP_ATOMIC);
2088
2089 if (snapshot->pending_list) {
2090 struct xe_sched_job *job_iter;
2091
2092 i = 0;
2093 list_for_each_entry(job_iter, &sched->base.pending_list, drm.list) {
2094 snapshot->pending_list[i].seqno =
2095 xe_sched_job_seqno(job_iter);
2096 snapshot->pending_list[i].fence =
2097 dma_fence_is_signaled(job_iter->fence) ? 1 : 0;
2098 snapshot->pending_list[i].finished =
2099 dma_fence_is_signaled(&job_iter->drm.s_fence->finished)
2100 ? 1 : 0;
2101 i++;
2102 }
2103 }
2104
2105 spin_unlock(&sched->base.job_list_lock);
2106
2107 return snapshot;
2108}
2109
2110/**
2111 * xe_guc_exec_queue_snapshot_capture_delayed - Take the delayed part of a GuC Engine snapshot.
2112 * @snapshot: Previously captured snapshot of job.
2113 *
2114 * This captures data that requires taking locks, so it cannot be done in the signaling path.
2115 */
2116void
2117xe_guc_exec_queue_snapshot_capture_delayed(struct xe_guc_submit_exec_queue_snapshot *snapshot)
2118{
2119 int i;
2120
2121 if (!snapshot || !snapshot->lrc)
2122 return;
2123
2124 for (i = 0; i < snapshot->width; ++i)
2125 xe_lrc_snapshot_capture_delayed(snapshot->lrc[i]);
2126}
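
/*
 * Typical two-phase use, given an exec queue @q and a struct drm_printer *p
 * (a minimal sketch, not lifted verbatim from the devcoredump path): take the
 * atomic-safe part of the snapshot first, do the lock-taking part later from
 * process context, then print and free:
 *
 *	struct xe_guc_submit_exec_queue_snapshot *s;
 *
 *	s = xe_guc_exec_queue_snapshot_capture(q);
 *	if (!s)
 *		return;
 *	xe_guc_exec_queue_snapshot_capture_delayed(s);
 *	xe_guc_exec_queue_snapshot_print(s, p);
 *	xe_guc_exec_queue_snapshot_free(s);
 */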
2127
2128/**
2129 * xe_guc_exec_queue_snapshot_print - Print out a given GuC Engine snapshot.
2130 * @snapshot: GuC Submit Engine snapshot object.
2131 * @p: drm_printer where it will be printed out.
2132 *
2133 * This function prints out a given GuC Submit Engine snapshot object.
2134 */
2135void
2136xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
2137 struct drm_printer *p)
2138{
2139 int i;
2140
2141 if (!snapshot)
2142 return;
2143
2144 drm_printf(p, "\nGuC ID: %d\n", snapshot->guc.id);
2145 drm_printf(p, "\tName: %s\n", snapshot->name);
2146 drm_printf(p, "\tClass: %d\n", snapshot->class);
2147 drm_printf(p, "\tLogical mask: 0x%x\n", snapshot->logical_mask);
2148 drm_printf(p, "\tWidth: %d\n", snapshot->width);
2149 drm_printf(p, "\tRef: %d\n", snapshot->refcount);
2150 drm_printf(p, "\tTimeout: %ld (ms)\n", snapshot->sched_timeout);
2151 drm_printf(p, "\tTimeslice: %u (us)\n",
2152 snapshot->sched_props.timeslice_us);
2153 drm_printf(p, "\tPreempt timeout: %u (us)\n",
2154 snapshot->sched_props.preempt_timeout_us);
2155
2156 for (i = 0; snapshot->lrc && i < snapshot->width; ++i)
2157 xe_lrc_snapshot_print(snapshot->lrc[i], p);
2158
2159 drm_printf(p, "\tSchedule State: 0x%x\n", snapshot->schedule_state);
2160 drm_printf(p, "\tFlags: 0x%lx\n", snapshot->exec_queue_flags);
2161
2162 if (snapshot->parallel_execution)
2163 guc_exec_queue_wq_snapshot_print(snapshot, p);
2164
2165 for (i = 0; snapshot->pending_list && i < snapshot->pending_list_size;
2166 i++)
2167 drm_printf(p, "\tJob: seqno=%d, fence=%d, finished=%d\n",
2168 snapshot->pending_list[i].seqno,
2169 snapshot->pending_list[i].fence,
2170 snapshot->pending_list[i].finished);
2171}
2172
2173/**
2174 * xe_guc_exec_queue_snapshot_free - Free all allocated objects for a given
2175 * snapshot.
2176 * @snapshot: GuC Submit Engine snapshot object.
2177 *
2178 * This function frees all the memory that was allocated at capture
2179 * time.
2180 */
2181void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot)
2182{
2183 int i;
2184
2185 if (!snapshot)
2186 return;
2187
2188 if (snapshot->lrc) {
2189 for (i = 0; i < snapshot->width; i++)
2190 xe_lrc_snapshot_free(snapshot->lrc[i]);
2191 kfree(snapshot->lrc);
2192 }
2193 kfree(snapshot->pending_list);
2194 kfree(snapshot);
2195}
2196
2197static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
2198{
2199 struct xe_guc_submit_exec_queue_snapshot *snapshot;
2200
2201 snapshot = xe_guc_exec_queue_snapshot_capture(q);
2202 xe_guc_exec_queue_snapshot_print(snapshot, p);
2203 xe_guc_exec_queue_snapshot_free(snapshot);
2204}
2205
2206/**
2207 * xe_guc_submit_print - Print snapshots of all GuC exec queues.
2208 * @guc: GuC.
2209 * @p: drm_printer where it will be printed out.
2210 *
2211 * This function captures and prints snapshots of **all** GuC Engines.
2212 */
2213void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p)
2214{
2215 struct xe_exec_queue *q;
2216 unsigned long index;
2217
2218 if (!xe_device_uc_enabled(guc_to_xe(guc)))
2219 return;
2220
2221 mutex_lock(&guc->submission_state.lock);
2222 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
2223 guc_exec_queue_print(q, p);
2224 mutex_unlock(&guc->submission_state.lock);
2225}