// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_submit.h"

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
#include <linux/dma-fence-array.h>
#include <linux/math64.h>

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_klvs_abi.h"
#include "regs/xe_lrc_layout.h"
#include "xe_assert.h"
#include "xe_devcoredump.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_gpu_scheduler.h"
#include "xe_gt.h"
#include "xe_gt_clock.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_id_mgr.h"
#include "xe_guc_submit_types.h"
#include "xe_hw_engine.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_mocs.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
#include "xe_trace.h"
#include "xe_vm.h"

static struct xe_guc *
exec_queue_to_guc(struct xe_exec_queue *q)
{
	return &q->gt->uc.guc;
}

/*
 * Helpers for engine state, using an atomic as some of the bits can transition
 * at the same time (e.g. a suspend can be happening at the same time as a
 * schedule engine done message being processed).
 */
#define EXEC_QUEUE_STATE_REGISTERED		(1 << 0)
#define EXEC_QUEUE_STATE_ENABLED		(1 << 1)
#define EXEC_QUEUE_STATE_PENDING_ENABLE		(1 << 2)
#define EXEC_QUEUE_STATE_PENDING_DISABLE	(1 << 3)
#define EXEC_QUEUE_STATE_DESTROYED		(1 << 4)
#define EXEC_QUEUE_STATE_SUSPENDED		(1 << 5)
#define EXEC_QUEUE_STATE_RESET			(1 << 6)
#define EXEC_QUEUE_STATE_KILLED			(1 << 7)
#define EXEC_QUEUE_STATE_WEDGED			(1 << 8)
#define EXEC_QUEUE_STATE_BANNED			(1 << 9)
#define EXEC_QUEUE_STATE_CHECK_TIMEOUT		(1 << 10)
#define EXEC_QUEUE_STATE_EXTRA_REF		(1 << 11)

static bool exec_queue_registered(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED;
}

static void set_exec_queue_registered(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
}

static void clear_exec_queue_registered(struct xe_exec_queue *q)
{
	atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
}

static bool exec_queue_enabled(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED;
}

static void set_exec_queue_enabled(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
}

static void clear_exec_queue_enabled(struct xe_exec_queue *q)
{
	atomic_and(~EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
}

static bool exec_queue_pending_enable(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE;
}

static void set_exec_queue_pending_enable(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
}

static void clear_exec_queue_pending_enable(struct xe_exec_queue *q)
{
	atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
}

static bool exec_queue_pending_disable(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_DISABLE;
}

static void set_exec_queue_pending_disable(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
}

static void clear_exec_queue_pending_disable(struct xe_exec_queue *q)
{
	atomic_and(~EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
}

static bool exec_queue_destroyed(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_DESTROYED;
}

static void set_exec_queue_destroyed(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
}

static bool exec_queue_banned(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED;
}

static void set_exec_queue_banned(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_BANNED, &q->guc->state);
}

static bool exec_queue_suspended(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_SUSPENDED;
}

static void set_exec_queue_suspended(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
}

static void clear_exec_queue_suspended(struct xe_exec_queue *q)
{
	atomic_and(~EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
}

static bool exec_queue_reset(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_RESET;
}

static void set_exec_queue_reset(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_RESET, &q->guc->state);
}

static bool exec_queue_killed(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_KILLED;
}

static void set_exec_queue_killed(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_KILLED, &q->guc->state);
}

static bool exec_queue_wedged(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_WEDGED;
}

static void set_exec_queue_wedged(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_WEDGED, &q->guc->state);
}

static bool exec_queue_check_timeout(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_CHECK_TIMEOUT;
}

static void set_exec_queue_check_timeout(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state);
}

static void clear_exec_queue_check_timeout(struct xe_exec_queue *q)
{
	atomic_and(~EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state);
}

static bool exec_queue_extra_ref(struct xe_exec_queue *q)
{
	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_EXTRA_REF;
}

static void set_exec_queue_extra_ref(struct xe_exec_queue *q)
{
	atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state);
}

static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
{
	return (atomic_read(&q->guc->state) &
		(EXEC_QUEUE_STATE_WEDGED | EXEC_QUEUE_STATE_KILLED |
		 EXEC_QUEUE_STATE_BANNED));
}

static void guc_submit_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc *guc = arg;

	xa_destroy(&guc->submission_state.exec_queue_lookup);
}

static void guc_submit_wedged_fini(void *arg)
{
	struct xe_guc *guc = arg;
	struct xe_exec_queue *q;
	unsigned long index;

	mutex_lock(&guc->submission_state.lock);
	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
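		/*
		 * Drop the submission lock around the final put: on a wedged
		 * queue xe_exec_queue_put() finalizes synchronously and
		 * retakes this lock in release_guc_id().
		 */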
		if (exec_queue_wedged(q)) {
			mutex_unlock(&guc->submission_state.lock);
			xe_exec_queue_put(q);
			mutex_lock(&guc->submission_state.lock);
		}
	}
	mutex_unlock(&guc->submission_state.lock);
}

static const struct xe_exec_queue_ops guc_exec_queue_ops;

static void primelockdep(struct xe_guc *guc)
{
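	/*
	 * Record a fictitious fs_reclaim -> submission_state.lock ordering so
	 * lockdep flags any later attempt to take this lock from a memory
	 * reclaim context.
	 */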
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);

	mutex_lock(&guc->submission_state.lock);
	mutex_unlock(&guc->submission_state.lock);

	fs_reclaim_release(GFP_KERNEL);
}

/**
 * xe_guc_submit_init() - Initialize GuC submission.
 * @guc: the &xe_guc to initialize
 * @num_ids: number of GuC context IDs to use
 *
 * The bare-metal or PF driver can pass ~0 as @num_ids to indicate that all
 * GuC context IDs supported by the GuC firmware should be used for submission.
 *
 * Only VF drivers will have to provide an explicit number of GuC context IDs
 * that they can use for submission.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	int err;

	err = drmm_mutex_init(&xe->drm, &guc->submission_state.lock);
	if (err)
		return err;

	err = xe_guc_id_mgr_init(&guc->submission_state.idm, num_ids);
	if (err)
		return err;

	gt->exec_queue_ops = &guc_exec_queue_ops;

	xa_init(&guc->submission_state.exec_queue_lookup);

	init_waitqueue_head(&guc->submission_state.fini_wq);

	primelockdep(guc);

	return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
}

static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
{
	int i;

	lockdep_assert_held(&guc->submission_state.lock);

	for (i = 0; i < xa_count; ++i)
		xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i);

	xe_guc_id_mgr_release_locked(&guc->submission_state.idm,
				     q->guc->id, q->width);

	if (xa_empty(&guc->submission_state.exec_queue_lookup))
		wake_up(&guc->submission_state.fini_wq);
}

static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
{
	int ret;
	int i;

	/*
	 * Must use GFP_NOWAIT as this lock is in the dma fence signalling path,
	 * worst case the user gets -ENOMEM on engine create and has to try
	 * again.
	 *
	 * FIXME: Have caller pre-alloc or post-alloc with GFP_KERNEL to
	 * prevent failure.
	 */
	lockdep_assert_held(&guc->submission_state.lock);

	ret = xe_guc_id_mgr_reserve_locked(&guc->submission_state.idm,
					   q->width);
	if (ret < 0)
		return ret;

	q->guc->id = ret;

	for (i = 0; i < q->width; ++i) {
		ret = xa_err(xa_store(&guc->submission_state.exec_queue_lookup,
				      q->guc->id + i, q, GFP_NOWAIT));
		if (ret)
			goto err_release;
	}

	return 0;

err_release:
	__release_guc_id(guc, q, i);

	return ret;
}

static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
{
	mutex_lock(&guc->submission_state.lock);
	__release_guc_id(guc, q, q->width);
	mutex_unlock(&guc->submission_state.lock);
}

struct exec_queue_policy {
	u32 count;
	struct guc_update_exec_queue_policy h2g;
};

static u32 __guc_exec_queue_policy_action_size(struct exec_queue_policy *policy)
{
	size_t bytes = sizeof(policy->h2g.header) +
		       (sizeof(policy->h2g.klv[0]) * policy->count);

	return bytes / sizeof(u32);
}

static void __guc_exec_queue_policy_start_klv(struct exec_queue_policy *policy,
					      u16 guc_id)
{
	policy->h2g.header.action =
		XE_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES;
	policy->h2g.header.guc_id = guc_id;
	policy->count = 0;
}

#define MAKE_EXEC_QUEUE_POLICY_ADD(func, id) \
static void __guc_exec_queue_policy_add_##func(struct exec_queue_policy *policy, \
					       u32 data) \
{ \
	XE_WARN_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
\
	policy->h2g.klv[policy->count].kl = \
		FIELD_PREP(GUC_KLV_0_KEY, \
			   GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
		FIELD_PREP(GUC_KLV_0_LEN, 1); \
	policy->h2g.klv[policy->count].value = data; \
	policy->count++; \
}

MAKE_EXEC_QUEUE_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
MAKE_EXEC_QUEUE_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
#undef MAKE_EXEC_QUEUE_POLICY_ADD

static const int xe_exec_queue_prio_to_guc[] = {
	[XE_EXEC_QUEUE_PRIORITY_LOW] = GUC_CLIENT_PRIORITY_NORMAL,
	[XE_EXEC_QUEUE_PRIORITY_NORMAL] = GUC_CLIENT_PRIORITY_KMD_NORMAL,
	[XE_EXEC_QUEUE_PRIORITY_HIGH] = GUC_CLIENT_PRIORITY_HIGH,
	[XE_EXEC_QUEUE_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
};

static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
{
	struct exec_queue_policy policy;
	struct xe_device *xe = guc_to_xe(guc);
	enum xe_exec_queue_priority prio = q->sched_props.priority;
	u32 timeslice_us = q->sched_props.timeslice_us;
	u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;

	xe_assert(xe, exec_queue_registered(q));

	__guc_exec_queue_policy_start_klv(&policy, q->guc->id);
	__guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
	__guc_exec_queue_policy_add_execution_quantum(&policy, timeslice_us);
	__guc_exec_queue_policy_add_preemption_timeout(&policy, preempt_timeout_us);

	xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
		       __guc_exec_queue_policy_action_size(&policy), 0, 0);
}

static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q)
{
	struct exec_queue_policy policy;

	__guc_exec_queue_policy_start_klv(&policy, q->guc->id);
	__guc_exec_queue_policy_add_preemption_timeout(&policy, 1);

	xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
		       __guc_exec_queue_policy_action_size(&policy), 0, 0);
}

#define parallel_read(xe_, map_, field_) \
	xe_map_rd_field(xe_, &map_, 0, struct guc_submit_parallel_scratch, \
			field_)
#define parallel_write(xe_, map_, field_, val_) \
	xe_map_wr_field(xe_, &map_, 0, struct guc_submit_parallel_scratch, \
			field_, val_)

static void __register_mlrc_exec_queue(struct xe_guc *guc,
				       struct xe_exec_queue *q,
				       struct guc_ctxt_registration_info *info)
{
#define MAX_MLRC_REG_SIZE	(13 + XE_HW_ENGINE_MAX_INSTANCE * 2)
	struct xe_device *xe = guc_to_xe(guc);
	u32 action[MAX_MLRC_REG_SIZE];
	int len = 0;
	int i;

	xe_assert(xe, xe_exec_queue_is_parallel(q));

	action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
	action[len++] = info->flags;
	action[len++] = info->context_idx;
	action[len++] = info->engine_class;
	action[len++] = info->engine_submit_mask;
	action[len++] = info->wq_desc_lo;
	action[len++] = info->wq_desc_hi;
	action[len++] = info->wq_base_lo;
	action[len++] = info->wq_base_hi;
	action[len++] = info->wq_size;
	action[len++] = q->width;
	action[len++] = info->hwlrca_lo;
	action[len++] = info->hwlrca_hi;

	for (i = 1; i < q->width; ++i) {
		struct xe_lrc *lrc = q->lrc[i];

		action[len++] = lower_32_bits(xe_lrc_descriptor(lrc));
		action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
	}

	xe_assert(xe, len <= MAX_MLRC_REG_SIZE);
#undef MAX_MLRC_REG_SIZE

	xe_guc_ct_send(&guc->ct, action, len, 0, 0);
}

static void __register_exec_queue(struct xe_guc *guc,
				  struct guc_ctxt_registration_info *info)
{
	u32 action[] = {
		XE_GUC_ACTION_REGISTER_CONTEXT,
		info->flags,
		info->context_idx,
		info->engine_class,
		info->engine_submit_mask,
		info->wq_desc_lo,
		info->wq_desc_hi,
		info->wq_base_lo,
		info->wq_base_hi,
		info->wq_size,
		info->hwlrca_lo,
		info->hwlrca_hi,
	};

	xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}

static void register_exec_queue(struct xe_exec_queue *q)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_lrc *lrc = q->lrc[0];
	struct guc_ctxt_registration_info info;

	xe_assert(xe, !exec_queue_registered(q));

	memset(&info, 0, sizeof(info));
	info.context_idx = q->guc->id;
	info.engine_class = xe_engine_class_to_guc_class(q->class);
	info.engine_submit_mask = q->logical_mask;
	info.hwlrca_lo = lower_32_bits(xe_lrc_descriptor(lrc));
	info.hwlrca_hi = upper_32_bits(xe_lrc_descriptor(lrc));
	info.flags = CONTEXT_REGISTRATION_FLAG_KMD;

	if (xe_exec_queue_is_parallel(q)) {
		u64 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc);
		struct iosys_map map = xe_lrc_parallel_map(lrc);

		info.wq_desc_lo = lower_32_bits(ggtt_addr +
			offsetof(struct guc_submit_parallel_scratch, wq_desc));
		info.wq_desc_hi = upper_32_bits(ggtt_addr +
			offsetof(struct guc_submit_parallel_scratch, wq_desc));
		info.wq_base_lo = lower_32_bits(ggtt_addr +
			offsetof(struct guc_submit_parallel_scratch, wq[0]));
		info.wq_base_hi = upper_32_bits(ggtt_addr +
			offsetof(struct guc_submit_parallel_scratch, wq[0]));
		info.wq_size = WQ_SIZE;

		q->guc->wqi_head = 0;
		q->guc->wqi_tail = 0;
		xe_map_memset(xe, &map, 0, 0, PARALLEL_SCRATCH_SIZE - WQ_SIZE);
		parallel_write(xe, map, wq_desc.wq_status, WQ_STATUS_ACTIVE);
	}

	/*
	 * We must keep a reference for LR engines while they are registered
	 * with the GuC, as jobs signal immediately and we can't destroy an
	 * engine the GuC still holds a reference to.
	 */
	if (xe_exec_queue_is_lr(q))
		xe_exec_queue_get(q);

	set_exec_queue_registered(q);
	trace_xe_exec_queue_register(q);
	if (xe_exec_queue_is_parallel(q))
		__register_mlrc_exec_queue(guc, q, &info);
	else
		__register_exec_queue(guc, &info);
	init_policies(guc, q);
}

static u32 wq_space_until_wrap(struct xe_exec_queue *q)
{
	return (WQ_SIZE - q->guc->wqi_tail);
}

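/*
 * Wait for free space in the parallel work queue, re-reading the head that the
 * GuC publishes in the WQ descriptor and backing off exponentially from 1 ms.
 * If space never shows up by the time the period reaches ~1 s, assume the GuC
 * is hung and trigger an async GT reset.
 */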
static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
	unsigned int sleep_period_ms = 1;

#define AVAILABLE_SPACE \
	CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
	if (wqi_size > AVAILABLE_SPACE) {
try_again:
		q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
		if (wqi_size > AVAILABLE_SPACE) {
			if (sleep_period_ms == 1024) {
				xe_gt_reset_async(q->gt);
				return -ENODEV;
			}

			msleep(sleep_period_ms);
			sleep_period_ms <<= 1;
			goto try_again;
		}
	}
#undef AVAILABLE_SPACE

	return 0;
}

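/*
 * Pad the remainder of the work queue with a NOOP item so the next real item
 * can be written contiguously from the start of the buffer.
 */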
static int wq_noop_append(struct xe_exec_queue *q)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
	u32 len_dw = wq_space_until_wrap(q) / sizeof(u32) - 1;

	if (wq_wait_for_space(q, wq_space_until_wrap(q)))
		return -ENODEV;

	xe_assert(xe, FIELD_FIT(WQ_LEN_MASK, len_dw));

	parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)],
		       FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
		       FIELD_PREP(WQ_LEN_MASK, len_dw));
	q->guc->wqi_tail = 0;

	return 0;
}

static void wq_item_append(struct xe_exec_queue *q)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
#define WQ_HEADER_SIZE	4	/* Includes 1 LRC address too */
	u32 wqi[XE_HW_ENGINE_MAX_INSTANCE + (WQ_HEADER_SIZE - 1)];
	u32 wqi_size = (q->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32);
	u32 len_dw = (wqi_size / sizeof(u32)) - 1;
	int i = 0, j;

	if (wqi_size > wq_space_until_wrap(q)) {
		if (wq_noop_append(q))
			return;
	}
	if (wq_wait_for_space(q, wqi_size))
		return;

	wqi[i++] = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
		FIELD_PREP(WQ_LEN_MASK, len_dw);
	wqi[i++] = xe_lrc_descriptor(q->lrc[0]);
	wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) |
		FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc[0]->ring.tail / sizeof(u64));
	wqi[i++] = 0;
	for (j = 1; j < q->width; ++j) {
		struct xe_lrc *lrc = q->lrc[j];

		wqi[i++] = lrc->ring.tail / sizeof(u64);
	}

	xe_assert(xe, i == wqi_size / sizeof(u32));

	iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch,
				      wq[q->guc->wqi_tail / sizeof(u32)]));
	xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size);
	q->guc->wqi_tail += wqi_size;
	xe_assert(xe, q->guc->wqi_tail <= WQ_SIZE);

	xe_device_wmb(xe);

	map = xe_lrc_parallel_map(q->lrc[0]);
	parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail);
}

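/* Sentinel for resume_time while a schedule enable is still in flight */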
#define RESUME_PENDING	~0x0ull
static void submit_exec_queue(struct xe_exec_queue *q)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_lrc *lrc = q->lrc[0];
	u32 action[3];
	u32 g2h_len = 0;
	u32 num_g2h = 0;
	int len = 0;
	bool extra_submit = false;

	xe_assert(xe, exec_queue_registered(q));

	if (xe_exec_queue_is_parallel(q))
		wq_item_append(q);
	else
		xe_lrc_set_ring_tail(lrc, lrc->ring.tail);

	if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q))
		return;

	if (!exec_queue_enabled(q) && !exec_queue_suspended(q)) {
		action[len++] = XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
		action[len++] = q->guc->id;
		action[len++] = GUC_CONTEXT_ENABLE;
		g2h_len = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
		num_g2h = 1;
		if (xe_exec_queue_is_parallel(q))
			extra_submit = true;

		q->guc->resume_time = RESUME_PENDING;
		set_exec_queue_pending_enable(q);
		set_exec_queue_enabled(q);
		trace_xe_exec_queue_scheduling_enable(q);
	} else {
		action[len++] = XE_GUC_ACTION_SCHED_CONTEXT;
		action[len++] = q->guc->id;
		trace_xe_exec_queue_submit(q);
	}

	xe_guc_ct_send(&guc->ct, action, len, g2h_len, num_g2h);

	if (extra_submit) {
		len = 0;
		action[len++] = XE_GUC_ACTION_SCHED_CONTEXT;
		action[len++] = q->guc->id;
		trace_xe_exec_queue_submit(q);

		xe_guc_ct_send(&guc->ct, action, len, 0, 0);
	}
}

static struct dma_fence *
guc_exec_queue_run_job(struct drm_sched_job *drm_job)
{
	struct xe_sched_job *job = to_xe_sched_job(drm_job);
	struct xe_exec_queue *q = job->q;
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	bool lr = xe_exec_queue_is_lr(q);

	xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
		  exec_queue_banned(q) || exec_queue_suspended(q));

	trace_xe_sched_job_run(job);

	if (!exec_queue_killed_or_banned_or_wedged(q) && !xe_sched_job_is_error(job)) {
		if (!exec_queue_registered(q))
			register_exec_queue(q);
		if (!lr)	/* LR jobs are emitted in the exec IOCTL */
			q->ring_ops->emit_job(job);
		submit_exec_queue(q);
	}

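	/*
	 * On first submission take an extra fence reference for the scheduler;
	 * on re-submission (e.g. after a GT reset) JOB_FLAG_SUBMIT is already
	 * set and the scheduler already owns a reference, so don't take
	 * another.
	 */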
	if (lr) {
		xe_sched_job_set_error(job, -EOPNOTSUPP);
		return NULL;
	} else if (test_and_set_bit(JOB_FLAG_SUBMIT, &job->fence->flags)) {
		return job->fence;
	} else {
		return dma_fence_get(job->fence);
	}
}

static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
{
	struct xe_sched_job *job = to_xe_sched_job(drm_job);

	xe_exec_queue_update_run_ticks(job->q);

	trace_xe_sched_job_free(job);
	xe_sched_job_put(job);
}

static int guc_read_stopped(struct xe_guc *guc)
{
	return atomic_read(&guc->submission_state.stopped);
}

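/* Build the 3-dword H2G payload for a scheduling context mode set */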
#define MAKE_SCHED_CONTEXT_ACTION(q, enable_disable)			\
	u32 action[] = {						\
		XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET,			\
		q->guc->id,						\
		GUC_CONTEXT_##enable_disable,				\
	}

static void disable_scheduling_deregister(struct xe_guc *guc,
					  struct xe_exec_queue *q)
{
	MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
	struct xe_device *xe = guc_to_xe(guc);
	int ret;

	set_min_preemption_timeout(guc, q);
	smp_rmb();
	ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) ||
				 guc_read_stopped(guc), HZ * 5);
	if (!ret) {
		struct xe_gpu_scheduler *sched = &q->guc->sched;

		drm_warn(&xe->drm, "Pending enable failed to respond");
		xe_sched_submission_start(sched);
		xe_gt_reset_async(q->gt);
		xe_sched_tdr_queue_imm(sched);
		return;
	}

	clear_exec_queue_enabled(q);
	set_exec_queue_pending_disable(q);
	set_exec_queue_destroyed(q);
	trace_xe_exec_queue_scheduling_disable(q);

	/*
	 * Reserve space for both G2H here as the 2nd G2H is sent from a G2H
	 * handler and we are not allowed to reserve G2H space in handlers.
	 */
	xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
		       G2H_LEN_DW_SCHED_CONTEXT_MODE_SET +
		       G2H_LEN_DW_DEREGISTER_CONTEXT, 2);
}

static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);

	/* Wake up xe_wait_user_fence ioctl if exec queue is reset */
	wake_up_all(&xe->ufence_wq);

	if (xe_exec_queue_is_lr(q))
		queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr);
	else
		xe_sched_tdr_queue_imm(&q->guc->sched);
}

/**
 * xe_guc_submit_wedge() - Wedge GuC submission
 * @guc: the GuC object
 *
 * Save the state of exec queues registered with the GuC by taking a ref to
 * each queue. Register a devm handler to drop the refs upon driver unload.
 */
void xe_guc_submit_wedge(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;
	unsigned long index;
	int err;

	xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);

	err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
				       guc_submit_wedged_fini, guc);
	if (err) {
		drm_err(&xe->drm, "Failed to register xe_guc_submit clean-up on wedged.mode=2; device is wedged regardless\n");
		return;
	}

	mutex_lock(&guc->submission_state.lock);
	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
		if (xe_exec_queue_get_unless_zero(q))
			set_exec_queue_wedged(q);
	mutex_unlock(&guc->submission_state.lock);
}

static bool guc_submit_hint_wedged(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);

	if (xe->wedged.mode != 2)
		return false;

	if (xe_device_wedged(xe))
		return true;

	xe_device_declare_wedged(xe);

	return true;
}

static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
{
	struct xe_guc_exec_queue *ge =
		container_of(w, struct xe_guc_exec_queue, lr_tdr);
	struct xe_exec_queue *q = ge->q;
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gpu_scheduler *sched = &ge->sched;
	bool wedged;

	xe_assert(xe, xe_exec_queue_is_lr(q));
	trace_xe_exec_queue_lr_cleanup(q);

	wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));

	/* Kill the run_job / process_msg entry points */
	xe_sched_submission_stop(sched);

	/*
	 * Engine state now mostly stable, disable scheduling / deregister if
	 * needed. This cleanup routine might be called multiple times, where
	 * the actual async engine deregister drops the final engine ref.
	 * Calling disable_scheduling_deregister will mark the engine as
	 * destroyed and fire off the CT requests to disable scheduling /
	 * deregister, which we only want to do once. We also don't want to mark
	 * the engine as pending_disable again as this may race with the
	 * xe_guc_deregister_done_handler() which treats it as an unexpected
	 * state.
	 */
	if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) {
		struct xe_guc *guc = exec_queue_to_guc(q);
		int ret;

		set_exec_queue_banned(q);
		disable_scheduling_deregister(guc, q);

		/*
		 * Must wait for scheduling to be disabled before signalling
		 * any fences; if the GT is broken the GT reset code should
		 * signal us.
		 */
		ret = wait_event_timeout(guc->ct.wq,
					 !exec_queue_pending_disable(q) ||
					 guc_read_stopped(guc), HZ * 5);
		if (!ret) {
			drm_warn(&xe->drm, "Schedule disable failed to respond");
			xe_sched_submission_start(sched);
			xe_gt_reset_async(q->gt);
			return;
		}
	}

	xe_sched_submission_start(sched);
}

#define ADJUST_FIVE_PERCENT(__t)	mul_u64_u32_div(__t, 105, 100)

static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
{
	struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q));
	u32 ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]);
	u32 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
	u32 timeout_ms = q->sched_props.job_timeout_ms;
	u32 diff;
	u64 running_time_ms;

	/*
	 * Counter wraps at ~223s at the usual 19.2MHz, be paranoid and catch
	 * possible overflows with a high timeout.
	 */
	xe_gt_assert(gt, timeout_ms < 100 * MSEC_PER_SEC);

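	/* Handle a single wrap of the 32-bit context timestamp */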
	if (ctx_timestamp < ctx_job_timestamp)
		diff = ctx_timestamp + U32_MAX - ctx_job_timestamp;
	else
		diff = ctx_timestamp - ctx_job_timestamp;

	/*
	 * Ensure timeout is within 5% to account for GuC scheduling latency
	 */
	running_time_ms =
		ADJUST_FIVE_PERCENT(xe_gt_clock_interval_to_ms(gt, diff));

	xe_gt_dbg(gt,
		  "Check job timeout: seqno=%u, lrc_seqno=%u, guc_id=%d, running_time_ms=%llu, timeout_ms=%u, diff=0x%08x",
		  xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
		  q->guc->id, running_time_ms, timeout_ms, diff);

	return running_time_ms >= timeout_ms;
}

static void enable_scheduling(struct xe_exec_queue *q)
{
	MAKE_SCHED_CONTEXT_ACTION(q, ENABLE);
	struct xe_guc *guc = exec_queue_to_guc(q);
	int ret;

	xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));

	set_exec_queue_pending_enable(q);
	set_exec_queue_enabled(q);
	trace_xe_exec_queue_scheduling_enable(q);

	xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
		       G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);

	ret = wait_event_timeout(guc->ct.wq,
				 !exec_queue_pending_enable(q) ||
				 guc_read_stopped(guc), HZ * 5);
	if (!ret || guc_read_stopped(guc)) {
		xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to respond");
		set_exec_queue_banned(q);
		xe_gt_reset_async(q->gt);
		xe_sched_tdr_queue_imm(&q->guc->sched);
	}
}

static void disable_scheduling(struct xe_exec_queue *q, bool immediate)
{
	MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
	struct xe_guc *guc = exec_queue_to_guc(q);

	xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));

	if (immediate)
		set_min_preemption_timeout(guc, q);
	clear_exec_queue_enabled(q);
	set_exec_queue_pending_disable(q);
	trace_xe_exec_queue_scheduling_disable(q);

	xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
		       G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
}

static void __deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
{
	u32 action[] = {
		XE_GUC_ACTION_DEREGISTER_CONTEXT,
		q->guc->id,
	};

	xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));

	set_exec_queue_destroyed(q);
	trace_xe_exec_queue_deregister(q);

	xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
		       G2H_LEN_DW_DEREGISTER_CONTEXT, 1);
}

static enum drm_gpu_sched_stat
guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
{
	struct xe_sched_job *job = to_xe_sched_job(drm_job);
	struct xe_sched_job *tmp_job;
	struct xe_exec_queue *q = job->q;
	struct xe_gpu_scheduler *sched = &q->guc->sched;
	struct xe_guc *guc = exec_queue_to_guc(q);
	const char *process_name = "no process";
	int err = -ETIME;
	pid_t pid = -1;
	int i = 0;
	bool wedged, skip_timeout_check;

	/*
	 * TDR has fired before free job worker. Common if exec queue
	 * immediately closed after last fence signaled.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
		guc_exec_queue_free_job(drm_job);

		return DRM_GPU_SCHED_STAT_NOMINAL;
	}

	/* Kill the run_job entry point */
	xe_sched_submission_stop(sched);

	/* Must check all state after stopping scheduler */
	skip_timeout_check = exec_queue_reset(q) ||
		exec_queue_killed_or_banned_or_wedged(q) ||
		exec_queue_destroyed(q);

	/* Job hasn't started, can't be timed out */
	if (!skip_timeout_check && !xe_sched_job_started(job))
		goto rearm;

	/*
	 * XXX: Sampling timeout doesn't work in wedged mode as we have to
	 * modify scheduling state to read timestamp. We could read the
	 * timestamp from a register to accumulate current running time but this
	 * doesn't work for SRIOV. For now assuming timeouts in wedged mode are
	 * genuine timeouts.
	 */
	wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));

	/* Engine state now stable, disable scheduling to check timestamp */
	if (!wedged && exec_queue_registered(q)) {
		int ret;

		if (exec_queue_reset(q))
			err = -EIO;

		if (!exec_queue_destroyed(q)) {
			/*
			 * Wait for any pending G2H to flush out before
			 * modifying state
			 */
			ret = wait_event_timeout(guc->ct.wq,
						 !exec_queue_pending_enable(q) ||
						 guc_read_stopped(guc), HZ * 5);
			if (!ret || guc_read_stopped(guc))
				goto trigger_reset;

			/*
			 * Flag communicates to the G2H handler that the
			 * schedule disable originated from a timeout check.
			 * The G2H handler then avoids triggering cleanup or
			 * deregistering the exec queue.
			 */
			set_exec_queue_check_timeout(q);
			disable_scheduling(q, skip_timeout_check);
		}

		/*
		 * Must wait for scheduling to be disabled before signalling
		 * any fences; if the GT is broken the GT reset code should
		 * signal us.
		 *
		 * FIXME: Tests can generate a ton of 0x6000 (IOMMU CAT fault
		 * error) messages which can cause the schedule disable to get
		 * lost. If this occurs, trigger a GT reset to recover.
		 */
		smp_rmb();
		ret = wait_event_timeout(guc->ct.wq,
					 !exec_queue_pending_disable(q) ||
					 guc_read_stopped(guc), HZ * 5);
		if (!ret || guc_read_stopped(guc)) {
trigger_reset:
			if (!ret)
				xe_gt_warn(guc_to_gt(guc), "Schedule disable failed to respond");
			set_exec_queue_extra_ref(q);
			xe_exec_queue_get(q);	/* GT reset owns this */
			set_exec_queue_banned(q);
			xe_gt_reset_async(q->gt);
			xe_sched_tdr_queue_imm(sched);
			goto rearm;
		}
	}

	/*
	 * Check if job is actually timed out, if so restart job execution and TDR
	 */
	if (!wedged && !skip_timeout_check && !check_timeout(q, job) &&
	    !exec_queue_reset(q) && exec_queue_registered(q)) {
		clear_exec_queue_check_timeout(q);
		goto sched_enable;
	}

	if (q->vm && q->vm->xef) {
		process_name = q->vm->xef->process_name;
		pid = q->vm->xef->pid;
	}
	xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]",
		     xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
		     q->guc->id, q->flags, process_name, pid);

	trace_xe_sched_job_timedout(job);

	if (!exec_queue_killed(q))
		xe_devcoredump(job);

	/*
	 * Kernel jobs should never fail, nor should VM jobs; if they do,
	 * something has gone wrong and the GT needs a reset.
	 */
	xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL,
		   "Kernel-submitted job timed out\n");
	xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q),
		   "VM job timed out on non-killed execqueue\n");
	if (!wedged && (q->flags & EXEC_QUEUE_FLAG_KERNEL ||
			(q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)))) {
		if (!xe_sched_invalidate_job(job, 2)) {
			clear_exec_queue_check_timeout(q);
			xe_gt_reset_async(q->gt);
			goto rearm;
		}
	}

	/* Finish cleaning up exec queue via deregister */
	set_exec_queue_banned(q);
	if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) {
		set_exec_queue_extra_ref(q);
		xe_exec_queue_get(q);
		__deregister_exec_queue(guc, q);
	}

	/* Stop fence signaling */
	xe_hw_fence_irq_stop(q->fence_irq);

	/*
	 * Fence state now stable, stop / start scheduler which cleans up any
	 * fences that are complete
	 */
	xe_sched_add_pending_job(sched, job);
	xe_sched_submission_start(sched);

	xe_guc_exec_queue_trigger_cleanup(q);

	/* Mark all outstanding jobs as bad, thus completing them */
	spin_lock(&sched->base.job_list_lock);
	list_for_each_entry(tmp_job, &sched->base.pending_list, drm.list)
		xe_sched_job_set_error(tmp_job, !i++ ? err : -ECANCELED);
	spin_unlock(&sched->base.job_list_lock);

	/* Start fence signaling */
	xe_hw_fence_irq_start(q->fence_irq);

	return DRM_GPU_SCHED_STAT_NOMINAL;

sched_enable:
	enable_scheduling(q);
rearm:
	/*
	 * XXX: Ideally we would adjust the timeout based on the current
	 * execution time, but there is not currently an easy way to do this
	 * in the DRM scheduler. With some thought, do this in a follow up.
	 */
	xe_sched_add_pending_job(sched, job);
	xe_sched_submission_start(sched);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void __guc_exec_queue_fini_async(struct work_struct *w)
{
	struct xe_guc_exec_queue *ge =
		container_of(w, struct xe_guc_exec_queue, fini_async);
	struct xe_exec_queue *q = ge->q;
	struct xe_guc *guc = exec_queue_to_guc(q);

	xe_pm_runtime_get(guc_to_xe(guc));
	trace_xe_exec_queue_destroy(q);

	if (xe_exec_queue_is_lr(q))
		cancel_work_sync(&ge->lr_tdr);
	release_guc_id(guc, q);
	xe_sched_entity_fini(&ge->entity);
	xe_sched_fini(&ge->sched);

	kfree(ge);
	xe_exec_queue_fini(q);
	xe_pm_runtime_put(guc_to_xe(guc));
}

static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);

	INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);

	/* We must block on kernel engines so slabs are empty on driver unload */
	if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
		__guc_exec_queue_fini_async(&q->guc->fini_async);
	else
		queue_work(xe->destroy_wq, &q->guc->fini_async);
}

static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
{
	/*
	 * Might be done from within the GPU scheduler, need to do async as we
	 * fini the scheduler when the engine is fini'd, the scheduler can't
	 * complete fini within itself (circular dependency). Async resolves
	 * this, and we don't really care when everything is fini'd, just that
	 * it is.
	 */
	guc_exec_queue_fini_async(q);
}

static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
{
	struct xe_exec_queue *q = msg->private_data;
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);

	xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
	trace_xe_exec_queue_cleanup_entity(q);

	if (exec_queue_registered(q))
		disable_scheduling_deregister(guc, q);
	else
		__guc_exec_queue_fini(guc, q);
}

static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
{
	return !exec_queue_killed_or_banned_or_wedged(q) && exec_queue_registered(q);
}

static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *msg)
{
	struct xe_exec_queue *q = msg->private_data;
	struct xe_guc *guc = exec_queue_to_guc(q);

	if (guc_exec_queue_allowed_to_change_state(q))
		init_policies(guc, q);
	kfree(msg);
}

static void __suspend_fence_signal(struct xe_exec_queue *q)
{
	if (!q->guc->suspend_pending)
		return;

	WRITE_ONCE(q->guc->suspend_pending, false);
	wake_up(&q->guc->suspend_wait);
}

static void suspend_fence_signal(struct xe_exec_queue *q)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);

	xe_assert(xe, exec_queue_suspended(q) || exec_queue_killed(q) ||
		  guc_read_stopped(guc));
	xe_assert(xe, q->guc->suspend_pending);

	__suspend_fence_signal(q);
}

static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
{
	struct xe_exec_queue *q = msg->private_data;
	struct xe_guc *guc = exec_queue_to_guc(q);

	if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
	    exec_queue_enabled(q)) {
		wait_event(guc->ct.wq, q->guc->resume_time != RESUME_PENDING ||
			   guc_read_stopped(guc));

		if (!guc_read_stopped(guc)) {
			s64 since_resume_ms =
				ktime_ms_delta(ktime_get(),
					       q->guc->resume_time);
			s64 wait_ms = q->vm->preempt.min_run_period_ms -
				since_resume_ms;

			if (wait_ms > 0 && q->guc->resume_time)
				msleep(wait_ms);

			set_exec_queue_suspended(q);
			disable_scheduling(q, false);
		}
	} else if (q->guc->suspend_pending) {
		set_exec_queue_suspended(q);
		suspend_fence_signal(q);
	}
}

static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
{
	struct xe_exec_queue *q = msg->private_data;

	if (guc_exec_queue_allowed_to_change_state(q)) {
		clear_exec_queue_suspended(q);
		if (!exec_queue_enabled(q)) {
			q->guc->resume_time = RESUME_PENDING;
			enable_scheduling(q);
		}
	} else {
		clear_exec_queue_suspended(q);
	}
}

#define CLEANUP		1	/* Non-zero values to catch uninitialized msg */
#define SET_SCHED_PROPS	2
#define SUSPEND		3
#define RESUME		4
#define OPCODE_MASK	0xf
#define MSG_LOCKED	BIT(8)
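/* MSG_LOCKED: the caller already holds the scheduler's message lock */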

static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
{
	struct xe_device *xe = guc_to_xe(exec_queue_to_guc(msg->private_data));

	trace_xe_sched_msg_recv(msg);

	switch (msg->opcode) {
	case CLEANUP:
		__guc_exec_queue_process_msg_cleanup(msg);
		break;
	case SET_SCHED_PROPS:
		__guc_exec_queue_process_msg_set_sched_props(msg);
		break;
	case SUSPEND:
		__guc_exec_queue_process_msg_suspend(msg);
		break;
	case RESUME:
		__guc_exec_queue_process_msg_resume(msg);
		break;
	default:
		XE_WARN_ON("Unknown message type");
	}

	xe_pm_runtime_put(xe);
}

static const struct drm_sched_backend_ops drm_sched_ops = {
	.run_job = guc_exec_queue_run_job,
	.free_job = guc_exec_queue_free_job,
	.timedout_job = guc_exec_queue_timedout_job,
};

static const struct xe_sched_backend_ops xe_sched_ops = {
	.process_msg = guc_exec_queue_process_msg,
};

static int guc_exec_queue_init(struct xe_exec_queue *q)
{
	struct xe_gpu_scheduler *sched;
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_guc_exec_queue *ge;
	long timeout;
	int err, i;

	xe_assert(xe, xe_device_uc_enabled(guc_to_xe(guc)));

	ge = kzalloc(sizeof(*ge), GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	q->guc = ge;
	ge->q = q;
	init_waitqueue_head(&ge->suspend_wait);

	for (i = 0; i < MAX_STATIC_MSG_TYPE; ++i)
		INIT_LIST_HEAD(&ge->static_msgs[i].link);

	timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
		  msecs_to_jiffies(q->sched_props.job_timeout_ms);
	err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
			    NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
			    timeout, guc_to_gt(guc)->ordered_wq, NULL,
			    q->name, gt_to_xe(q->gt)->drm.dev);
	if (err)
		goto err_free;

	sched = &ge->sched;
	err = xe_sched_entity_init(&ge->entity, sched);
	if (err)
		goto err_sched;

	if (xe_exec_queue_is_lr(q))
		INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);

	mutex_lock(&guc->submission_state.lock);

	err = alloc_guc_id(guc, q);
	if (err)
		goto err_entity;

	q->entity = &ge->entity;

	if (guc_read_stopped(guc))
		xe_sched_stop(sched);

	mutex_unlock(&guc->submission_state.lock);

	xe_exec_queue_assign_name(q, q->guc->id);

	trace_xe_exec_queue_create(q);

	return 0;

err_entity:
	mutex_unlock(&guc->submission_state.lock);
	xe_sched_entity_fini(&ge->entity);
err_sched:
	xe_sched_fini(&ge->sched);
err_free:
	kfree(ge);

	return err;
}

static void guc_exec_queue_kill(struct xe_exec_queue *q)
{
	trace_xe_exec_queue_kill(q);
	set_exec_queue_killed(q);
	__suspend_fence_signal(q);
	xe_guc_exec_queue_trigger_cleanup(q);
}

static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg,
				   u32 opcode)
{
	xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q)));

	INIT_LIST_HEAD(&msg->link);
	msg->opcode = opcode & OPCODE_MASK;
	msg->private_data = q;

	trace_xe_sched_msg_add(msg);
	if (opcode & MSG_LOCKED)
		xe_sched_add_msg_locked(&q->guc->sched, msg);
	else
		xe_sched_add_msg(&q->guc->sched, msg);
}

static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
				       struct xe_sched_msg *msg,
				       u32 opcode)
{
	if (!list_empty(&msg->link))
		return false;

	guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED);

	return true;
}

#define STATIC_MSG_CLEANUP	0
#define STATIC_MSG_SUSPEND	1
#define STATIC_MSG_RESUME	2
static void guc_exec_queue_fini(struct xe_exec_queue *q)
{
	struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;

	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q))
		guc_exec_queue_add_msg(q, msg, CLEANUP);
	else
		__guc_exec_queue_fini(exec_queue_to_guc(q), q);
}

static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
				       enum xe_exec_queue_priority priority)
{
	struct xe_sched_msg *msg;

	if (q->sched_props.priority == priority ||
	    exec_queue_killed_or_banned_or_wedged(q))
		return 0;

	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	q->sched_props.priority = priority;
	guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);

	return 0;
}

static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
{
	struct xe_sched_msg *msg;

	if (q->sched_props.timeslice_us == timeslice_us ||
	    exec_queue_killed_or_banned_or_wedged(q))
		return 0;

	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	q->sched_props.timeslice_us = timeslice_us;
	guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);

	return 0;
}

static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
					      u32 preempt_timeout_us)
{
	struct xe_sched_msg *msg;

	if (q->sched_props.preempt_timeout_us == preempt_timeout_us ||
	    exec_queue_killed_or_banned_or_wedged(q))
		return 0;

	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	q->sched_props.preempt_timeout_us = preempt_timeout_us;
	guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);

	return 0;
}

static int guc_exec_queue_suspend(struct xe_exec_queue *q)
{
	struct xe_gpu_scheduler *sched = &q->guc->sched;
	struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;

	if (exec_queue_killed_or_banned_or_wedged(q))
		return -EINVAL;

	xe_sched_msg_lock(sched);
	if (guc_exec_queue_try_add_msg(q, msg, SUSPEND))
		q->guc->suspend_pending = true;
	xe_sched_msg_unlock(sched);

	return 0;
}

static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	int ret;

	/*
	 * Likely don't need to check exec_queue_killed() as we clear
	 * suspend_pending upon kill, but to be paranoid about races in which
	 * suspend_pending is set after kill, also check kill here.
	 */
	ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
					       !READ_ONCE(q->guc->suspend_pending) ||
					       exec_queue_killed(q) ||
					       guc_read_stopped(guc),
					       HZ * 5);

	if (!ret) {
		xe_gt_warn(guc_to_gt(guc),
			   "Suspend fence, guc_id=%d, failed to respond",
			   q->guc->id);
		/* XXX: Trigger GT reset? */
		return -ETIME;
	}

	return ret < 0 ? ret : 0;
}

static void guc_exec_queue_resume(struct xe_exec_queue *q)
{
	struct xe_gpu_scheduler *sched = &q->guc->sched;
	struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);

	xe_assert(xe, !q->guc->suspend_pending);

	xe_sched_msg_lock(sched);
	guc_exec_queue_try_add_msg(q, msg, RESUME);
	xe_sched_msg_unlock(sched);
}

static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
{
	return exec_queue_reset(q) || exec_queue_killed_or_banned_or_wedged(q);
}

/*
 * All of these functions are an abstraction layer which other parts of XE can
 * use to trap into the GuC backend. All of these functions, aside from init,
 * really shouldn't do much other than trap into the DRM scheduler which
 * synchronizes these operations.
 */
static const struct xe_exec_queue_ops guc_exec_queue_ops = {
	.init = guc_exec_queue_init,
	.kill = guc_exec_queue_kill,
	.fini = guc_exec_queue_fini,
	.set_priority = guc_exec_queue_set_priority,
	.set_timeslice = guc_exec_queue_set_timeslice,
	.set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
	.suspend = guc_exec_queue_suspend,
	.suspend_wait = guc_exec_queue_suspend_wait,
	.resume = guc_exec_queue_resume,
	.reset_status = guc_exec_queue_reset_status,
};

static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
{
	struct xe_gpu_scheduler *sched = &q->guc->sched;

	/* Stop scheduling + flush any DRM scheduler operations */
	xe_sched_submission_stop(sched);

	/* Clean up lost G2H + reset engine state */
	if (exec_queue_registered(q)) {
		if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
			xe_exec_queue_put(q);
		else if (exec_queue_destroyed(q))
			__guc_exec_queue_fini(guc, q);
	}
	if (q->guc->suspend_pending) {
		set_exec_queue_suspended(q);
		suspend_fence_signal(q);
	}
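	/*
	 * Clear all transient state bits (pending enable/disable, registered,
	 * enabled, reset, check-timeout, extra-ref), keeping only the sticky
	 * wedged / banned / killed / destroyed / suspended bits.
	 */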
	atomic_and(EXEC_QUEUE_STATE_WEDGED | EXEC_QUEUE_STATE_BANNED |
		   EXEC_QUEUE_STATE_KILLED | EXEC_QUEUE_STATE_DESTROYED |
		   EXEC_QUEUE_STATE_SUSPENDED,
		   &q->guc->state);
	q->guc->resume_time = 0;
	trace_xe_exec_queue_stop(q);

	/*
	 * Ban any engine (aside from kernel and engines used for VM ops) with a
	 * started but not complete job or if a job has gone through a GT reset
	 * more than twice.
	 */
	if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
		struct xe_sched_job *job = xe_sched_first_pending_job(sched);
		bool ban = false;

		if (job) {
			if ((xe_sched_job_started(job) &&
			    !xe_sched_job_completed(job)) ||
			    xe_sched_invalidate_job(job, 2)) {
				trace_xe_sched_job_ban(job);
				ban = true;
			}
		} else if (xe_exec_queue_is_lr(q) &&
			   (xe_lrc_ring_head(q->lrc[0]) != xe_lrc_ring_tail(q->lrc[0]))) {
			ban = true;
		}

		if (ban) {
			set_exec_queue_banned(q);
			xe_guc_exec_queue_trigger_cleanup(q);
		}
	}
}

int xe_guc_submit_reset_prepare(struct xe_guc *guc)
{
	int ret;

	/*
	 * Using an atomic here rather than submission_state.lock as this
	 * function can be called while holding the CT lock (engine reset
	 * failure). submission_state.lock needs the CT lock to resubmit jobs.
	 * Atomic is not ideal, but it works to protect against a concurrent
	 * reset and to release any TDRs waiting on
	 * guc->submission_state.stopped.
	 */
	ret = atomic_fetch_or(1, &guc->submission_state.stopped);
	smp_wmb();
	wake_up_all(&guc->ct.wq);

	return ret;
}

void xe_guc_submit_reset_wait(struct xe_guc *guc)
{
	wait_event(guc->ct.wq, xe_device_wedged(guc_to_xe(guc)) ||
		   !guc_read_stopped(guc));
}

void xe_guc_submit_stop(struct xe_guc *guc)
{
	struct xe_exec_queue *q;
	unsigned long index;
	struct xe_device *xe = guc_to_xe(guc);

	xe_assert(xe, guc_read_stopped(guc) == 1);

	mutex_lock(&guc->submission_state.lock);

	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
		guc_exec_queue_stop(guc, q);

	mutex_unlock(&guc->submission_state.lock);

	/*
	 * No one can enter the backend at this point, aside from new engine
	 * creation which is protected by guc->submission_state.lock.
	 */
}

static void guc_exec_queue_start(struct xe_exec_queue *q)
{
	struct xe_gpu_scheduler *sched = &q->guc->sched;

	if (!exec_queue_killed_or_banned_or_wedged(q)) {
		int i;

		trace_xe_exec_queue_resubmit(q);
		for (i = 0; i < q->width; ++i)
			xe_lrc_set_ring_head(q->lrc[i], q->lrc[i]->ring.tail);
		xe_sched_resubmit_jobs(sched);
	}

	xe_sched_submission_start(sched);
	xe_sched_submission_resume_tdr(sched);
}

int xe_guc_submit_start(struct xe_guc *guc)
{
	struct xe_exec_queue *q;
	unsigned long index;
	struct xe_device *xe = guc_to_xe(guc);

	xe_assert(xe, guc_read_stopped(guc) == 1);

	mutex_lock(&guc->submission_state.lock);
	atomic_dec(&guc->submission_state.stopped);
	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
		guc_exec_queue_start(q);
	mutex_unlock(&guc->submission_state.lock);

	wake_up_all(&guc->ct.wq);

	return 0;
}

static struct xe_exec_queue *
g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;

	if (unlikely(guc_id >= GUC_ID_MAX)) {
		drm_err(&xe->drm, "Invalid guc_id %u", guc_id);
		return NULL;
	}

	q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
	if (unlikely(!q)) {
		drm_err(&xe->drm, "No engine present for guc_id %u", guc_id);
		return NULL;
	}

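	/* A parallel queue owns q->width consecutive ids; any of them maps back to q */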
	xe_assert(xe, guc_id >= q->guc->id);
	xe_assert(xe, guc_id < (q->guc->id + q->width));

	return q;
}

static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
{
	u32 action[] = {
		XE_GUC_ACTION_DEREGISTER_CONTEXT,
		q->guc->id,
	};

	xe_gt_assert(guc_to_gt(guc), exec_queue_destroyed(q));
	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));

	trace_xe_exec_queue_deregister(q);

	xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action));
}

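/* runnable_state == 1 acks a schedule enable, == 0 acks a schedule disable */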
1814static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q,
1815 u32 runnable_state)
1816{
1817 trace_xe_exec_queue_scheduling_done(q);
1818
1819 if (runnable_state == 1) {
1820 xe_gt_assert(guc_to_gt(guc), exec_queue_pending_enable(q));
1821
1822 q->guc->resume_time = ktime_get();
1823 clear_exec_queue_pending_enable(q);
1824 smp_wmb();
1825 wake_up_all(&guc->ct.wq);
1826 } else {
1827 bool check_timeout = exec_queue_check_timeout(q);
1828
1829 xe_gt_assert(guc_to_gt(guc), runnable_state == 0);
1830 xe_gt_assert(guc_to_gt(guc), exec_queue_pending_disable(q));
1831
1832 clear_exec_queue_pending_disable(q);
1833 if (q->guc->suspend_pending) {
1834 suspend_fence_signal(q);
1835 } else {
1836 if (exec_queue_banned(q) || check_timeout) {
1837 smp_wmb();
1838 wake_up_all(&guc->ct.wq);
1839 }
1840 if (!check_timeout)
1841 deregister_exec_queue(guc, q);
1842 }
1843 }
1844}

int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;
	u32 guc_id, runnable_state;

	if (unlikely(len < 2)) {
		drm_err(&xe->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	/* Only dereference the message payload after the length check */
	guc_id = msg[0];
	runnable_state = msg[1];

	q = g2h_exec_queue_lookup(guc, guc_id);
	if (unlikely(!q))
		return -EPROTO;

	if (unlikely(!exec_queue_pending_enable(q) &&
		     !exec_queue_pending_disable(q))) {
		xe_gt_err(guc_to_gt(guc),
			  "SCHED_DONE: Unexpected engine state 0x%04x, guc_id=%d, runnable_state=%u",
			  atomic_read(&q->guc->state), q->guc->id,
			  runnable_state);
		return -EPROTO;
	}

	handle_sched_done(guc, q, runnable_state);

	return 0;
}

static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
{
	trace_xe_exec_queue_deregister_done(q);

	clear_exec_queue_registered(q);

	if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
		xe_exec_queue_put(q);
	else
		__guc_exec_queue_fini(guc, q);
}

int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;
	u32 guc_id;

	if (unlikely(len < 1)) {
		drm_err(&xe->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	guc_id = msg[0];

	q = g2h_exec_queue_lookup(guc, guc_id);
	if (unlikely(!q))
		return -EPROTO;

	if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) ||
	    exec_queue_pending_enable(q) || exec_queue_enabled(q)) {
		xe_gt_err(guc_to_gt(guc),
			  "DEREGISTER_DONE: Unexpected engine state 0x%04x, guc_id=%d",
			  atomic_read(&q->guc->state), q->guc->id);
		return -EPROTO;
	}

	handle_deregister_done(guc, q);

	return 0;
}

int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;
	u32 guc_id;

	if (unlikely(len < 1)) {
		drm_err(&xe->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	guc_id = msg[0];

	q = g2h_exec_queue_lookup(guc, guc_id);
	if (unlikely(!q))
		return -EPROTO;

	xe_gt_info(gt, "Engine reset: engine_class=%s, logical_mask: 0x%x, guc_id=%d",
		   xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);

	/* FIXME: Do error capture, most likely async */

	trace_xe_exec_queue_reset(q);

	/*
	 * A banned engine is a NOP at this point (it came from
	 * guc_exec_queue_timedout_job). Otherwise, kick the DRM scheduler to
	 * cancel the jobs by setting the job timeout to the minimum value,
	 * which in turn kicks guc_exec_queue_timedout_job.
	 */
	set_exec_queue_reset(q);
	if (!exec_queue_banned(q) && !exec_queue_check_timeout(q))
		xe_guc_exec_queue_trigger_cleanup(q);

	return 0;
}
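
/*
 * Sketch of the cleanup kick (an assumption about
 * xe_guc_exec_queue_trigger_cleanup(), defined earlier in this file): for
 * a queue with a job timeout it amounts to scheduling the timeout handler
 * immediately, so guc_exec_queue_timedout_job() runs and bans the queue
 * without waiting for the full timeout to elapse.
 */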

int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
					       u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;
	u32 guc_id;

	if (unlikely(len < 1)) {
		drm_err(&xe->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	guc_id = msg[0];

	q = g2h_exec_queue_lookup(guc, guc_id);
	if (unlikely(!q))
		return -EPROTO;

	xe_gt_dbg(gt, "Engine memory cat error: engine_class=%s, logical_mask: 0x%x, guc_id=%d",
		  xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);

	trace_xe_exec_queue_memory_cat_error(q);

	/* Treat the same as engine reset */
	set_exec_queue_reset(q);
	if (!exec_queue_banned(q) && !exec_queue_check_timeout(q))
		xe_guc_exec_queue_trigger_cleanup(q);

	return 0;
}

int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_device *xe = guc_to_xe(guc);
	u8 guc_class, instance;
	u32 reason;

	if (unlikely(len != 3)) {
		drm_err(&xe->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	guc_class = msg[0];
	instance = msg[1];
	reason = msg[2];

	/* Unexpected failure of a hardware feature, log an actual error */
	drm_err(&xe->drm, "GuC engine reset request failed on %d:%d because 0x%08X",
		guc_class, instance, reason);

	xe_gt_reset_async(guc_to_gt(guc));

	return 0;
}

static void
guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q,
				   struct xe_guc_submit_exec_queue_snapshot *snapshot)
{
	struct xe_guc *guc = exec_queue_to_guc(q);
	struct xe_device *xe = guc_to_xe(guc);
	struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
	int i;

	snapshot->guc.wqi_head = q->guc->wqi_head;
	snapshot->guc.wqi_tail = q->guc->wqi_tail;
	snapshot->parallel.wq_desc.head = parallel_read(xe, map, wq_desc.head);
	snapshot->parallel.wq_desc.tail = parallel_read(xe, map, wq_desc.tail);
	snapshot->parallel.wq_desc.status = parallel_read(xe, map,
							  wq_desc.wq_status);

	if (snapshot->parallel.wq_desc.head !=
	    snapshot->parallel.wq_desc.tail) {
		for (i = snapshot->parallel.wq_desc.head;
		     i != snapshot->parallel.wq_desc.tail;
		     i = (i + sizeof(u32)) % WQ_SIZE)
			snapshot->parallel.wq[i / sizeof(u32)] =
				parallel_read(xe, map, wq[i / sizeof(u32)]);
	}
}
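
/*
 * Worked example of the circular walk above (illustrative numbers): head
 * and tail are byte offsets into a WQ_SIZE-byte ring of u32 entries. With
 * head == WQ_SIZE - 4 and tail == 4, the loop copies the entry at byte
 * offset WQ_SIZE - 4, wraps to offset 0, copies that entry, and stops
 * before offset 4 - two entries in total.
 */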

static void
guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
				 struct drm_printer *p)
{
	int i;

	drm_printf(p, "\tWQ head: %u (internal), %u (memory)\n",
		   snapshot->guc.wqi_head, snapshot->parallel.wq_desc.head);
	drm_printf(p, "\tWQ tail: %u (internal), %u (memory)\n",
		   snapshot->guc.wqi_tail, snapshot->parallel.wq_desc.tail);
	drm_printf(p, "\tWQ status: %u\n", snapshot->parallel.wq_desc.status);

	if (snapshot->parallel.wq_desc.head !=
	    snapshot->parallel.wq_desc.tail) {
		for (i = snapshot->parallel.wq_desc.head;
		     i != snapshot->parallel.wq_desc.tail;
		     i = (i + sizeof(u32)) % WQ_SIZE)
			drm_printf(p, "\tWQ[%zu]: 0x%08x\n", i / sizeof(u32),
				   snapshot->parallel.wq[i / sizeof(u32)]);
	}
}

/**
 * xe_guc_exec_queue_snapshot_capture - Take a quick snapshot of the GuC Engine.
 * @q: faulty exec queue
 *
 * The snapshot can be printed out at a later stage, e.g. during a
 * devcoredump analysis.
 *
 * Returns: a GuC Submit Engine snapshot object that must be freed by the
 * caller, using `xe_guc_exec_queue_snapshot_free`.
 */
struct xe_guc_submit_exec_queue_snapshot *
xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
{
	struct xe_gpu_scheduler *sched = &q->guc->sched;
	struct xe_guc_submit_exec_queue_snapshot *snapshot;
	int i;

	snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
	if (!snapshot)
		return NULL;

	snapshot->guc.id = q->guc->id;
	memcpy(&snapshot->name, &q->name, sizeof(snapshot->name));
	snapshot->class = q->class;
	snapshot->logical_mask = q->logical_mask;
	snapshot->width = q->width;
	snapshot->refcount = kref_read(&q->refcount);
	snapshot->sched_timeout = sched->base.timeout;
	snapshot->sched_props.timeslice_us = q->sched_props.timeslice_us;
	snapshot->sched_props.preempt_timeout_us =
		q->sched_props.preempt_timeout_us;

	snapshot->lrc = kmalloc_array(q->width, sizeof(struct xe_lrc_snapshot *),
				      GFP_ATOMIC);
	if (snapshot->lrc) {
		for (i = 0; i < q->width; ++i) {
			struct xe_lrc *lrc = q->lrc[i];

			snapshot->lrc[i] = xe_lrc_snapshot_capture(lrc);
		}
	}

	snapshot->schedule_state = atomic_read(&q->guc->state);
	snapshot->exec_queue_flags = q->flags;

	snapshot->parallel_execution = xe_exec_queue_is_parallel(q);
	if (snapshot->parallel_execution)
		guc_exec_queue_wq_snapshot_capture(q, snapshot);

	spin_lock(&sched->base.job_list_lock);
	snapshot->pending_list_size = list_count_nodes(&sched->base.pending_list);
	snapshot->pending_list = kmalloc_array(snapshot->pending_list_size,
					       sizeof(struct pending_list_snapshot),
					       GFP_ATOMIC);
	if (snapshot->pending_list) {
		struct xe_sched_job *job_iter;

		i = 0;
		list_for_each_entry(job_iter, &sched->base.pending_list, drm.list) {
			snapshot->pending_list[i].seqno =
				xe_sched_job_seqno(job_iter);
			snapshot->pending_list[i].fence =
				dma_fence_is_signaled(job_iter->fence) ? 1 : 0;
			snapshot->pending_list[i].finished =
				dma_fence_is_signaled(&job_iter->drm.s_fence->finished)
				? 1 : 0;
			i++;
		}
	}

	spin_unlock(&sched->base.job_list_lock);

	return snapshot;
}
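
/*
 * Typical lifetime of a snapshot, as a sketch (see guc_exec_queue_print()
 * below for an in-tree user of the capture/print/free triple):
 *
 *	snapshot = xe_guc_exec_queue_snapshot_capture(q);
 *	xe_guc_exec_queue_snapshot_capture_delayed(snapshot);
 *	xe_guc_exec_queue_snapshot_print(snapshot, p);
 *	xe_guc_exec_queue_snapshot_free(snapshot);
 *
 * All of these helpers tolerate a NULL snapshot, so a failed GFP_ATOMIC
 * allocation at capture time degrades to a no-op.
 */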

/**
 * xe_guc_exec_queue_snapshot_capture_delayed - Take delayed part of snapshot of the GuC Engine.
 * @snapshot: Previously captured snapshot of the exec queue.
 *
 * This captures some data that requires taking some locks, so it cannot be
 * done in the signaling path.
 */
void
xe_guc_exec_queue_snapshot_capture_delayed(struct xe_guc_submit_exec_queue_snapshot *snapshot)
{
	int i;

	if (!snapshot || !snapshot->lrc)
		return;

	for (i = 0; i < snapshot->width; ++i)
		xe_lrc_snapshot_capture_delayed(snapshot->lrc[i]);
}

/**
 * xe_guc_exec_queue_snapshot_print - Print out a given GuC Engine snapshot.
 * @snapshot: GuC Submit Engine snapshot object.
 * @p: drm_printer where it will be printed out.
 *
 * This function prints out a given GuC Submit Engine snapshot object.
 */
void
xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
				 struct drm_printer *p)
{
	int i;

	if (!snapshot)
		return;

	drm_printf(p, "\nGuC ID: %d\n", snapshot->guc.id);
	drm_printf(p, "\tName: %s\n", snapshot->name);
	drm_printf(p, "\tClass: %d\n", snapshot->class);
	drm_printf(p, "\tLogical mask: 0x%x\n", snapshot->logical_mask);
	drm_printf(p, "\tWidth: %d\n", snapshot->width);
	drm_printf(p, "\tRef: %d\n", snapshot->refcount);
	drm_printf(p, "\tTimeout: %ld (ms)\n", snapshot->sched_timeout);
	drm_printf(p, "\tTimeslice: %u (us)\n",
		   snapshot->sched_props.timeslice_us);
	drm_printf(p, "\tPreempt timeout: %u (us)\n",
		   snapshot->sched_props.preempt_timeout_us);

	for (i = 0; snapshot->lrc && i < snapshot->width; ++i)
		xe_lrc_snapshot_print(snapshot->lrc[i], p);

	drm_printf(p, "\tSchedule State: 0x%x\n", snapshot->schedule_state);
	drm_printf(p, "\tFlags: 0x%lx\n", snapshot->exec_queue_flags);

	if (snapshot->parallel_execution)
		guc_exec_queue_wq_snapshot_print(snapshot, p);

	for (i = 0; snapshot->pending_list && i < snapshot->pending_list_size;
	     i++)
		drm_printf(p, "\tJob: seqno=%d, fence=%d, finished=%d\n",
			   snapshot->pending_list[i].seqno,
			   snapshot->pending_list[i].fence,
			   snapshot->pending_list[i].finished);
}

/**
 * xe_guc_exec_queue_snapshot_free - Free all allocated objects for a given
 * snapshot.
 * @snapshot: GuC Submit Engine snapshot object.
 *
 * This function frees all the memory that was allocated at capture time.
 */
void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot)
{
	int i;

	if (!snapshot)
		return;

	if (snapshot->lrc) {
		for (i = 0; i < snapshot->width; i++)
			xe_lrc_snapshot_free(snapshot->lrc[i]);
		kfree(snapshot->lrc);
	}
	kfree(snapshot->pending_list);
	kfree(snapshot);
}

static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
{
	struct xe_guc_submit_exec_queue_snapshot *snapshot;

	snapshot = xe_guc_exec_queue_snapshot_capture(q);
	xe_guc_exec_queue_snapshot_print(snapshot, p);
	xe_guc_exec_queue_snapshot_free(snapshot);
}

/**
 * xe_guc_submit_print - GuC Submit Print.
 * @guc: GuC.
 * @p: drm_printer where it will be printed out.
 *
 * This function captures and prints snapshots of **all** GuC Engines.
 */
void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p)
{
	struct xe_exec_queue *q;
	unsigned long index;

	if (!xe_device_uc_enabled(guc_to_xe(guc)))
		return;

	mutex_lock(&guc->submission_state.lock);
	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
		guc_exec_queue_print(q, p);
	mutex_unlock(&guc->submission_state.lock);
}