1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2022 Intel Corporation
4 */
5
6#include "xe_guc_ct.h"
7
8#include <linux/bitfield.h>
9#include <linux/circ_buf.h>
10#include <linux/delay.h>
11#include <linux/fault-inject.h>
12
13#include <kunit/static_stub.h>
14
15#include <drm/drm_managed.h>
16
17#include "abi/guc_actions_abi.h"
18#include "abi/guc_actions_sriov_abi.h"
19#include "abi/guc_klvs_abi.h"
20#include "xe_bo.h"
21#include "xe_devcoredump.h"
22#include "xe_device.h"
23#include "xe_gt.h"
24#include "xe_gt_pagefault.h"
25#include "xe_gt_printk.h"
26#include "xe_gt_sriov_pf_control.h"
27#include "xe_gt_sriov_pf_monitor.h"
28#include "xe_gt_tlb_invalidation.h"
29#include "xe_guc.h"
30#include "xe_guc_log.h"
31#include "xe_guc_relay.h"
32#include "xe_guc_submit.h"
33#include "xe_map.h"
34#include "xe_pm.h"
35#include "xe_trace_guc.h"
36
37#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
38enum {
39 /* Internal states, not error conditions */
40 CT_DEAD_STATE_REARM, /* 0x0001 */
41 CT_DEAD_STATE_CAPTURE, /* 0x0002 */
42
43 /* Error conditions */
44 CT_DEAD_SETUP, /* 0x0004 */
45 CT_DEAD_H2G_WRITE, /* 0x0008 */
46 CT_DEAD_H2G_HAS_ROOM, /* 0x0010 */
47 CT_DEAD_G2H_READ, /* 0x0020 */
48 CT_DEAD_G2H_RECV, /* 0x0040 */
49 CT_DEAD_G2H_RELEASE, /* 0x0080 */
50 CT_DEAD_DEADLOCK, /* 0x0100 */
51 CT_DEAD_PROCESS_FAILED, /* 0x0200 */
52 CT_DEAD_FAST_G2H, /* 0x0400 */
53 CT_DEAD_PARSE_G2H_RESPONSE, /* 0x0800 */
54 CT_DEAD_PARSE_G2H_UNKNOWN, /* 0x1000 */
55 CT_DEAD_PARSE_G2H_ORIGIN, /* 0x2000 */
56 CT_DEAD_PARSE_G2H_TYPE, /* 0x4000 */
57 CT_DEAD_CRASH, /* 0x8000 */
58};
59
60static void ct_dead_worker_func(struct work_struct *w);
61static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);
62
63#define CT_DEAD(ct, ctb, reason_code) ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)
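/*
 * For example (illustrative): CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE) marks the
 * H2G channel broken, records bit 0x0008 (CT_DEAD_H2G_WRITE) in
 * ct->dead.reason and schedules the dump worker; further errors are then
 * ignored until a reset re-arms the reporting.
 */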
64#else
65#define CT_DEAD(ct, ctb, reason) \
66 do { \
67 struct guc_ctb *_ctb = (ctb); \
68 if (_ctb) \
69 _ctb->info.broken = true; \
70 } while (0)
71#endif
72
73/* Used when a CT send wants to block and / or receive data */
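/*
 * The sender allocates a g2h_fence on its stack, stores it in
 * ct->fence_lookup under the fence seqno and waits on ct->g2h_fence_wq until
 * the G2H handler fills in the response fields and sets ->done (see
 * guc_ct_send_recv() and parse_g2h_response() below).
 */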
74struct g2h_fence {
75 u32 *response_buffer;
76 u32 seqno;
77 u32 response_data;
78 u16 response_len;
79 u16 error;
80 u16 hint;
81 u16 reason;
82 bool retry;
83 bool fail;
84 bool done;
85};
86
87static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
88{
89 g2h_fence->response_buffer = response_buffer;
90 g2h_fence->response_data = 0;
91 g2h_fence->response_len = 0;
92 g2h_fence->fail = false;
93 g2h_fence->retry = false;
94 g2h_fence->done = false;
95 g2h_fence->seqno = ~0x0;
96}
97
98static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
99{
100 return g2h_fence->seqno == ~0x0;
101}
102
103static struct xe_guc *
104ct_to_guc(struct xe_guc_ct *ct)
105{
106 return container_of(ct, struct xe_guc, ct);
107}
108
109static struct xe_gt *
110ct_to_gt(struct xe_guc_ct *ct)
111{
112 return container_of(ct, struct xe_gt, uc.guc.ct);
113}
114
115static struct xe_device *
116ct_to_xe(struct xe_guc_ct *ct)
117{
118 return gt_to_xe(ct_to_gt(ct));
119}
120
121/**
122 * DOC: GuC CTB Blob
123 *
 * We allocate a single blob to hold both CTB descriptors and buffers:
125 *
126 * +--------+-----------------------------------------------+------+
127 * | offset | contents | size |
128 * +========+===============================================+======+
129 * | 0x0000 | H2G CTB Descriptor (send) | |
130 * +--------+-----------------------------------------------+ 4K |
131 * | 0x0800 | G2H CTB Descriptor (g2h) | |
132 * +--------+-----------------------------------------------+------+
133 * | 0x1000 | H2G CT Buffer (send) | n*4K |
134 * | | | |
135 * +--------+-----------------------------------------------+------+
136 * | 0x1000 | G2H CT Buffer (g2h) | m*4K |
137 * | + n*4K | | |
138 * +--------+-----------------------------------------------+------+
139 *
 * The size of each ``CT Buffer`` must be a multiple of 4K.
 * We don't expect too many messages in flight at any time, unless we are
 * using GuC submission. In that case each request requires a minimum of
 * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
 * is enough space to avoid backpressure on the driver. We increase the size
 * of the receive buffer (relative to the send buffer) to ensure a G2H
 * response always has a landing spot.
147 *
148 * In addition to submissions, the G2H buffer needs to be able to hold
149 * enough space for recoverable page fault notifications. The number of
150 * page faults is interrupt driven and can be as much as the number of
151 * compute resources available. However, most of the actual work for these
152 * is in a separate page fault worker thread. Therefore we only need to
153 * make sure the queue has enough space to handle all of the submissions
154 * and responses and an extra buffer for incoming page faults.
155 */
156
157#define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
158#define CTB_H2G_BUFFER_SIZE (SZ_4K)
159#define CTB_G2H_BUFFER_SIZE (SZ_128K)
160#define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 2)
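/*
 * Worked example (derived from the defines above): with 2 KiB descriptors, a
 * 4 KiB H2G buffer and a 128 KiB G2H buffer, guc_ct_size() comes to
 * 2 * 2K + 4K + 128K = 136 KiB. Half of the G2H buffer
 * (G2H_ROOM_BUFFER_SIZE) is kept out of the credit accounting so that
 * unsolicited notifications such as page faults always have room to land.
 */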
161
162/**
163 * xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full
164 * CT command queue
165 * @ct: the &xe_guc_ct. Unused at this moment but will be used in the future.
166 *
167 * Observation is that a 4KiB buffer full of commands takes a little over a
168 * second to process. Use that to calculate maximum time to process a full CT
169 * command queue.
170 *
171 * Return: Maximum time to process a full CT queue in jiffies.
172 */
173long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
174{
175 BUILD_BUG_ON(!IS_ALIGNED(CTB_H2G_BUFFER_SIZE, SZ_4));
176 return (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ;
177}
178
179static size_t guc_ct_size(void)
180{
181 return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
182 CTB_G2H_BUFFER_SIZE;
183}
184
185static void guc_ct_fini(struct drm_device *drm, void *arg)
186{
187 struct xe_guc_ct *ct = arg;
188
189 destroy_workqueue(ct->g2h_wq);
190 xa_destroy(&ct->fence_lookup);
191}
192
193static void receive_g2h(struct xe_guc_ct *ct);
194static void g2h_worker_func(struct work_struct *w);
195static void safe_mode_worker_func(struct work_struct *w);
196
197static void primelockdep(struct xe_guc_ct *ct)
198{
199 if (!IS_ENABLED(CONFIG_LOCKDEP))
200 return;
201
202 fs_reclaim_acquire(GFP_KERNEL);
203 might_lock(&ct->lock);
204 fs_reclaim_release(GFP_KERNEL);
205}
206
207int xe_guc_ct_init(struct xe_guc_ct *ct)
208{
209 struct xe_device *xe = ct_to_xe(ct);
210 struct xe_gt *gt = ct_to_gt(ct);
211 struct xe_tile *tile = gt_to_tile(gt);
212 struct xe_bo *bo;
213 int err;
214
215 xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
216
217 ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM);
218 if (!ct->g2h_wq)
219 return -ENOMEM;
220
221 spin_lock_init(&ct->fast_lock);
222 xa_init(&ct->fence_lookup);
223 INIT_WORK(&ct->g2h_worker, g2h_worker_func);
224 INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
225#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
226 spin_lock_init(&ct->dead.lock);
227 INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
228#endif
229 init_waitqueue_head(&ct->wq);
230 init_waitqueue_head(&ct->g2h_fence_wq);
231
232 err = drmm_mutex_init(&xe->drm, &ct->lock);
233 if (err)
234 return err;
235
236 primelockdep(ct);
237
238 bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
239 XE_BO_FLAG_SYSTEM |
240 XE_BO_FLAG_GGTT |
241 XE_BO_FLAG_GGTT_INVALIDATE |
242 XE_BO_FLAG_PINNED_NORESTORE);
243 if (IS_ERR(bo))
244 return PTR_ERR(bo);
245
246 ct->bo = bo;
247
248 err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
249 if (err)
250 return err;
251
252 xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
253 ct->state = XE_GUC_CT_STATE_DISABLED;
254 return 0;
255}
256ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
257
258#define desc_read(xe_, guc_ctb__, field_) \
259 xe_map_rd_field(xe_, &guc_ctb__->desc, 0, \
260 struct guc_ct_buffer_desc, field_)
261
262#define desc_write(xe_, guc_ctb__, field_, val_) \
263 xe_map_wr_field(xe_, &guc_ctb__->desc, 0, \
264 struct guc_ct_buffer_desc, field_, val_)
265
266static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
267 struct iosys_map *map)
268{
269 h2g->info.size = CTB_H2G_BUFFER_SIZE / sizeof(u32);
270 h2g->info.resv_space = 0;
271 h2g->info.tail = 0;
272 h2g->info.head = 0;
273 h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
274 h2g->info.size) -
275 h2g->info.resv_space;
276 h2g->info.broken = false;
277
278 h2g->desc = *map;
279 xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
280
281 h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2);
282}
283
284static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
285 struct iosys_map *map)
286{
287 g2h->info.size = CTB_G2H_BUFFER_SIZE / sizeof(u32);
288 g2h->info.resv_space = G2H_ROOM_BUFFER_SIZE / sizeof(u32);
289 g2h->info.head = 0;
290 g2h->info.tail = 0;
291 g2h->info.space = CIRC_SPACE(g2h->info.tail, g2h->info.head,
292 g2h->info.size) -
293 g2h->info.resv_space;
294 g2h->info.broken = false;
295
296 g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
297 xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
298
299 g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 +
300 CTB_H2G_BUFFER_SIZE);
301}
302
303static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
304{
305 struct xe_guc *guc = ct_to_guc(ct);
306 u32 desc_addr, ctb_addr, size;
307 int err;
308
309 desc_addr = xe_bo_ggtt_addr(ct->bo);
310 ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
311 size = ct->ctbs.h2g.info.size * sizeof(u32);
312
313 err = xe_guc_self_cfg64(guc,
314 GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY,
315 desc_addr);
316 if (err)
317 return err;
318
319 err = xe_guc_self_cfg64(guc,
320 GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY,
321 ctb_addr);
322 if (err)
323 return err;
324
325 return xe_guc_self_cfg32(guc,
326 GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY,
327 size);
328}
329
330static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
331{
332 struct xe_guc *guc = ct_to_guc(ct);
333 u32 desc_addr, ctb_addr, size;
334 int err;
335
336 desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
337 ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
338 CTB_H2G_BUFFER_SIZE;
339 size = ct->ctbs.g2h.info.size * sizeof(u32);
340
341 err = xe_guc_self_cfg64(guc,
342 GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
343 desc_addr);
344 if (err)
345 return err;
346
347 err = xe_guc_self_cfg64(guc,
348 GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
349 ctb_addr);
350 if (err)
351 return err;
352
353 return xe_guc_self_cfg32(guc,
354 GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
355 size);
356}
357
358static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
359{
360 u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
361 FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
362 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
363 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
364 GUC_ACTION_HOST2GUC_CONTROL_CTB),
365 FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL,
366 enable ? GUC_CTB_CONTROL_ENABLE :
367 GUC_CTB_CONTROL_DISABLE),
368 };
369 int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));
370
371 return ret > 0 ? -EPROTO : ret;
372}
373
374static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
375 enum xe_guc_ct_state state)
376{
377 mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */
378 spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */
379
380 xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
381 state == XE_GUC_CT_STATE_STOPPED);
382
383 if (ct->g2h_outstanding)
384 xe_pm_runtime_put(ct_to_xe(ct));
385 ct->g2h_outstanding = 0;
386 ct->state = state;
387
388 spin_unlock_irq(&ct->fast_lock);
389
390 /*
	 * Lockdep doesn't like this under the fast lock and the destroy only
	 * needs to be serialized with the send path, which the ct lock provides.
393 */
394 xa_destroy(&ct->fence_lookup);
395
396 mutex_unlock(&ct->lock);
397}
398
399static bool ct_needs_safe_mode(struct xe_guc_ct *ct)
400{
401 return !pci_dev_msi_enabled(to_pci_dev(ct_to_xe(ct)->drm.dev));
402}
403
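/*
 * "Safe mode": without MSI there is no G2H interrupt, so fall back to polling
 * the G2H queue from a delayed worker, re-armed every HZ / 10.
 */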
404static bool ct_restart_safe_mode_worker(struct xe_guc_ct *ct)
405{
406 if (!ct_needs_safe_mode(ct))
407 return false;
408
409 queue_delayed_work(ct->g2h_wq, &ct->safe_mode_worker, HZ / 10);
410 return true;
411}
412
413static void safe_mode_worker_func(struct work_struct *w)
414{
415 struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, safe_mode_worker.work);
416
417 receive_g2h(ct);
418
419 if (!ct_restart_safe_mode_worker(ct))
420 xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode canceled\n");
421}
422
423static void ct_enter_safe_mode(struct xe_guc_ct *ct)
424{
425 if (ct_restart_safe_mode_worker(ct))
426 xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode enabled\n");
427}
428
429static void ct_exit_safe_mode(struct xe_guc_ct *ct)
430{
431 if (cancel_delayed_work_sync(&ct->safe_mode_worker))
432 xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
433}
434
435int xe_guc_ct_enable(struct xe_guc_ct *ct)
436{
437 struct xe_device *xe = ct_to_xe(ct);
438 struct xe_gt *gt = ct_to_gt(ct);
439 int err;
440
441 xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
442
443 xe_map_memset(xe, &ct->bo->vmap, 0, 0, ct->bo->size);
444 guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
445 guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
446
447 err = guc_ct_ctb_h2g_register(ct);
448 if (err)
449 goto err_out;
450
451 err = guc_ct_ctb_g2h_register(ct);
452 if (err)
453 goto err_out;
454
455 err = guc_ct_control_toggle(ct, true);
456 if (err)
457 goto err_out;
458
459 xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED);
460
461 smp_mb();
462 wake_up_all(&ct->wq);
463 xe_gt_dbg(gt, "GuC CT communication channel enabled\n");
464
465 if (ct_needs_safe_mode(ct))
466 ct_enter_safe_mode(ct);
467
468#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
469 /*
470 * The CT has now been reset so the dumper can be re-armed
471 * after any existing dead state has been dumped.
472 */
473 spin_lock_irq(&ct->dead.lock);
474 if (ct->dead.reason) {
475 ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
476 queue_work(system_unbound_wq, &ct->dead.worker);
477 }
478 spin_unlock_irq(&ct->dead.lock);
479#endif
480
481 return 0;
482
483err_out:
484 xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
485 CT_DEAD(ct, NULL, SETUP);
486
487 return err;
488}
489
490static void stop_g2h_handler(struct xe_guc_ct *ct)
491{
492 cancel_work_sync(&ct->g2h_worker);
493}
494
495/**
496 * xe_guc_ct_disable - Set GuC to disabled state
497 * @ct: the &xe_guc_ct
498 *
499 * Set GuC CT to disabled state and stop g2h handler. No outstanding g2h expected
500 * in this transition.
501 */
502void xe_guc_ct_disable(struct xe_guc_ct *ct)
503{
504 xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED);
505 ct_exit_safe_mode(ct);
506 stop_g2h_handler(ct);
507}
508
509/**
510 * xe_guc_ct_stop - Set GuC to stopped state
511 * @ct: the &xe_guc_ct
512 *
513 * Set GuC CT to stopped state, stop g2h handler, and clear any outstanding g2h
514 */
515void xe_guc_ct_stop(struct xe_guc_ct *ct)
516{
517 xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
518 stop_g2h_handler(ct);
519}
520
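/*
 * If the locally cached space is insufficient, re-read the head from the
 * descriptor: the GuC advances it as it consumes H2G messages, which may have
 * freed up room since the last send.
 */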
521static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
522{
523 struct guc_ctb *h2g = &ct->ctbs.h2g;
524
525 lockdep_assert_held(&ct->lock);
526
527 if (cmd_len > h2g->info.space) {
528 h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
529
530 if (h2g->info.head > h2g->info.size) {
531 struct xe_device *xe = ct_to_xe(ct);
532 u32 desc_status = desc_read(xe, h2g, status);
533
534 desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
535
			xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u\n",
537 h2g->info.head, h2g->info.size);
538 CT_DEAD(ct, h2g, H2G_HAS_ROOM);
539 return false;
540 }
541
542 h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
543 h2g->info.size) -
544 h2g->info.resv_space;
545 if (cmd_len > h2g->info.space)
546 return false;
547 }
548
549 return true;
550}
551
552static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
553{
554 if (!g2h_len)
555 return true;
556
557 lockdep_assert_held(&ct->fast_lock);
558
559 return ct->ctbs.g2h.info.space > g2h_len;
560}
561
562static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
563{
564 lockdep_assert_held(&ct->lock);
565
566 if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
567 return -EBUSY;
568
569 return 0;
570}
571
572static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
573{
574 lockdep_assert_held(&ct->lock);
575 ct->ctbs.h2g.info.space -= cmd_len;
576}
577
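/*
 * G2H credit tracking: an H2G that expects G2H replies reserves space for
 * them (in dwords) up front and bumps g2h_outstanding; the space is handed
 * back via __g2h_release_space() once the reply has been processed. The first
 * outstanding G2H also takes a runtime PM reference, dropped when the count
 * returns to zero.
 */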
578static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
579{
580 xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
581 xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
582 (g2h_len && num_g2h));
583
584 if (g2h_len) {
585 lockdep_assert_held(&ct->fast_lock);
586
587 if (!ct->g2h_outstanding)
588 xe_pm_runtime_get_noresume(ct_to_xe(ct));
589
590 ct->ctbs.g2h.info.space -= g2h_len;
591 ct->g2h_outstanding += num_g2h;
592 }
593}
594
595static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
596{
597 bool bad = false;
598
599 lockdep_assert_held(&ct->fast_lock);
600
601 bad = ct->ctbs.g2h.info.space + g2h_len >
602 ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space;
603 bad |= !ct->g2h_outstanding;
604
605 if (bad) {
606 xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n",
607 ct->ctbs.g2h.info.space, g2h_len,
608 ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space,
609 ct->ctbs.g2h.info.space + g2h_len,
610 ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space,
611 ct->g2h_outstanding);
612 CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE);
613 return;
614 }
615
616 ct->ctbs.g2h.info.space += g2h_len;
617 if (!--ct->g2h_outstanding)
618 xe_pm_runtime_put(ct_to_xe(ct));
619}
620
621static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
622{
623 spin_lock_irq(&ct->fast_lock);
624 __g2h_release_space(ct, g2h_len);
625 spin_unlock_irq(&ct->fast_lock);
626}
627
628#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
629
630static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
631 u32 ct_fence_value, bool want_response)
632{
633 struct xe_device *xe = ct_to_xe(ct);
634 struct xe_gt *gt = ct_to_gt(ct);
635 struct guc_ctb *h2g = &ct->ctbs.h2g;
636 u32 cmd[H2G_CT_HEADERS];
637 u32 tail = h2g->info.tail;
638 u32 full_len;
639 struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
640 tail * sizeof(u32));
641 u32 desc_status;
642
643 full_len = len + GUC_CTB_HDR_LEN;
644
645 lockdep_assert_held(&ct->lock);
646 xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
647
648 desc_status = desc_read(xe, h2g, status);
649 if (desc_status) {
650 xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status);
651 goto corrupted;
652 }
653
654 if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
655 u32 desc_tail = desc_read(xe, h2g, tail);
656 u32 desc_head = desc_read(xe, h2g, head);
657
658 if (tail != desc_tail) {
659 desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH);
660 xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail);
661 goto corrupted;
662 }
663
664 if (tail > h2g->info.size) {
665 desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
666 xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n",
667 tail, h2g->info.size);
668 goto corrupted;
669 }
670
671 if (desc_head >= h2g->info.size) {
672 desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
			xe_gt_err(gt, "CT write: invalid head offset %u >= %u\n",
674 desc_head, h2g->info.size);
675 goto corrupted;
676 }
677 }
678
679 /* Command will wrap, zero fill (NOPs), return and check credits again */
680 if (tail + full_len > h2g->info.size) {
681 xe_map_memset(xe, &map, 0, 0,
682 (h2g->info.size - tail) * sizeof(u32));
683 h2g_reserve_space(ct, (h2g->info.size - tail));
684 h2g->info.tail = 0;
685 desc_write(xe, h2g, tail, h2g->info.tail);
686
687 return -EAGAIN;
688 }
689
690 /*
691 * dw0: CT header (including fence)
692 * dw1: HXG header (including action code)
693 * dw2+: action data
694 */
695 cmd[0] = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
696 FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
697 FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
698 if (want_response) {
699 cmd[1] =
700 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
701 FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
702 GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
703 } else {
704 cmd[1] =
705 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
706 FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
707 GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
708 }
709
710 /* H2G header in cmd[1] replaces action[0] so: */
711 --len;
712 ++action;
713
714 /* Write H2G ensuring visible before descriptor update */
715 xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
716 xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
717 xe_device_wmb(xe);
718
719 /* Update local copies */
720 h2g->info.tail = (tail + full_len) % h2g->info.size;
721 h2g_reserve_space(ct, full_len);
722
723 /* Update descriptor */
724 desc_write(xe, h2g, tail, h2g->info.tail);
725
726 trace_xe_guc_ctb_h2g(xe, gt->info.id, *(action - 1), full_len,
727 desc_read(xe, h2g, head), h2g->info.tail);
728
729 return 0;
730
731corrupted:
732 CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE);
733 return -EPIPE;
734}
735
736/*
 * The CT protocol accepts a 16-bit fence. This field is fully owned by the
738 * driver, the GuC will just copy it to the reply message. Since we need to
739 * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
740 * we use one bit of the seqno as an indicator for that and a rolling counter
741 * for the remaining 15 bits.
742 */
743#define CT_SEQNO_MASK GENMASK(14, 0)
744#define CT_SEQNO_UNTRACKED BIT(15)
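/*
 * For example: a FAST_REQUEST with rolling counter 0x123 is sent with fence
 * 0x8123 (CT_SEQNO_UNTRACKED set) and is never looked up on reply, while a
 * blocking request with the same counter uses fence 0x0123 and is tracked in
 * ct->fence_lookup.
 */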
745static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
746{
747 u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
748
749 if (!is_g2h_fence)
750 seqno |= CT_SEQNO_UNTRACKED;
751
752 return seqno;
753}
754
755static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
756 u32 len, u32 g2h_len, u32 num_g2h,
757 struct g2h_fence *g2h_fence)
758{
759 struct xe_gt *gt __maybe_unused = ct_to_gt(ct);
760 u16 seqno;
761 int ret;
762
763 xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
764 xe_gt_assert(gt, !g2h_len || !g2h_fence);
765 xe_gt_assert(gt, !num_g2h || !g2h_fence);
766 xe_gt_assert(gt, !g2h_len || num_g2h);
767 xe_gt_assert(gt, g2h_len || !num_g2h);
768 lockdep_assert_held(&ct->lock);
769
770 if (unlikely(ct->ctbs.h2g.info.broken)) {
771 ret = -EPIPE;
772 goto out;
773 }
774
775 if (ct->state == XE_GUC_CT_STATE_DISABLED) {
776 ret = -ENODEV;
777 goto out;
778 }
779
780 if (ct->state == XE_GUC_CT_STATE_STOPPED) {
781 ret = -ECANCELED;
782 goto out;
783 }
784
785 xe_gt_assert(gt, xe_guc_ct_enabled(ct));
786
787 if (g2h_fence) {
788 g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
789 num_g2h = 1;
790
791 if (g2h_fence_needs_alloc(g2h_fence)) {
792 g2h_fence->seqno = next_ct_seqno(ct, true);
793 ret = xa_err(xa_store(&ct->fence_lookup,
794 g2h_fence->seqno, g2h_fence,
795 GFP_ATOMIC));
796 if (ret)
797 goto out;
798 }
799
800 seqno = g2h_fence->seqno;
801 } else {
802 seqno = next_ct_seqno(ct, false);
803 }
804
805 if (g2h_len)
806 spin_lock_irq(&ct->fast_lock);
807retry:
808 ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
809 if (unlikely(ret))
810 goto out_unlock;
811
812 ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
813 if (unlikely(ret)) {
814 if (ret == -EAGAIN)
815 goto retry;
816 goto out_unlock;
817 }
818
819 __g2h_reserve_space(ct, g2h_len, num_g2h);
820 xe_guc_notify(ct_to_guc(ct));
821out_unlock:
822 if (g2h_len)
823 spin_unlock_irq(&ct->fast_lock);
824out:
825 return ret;
826}
827
828static void kick_reset(struct xe_guc_ct *ct)
829{
830 xe_gt_reset_async(ct_to_gt(ct));
831}
832
833static int dequeue_one_g2h(struct xe_guc_ct *ct);
834
835static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
836 u32 g2h_len, u32 num_g2h,
837 struct g2h_fence *g2h_fence)
838{
839 struct xe_device *xe = ct_to_xe(ct);
840 struct xe_gt *gt = ct_to_gt(ct);
841 unsigned int sleep_period_ms = 1;
842 int ret;
843
844 xe_gt_assert(gt, !g2h_len || !g2h_fence);
845 lockdep_assert_held(&ct->lock);
846 xe_device_assert_mem_access(ct_to_xe(ct));
847
848try_again:
849 ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
850 g2h_fence);
851
852 /*
853 * We wait to try to restore credits for about 1 second before bailing.
	 * In the case of H2G credits we have no choice but to wait for the
	 * GuC to consume H2Gs in the channel, so we use a wait / sleep loop. In
856 * the case of G2H we process any G2H in the channel, hopefully freeing
857 * credits as we consume the G2H messages.
858 */
859 if (unlikely(ret == -EBUSY &&
860 !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
861 struct guc_ctb *h2g = &ct->ctbs.h2g;
862
863 if (sleep_period_ms == 1024)
864 goto broken;
865
866 trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail,
867 h2g->info.size,
868 h2g->info.space,
869 len + GUC_CTB_HDR_LEN);
870 msleep(sleep_period_ms);
871 sleep_period_ms <<= 1;
872
873 goto try_again;
874 } else if (unlikely(ret == -EBUSY)) {
875 struct xe_device *xe = ct_to_xe(ct);
876 struct guc_ctb *g2h = &ct->ctbs.g2h;
877
878 trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head,
879 desc_read(xe, g2h, tail),
880 g2h->info.size,
881 g2h->info.space,
882 g2h_fence ?
883 GUC_CTB_HXG_MSG_MAX_LEN :
884 g2h_len);
885
886#define g2h_avail(ct) \
887 (desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
888 if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
889 g2h_avail(ct), HZ))
890 goto broken;
891#undef g2h_avail
892
893 ret = dequeue_one_g2h(ct);
894 if (ret < 0) {
895 if (ret != -ECANCELED)
896 xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
897 ERR_PTR(ret));
898 goto broken;
899 }
900
901 goto try_again;
902 }
903
904 return ret;
905
906broken:
	xe_gt_err(gt, "No forward progress on H2G, reset required\n");
908 CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK);
909
910 return -EDEADLK;
911}
912
913static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
914 u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
915{
916 int ret;
917
918 xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence);
919
920 mutex_lock(&ct->lock);
921 ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
922 mutex_unlock(&ct->lock);
923
924 return ret;
925}
926
927int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
928 u32 g2h_len, u32 num_g2h)
929{
930 int ret;
931
932 ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
933 if (ret == -EDEADLK)
934 kick_reset(ct);
935
936 return ret;
937}
938
939int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
940 u32 g2h_len, u32 num_g2h)
941{
942 int ret;
943
944 ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
945 if (ret == -EDEADLK)
946 kick_reset(ct);
947
948 return ret;
949}
950
951int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
952{
953 int ret;
954
955 lockdep_assert_held(&ct->lock);
956
957 ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
958 if (ret == -EDEADLK)
959 kick_reset(ct);
960
961 return ret;
962}
963
964/*
 * Check if a GT reset is in progress or will occur, and whether that reset
 * brought the CT back up. Randomly picking 5 seconds as an upper limit for
 * the GT reset to complete.
967 */
968static bool retry_failure(struct xe_guc_ct *ct, int ret)
969{
970 if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
971 return false;
972
973#define ct_alive(ct) \
974 (xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
975 !ct->ctbs.g2h.info.broken)
976 if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
977 return false;
978#undef ct_alive
979
980 return true;
981}
982
983static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
984 u32 *response_buffer, bool no_fail)
985{
986 struct xe_gt *gt = ct_to_gt(ct);
987 struct g2h_fence g2h_fence;
988 int ret = 0;
989
990 /*
	 * We use a fence to implement blocking sends / receiving response data.
	 * The seqno of the fence is sent in the H2G, returned in the G2H, and
	 * an xarray is used as the storage medium with the seqno as the key.
	 * Fields in the fence hold the success, failure and retry status and the
	 * response data. Safe to allocate on the stack as the xarray holds the
	 * only other reference and the entry is removed before this function exits.
997 */
998retry:
999 g2h_fence_init(&g2h_fence, response_buffer);
1000retry_same_fence:
1001 ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
1002 if (unlikely(ret == -ENOMEM)) {
		/* Retry the allocation with GFP_KERNEL */
1004 ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
1005 &g2h_fence, GFP_KERNEL));
1006 if (ret)
1007 return ret;
1008
1009 goto retry_same_fence;
1010 } else if (unlikely(ret)) {
1011 if (ret == -EDEADLK)
1012 kick_reset(ct);
1013
1014 if (no_fail && retry_failure(ct, ret))
1015 goto retry_same_fence;
1016
1017 if (!g2h_fence_needs_alloc(&g2h_fence))
1018 xa_erase(&ct->fence_lookup, g2h_fence.seqno);
1019
1020 return ret;
1021 }
1022
1023 ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
1024 if (!ret) {
1025 LNL_FLUSH_WORK(&ct->g2h_worker);
1026 if (g2h_fence.done) {
1027 xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
1028 g2h_fence.seqno, action[0]);
1029 ret = 1;
1030 }
1031 }
1032
1033 /*
1034 * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
1035 * the stack, since we have no clue if it will fire after the timeout before we can erase
1036 * from the xa. Also we have some dependent loads and stores below for which we need the
1037 * correct ordering, and we lack the needed barriers.
1038 */
1039 mutex_lock(&ct->lock);
1040 if (!ret) {
		xe_gt_err(gt, "Timed out waiting for G2H, fence %u, action %04x, done %s",
1042 g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
1043 xa_erase(&ct->fence_lookup, g2h_fence.seqno);
1044 mutex_unlock(&ct->lock);
1045 return -ETIME;
1046 }
1047
1048 if (g2h_fence.retry) {
1049 xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
1050 action[0], g2h_fence.reason);
1051 mutex_unlock(&ct->lock);
1052 goto retry;
1053 }
1054 if (g2h_fence.fail) {
1055 xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n",
1056 action[0], g2h_fence.error, g2h_fence.hint);
1057 ret = -EIO;
1058 }
1059
1060 if (ret > 0)
1061 ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
1062
1063 mutex_unlock(&ct->lock);
1064
1065 return ret;
1066}
1067
1068/**
1069 * xe_guc_ct_send_recv - Send and receive HXG to the GuC
1070 * @ct: the &xe_guc_ct
1071 * @action: the dword array with `HXG Request`_ message (can't be NULL)
1072 * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
1073 * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
1074 *
1075 * Send a `HXG Request`_ message to the GuC over CT communication channel and
1076 * blocks until GuC replies with a `HXG Response`_ message.
1077 *
1078 * For non-blocking communication with GuC use xe_guc_ct_send().
1079 *
1080 * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
1081 *
1082 * Return: response length (in dwords) if &response_buffer was not NULL, or
1083 * DATA0 from `HXG Response`_ if &response_buffer was NULL, or
1084 * a negative error code on failure.
1085 */
1086int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
1087 u32 *response_buffer)
1088{
1089 KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
1090 return guc_ct_send_recv(ct, action, len, response_buffer, false);
1091}
1092ALLOW_ERROR_INJECTION(xe_guc_ct_send_recv, ERRNO);
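/*
 * Minimal usage sketch (illustrative only; XE_GUC_ACTION_FOO and param are
 * placeholders, not a real ABI definition):
 *
 *	u32 action[] = { XE_GUC_ACTION_FOO, param };
 *	int ret = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), NULL);
 *
 * With a NULL response buffer a non-negative return carries DATA0 from the
 * HXG response; with a buffer of at least GUC_CTB_MAX_DWORDS dwords the
 * return is the response length in dwords.
 */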
1093
1094int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
1095 u32 len, u32 *response_buffer)
1096{
1097 return guc_ct_send_recv(ct, action, len, response_buffer, true);
1098}
1099
1100static u32 *msg_to_hxg(u32 *msg)
1101{
1102 return msg + GUC_CTB_MSG_MIN_LEN;
1103}
1104
1105static u32 msg_len_to_hxg_len(u32 len)
1106{
1107 return len - GUC_CTB_MSG_MIN_LEN;
1108}
1109
1110static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
1111{
1112 u32 *hxg = msg_to_hxg(msg);
1113 u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1114
1115 lockdep_assert_held(&ct->lock);
1116
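	/*
	 * Only the "done" events below correspond to H2Gs that reserved G2H
	 * credits when they were sent, so only they return that space here;
	 * other events are unsolicited and have no reserved credits to give
	 * back.
	 */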
1117 switch (action) {
1118 case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1119 case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1120 case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
1121 case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1122 g2h_release_space(ct, len);
1123 }
1124
1125 return 0;
1126}
1127
1128static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
1129{
1130 struct xe_gt *gt = ct_to_gt(ct);
1131
1132 if (action == XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
1133 xe_gt_err(gt, "GuC Crash dump notification\n");
1134 else if (action == XE_GUC_ACTION_NOTIFY_EXCEPTION)
1135 xe_gt_err(gt, "GuC Exception notification\n");
1136 else
1137 xe_gt_err(gt, "Unknown GuC crash notification: 0x%04X\n", action);
1138
1139 CT_DEAD(ct, NULL, CRASH);
1140
1141 kick_reset(ct);
1142
1143 return 0;
1144}
1145
1146static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
1147{
1148 struct xe_gt *gt = ct_to_gt(ct);
1149 u32 *hxg = msg_to_hxg(msg);
1150 u32 hxg_len = msg_len_to_hxg_len(len);
1151 u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
1152 u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1153 struct g2h_fence *g2h_fence;
1154
1155 lockdep_assert_held(&ct->lock);
1156
1157 /*
1158 * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
1159 * Those messages should never fail, so if we do get an error back it
1160 * means we're likely doing an illegal operation and the GuC is
1161 * rejecting it. We have no way to inform the code that submitted the
1162 * H2G that the message was rejected, so we need to escalate the
1163 * failure to trigger a reset.
1164 */
1165 if (fence & CT_SEQNO_UNTRACKED) {
1166 if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
1167 xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
1168 fence,
1169 FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
1170 FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
1171 else
1172 xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
1173 type, fence);
1174 CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
1175
1176 return -EPROTO;
1177 }
1178
1179 g2h_fence = xa_erase(&ct->fence_lookup, fence);
1180 if (unlikely(!g2h_fence)) {
1181 /* Don't tear down channel, as send could've timed out */
1182 /* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */
1183 xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
1184 g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
1185 return 0;
1186 }
1187
1188 xe_gt_assert(gt, fence == g2h_fence->seqno);
1189
1190 if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
1191 g2h_fence->fail = true;
1192 g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
1193 g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
1194 } else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
1195 g2h_fence->retry = true;
1196 g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
1197 } else if (g2h_fence->response_buffer) {
1198 g2h_fence->response_len = hxg_len;
1199 memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
1200 } else {
1201 g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
1202 }
1203
1204 g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
1205
1206 g2h_fence->done = true;
1207 smp_mb();
1208
1209 wake_up_all(&ct->g2h_fence_wq);
1210
1211 return 0;
1212}
1213
1214static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
1215{
1216 struct xe_gt *gt = ct_to_gt(ct);
1217 u32 *hxg = msg_to_hxg(msg);
1218 u32 origin, type;
1219 int ret;
1220
1221 lockdep_assert_held(&ct->lock);
1222
1223 origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
1224 if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
1225 xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
1226 origin);
1227 CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN);
1228
1229 return -EPROTO;
1230 }
1231
1232 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1233 switch (type) {
1234 case GUC_HXG_TYPE_EVENT:
1235 ret = parse_g2h_event(ct, msg, len);
1236 break;
1237 case GUC_HXG_TYPE_RESPONSE_SUCCESS:
1238 case GUC_HXG_TYPE_RESPONSE_FAILURE:
1239 case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
1240 ret = parse_g2h_response(ct, msg, len);
1241 break;
1242 default:
1243 xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
1244 type);
1245 CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE);
1246
1247 ret = -EOPNOTSUPP;
1248 }
1249
1250 return ret;
1251}
1252
1253static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
1254{
1255 struct xe_guc *guc = ct_to_guc(ct);
1256 struct xe_gt *gt = ct_to_gt(ct);
1257 u32 hxg_len = msg_len_to_hxg_len(len);
1258 u32 *hxg = msg_to_hxg(msg);
1259 u32 action, adj_len;
1260 u32 *payload;
1261 int ret = 0;
1262
1263 if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
1264 return 0;
1265
1266 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1267 payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
1268 adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;
1269
1270 switch (action) {
1271 case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1272 ret = xe_guc_sched_done_handler(guc, payload, adj_len);
1273 break;
1274 case XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1275 ret = xe_guc_deregister_done_handler(guc, payload, adj_len);
1276 break;
1277 case XE_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
1278 ret = xe_guc_exec_queue_reset_handler(guc, payload, adj_len);
1279 break;
1280 case XE_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
1281 ret = xe_guc_exec_queue_reset_failure_handler(guc, payload,
1282 adj_len);
1283 break;
1284 case XE_GUC_ACTION_SCHED_ENGINE_MODE_DONE:
1285 /* Selftest only at the moment */
1286 break;
1287 case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
1288 ret = xe_guc_error_capture_handler(guc, payload, adj_len);
1289 break;
1290 case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
1291 /* FIXME: Handle this */
1292 break;
1293 case XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR:
1294 ret = xe_guc_exec_queue_memory_cat_error_handler(guc, payload,
1295 adj_len);
1296 break;
1297 case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1298 ret = xe_guc_pagefault_handler(guc, payload, adj_len);
1299 break;
1300 case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1301 ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
1302 adj_len);
1303 break;
1304 case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY:
1305 ret = xe_guc_access_counter_notify_handler(guc, payload,
1306 adj_len);
1307 break;
1308 case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
1309 ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
1310 break;
1311 case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
1312 ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
1313 break;
1314 case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY:
1315 ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len);
1316 break;
1317 case GUC_ACTION_GUC2PF_ADVERSE_EVENT:
1318 ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len);
1319 break;
1320 case XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
1321 case XE_GUC_ACTION_NOTIFY_EXCEPTION:
1322 ret = guc_crash_process_msg(ct, action);
1323 break;
1324 default:
1325 xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
1326 }
1327
1328 if (ret) {
1329 xe_gt_err(gt, "G2H action %#04x failed (%pe) len %u msg %*ph\n",
1330 action, ERR_PTR(ret), hxg_len, (int)sizeof(u32) * hxg_len, hxg);
1331 CT_DEAD(ct, NULL, PROCESS_FAILED);
1332 }
1333
1334 return 0;
1335}
1336
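/*
 * Read a single G2H message from the buffer into the caller-supplied msg
 * array. Returns the message length in dwords, 0 if nothing is pending, or a
 * negative error code. With @fast_path set, only page-fault and TLB
 * invalidation events are consumed; everything else is left in place for the
 * G2H worker.
 */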
1337static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
1338{
1339 struct xe_device *xe = ct_to_xe(ct);
1340 struct xe_gt *gt = ct_to_gt(ct);
1341 struct guc_ctb *g2h = &ct->ctbs.g2h;
1342 u32 tail, head, len, desc_status;
1343 s32 avail;
1344 u32 action;
1345 u32 *hxg;
1346
1347 xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
1348 lockdep_assert_held(&ct->fast_lock);
1349
1350 if (ct->state == XE_GUC_CT_STATE_DISABLED)
1351 return -ENODEV;
1352
1353 if (ct->state == XE_GUC_CT_STATE_STOPPED)
1354 return -ECANCELED;
1355
1356 if (g2h->info.broken)
1357 return -EPIPE;
1358
1359 xe_gt_assert(gt, xe_guc_ct_enabled(ct));
1360
1361 desc_status = desc_read(xe, g2h, status);
1362 if (desc_status) {
1363 if (desc_status & GUC_CTB_STATUS_DISABLED) {
1364 /*
1365 * Potentially valid if a CLIENT_RESET request resulted in
1366 * contexts/engines being reset. But should never happen as
1367 * no contexts should be active when CLIENT_RESET is sent.
1368 */
1369 xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n");
1370 desc_status &= ~GUC_CTB_STATUS_DISABLED;
1371 }
1372
1373 if (desc_status) {
1374 xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status);
1375 goto corrupted;
1376 }
1377 }
1378
1379 if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
1380 u32 desc_tail = desc_read(xe, g2h, tail);
1381 /*
1382 u32 desc_head = desc_read(xe, g2h, head);
1383
1384 * info.head and desc_head are updated back-to-back at the end of
1385 * this function and nowhere else. Hence, they cannot be different
1386 * unless two g2h_read calls are running concurrently. Which is not
1387 * possible because it is guarded by ct->fast_lock. And yet, some
1388 * discrete platforms are regularly hitting this error :(.
1389 *
1390 * desc_head rolling backwards shouldn't cause any noticeable
1391 * problems - just a delay in GuC being allowed to proceed past that
1392 * point in the queue. So for now, just disable the error until it
1393 * can be root caused.
1394 *
1395 if (g2h->info.head != desc_head) {
1396 desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH);
1397 xe_gt_err(gt, "CT read: head was modified %u != %u\n",
1398 desc_head, g2h->info.head);
1399 goto corrupted;
1400 }
1401 */
1402
1403 if (g2h->info.head > g2h->info.size) {
1404 desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
1405 xe_gt_err(gt, "CT read: head out of range: %u vs %u\n",
1406 g2h->info.head, g2h->info.size);
1407 goto corrupted;
1408 }
1409
1410 if (desc_tail >= g2h->info.size) {
1411 desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
			xe_gt_err(gt, "CT read: invalid tail offset %u >= %u\n",
1413 desc_tail, g2h->info.size);
1414 goto corrupted;
1415 }
1416 }
1417
1418 /* Calculate DW available to read */
1419 tail = desc_read(xe, g2h, tail);
1420 avail = tail - g2h->info.head;
1421 if (unlikely(avail == 0))
1422 return 0;
1423
1424 if (avail < 0)
1425 avail += g2h->info.size;
1426
1427 /* Read header */
1428 xe_map_memcpy_from(xe, msg, &g2h->cmds, sizeof(u32) * g2h->info.head,
1429 sizeof(u32));
1430 len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
1431 if (len > avail) {
1432 xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
1433 avail, len);
1434 goto corrupted;
1435 }
1436
1437 head = (g2h->info.head + 1) % g2h->info.size;
1438 avail = len - 1;
1439
1440 /* Read G2H message */
1441 if (avail + head > g2h->info.size) {
1442 u32 avail_til_wrap = g2h->info.size - head;
1443
1444 xe_map_memcpy_from(xe, msg + 1,
1445 &g2h->cmds, sizeof(u32) * head,
1446 avail_til_wrap * sizeof(u32));
1447 xe_map_memcpy_from(xe, msg + 1 + avail_til_wrap,
1448 &g2h->cmds, 0,
1449 (avail - avail_til_wrap) * sizeof(u32));
1450 } else {
1451 xe_map_memcpy_from(xe, msg + 1,
1452 &g2h->cmds, sizeof(u32) * head,
1453 avail * sizeof(u32));
1454 }
1455
1456 hxg = msg_to_hxg(msg);
1457 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1458
1459 if (fast_path) {
1460 if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
1461 return 0;
1462
1463 switch (action) {
1464 case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1465 case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1466 break; /* Process these in fast-path */
1467 default:
1468 return 0;
1469 }
1470 }
1471
1472 /* Update local / descriptor header */
1473 g2h->info.head = (head + avail) % g2h->info.size;
1474 desc_write(xe, g2h, head, g2h->info.head);
1475
1476 trace_xe_guc_ctb_g2h(xe, ct_to_gt(ct)->info.id,
1477 action, len, g2h->info.head, tail);
1478
1479 return len;
1480
1481corrupted:
1482 CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ);
1483 return -EPROTO;
1484}
1485
1486static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
1487{
1488 struct xe_gt *gt = ct_to_gt(ct);
1489 struct xe_guc *guc = ct_to_guc(ct);
1490 u32 hxg_len = msg_len_to_hxg_len(len);
1491 u32 *hxg = msg_to_hxg(msg);
1492 u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1493 u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
1494 u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
1495 int ret = 0;
1496
1497 switch (action) {
1498 case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
1499 ret = xe_guc_pagefault_handler(guc, payload, adj_len);
1500 break;
1501 case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
1502 __g2h_release_space(ct, len);
1503 ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
1504 adj_len);
1505 break;
1506 default:
1507 xe_gt_warn(gt, "NOT_POSSIBLE");
1508 }
1509
1510 if (ret) {
1511 xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
1512 action, ERR_PTR(ret));
1513 CT_DEAD(ct, NULL, FAST_G2H);
1514 }
1515}
1516
1517/**
1518 * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
1519 * @ct: GuC CT object
1520 *
 * Anything related to page faults is critical for performance, so process
 * these critical G2H messages in the IRQ handler. This is safe as these
 * handlers either just wake up waiters or queue another worker.
1524 */
1525void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
1526{
1527 struct xe_device *xe = ct_to_xe(ct);
1528 bool ongoing;
1529 int len;
1530
1531 ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
1532 if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
1533 return;
1534
1535 spin_lock(&ct->fast_lock);
1536 do {
1537 len = g2h_read(ct, ct->fast_msg, true);
1538 if (len > 0)
1539 g2h_fast_path(ct, ct->fast_msg, len);
1540 } while (len > 0);
1541 spin_unlock(&ct->fast_lock);
1542
1543 if (ongoing)
1544 xe_pm_runtime_put(xe);
1545}
1546
1547/* Returns less than zero on error, 0 on done, 1 on more available */
1548static int dequeue_one_g2h(struct xe_guc_ct *ct)
1549{
1550 int len;
1551 int ret;
1552
1553 lockdep_assert_held(&ct->lock);
1554
1555 spin_lock_irq(&ct->fast_lock);
1556 len = g2h_read(ct, ct->msg, false);
1557 spin_unlock_irq(&ct->fast_lock);
1558 if (len <= 0)
1559 return len;
1560
1561 ret = parse_g2h_msg(ct, ct->msg, len);
1562 if (unlikely(ret < 0))
1563 return ret;
1564
1565 ret = process_g2h_msg(ct, ct->msg, len);
1566 if (unlikely(ret < 0))
1567 return ret;
1568
1569 return 1;
1570}
1571
1572static void receive_g2h(struct xe_guc_ct *ct)
1573{
1574 bool ongoing;
1575 int ret;
1576
1577 /*
1578 * Normal users must always hold mem_access.ref around CT calls. However
1579 * during the runtime pm callbacks we rely on CT to talk to the GuC, but
1580 * at this stage we can't rely on mem_access.ref and even the
1581 * callback_task will be different than current. For such cases we just
1582 * need to ensure we always process the responses from any blocking
1583 * ct_send requests or where we otherwise expect some response when
1584 * initiated from those callbacks (which will need to wait for the below
1585 * dequeue_one_g2h()). The dequeue_one_g2h() will gracefully fail if
1586 * the device has suspended to the point that the CT communication has
1587 * been disabled.
1588 *
1589 * If we are inside the runtime pm callback, we can be the only task
1590 * still issuing CT requests (since that requires having the
1591 * mem_access.ref). It seems like it might in theory be possible to
1592 * receive unsolicited events from the GuC just as we are
1593 * suspending-resuming, but those will currently anyway be lost when
1594 * eventually exiting from suspend, hence no need to wake up the device
1595 * here. If we ever need something stronger than get_if_ongoing() then
1596 * we need to be careful with blocking the pm callbacks from getting CT
1597 * responses, if the worker here is blocked on those callbacks
1598 * completing, creating a deadlock.
1599 */
1600 ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
1601 if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
1602 return;
1603
1604 do {
1605 mutex_lock(&ct->lock);
1606 ret = dequeue_one_g2h(ct);
1607 mutex_unlock(&ct->lock);
1608
1609 if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
1610 xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d", ret);
1611 CT_DEAD(ct, NULL, G2H_RECV);
1612 kick_reset(ct);
1613 }
1614 } while (ret == 1);
1615
1616 if (ongoing)
1617 xe_pm_runtime_put(ct_to_xe(ct));
1618}
1619
1620static void g2h_worker_func(struct work_struct *w)
1621{
1622 struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
1623
1624 receive_g2h(ct);
1625}
1626
1627static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
1628 bool want_ctb)
1629{
1630 struct xe_guc_ct_snapshot *snapshot;
1631
1632 snapshot = kzalloc(sizeof(*snapshot), atomic ? GFP_ATOMIC : GFP_KERNEL);
1633 if (!snapshot)
1634 return NULL;
1635
1636 if (ct->bo && want_ctb) {
1637 snapshot->ctb_size = ct->bo->size;
1638 snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
1639 }
1640
1641 return snapshot;
1642}
1643
1644static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
1645 struct guc_ctb_snapshot *snapshot)
1646{
1647 xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
1648 sizeof(struct guc_ct_buffer_desc));
1649 memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
1650}
1651
1652static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
1653 struct drm_printer *p)
1654{
1655 drm_printf(p, "\tsize: %d\n", snapshot->info.size);
1656 drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
1657 drm_printf(p, "\thead: %d\n", snapshot->info.head);
1658 drm_printf(p, "\ttail: %d\n", snapshot->info.tail);
1659 drm_printf(p, "\tspace: %d\n", snapshot->info.space);
1660 drm_printf(p, "\tbroken: %d\n", snapshot->info.broken);
1661 drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
1662 drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
1663 drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);
1664}
1665
1666static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic,
1667 bool want_ctb)
1668{
1669 struct xe_device *xe = ct_to_xe(ct);
1670 struct xe_guc_ct_snapshot *snapshot;
1671
1672 snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb);
1673 if (!snapshot) {
1674 xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n");
1675 return NULL;
1676 }
1677
1678 if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
1679 snapshot->ct_enabled = true;
1680 snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
1681 guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g);
1682 guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h);
1683 }
1684
1685 if (ct->bo && snapshot->ctb)
1686 xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size);
1687
1688 return snapshot;
1689}
1690
1691/**
1692 * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
1693 * @ct: GuC CT object.
1694 *
 * This can be printed out at a later stage, e.g. during dev_coredump
 * analysis. This is safe to call from atomic context.
1697 *
1698 * Returns: a GuC CT snapshot object that must be freed by the caller
1699 * by using `xe_guc_ct_snapshot_free`.
1700 */
1701struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct)
1702{
1703 return guc_ct_snapshot_capture(ct, true, true);
1704}
1705
1706/**
1707 * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
1708 * @snapshot: GuC CT snapshot object.
1709 * @p: drm_printer where it will be printed out.
1710 *
1711 * This function prints out a given GuC CT snapshot object.
1712 */
1713void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
1714 struct drm_printer *p)
1715{
1716 if (!snapshot)
1717 return;
1718
1719 if (snapshot->ct_enabled) {
1720 drm_puts(p, "H2G CTB (all sizes in DW):\n");
1721 guc_ctb_snapshot_print(&snapshot->h2g, p);
1722
1723 drm_puts(p, "G2H CTB (all sizes in DW):\n");
1724 guc_ctb_snapshot_print(&snapshot->g2h, p);
1725 drm_printf(p, "\tg2h outstanding: %d\n",
1726 snapshot->g2h_outstanding);
1727
1728 if (snapshot->ctb) {
1729 drm_printf(p, "[CTB].length: 0x%zx\n", snapshot->ctb_size);
1730 xe_print_blob_ascii85(p, "[CTB].data", '\n',
1731 snapshot->ctb, 0, snapshot->ctb_size);
1732 }
1733 } else {
1734 drm_puts(p, "CT disabled\n");
1735 }
1736}
1737
1738/**
1739 * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
1740 * @snapshot: GuC CT snapshot object.
1741 *
 * This function frees all the memory that was allocated at capture time.
1744 */
1745void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
1746{
1747 if (!snapshot)
1748 return;
1749
1750 kfree(snapshot->ctb);
1751 kfree(snapshot);
1752}
1753
1754/**
1755 * xe_guc_ct_print - GuC CT Print.
1756 * @ct: GuC CT.
1757 * @p: drm_printer where it will be printed out.
1758 * @want_ctb: Should the full CTB content be dumped (vs just the headers)
1759 *
1760 * This function will quickly capture a snapshot of the CT state
1761 * and immediately print it out.
1762 */
1763void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
1764{
1765 struct xe_guc_ct_snapshot *snapshot;
1766
1767 snapshot = guc_ct_snapshot_capture(ct, false, want_ctb);
1768 xe_guc_ct_snapshot_print(snapshot, p);
1769 xe_guc_ct_snapshot_free(snapshot);
1770}
1771
1772#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
1773static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
1774{
1775 struct xe_guc_log_snapshot *snapshot_log;
1776 struct xe_guc_ct_snapshot *snapshot_ct;
1777 struct xe_guc *guc = ct_to_guc(ct);
1778 unsigned long flags;
1779 bool have_capture;
1780
1781 if (ctb)
1782 ctb->info.broken = true;
1783
1784 /* Ignore further errors after the first dump until a reset */
1785 if (ct->dead.reported)
1786 return;
1787
1788 spin_lock_irqsave(&ct->dead.lock, flags);
1789
1790 /* And only capture one dump at a time */
1791 have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE);
1792 ct->dead.reason |= (1 << reason_code) |
1793 (1 << CT_DEAD_STATE_CAPTURE);
1794
1795 spin_unlock_irqrestore(&ct->dead.lock, flags);
1796
1797 if (have_capture)
1798 return;
1799
1800 snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true);
1801 snapshot_ct = xe_guc_ct_snapshot_capture((ct));
1802
1803 spin_lock_irqsave(&ct->dead.lock, flags);
1804
1805 if (ct->dead.snapshot_log || ct->dead.snapshot_ct) {
1806 xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n");
1807 xe_guc_log_snapshot_free(snapshot_log);
1808 xe_guc_ct_snapshot_free(snapshot_ct);
1809 } else {
1810 ct->dead.snapshot_log = snapshot_log;
1811 ct->dead.snapshot_ct = snapshot_ct;
1812 }
1813
1814 spin_unlock_irqrestore(&ct->dead.lock, flags);
1815
1816 queue_work(system_unbound_wq, &(ct)->dead.worker);
1817}
1818
1819static void ct_dead_print(struct xe_dead_ct *dead)
1820{
1821 struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead);
1822 struct xe_device *xe = ct_to_xe(ct);
1823 struct xe_gt *gt = ct_to_gt(ct);
1824 static int g_count;
1825 struct drm_printer ip = xe_gt_info_printer(gt);
1826 struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);
1827
1828 if (!dead->reason) {
1829 xe_gt_err(gt, "CTB is dead for no reason!?\n");
1830 return;
1831 }
1832
1833
1834 /* Can't generate a genuine core dump at this point, so just do the good bits */
1835 drm_puts(&lp, "**** Xe Device Coredump ****\n");
1836 drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
1837 xe_device_snapshot_print(xe, &lp);
1838
1839 drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
1840 drm_printf(&lp, "\tTile: %d\n", gt->tile->id);
1841
1842 drm_puts(&lp, "**** GuC Log ****\n");
1843 xe_guc_log_snapshot_print(dead->snapshot_log, &lp);
1844
1845 drm_puts(&lp, "**** GuC CT ****\n");
1846 xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp);
1847
1848 drm_puts(&lp, "Done.\n");
1849}
1850
1851static void ct_dead_worker_func(struct work_struct *w)
1852{
1853 struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);
1854
1855 if (!ct->dead.reported) {
1856 ct->dead.reported = true;
1857 ct_dead_print(&ct->dead);
1858 }
1859
1860 spin_lock_irq(&ct->dead.lock);
1861
1862 xe_guc_log_snapshot_free(ct->dead.snapshot_log);
1863 ct->dead.snapshot_log = NULL;
1864 xe_guc_ct_snapshot_free(ct->dead.snapshot_ct);
1865 ct->dead.snapshot_ct = NULL;
1866
1867 if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) {
1868 /* A reset has occurred so re-arm the error reporting */
1869 ct->dead.reason = 0;
1870 ct->dead.reported = false;
1871 }
1872
1873 spin_unlock_irq(&ct->dead.lock);
1874}
1875#endif