// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_device.h"

#include <linux/delay.h>
#include <linux/units.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/xe_drm.h>

20#include "display/xe_display.h"
21#include "instructions/xe_gpu_commands.h"
22#include "regs/xe_gt_regs.h"
23#include "regs/xe_regs.h"
24#include "xe_bo.h"
25#include "xe_debugfs.h"
26#include "xe_devcoredump.h"
27#include "xe_dma_buf.h"
28#include "xe_drm_client.h"
29#include "xe_drv.h"
30#include "xe_exec.h"
31#include "xe_exec_queue.h"
32#include "xe_force_wake.h"
33#include "xe_ggtt.h"
34#include "xe_gsc_proxy.h"
35#include "xe_gt.h"
36#include "xe_gt_mcr.h"
37#include "xe_gt_printk.h"
38#include "xe_gt_sriov_vf.h"
39#include "xe_guc.h"
40#include "xe_hwmon.h"
41#include "xe_irq.h"
42#include "xe_memirq.h"
43#include "xe_mmio.h"
44#include "xe_module.h"
45#include "xe_observation.h"
46#include "xe_pat.h"
47#include "xe_pcode.h"
48#include "xe_pm.h"
49#include "xe_query.h"
50#include "xe_sriov.h"
51#include "xe_tile.h"
52#include "xe_ttm_stolen_mgr.h"
53#include "xe_ttm_sys_mgr.h"
54#include "xe_vm.h"
55#include "xe_vram.h"
56#include "xe_wait_user_fence.h"

static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_drm_client *client;
	struct xe_file *xef;
	int ret = -ENOMEM;

	xef = kzalloc(sizeof(*xef), GFP_KERNEL);
	if (!xef)
		return ret;

	client = xe_drm_client_alloc();
	if (!client) {
		kfree(xef);
		return ret;
	}

	xef->drm = file;
	xef->client = client;
	xef->xe = xe;

	mutex_init(&xef->vm.lock);
	xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);

	mutex_init(&xef->exec_queue.lock);
	xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);

	spin_lock(&xe->clients.lock);
	xe->clients.count++;
	spin_unlock(&xe->clients.lock);

	file->driver_priv = xef;
	return 0;
}

static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = file->driver_priv;
	struct xe_vm *vm;
	struct xe_exec_queue *q;
	unsigned long idx;

	/*
	 * No need for exec_queue.lock here as there is no contention for it
	 * when the FD is closing, as IOCTLs presumably can't be modifying the
	 * xarray. Taking exec_queue.lock here causes an undue dependency on
	 * vm->lock taken during xe_exec_queue_kill().
	 */
	xa_for_each(&xef->exec_queue.xa, idx, q) {
		xe_exec_queue_kill(q);
		xe_exec_queue_put(q);
	}
	xa_destroy(&xef->exec_queue.xa);
	mutex_destroy(&xef->exec_queue.lock);
	mutex_lock(&xef->vm.lock);
	xa_for_each(&xef->vm.xa, idx, vm)
		xe_vm_close_and_put(vm);
	mutex_unlock(&xef->vm.lock);
	xa_destroy(&xef->vm.xa);
	mutex_destroy(&xef->vm.lock);

	spin_lock(&xe->clients.lock);
	xe->clients.count--;
	spin_unlock(&xe->clients.lock);

	xe_drm_client_put(xef->client);
	kfree(xef);
}

static const struct drm_ioctl_desc xe_ioctls[] = {
	DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_CREATE, xe_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_MMAP_OFFSET, xe_gem_mmap_offset_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
};

static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = file->private_data;
	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
	long ret;

	if (xe_device_wedged(xe))
		return -ECANCELED;

	ret = xe_pm_runtime_get_ioctl(xe);
	if (ret >= 0)
		ret = drm_ioctl(file, cmd, arg);
	xe_pm_runtime_put(xe);

	return ret;
}

#ifdef CONFIG_COMPAT
static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = file->private_data;
	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
	long ret;

	if (xe_device_wedged(xe))
		return -ECANCELED;

	ret = xe_pm_runtime_get_ioctl(xe);
	if (ret >= 0)
		ret = drm_compat_ioctl(file, cmd, arg);
	xe_pm_runtime_put(xe);

	return ret;
}
#else
/* similar to drm_compat_ioctl, let it be assigned to .compat_ioctl unconditionally */
#define xe_drm_compat_ioctl NULL
#endif

static const struct file_operations xe_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = xe_drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = xe_drm_compat_ioctl,
	.llseek = noop_llseek,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = drm_show_fdinfo,
#endif
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
	.open = xe_file_open,
	.postclose = xe_file_close,

	.gem_prime_import = xe_gem_prime_import,

	.dumb_create = xe_bo_dumb_create,
	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = xe_drm_client_fdinfo,
#endif
	.ioctls = xe_ioctls,
	.num_ioctls = ARRAY_SIZE(xe_ioctls),
	.fops = &xe_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
	struct xe_device *xe = to_xe_device(dev);

	if (xe->preempt_fence_wq)
		destroy_workqueue(xe->preempt_fence_wq);

	if (xe->ordered_wq)
		destroy_workqueue(xe->ordered_wq);

	if (xe->unordered_wq)
		destroy_workqueue(xe->unordered_wq);

	ttm_device_fini(&xe->ttm);
}

struct xe_device *xe_device_create(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct xe_device *xe;
	int err;

	xe_display_driver_set_hooks(&driver);

	err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
	if (err)
		return ERR_PTR(err);

	xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm);
	if (IS_ERR(xe))
		return xe;

	err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev,
			      xe->drm.anon_inode->i_mapping,
			      xe->drm.vma_offset_manager, false, false);
	if (WARN_ON(err))
		goto err;

	err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
	if (err)
		goto err;

	xe->info.devid = pdev->device;
	xe->info.revid = pdev->revision;
	xe->info.force_execlist = xe_modparam.force_execlist;

	spin_lock_init(&xe->irq.lock);
	spin_lock_init(&xe->clients.lock);

	init_waitqueue_head(&xe->ufence_wq);

	err = drmm_mutex_init(&xe->drm, &xe->usm.lock);
	if (err)
		goto err;

	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		/* Trigger a large asid and an early asid wrap. */
		u32 asid;

		BUILD_BUG_ON(XE_MAX_ASID < 2);
		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL,
				      XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1),
				      &xe->usm.next_asid, GFP_KERNEL);
		drm_WARN_ON(&xe->drm, err);
		if (err >= 0)
			xa_erase(&xe->usm.asid_to_vm, asid);
	}

	spin_lock_init(&xe->pinned.lock);
	INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
	INIT_LIST_HEAD(&xe->pinned.external_vram);
	INIT_LIST_HEAD(&xe->pinned.evicted);

	xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0);
	xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
	xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
	if (!xe->ordered_wq || !xe->unordered_wq ||
	    !xe->preempt_fence_wq) {
		/*
		 * Cleanup is done in xe_device_destroy via the
		 * drmm_add_action_or_reset registered above.
		 */
		drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
		err = -ENOMEM;
		goto err;
	}

	err = xe_display_create(xe);
	if (WARN_ON(err))
		goto err;

	return xe;

err:
	return ERR_PTR(err);
}
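
/*
 * Illustrative caller sketch (an assumption for illustration, not part of
 * this file): the PCI probe path is expected to create the device first
 * and only then run the probe phases, roughly:
 *
 *	xe = xe_device_create(pdev, ent);
 *	if (IS_ERR(xe))
 *		return PTR_ERR(xe);
 *	err = xe_device_probe_early(xe);
 *	if (err)
 *		return err;
 *	...
 *	err = xe_device_probe(xe);
 */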

/*
 * The driver-initiated FLR is the highest level of reset that we can trigger
 * from within the driver. It is different from the PCI FLR in that it doesn't
 * fully reset the SGUnit and doesn't modify the PCI config space and therefore
 * it doesn't require a re-enumeration of the PCI BARs. However, the
 * driver-initiated FLR does still cause a reset of both GT and display and a
 * memory wipe of local and stolen memory, so recovery would require a full HW
 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
 * perform the FLR as the very last action before releasing access to the HW
 * during the driver release flow, we don't attempt recovery at all, because
 * if/when a new instance of the driver is bound to the device it will do a
 * full re-init anyway.
 */
static void xe_driver_flr(struct xe_device *xe)
{
	const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	int ret;

	if (xe_mmio_read32(gt, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
		drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
		return;
	}

	drm_dbg(&xe->drm, "Triggering Driver-FLR\n");

	/*
	 * Make sure any pending FLR requests have cleared by waiting for the
	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
	 * to make sure it's not still set from a prior attempt (it's a
	 * write-to-clear bit).
	 * Note that we should never be in a situation where a previous attempt
	 * is still pending (unless the HW is totally dead), but better to be
	 * safe in case something unexpected happens.
	 */
	ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
		return;
	}
	xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);

	/* Trigger the actual Driver-FLR */
	xe_mmio_rmw32(gt, GU_CNTL, 0, DRIVERFLR);

	/* Wait for hardware teardown to complete */
	ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
		return;
	}

	/* Wait for hardware/firmware re-init to complete */
	ret = xe_mmio_wait32(gt, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
			     flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
		return;
	}

	/* Clear sticky completion status */
	xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
}

static void xe_driver_flr_fini(void *arg)
{
	struct xe_device *xe = arg;

	if (xe->needs_flr_on_fini)
		xe_driver_flr(xe);
}

static void xe_device_sanitize(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_gt *gt;
	u8 id;

	for_each_gt(gt, xe, id)
		xe_gt_sanitize(gt);
}

static int xe_set_dma_info(struct xe_device *xe)
{
	unsigned int mask_size = xe->info.dma_mask_size;
	int err;

	dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));

	err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err);
	return err;
}

static bool verify_lmem_ready(struct xe_gt *gt)
{
	u32 val = xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT;

	return !!val;
}

static int wait_for_lmem_ready(struct xe_device *xe)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	unsigned long timeout, start;

	if (!IS_DGFX(xe))
		return 0;

	if (IS_SRIOV_VF(xe))
		return 0;

	if (verify_lmem_ready(gt))
		return 0;

	drm_dbg(&xe->drm, "Waiting for lmem initialization\n");

	start = jiffies;
	timeout = start + msecs_to_jiffies(60 * 1000); /* 60 sec! */

	do {
		if (signal_pending(current))
			return -EINTR;

		/*
		 * The boot firmware initializes local memory and assesses its
		 * health. If memory training fails, the punit will have been
		 * instructed to keep the GT powered down, and we won't be able
		 * to communicate with it.
		 *
		 * If the status check is done before the punit updates the
		 * register, it can lead to the system being unusable. Use a
		 * timeout and defer the probe to prevent this.
		 */
		if (time_after(jiffies, timeout)) {
			drm_dbg(&xe->drm, "lmem not initialized by firmware\n");
			return -EPROBE_DEFER;
		}

		msleep(20);

	} while (!verify_lmem_ready(gt));

	drm_dbg(&xe->drm, "lmem ready after %ums",
		jiffies_to_msecs(jiffies - start));

	return 0;
}

static void update_device_info(struct xe_device *xe)
{
	/* disable features that are not available/applicable to VFs */
	if (IS_SRIOV_VF(xe)) {
		xe->info.enable_display = 0;
		xe->info.has_heci_gscfi = 0;
		xe->info.skip_guc_pc = 1;
		xe->info.skip_pcode = 1;
	}
}

/**
 * xe_device_probe_early: Device early probe
 * @xe: xe device instance
 *
 * Initialize MMIO resources that don't require any
 * knowledge about tile count. Also initialize pcode and
 * check vram initialization on root tile.
 *
 * Return: 0 on success, error code on failure
 */
int xe_device_probe_early(struct xe_device *xe)
{
	int err;

	err = xe_mmio_init(xe);
	if (err)
		return err;

	xe_sriov_probe_early(xe);

	update_device_info(xe);

	err = xe_pcode_probe_early(xe);
	if (err)
		return err;

	err = wait_for_lmem_ready(xe);
	if (err)
		return err;

	xe->wedged.mode = xe_modparam.wedged_mode;

	return 0;
}

static int xe_device_set_has_flat_ccs(struct xe_device *xe)
{
	u32 reg;
	int err;

	if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs)
		return 0;

	struct xe_gt *gt = xe_root_mmio_gt(xe);

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return err;

	reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
	xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);

	if (!xe->info.has_flat_ccs)
		drm_dbg(&xe->drm,
			"Flat CCS has been disabled in BIOS; may lead to performance impact");

	return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}

int xe_device_probe(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	int err;
	u8 last_gt;
	u8 id;

	xe_pat_init_early(xe);

	err = xe_sriov_init(xe);
	if (err)
		return err;

	xe->info.mem_region_mask = 1;
	err = xe_display_init_nommio(xe);
	if (err)
		return err;

	err = xe_set_dma_info(xe);
	if (err)
		return err;

	err = xe_mmio_probe_tiles(xe);
	if (err)
		return err;

	xe_ttm_sys_mgr_init(xe);

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_early(gt);
		if (err)
			return err;
	}

	for_each_tile(tile, xe, id) {
		if (IS_SRIOV_VF(xe)) {
			xe_guc_comm_init_early(&tile->primary_gt->uc.guc);
			err = xe_gt_sriov_vf_bootstrap(tile->primary_gt);
			if (err)
				return err;
			err = xe_gt_sriov_vf_query_config(tile->primary_gt);
			if (err)
				return err;
		}
		err = xe_ggtt_init_early(tile->mem.ggtt);
		if (err)
			return err;
		if (IS_SRIOV_VF(xe)) {
			err = xe_memirq_init(&tile->sriov.vf.memirq);
			if (err)
				return err;
		}
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_hwconfig(gt);
		if (err)
			return err;
	}

	err = xe_devcoredump_init(xe);
	if (err)
		return err;
	err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe);
	if (err)
		return err;

	err = xe_display_init_noirq(xe);
	if (err)
		return err;

	err = xe_irq_install(xe);
	if (err)
		goto err;

	err = xe_device_set_has_flat_ccs(xe);
	if (err)
		goto err;

	err = xe_vram_probe(xe);
	if (err)
		goto err;

	for_each_tile(tile, xe, id) {
		err = xe_tile_init_noalloc(tile);
		if (err)
			goto err;
	}

	/* Allocate and map stolen after potential VRAM resize */
	xe_ttm_stolen_mgr_init(xe);

	/*
	 * Now that GT is initialized (TTM in particular),
	 * we can try to init display, and inherit the initial fb.
	 * This is the reason the first allocation needs to be done
	 * inside display.
	 */
	err = xe_display_init_noaccel(xe);
	if (err)
		goto err;

	for_each_gt(gt, xe, id) {
		last_gt = id;

		err = xe_gt_init(gt);
		if (err)
			goto err_fini_gt;
	}

	xe_heci_gsc_init(xe);

	err = xe_oa_init(xe);
	if (err)
		goto err_fini_gt;

	err = xe_display_init(xe);
	if (err)
		goto err_fini_oa;

	err = drm_dev_register(&xe->drm, 0);
	if (err)
		goto err_fini_display;

	xe_display_register(xe);

	xe_oa_register(xe);

	xe_debugfs_register(xe);

	xe_hwmon_register(xe);

	for_each_gt(gt, xe, id)
		xe_gt_sanitize_freq(gt);

	return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);

err_fini_display:
	xe_display_driver_remove(xe);

err_fini_oa:
	xe_oa_fini(xe);

err_fini_gt:
	for_each_gt(gt, xe, id) {
		if (id < last_gt)
			xe_gt_remove(gt);
		else
			break;
	}

err:
	xe_display_fini(xe);
	return err;
}

static void xe_device_remove_display(struct xe_device *xe)
{
	xe_display_unregister(xe);

	drm_dev_unplug(&xe->drm);
	xe_display_driver_remove(xe);
}

void xe_device_remove(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	xe_oa_unregister(xe);

	xe_device_remove_display(xe);

	xe_display_fini(xe);

	xe_oa_fini(xe);

	xe_heci_gsc_fini(xe);

	for_each_gt(gt, xe, id)
		xe_gt_remove(gt);
}

void xe_device_shutdown(struct xe_device *xe)
{
}

void xe_device_wmb(struct xe_device *xe)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);

	wmb();
	if (IS_DGFX(xe))
		xe_mmio_write32(gt, SOFTWARE_FLAGS_SPR33, 0);
}

/**
 * xe_device_td_flush() - Flush transient L3 cache entries
 * @xe: The device
 *
 * Display engine has direct access to memory and is never coherent with L3/L4
 * caches (or CPU caches), however KMD is responsible for specifically flushing
 * transient L3 GPU cache entries prior to the flip sequence to ensure scanout
 * can happen from such a surface without seeing corruption.
 *
 * Display surfaces can be tagged as transient by mapping them using one of the
 * various L3:XD PAT index modes on Xe2.
 *
 * Note: On non-discrete Xe2 platforms, like LNL, the entire L3 cache is flushed
 * at the end of each submission via PIPE_CONTROL for compute/render, since SA
 * Media is not coherent with L3 and we want to support render-vs-media
 * use cases. For other engines like copy/blt the HW internally forces uncached
 * behaviour, which is why we can skip the TDF on such platforms.
 */
void xe_device_td_flush(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
		return;

	for_each_gt(gt, xe, id) {
		if (xe_gt_is_media_type(gt))
			continue;

		if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT))
			return;

		xe_mmio_write32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
		/*
		 * FIXME: We can likely do better here with our choice of
		 * timeout. Currently we just assume the worst case, i.e. 150us,
		 * which is believed to be sufficient to cover the worst case
		 * scenario on current platforms if all cache entries are
		 * transient and need to be flushed.
		 */
		if (xe_mmio_wait32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
				   150, NULL, false))
			xe_gt_err_once(gt, "TD flush timeout\n");

		xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	}
}

u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
{
	return xe_device_has_flat_ccs(xe) ?
		DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
}
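
/*
 * Worked example for xe_device_ccs_bytes() (the 1:256 ratio below is an
 * assumption for illustration; the real ratio comes from
 * NUM_BYTES_PER_CCS_BYTE()): with 256 main-surface bytes per CCS byte, a
 * 1 MiB buffer needs DIV_ROUND_UP_ULL(1048576, 256) = 4096 bytes of CCS
 * metadata, while a device without flat CCS needs none.
 */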

/**
 * xe_device_assert_mem_access - Inspect the current runtime_pm state.
 * @xe: xe device instance
 *
 * To be used before any kind of memory access. It will splat a debug warning
 * if the device is currently sleeping. But it doesn't guarantee in any way
 * that the device is going to remain awake. Xe PM runtime get and put
 * functions might be added to the outer bound of the memory access, while
 * this check is intended for inner usage to splat some warning if the worst
 * case has just happened.
 */
void xe_device_assert_mem_access(struct xe_device *xe)
{
	xe_assert(xe, !xe_pm_runtime_suspended(xe));
}
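
/*
 * Hypothetical usage sketch (for illustration only): the runtime-PM
 * reference is taken at the outer bound, while the assert guards the
 * inner access, e.g.:
 *
 *	xe_pm_runtime_get(xe);
 *	...
 *	xe_device_assert_mem_access(xe);
 *	xe_mmio_write32(gt, reg, val);
 *	...
 *	xe_pm_runtime_put(xe);
 */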

void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
{
	struct xe_gt *gt;
	u8 id;

	drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid);
	drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid);

	for_each_gt(gt, xe, id) {
		drm_printf(p, "GT id: %u\n", id);
		drm_printf(p, "\tType: %s\n",
			   gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
		drm_printf(p, "\tIP ver: %u.%u.%u\n",
			   REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid),
			   REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid),
			   REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid));
		drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock);
	}
}

u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address)
{
	return sign_extend64(address, xe->info.va_bits - 1);
}

u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
{
	return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
}
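
/*
 * Worked example (assuming xe->info.va_bits == 48 for illustration): bit 47
 * is then the sign bit, so xe_device_canonicalize_addr() maps
 * 0x0000800000000000 to 0xffff800000000000, and
 * xe_device_uncanonicalize_addr() masks it back to 0x0000800000000000.
 */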

static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
{
	struct xe_device *xe = arg;

	xe_pm_runtime_put(xe);
}

/**
 * xe_device_declare_wedged - Declare device wedged
 * @xe: xe device instance
 *
 * This is a final state that can only be cleared with a module
 * re-probe (unbind + bind).
 * In this state every IOCTL will be blocked so the GT cannot be used.
 * In general it will be called upon any critical error such as gt reset
 * failure or guc loading failure.
 * If xe.wedged module parameter is set to 2, this function will be called
 * on every single execution timeout (a.k.a. GPU hang) right after devcoredump
 * snapshot capture. In this mode, GT reset won't be attempted so the state of
 * the issue is preserved for further debugging.
 */
void xe_device_declare_wedged(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	if (xe->wedged.mode == 0) {
		drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n");
		return;
	}

	if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) {
		drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Device is wedged regardless.\n");
		return;
	}

	xe_pm_runtime_get_noresume(xe);

	if (!atomic_xchg(&xe->wedged.flag, 1)) {
		xe->needs_flr_on_fini = true;
		drm_err(&xe->drm,
			"CRITICAL: Xe has declared device %s as wedged.\n"
			"IOCTLs and executions are blocked. Only a rebind may clear the failure\n"
			"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
			dev_name(xe->drm.dev));
	}

	for_each_gt(gt, xe, id)
		xe_gt_declare_wedged(gt);
}