// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_device.h"

#include <linux/units.h>

#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/xe_drm.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_debugfs.h"
#include "xe_devcoredump.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
#include "xe_exec.h"
#include "xe_exec_queue.h"
#include "xe_ggtt.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_hwmon.h"
#include "xe_irq.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_query.h"
#include "xe_sriov.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
#include "xe_wait_user_fence.h"

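/*
 * Per-client state: each open of the device allocates an xe_file that
 * tracks the VMs and exec queues created through this drm_file, plus an
 * xe_drm_client used for fdinfo accounting.
 */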
static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_drm_client *client;
	struct xe_file *xef;
	int ret = -ENOMEM;

	xef = kzalloc(sizeof(*xef), GFP_KERNEL);
	if (!xef)
		return ret;

	client = xe_drm_client_alloc();
	if (!client) {
		kfree(xef);
		return ret;
	}

	xef->drm = file;
	xef->client = client;
	xef->xe = xe;

	mutex_init(&xef->vm.lock);
	xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);

	mutex_init(&xef->exec_queue.lock);
	xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);

	spin_lock(&xe->clients.lock);
	xe->clients.count++;
	spin_unlock(&xe->clients.lock);

	file->driver_priv = xef;
	return 0;
}

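/*
 * Undo xe_file_open(): kill and drop every exec queue and VM still owned
 * by this client, then release the client accounting state.
 */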
static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = file->driver_priv;
	struct xe_vm *vm;
	struct xe_exec_queue *q;
	unsigned long idx;

	mutex_lock(&xef->exec_queue.lock);
	xa_for_each(&xef->exec_queue.xa, idx, q) {
		xe_exec_queue_kill(q);
		xe_exec_queue_put(q);
	}
	mutex_unlock(&xef->exec_queue.lock);
	xa_destroy(&xef->exec_queue.xa);
	mutex_destroy(&xef->exec_queue.lock);
	mutex_lock(&xef->vm.lock);
	xa_for_each(&xef->vm.xa, idx, vm)
		xe_vm_close_and_put(vm);
	mutex_unlock(&xef->vm.lock);
	xa_destroy(&xef->vm.xa);
	mutex_destroy(&xef->vm.lock);

	spin_lock(&xe->clients.lock);
	xe->clients.count--;
	spin_unlock(&xe->clients.lock);

	xe_drm_client_put(xef->client);
	kfree(xef);
}

static const struct drm_ioctl_desc xe_ioctls[] = {
	DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_CREATE, xe_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_MMAP_OFFSET, xe_gem_mmap_offset_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
			  DRM_RENDER_ALLOW),
};

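/*
 * All ioctls are funneled through this wrapper so the device is runtime
 * resumed across the call; drm_ioctl() only runs when the wakeref was
 * acquired, and the reference is dropped again on the way out.
 */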
static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = file->private_data;
	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
	long ret;

	ret = xe_pm_runtime_get_ioctl(xe);
	if (ret >= 0)
		ret = drm_ioctl(file, cmd, arg);
	xe_pm_runtime_put(xe);

	return ret;
}

#ifdef CONFIG_COMPAT
static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = file->private_data;
	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
	long ret;

	ret = xe_pm_runtime_get_ioctl(xe);
	if (ret >= 0)
		ret = drm_compat_ioctl(file, cmd, arg);
	xe_pm_runtime_put(xe);

	return ret;
}
#else
/* similarly to drm_compat_ioctl, let it be assigned to .compat_ioctl unconditionally */
#define xe_drm_compat_ioctl NULL
#endif

static const struct file_operations xe_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = xe_drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = xe_drm_compat_ioctl,
	.llseek = noop_llseek,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = drm_show_fdinfo,
#endif
};

static void xe_driver_release(struct drm_device *dev)
{
	struct xe_device *xe = to_xe_device(dev);

	pci_set_drvdata(to_pci_dev(xe->drm.dev), NULL);
}

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
	.open = xe_file_open,
	.postclose = xe_file_close,

	.gem_prime_import = xe_gem_prime_import,

	.dumb_create = xe_bo_dumb_create,
	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = xe_drm_client_fdinfo,
#endif
	.release = &xe_driver_release,

	.ioctls = xe_ioctls,
	.num_ioctls = ARRAY_SIZE(xe_ioctls),
	.fops = &xe_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

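/* drmm release action: tear down the driver workqueues and the TTM device. */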
static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
	struct xe_device *xe = to_xe_device(dev);

	if (xe->preempt_fence_wq)
		destroy_workqueue(xe->preempt_fence_wq);

	if (xe->ordered_wq)
		destroy_workqueue(xe->ordered_wq);

	if (xe->unordered_wq)
		destroy_workqueue(xe->unordered_wq);

	ttm_device_fini(&xe->ttm);
}

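/**
 * xe_device_create - Allocate and minimally initialize an xe_device
 * @pdev: the PCI device backing this xe device
 * @ent: the matching PCI device ID entry
 *
 * Removes conflicting firmware framebuffers, allocates the DRM device and
 * sets up the state that has no hardware dependencies: TTM, locks,
 * waitqueues and the driver workqueues. Cleanup is tied to the drm_device
 * lifetime via drmm actions, so error paths only need to return.
 *
 * Return: pointer to the new xe device, or an ERR_PTR() on failure.
 */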
struct xe_device *xe_device_create(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct xe_device *xe;
	int err;

	xe_display_driver_set_hooks(&driver);

	err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
	if (err)
		return ERR_PTR(err);

	xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm);
	if (IS_ERR(xe))
		return xe;

	err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev,
			      xe->drm.anon_inode->i_mapping,
			      xe->drm.vma_offset_manager, false, false);
	if (WARN_ON(err))
		goto err;

	err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
	if (err)
		goto err;

	xe->info.devid = pdev->device;
	xe->info.revid = pdev->revision;
	xe->info.force_execlist = xe_modparam.force_execlist;

	spin_lock_init(&xe->irq.lock);
	spin_lock_init(&xe->clients.lock);

	init_waitqueue_head(&xe->ufence_wq);

	drmm_mutex_init(&xe->drm, &xe->usm.lock);
	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		/* Trigger a large asid and an early asid wrap. */
		u32 asid;

		BUILD_BUG_ON(XE_MAX_ASID < 2);
		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL,
				      XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1),
				      &xe->usm.next_asid, GFP_KERNEL);
		drm_WARN_ON(&xe->drm, err);
		if (err >= 0)
			xa_erase(&xe->usm.asid_to_vm, asid);
	}

	spin_lock_init(&xe->pinned.lock);
	INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
	INIT_LIST_HEAD(&xe->pinned.external_vram);
	INIT_LIST_HEAD(&xe->pinned.evicted);

	xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0);
	xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
	xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
	if (!xe->ordered_wq || !xe->unordered_wq ||
	    !xe->preempt_fence_wq) {
		/*
		 * Cleanup is done in xe_device_destroy() via the
		 * drmm_add_action_or_reset() registered above.
		 */
		drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
		err = -ENOMEM;
		goto err;
	}

	err = xe_display_create(xe);
	if (WARN_ON(err))
		goto err;

	return xe;

err:
	return ERR_PTR(err);
}

/*
 * The driver-initiated FLR is the highest level of reset that we can trigger
 * from within the driver. It is different from the PCI FLR in that it doesn't
 * fully reset the SGUnit and doesn't modify the PCI config space and therefore
 * it doesn't require a re-enumeration of the PCI BARs. However, the
 * driver-initiated FLR does still cause a reset of both GT and display and a
 * memory wipe of local and stolen memory, so recovery would require a full HW
 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
 * perform the FLR as the very last action before releasing access to the HW
 * during the driver release flow, we don't attempt recovery at all, because
 * if/when a new instance of the driver is bound to the device it will do a
 * full re-init anyway.
 */
static void xe_driver_flr(struct xe_device *xe)
{
	const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	int ret;

	if (xe_mmio_read32(gt, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
		drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
		return;
	}

	drm_dbg(&xe->drm, "Triggering Driver-FLR\n");

	/*
	 * Make sure any pending FLR requests have cleared by waiting for the
	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
	 * to make sure it's not still set from a prior attempt (it's a write to
	 * clear bit).
	 * Note that we should never be in a situation where a previous attempt
	 * is still pending (unless the HW is totally dead), but better to be
	 * safe in case something unexpected happens.
	 */
	ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
		return;
	}
	xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);

	/* Trigger the actual Driver-FLR */
	xe_mmio_rmw32(gt, GU_CNTL, 0, DRIVERFLR);

	/* Wait for hardware teardown to complete */
	ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
		return;
	}

	/* Wait for hardware/firmware re-init to complete */
	ret = xe_mmio_wait32(gt, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
			     flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
		return;
	}

	/* Clear sticky completion status */
	xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
}

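/* drmm release action: issue the driver-FLR described above when required. */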
static void xe_driver_flr_fini(struct drm_device *drm, void *arg)
{
	struct xe_device *xe = arg;

	if (xe->needs_flr_on_fini)
		xe_driver_flr(xe);
}

static void xe_device_sanitize(struct drm_device *drm, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_gt *gt;
	u8 id;

	for_each_gt(gt, xe, id)
		xe_gt_sanitize(gt);
}

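/* Apply the platform's DMA addressing limits to both the streaming and the coherent mask. */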
static int xe_set_dma_info(struct xe_device *xe)
{
	unsigned int mask_size = xe->info.dma_mask_size;
	int err;

	dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));

	err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err);
	return err;
}

static bool verify_lmem_ready(struct xe_gt *gt)
{
	u32 val = xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT;

	return !!val;
}

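/*
 * On discrete parts the probe must not proceed until the boot firmware has
 * finished initializing local memory. Poll LMEM_INIT with a 60s budget and
 * defer the probe if it never asserts.
 */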
static int wait_for_lmem_ready(struct xe_device *xe)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);
	unsigned long timeout, start;

	if (!IS_DGFX(xe))
		return 0;

	if (IS_SRIOV_VF(xe))
		return 0;

	if (verify_lmem_ready(gt))
		return 0;

	drm_dbg(&xe->drm, "Waiting for lmem initialization\n");

	start = jiffies;
	timeout = start + msecs_to_jiffies(60 * 1000); /* 60 sec! */

	do {
		if (signal_pending(current))
			return -EINTR;

		/*
		 * The boot firmware initializes local memory and assesses its
		 * health. If memory training fails, the punit will have been
		 * instructed to keep the GT powered down; we won't be able to
		 * communicate with it.
		 *
		 * If the status check is done before the punit updates the
		 * register, it can lead to the system being unusable. Use a
		 * timeout and defer the probe to prevent this.
		 */
		if (time_after(jiffies, timeout)) {
			drm_dbg(&xe->drm, "lmem not initialized by firmware\n");
			return -EPROBE_DEFER;
		}

		msleep(20);

	} while (!verify_lmem_ready(gt));

	drm_dbg(&xe->drm, "lmem ready after %ums",
		jiffies_to_msecs(jiffies - start));

	return 0;
}

/**
 * xe_device_probe_early - Device early probe
 * @xe: xe device instance
 *
 * Initialize MMIO resources that don't require any knowledge about tile
 * count. Also initialize pcode and check VRAM initialization on the root
 * tile.
 *
 * Return: 0 on success, error code on failure
 */
int xe_device_probe_early(struct xe_device *xe)
{
	int err;

	err = xe_mmio_init(xe);
	if (err)
		return err;

	xe_sriov_probe_early(xe);

	err = xe_pcode_probe_early(xe);
	if (err)
		return err;

	err = wait_for_lmem_ready(xe);
	if (err)
		return err;

	return 0;
}

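/*
 * On Xe2+ the BIOS can disable flat CCS; read back the effective state from
 * the hardware and downgrade xe->info.has_flat_ccs accordingly.
 */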
static int xe_device_set_has_flat_ccs(struct xe_device *xe)
{
	u32 reg;
	int err;

	if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs)
		return 0;

	struct xe_gt *gt = xe_root_mmio_gt(xe);

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return err;

	reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
	xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);

	if (!xe->info.has_flat_ccs)
		drm_dbg(&xe->drm,
			"Flat CCS has been disabled in BIOS, may lead to performance impact\n");

	return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}

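/**
 * xe_device_probe - Main device probe
 * @xe: xe device instance
 *
 * Runs the remaining probe sequence: per-tile and per-GT early init, IRQ
 * installation, VRAM and stolen-memory managers, display bring-up and
 * finally drm_dev_register(). On failure, GTs that already completed
 * xe_gt_init() are removed and the IRQs are shut down again.
 *
 * Return: 0 on success, error code on failure
 */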
int xe_device_probe(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	int err;
	u8 last_gt;
	u8 id;

	xe_pat_init_early(xe);

	err = xe_sriov_init(xe);
	if (err)
		return err;

	xe->info.mem_region_mask = 1;
	err = xe_display_init_nommio(xe);
	if (err)
		return err;

	err = xe_set_dma_info(xe);
	if (err)
		return err;

	xe_mmio_probe_tiles(xe);

	xe_ttm_sys_mgr_init(xe);

	for_each_gt(gt, xe, id)
		xe_force_wake_init_gt(gt, gt_to_fw(gt));

	for_each_tile(tile, xe, id) {
		err = xe_ggtt_init_early(tile->mem.ggtt);
		if (err)
			return err;
		if (IS_SRIOV_VF(xe)) {
			err = xe_memirq_init(&tile->sriov.vf.memirq);
			if (err)
				return err;
		}
	}

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_hwconfig(gt);
		if (err)
			return err;
	}

	err = xe_devcoredump_init(xe);
	if (err)
		return err;
	err = drmm_add_action_or_reset(&xe->drm, xe_driver_flr_fini, xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id)
		xe_pcode_init(gt);

	err = xe_display_init_noirq(xe);
	if (err)
		return err;

	err = xe_irq_install(xe);
	if (err)
		goto err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_early(gt);
		if (err)
			goto err_irq_shutdown;
	}

	err = xe_device_set_has_flat_ccs(xe);
	if (err)
		goto err_irq_shutdown;

	err = xe_mmio_probe_vram(xe);
	if (err)
		goto err_irq_shutdown;

	for_each_tile(tile, xe, id) {
		err = xe_tile_init_noalloc(tile);
		if (err)
			goto err_irq_shutdown;
	}

	/* Allocate and map stolen after potential VRAM resize */
	xe_ttm_stolen_mgr_init(xe);

	/*
	 * Now that GT is initialized (TTM in particular),
	 * we can try to init display, and inherit the initial fb.
	 * This is the reason the first allocation needs to be done
	 * inside display.
	 */
	err = xe_display_init_noaccel(xe);
	if (err)
		goto err_irq_shutdown;

	for_each_gt(gt, xe, id) {
		last_gt = id;

		err = xe_gt_init(gt);
		if (err)
			goto err_fini_gt;
	}

	xe_heci_gsc_init(xe);

	err = xe_display_init(xe);
	if (err)
		goto err_fini_gt;

	err = drm_dev_register(&xe->drm, 0);
	if (err)
		goto err_fini_display;

	xe_display_register(xe);

	xe_debugfs_register(xe);

	xe_hwmon_register(xe);

	return drmm_add_action_or_reset(&xe->drm, xe_device_sanitize, xe);

err_fini_display:
	xe_display_driver_remove(xe);

err_fini_gt:
	for_each_gt(gt, xe, id) {
		if (id < last_gt)
			xe_gt_remove(gt);
		else
			break;
	}

err_irq_shutdown:
	xe_irq_shutdown(xe);
err:
	xe_display_fini(xe);
	return err;
}

static void xe_device_remove_display(struct xe_device *xe)
{
	xe_display_unregister(xe);

	drm_dev_unplug(&xe->drm);
	xe_display_driver_remove(xe);
}

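/**
 * xe_device_remove - Device removal
 * @xe: xe device instance
 *
 * Tears down display, GSC, the GTs and finally the IRQs, roughly in the
 * reverse order of xe_device_probe().
 */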
void xe_device_remove(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	xe_device_remove_display(xe);

	xe_display_fini(xe);

	xe_heci_gsc_fini(xe);

	for_each_gt(gt, xe, id)
		xe_gt_remove(gt);

	xe_irq_shutdown(xe);
}

void xe_device_shutdown(struct xe_device *xe)
{
}

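/**
 * xe_device_wmb - Issue a device-visible write memory barrier
 * @xe: xe device instance
 *
 * Besides the CPU-side wmb(), discrete devices also get a dummy MMIO write
 * to a software scratch register so that prior writes are flushed out to
 * the device.
 */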
void xe_device_wmb(struct xe_device *xe)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);

	wmb();
	if (IS_DGFX(xe))
		xe_mmio_write32(gt, SOFTWARE_FLAGS_SPR33, 0);
}

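/**
 * xe_device_ccs_bytes - Compression metadata size for a buffer
 * @xe: xe device instance
 * @size: size of the main surface in bytes
 *
 * Return: the number of CCS bytes needed to back @size bytes of main
 * memory, or 0 if the device has no flat CCS.
 */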
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
{
	return xe_device_has_flat_ccs(xe) ?
		DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
}

/**
 * xe_device_assert_mem_access - Inspect the current runtime_pm state.
 * @xe: xe device instance
 *
 * To be used before any kind of memory access. It will splat a debug warning
 * if the device is currently sleeping. But it doesn't guarantee in any way
 * that the device is going to remain awake. Xe PM runtime get and put
 * functions might be added to the outer bound of the memory access, while
 * this check is intended for inner usage to splat some warning if the worst
 * case has just happened.
 */
void xe_device_assert_mem_access(struct xe_device *xe)
{
	xe_assert(xe, !xe_pm_runtime_suspended(xe));
}

void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
{
	struct xe_gt *gt;
	u8 id;

	drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid);
	drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid);

	for_each_gt(gt, xe, id) {
		drm_printf(p, "GT id: %u\n", id);
		drm_printf(p, "\tType: %s\n",
			   gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
		drm_printf(p, "\tIP ver: %u.%u.%u\n",
			   REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid),
			   REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid),
			   REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid));
		drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock);
	}
}

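/**
 * xe_device_canonicalize_addr - Sign-extend a GPU virtual address
 * @xe: xe device instance
 * @address: address to canonicalize
 *
 * The hardware expects addresses in canonical form, where the bits above
 * the VA range replicate the top VA bit, analogous to how CPUs sign-extend
 * virtual addresses.
 *
 * Return: the canonical form of @address.
 */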
u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address)
{
	return sign_extend64(address, xe->info.va_bits - 1);
}

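/**
 * xe_device_uncanonicalize_addr - Strip the sign-extension from a GPU address
 * @xe: xe device instance
 * @address: address to uncanonicalize
 *
 * Return: @address with only the low va_bits kept.
 */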
u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
{
	return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
}