/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_DEVICE_H_
#define _XE_DEVICE_H_

#include <drm/drm_util.h>

#include "xe_assert.h"
#include "xe_device_types.h"
#include "xe_gt_types.h"
#include "xe_sriov.h"

static inline struct xe_device *to_xe_device(const struct drm_device *dev)
{
	return container_of(dev, struct xe_device, drm);
}

static inline struct xe_device *kdev_to_xe_device(struct device *kdev)
{
	struct drm_device *drm = dev_get_drvdata(kdev);

	return drm ? to_xe_device(drm) : NULL;
}

static inline struct xe_device *pdev_to_xe_device(struct pci_dev *pdev)
{
	struct drm_device *drm = pci_get_drvdata(pdev);

	return drm ? to_xe_device(drm) : NULL;
}

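/*
 * Example (illustrative sketch, not part of the driver API): a callback that
 * receives a bare struct device can recover the xe_device via the drvdata
 * pointer set at probe time. The callback name below is made up.
 *
 *	static int xe_example_suspend(struct device *dev)
 *	{
 *		struct xe_device *xe = kdev_to_xe_device(dev);
 *
 *		if (!xe)
 *			return -ENODEV;
 *		...
 *	}
 */
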
/*
 * Cast away const-ness for callers that hold a const pointer but need to pass
 * the device to non-const APIs.
 */
static inline struct xe_device *xe_device_const_cast(const struct xe_device *xe)
{
	return (struct xe_device *)xe;
}

static inline struct xe_device *ttm_to_xe_device(struct ttm_device *ttm)
{
	return container_of(ttm, struct xe_device, ttm);
}

struct xe_device *xe_device_create(struct pci_dev *pdev,
				   const struct pci_device_id *ent);
int xe_device_probe_early(struct xe_device *xe);
int xe_device_probe(struct xe_device *xe);
void xe_device_remove(struct xe_device *xe);
void xe_device_shutdown(struct xe_device *xe);

void xe_device_wmb(struct xe_device *xe);

static inline struct xe_file *to_xe_file(const struct drm_file *file)
{
	return file->driver_priv;
}

static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe)
{
	return &xe->tiles[0];
}

/*
 * Highest number of GTs per tile on any platform. Used only for memory
 * allocation sizing. Any logic looping over GTs or mapping userspace GT IDs
 * into GT structures should use the per-platform xe->info.max_gt_per_tile
 * instead.
 */
#define XE_MAX_GT_PER_TILE 2

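/*
 * Example (sketch): static sizing uses the compile-time maximum, while
 * iteration uses the per-platform limit. XE_MAX_TILES_PER_DEVICE is assumed
 * to come from xe_device_types.h.
 *
 *	u32 stats[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
 *	struct xe_gt *gt;
 *	u8 id;
 *
 *	for_each_gt(gt, xe, id)
 *		stats[id] = 0;
 */
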
static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id)
{
	struct xe_tile *tile;
	struct xe_gt *gt;

	if (gt_id >= xe->info.tile_count * xe->info.max_gt_per_tile)
		return NULL;

	tile = &xe->tiles[gt_id / xe->info.max_gt_per_tile];
	switch (gt_id % xe->info.max_gt_per_tile) {
	default:
		xe_assert(xe, false);
		fallthrough;
	case 0:
		gt = tile->primary_gt;
		break;
	case 1:
		gt = tile->media_gt;
		break;
	}

	if (!gt)
		return NULL;

	drm_WARN_ON(&xe->drm, gt->info.id != gt_id);
	drm_WARN_ON(&xe->drm, gt->info.type == XE_GT_TYPE_UNINITIALIZED);

	return gt;
}

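/*
 * Example (sketch): resolving a userspace-supplied GT ID, as an ioctl handler
 * might; "args->gt_id" is a made-up name for this illustration.
 *
 *	struct xe_gt *gt = xe_device_get_gt(xe, args->gt_id);
 *
 *	if (!gt)
 *		return -EINVAL;
 */
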
/*
 * Provide a GT structure suitable for performing non-GT MMIO operations against
 * the primary tile. Primarily intended for early tile initialization, display
 * handling, top-most interrupt enable/disable, etc. Since anything using the
 * MMIO handle returned by this function doesn't need GSI offset translation,
 * we'll return the primary GT from the root tile.
 *
 * FIXME: Fix the driver design so that 'gt' isn't the target of all MMIO
 * operations.
 *
 * Returns the primary gt of the root tile.
 */
static inline struct xe_gt *xe_root_mmio_gt(struct xe_device *xe)
{
	return xe_device_get_root_tile(xe)->primary_gt;
}

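/*
 * Example (sketch): using the root-tile GT as the handle for a device-global
 * register read. An xe_mmio_read32() helper taking a GT pointer and a
 * SOME_GLOBAL_REG register are assumptions of this sketch, not definitions
 * from this header.
 *
 *	struct xe_gt *mmio = xe_root_mmio_gt(xe);
 *	u32 val = xe_mmio_read32(mmio, SOME_GLOBAL_REG);
 */
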
/* uC (GuC/HuC) submission is in use unless execlists are forced via modparam. */
static inline bool xe_device_uc_enabled(struct xe_device *xe)
{
	return !xe->info.force_execlist;
}

#define for_each_tile(tile__, xe__, id__) \
	for ((id__) = 0; (id__) < (xe__)->info.tile_count; (id__)++) \
		for_each_if((tile__) = &(xe__)->tiles[(id__)])

#define for_each_remote_tile(tile__, xe__, id__) \
	for ((id__) = 1; (id__) < (xe__)->info.tile_count; (id__)++) \
		for_each_if((tile__) = &(xe__)->tiles[(id__)])

#define for_each_gt(gt__, xe__, id__) \
	for ((id__) = 0; (id__) < (xe__)->info.tile_count * (xe__)->info.max_gt_per_tile; (id__)++) \
		for_each_if((gt__) = xe_device_get_gt((xe__), (id__)))

#define for_each_gt_on_tile(gt__, tile__, id__) \
	for_each_gt((gt__), (tile__)->xe, (id__)) \
		for_each_if((gt__)->tile == (tile__))

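/*
 * Example (sketch): iterating every initialized GT on a device; the loop body
 * is illustrative only.
 *
 *	struct xe_gt *gt;
 *	u8 id;
 *
 *	for_each_gt(gt, xe, id)
 *		drm_info(&xe->drm, "GT%u on tile %u\n", id, gt->tile->id);
 */
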
static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
{
	return &gt->pm.fw;
}

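/*
 * Example (sketch): bracketing an MMIO access with a GT forcewake reference.
 * xe_force_wake_get()/put() and the XE_FW_GT domain live in xe_force_wake.h;
 * the exact return convention varies between driver versions, so treat the
 * error handling below as an assumption.
 *
 *	if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT))
 *		return;
 *	... MMIO access ...
 *	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
 */
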
void xe_device_assert_mem_access(struct xe_device *xe);

static inline bool xe_device_has_flat_ccs(struct xe_device *xe)
{
	return xe->info.has_flat_ccs;
}

static inline bool xe_device_has_sriov(struct xe_device *xe)
{
	return xe->info.has_sriov;
}

static inline bool xe_device_has_msix(struct xe_device *xe)
{
	return xe->irq.msix.nvec > 0;
}

static inline bool xe_device_has_memirq(struct xe_device *xe)
{
	return GRAPHICS_VERx100(xe) >= 1250;
}

static inline bool xe_device_uses_memirq(struct xe_device *xe)
{
	return xe_device_has_memirq(xe) && (IS_SRIOV_VF(xe) || xe_device_has_msix(xe));
}

static inline bool xe_device_has_lmtt(struct xe_device *xe)
{
	return IS_DGFX(xe);
}

u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size);

void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p);

u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address);
u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address);

void xe_device_td_flush(struct xe_device *xe);
void xe_device_l2_flush(struct xe_device *xe);

static inline bool xe_device_wedged(struct xe_device *xe)
{
	return atomic_read(&xe->wedged.flag);
}

void xe_device_set_wedged_method(struct xe_device *xe, unsigned long method);
void xe_device_declare_wedged(struct xe_device *xe);

struct xe_file *xe_file_get(struct xe_file *xef);
void xe_file_put(struct xe_file *xef);

int xe_is_injection_active(void);

/*
 * Occasionally the G2H worker is observed to start running more than a second
 * after being queued and activated by the Linux workqueue subsystem, which
 * leads to G2H timeout errors. The root cause is the scheduling latency of the
 * Lunarlake hybrid CPU: the issue disappears when the Lunarlake Atom cores are
 * disabled in the BIOS, which is beyond the xe KMD's control.
 *
 * TODO: Drop this workaround once the workqueue scheduling delay is fixed on
 * LNL hybrid CPUs.
 */
#define LNL_FLUSH_WORKQUEUE(wq__) \
	flush_workqueue(wq__)
#define LNL_FLUSH_WORK(wrk__) \
	flush_work(wrk__)

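/*
 * Example (sketch): flush any pending G2H work before treating a missing
 * response as a timeout. The "ct->g2h_wq" name is illustrative, not taken
 * from this header.
 *
 *	LNL_FLUSH_WORKQUEUE(ct->g2h_wq);
 */
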
#endif