// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "xe_device.h"
#include "xe_device_sysfs.h"
#include "xe_mmio.h"
#include "xe_pcode_api.h"
#include "xe_pcode.h"
#include "xe_pm.h"

/**
 * DOC: Xe device sysfs
 * The Xe driver needs to expose certain tunable knobs to user space for each
 * graphics device, so sysfs attributes are added at per-device granularity.
 * These attributes are available under the PCI device kobject directory.
 *
 * vram_d3cold_threshold - Report/change the VRAM usage threshold (in MB) below
 * which VRAM save/restore is permissible during runtime D3cold entry/exit.
 *
 * lb_fan_control_version - Fan control version provisioned by late binding.
 * Exposed only if supported by the device.
 *
 * lb_voltage_regulator_version - Voltage regulator version provisioned by late
 * binding. Exposed only if supported by the device.
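 *
 * A minimal usage sketch from user space (the ``<bdf>`` placeholder and the
 * example value are illustrative, not defaults):
 *
 * .. code-block:: shell
 *
 *	$ cat /sys/bus/pci/devices/<bdf>/vram_d3cold_threshold
 *	$ echo 300 > /sys/bus/pci/devices/<bdf>/vram_d3cold_threshold
 */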

static ssize_t
vram_d3cold_threshold_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);

	return sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold);
}

static ssize_t
vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
			    const char *buff, size_t count)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	u32 vram_d3cold_threshold;
	int ret;

	ret = kstrtou32(buff, 0, &vram_d3cold_threshold);
	if (ret)
		return ret;

	drm_dbg(&xe->drm, "vram_d3cold_threshold: %u\n", vram_d3cold_threshold);

	xe_pm_runtime_get(xe);
	ret = xe_pm_set_vram_threshold(xe, vram_d3cold_threshold);
	xe_pm_runtime_put(xe);

	return ret ?: count;
}

static DEVICE_ATTR_RW(vram_d3cold_threshold);

static struct attribute *vram_attrs[] = {
	&dev_attr_vram_d3cold_threshold.attr,
	NULL
};

static const struct attribute_group vram_attr_group = {
	.attrs = vram_attrs,
};

static ssize_t
lb_fan_control_version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
	struct xe_tile *root = xe_device_get_root_tile(xe);
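	/*
	 * ver_low/ver_high double as the mailbox input: xe_pcode_read() sends
	 * the pointed-to value as data0, so they are seeded with the table ID
	 * (FAN_TABLE) whose version is being queried.
	 */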
	u32 cap = 0, ver_low = FAN_TABLE, ver_high = FAN_TABLE;
	u16 major = 0, minor = 0, hotfix = 0, build = 0;
	int ret;

	xe_pm_runtime_get(xe);

	ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
			    &cap, NULL);
	if (ret)
		goto out;

	if (REG_FIELD_GET(V1_FAN_PROVISIONED, cap)) {
		ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
				    &ver_low, NULL);
		if (ret)
			goto out;

		ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
				    &ver_high, NULL);
		if (ret)
			goto out;

		major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
		minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
		hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
		build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);
	}
out:
	xe_pm_runtime_put(xe);

	return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
}
static DEVICE_ATTR_ADMIN_RO(lb_fan_control_version);

static ssize_t
lb_voltage_regulator_version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
	struct xe_tile *root = xe_device_get_root_tile(xe);
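	/* As above: VR_CONFIG is the table ID sent as mailbox input. */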
	u32 cap = 0, ver_low = VR_CONFIG, ver_high = VR_CONFIG;
	u16 major = 0, minor = 0, hotfix = 0, build = 0;
	int ret;

	xe_pm_runtime_get(xe);

	ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
			    &cap, NULL);
	if (ret)
		goto out;

	if (REG_FIELD_GET(VR_PARAMS_PROVISIONED, cap)) {
		ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0),
				    &ver_low, NULL);
		if (ret)
			goto out;

		ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0),
				    &ver_high, NULL);
		if (ret)
			goto out;

		major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low);
		minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low);
		hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high);
		build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high);
	}
out:
	xe_pm_runtime_put(xe);

	return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build);
}
static DEVICE_ATTR_ADMIN_RO(lb_voltage_regulator_version);

static struct attribute *late_bind_attrs[] = {
	&dev_attr_lb_fan_control_version.attr,
	&dev_attr_lb_voltage_regulator_version.attr,
	NULL
};

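/*
 * Evaluated once per attribute when the group is created; if the capability
 * mailbox read fails, both late binding attributes stay hidden.
 */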
static umode_t late_bind_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
	struct xe_tile *root = xe_device_get_root_tile(xe);
	u32 cap = 0;
	int ret;

	ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
			    &cap, NULL);
	if (ret)
		return 0;

	if (attr == &dev_attr_lb_fan_control_version.attr &&
	    REG_FIELD_GET(V1_FAN_SUPPORTED, cap))
		return attr->mode;
	if (attr == &dev_attr_lb_voltage_regulator_version.attr &&
	    REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
		return attr->mode;

	return 0;
}

static const struct attribute_group late_bind_attr_group = {
	.attrs = late_bind_attrs,
	.is_visible = late_bind_attr_is_visible,
};

/**
 * DOC: PCIe Gen5 Limitations
 *
 * The default link speed of discrete GPUs is determined by configuration
 * parameters stored in their flash memory, which are subject to override
 * through user initiated firmware updates. It has been observed that devices
 * configured with PCIe Gen5 as their default link speed may encounter link
 * quality issues due to host or motherboard limitations and may have to
 * auto-downgrade their link to PCIe Gen4 speed when faced with an unstable
 * link at Gen5, which makes firmware updates rather risky on such setups.
 * Before pushing a firmware image with PCIe Gen5 as the default configuration,
 * it is required to ensure that the device is capable of auto-downgrading its
 * link to PCIe Gen4 speed. This can be done by reading the
 * ``auto_link_downgrade_capable`` sysfs entry, which denotes whether the
 * device is capable of auto-downgrading its link to PCIe Gen4 speed with a
 * boolean output value of ``0`` or ``1``, meaning `incapable` or `capable`
 * respectively.
 *
 * .. code-block:: shell
 *
 *	$ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_capable
 *
 * Pushing a firmware image with PCIe Gen5 as the default configuration on an
 * auto link downgrade incapable device and facing link instability due to host
 * or motherboard limitations can result in the driver failing to bind to the
 * device, making further firmware updates impossible and leaving RMA as the
 * last resort.
 *
 * The link downgrade status of auto link downgrade capable devices is
 * available through the ``auto_link_downgrade_status`` sysfs entry with a
 * boolean output value of ``0`` or ``1``, where ``0`` means no
 * auto-downgrading was required during link training (which is the optimal
 * scenario) and ``1`` means the device has auto-downgraded its link to PCIe
 * Gen4 speed due to an unstable Gen5 link.
 *
 * .. code-block:: shell
 *
 *	$ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_status
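 *
 * A minimal pre-update check sketched from the above (the ``<bdf>`` path is a
 * placeholder for the device's PCI address):
 *
 * .. code-block:: shell
 *
 *	$ cap=$(cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_capable)
 *	$ [ "$cap" = "1" ] && echo "Gen5 firmware OK" || echo "keep Gen4 firmware"
 */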

static ssize_t
auto_link_downgrade_capable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	u32 cap, val;

	xe_pm_runtime_get(xe);
	val = xe_mmio_read32(xe_root_tile_mmio(xe), BMG_PCIE_CAP);
	xe_pm_runtime_put(xe);

	cap = REG_FIELD_GET(LINK_DOWNGRADE, val);
	return sysfs_emit(buf, "%u\n", cap == DOWNGRADE_CAPABLE);
}
static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_capable);

static ssize_t
auto_link_downgrade_status_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct xe_device *xe = pdev_to_xe_device(pdev);
	/* default the auto_link_downgrade status to 0 */
	u32 val = 0;
	int ret;

	xe_pm_runtime_get(xe);
	ret = xe_pcode_read(xe_device_get_root_tile(xe),
			    PCODE_MBOX(DGFX_PCODE_STATUS, DGFX_GET_INIT_STATUS, 0),
			    &val, NULL);
	xe_pm_runtime_put(xe);

	return ret ?: sysfs_emit(buf, "%u\n", REG_FIELD_GET(DGFX_LINK_DOWNGRADE_STATUS, val));
}
static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_status);

static struct attribute *auto_link_downgrade_attrs[] = {
	&dev_attr_auto_link_downgrade_capable.attr,
	&dev_attr_auto_link_downgrade_status.attr,
	NULL
};

static const struct attribute_group auto_link_downgrade_attr_group = {
	.attrs = auto_link_downgrade_attrs,
};

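/*
 * The attribute groups below are devm-managed, so they are removed
 * automatically when the driver unbinds; no explicit cleanup path is needed.
 */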
int xe_device_sysfs_init(struct xe_device *xe)
{
	struct device *dev = xe->drm.dev;
	int ret;

	if (xe->d3cold.capable) {
		ret = devm_device_add_group(dev, &vram_attr_group);
		if (ret)
			return ret;
	}

	if (xe->info.platform == XE_BATTLEMAGE && !IS_SRIOV_VF(xe)) {
		ret = devm_device_add_group(dev, &auto_link_downgrade_attr_group);
		if (ret)
			return ret;

		ret = devm_device_add_group(dev, &late_bind_attr_group);
		if (ret)
			return ret;
	}

	return 0;
}