Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915/gsc: add initial support for GSC proxy

The GSC uC needs to communicate with the CSME to perform certain
operations. Since the GSC can't perform this communication directly
on platforms where it is integrated in GT, i915 needs to transfer the
messages from GSC to CSME and back.
The proxy flow is as follows:
1 - i915 submits a request to GSC asking for the message to CSME
2 - GSC replies with the proxy header + payload for CSME
3 - i915 sends the reply from GSC as-is to CSME via the mei proxy
component
4 - CSME replies with the proxy header + payload for GSC
5 - i915 submits a request to GSC with the reply from CSME
6 - GSC replies either with a new header + payload (same as step 2,
so we restart from there) or with an end message.

After GSC load, i915 is expected to start the first proxy message chain,
while all subsequent ones will be triggered by the GSC via interrupt.

To communicate with the CSME, we use a dedicated mei component, which
means that we need to wait for it to bind before we can initialize the
proxies. This usually happens quite fast, but given that there is a
chance that we'll have to wait a few seconds, the GSC work has been moved
to a dedicated WQ so as not to stall other processes.

v2: fix code style, includes and variable naming (Alan)
v3: add extra check for proxy status, fix includes and comments

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Alan Previn <alan.previn.teres.alexis@intel.com>
Reviewed-by: Alan Previn <alan.previn.teres.alexis@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230502163854.317653-4-daniele.ceraolospurio@intel.com

+472 -5
+1
drivers/gpu/drm/i915/Makefile
··· 194 194 # general-purpose microcontroller (GuC) support 195 195 i915-y += \ 196 196 gt/uc/intel_gsc_fw.o \ 197 + gt/uc/intel_gsc_proxy.o \ 197 198 gt/uc/intel_gsc_uc.o \ 198 199 gt/uc/intel_gsc_uc_heci_cmd_submit.o\ 199 200 gt/uc/intel_guc.o \
+10
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
··· 13 13 #define GSC_FW_STATUS_REG _MMIO(0x116C40) 14 14 #define GSC_FW_CURRENT_STATE REG_GENMASK(3, 0) 15 15 #define GSC_FW_CURRENT_STATE_RESET 0 16 + #define GSC_FW_PROXY_STATE_NORMAL 5 16 17 #define GSC_FW_INIT_COMPLETE_BIT REG_BIT(9) 17 18 18 19 static bool gsc_is_in_reset(struct intel_uncore *uncore) ··· 22 21 23 22 return REG_FIELD_GET(GSC_FW_CURRENT_STATE, fw_status) == 24 23 GSC_FW_CURRENT_STATE_RESET; 24 + } 25 + 26 + bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc) 27 + { 28 + struct intel_uncore *uncore = gsc_uc_to_gt(gsc)->uncore; 29 + u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG); 30 + 31 + return REG_FIELD_GET(GSC_FW_CURRENT_STATE, fw_status) == 32 + GSC_FW_PROXY_STATE_NORMAL; 25 33 } 26 34 27 35 bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc)
+1
drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h
··· 13 13 14 14 int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc); 15 15 bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc); 16 + bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc); 16 17 17 18 #endif
+384
drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include <linux/component.h> 7 + 8 + #include "drm/i915_component.h" 9 + #include "drm/i915_gsc_proxy_mei_interface.h" 10 + 11 + #include "gt/intel_gt.h" 12 + #include "gt/intel_gt_print.h" 13 + #include "intel_gsc_proxy.h" 14 + #include "intel_gsc_uc.h" 15 + #include "intel_gsc_uc_heci_cmd_submit.h" 16 + #include "i915_drv.h" 17 + 18 + /* 19 + * GSC proxy: 20 + * The GSC uC needs to communicate with the CSME to perform certain operations. 21 + * Since the GSC can't perform this communication directly on platforms where it 22 + * is integrated in GT, i915 needs to transfer the messages from GSC to CSME 23 + * and back. i915 must manually start the proxy flow after the GSC is loaded to 24 + * signal to GSC that we're ready to handle its messages and allow it to query 25 + * its init data from CSME; GSC will then trigger an HECI2 interrupt if it needs 26 + * to send messages to CSME again. 27 + * The proxy flow is as follow: 28 + * 1 - i915 submits a request to GSC asking for the message to CSME 29 + * 2 - GSC replies with the proxy header + payload for CSME 30 + * 3 - i915 sends the reply from GSC as-is to CSME via the mei proxy component 31 + * 4 - CSME replies with the proxy header + payload for GSC 32 + * 5 - i915 submits a request to GSC with the reply from CSME 33 + * 6 - GSC replies either with a new header + payload (same as step 2, so we 34 + * restart from there) or with an end message. 35 + */ 36 + 37 + /* 38 + * The component should load quite quickly in most cases, but it could take 39 + * a bit. 
Using a very big timeout just to cover the worst case scenario 40 + */ 41 + #define GSC_PROXY_INIT_TIMEOUT_MS 20000 42 + 43 + /* the protocol supports up to 32K in each direction */ 44 + #define GSC_PROXY_BUFFER_SIZE SZ_32K 45 + #define GSC_PROXY_CHANNEL_SIZE (GSC_PROXY_BUFFER_SIZE * 2) 46 + #define GSC_PROXY_MAX_MSG_SIZE (GSC_PROXY_BUFFER_SIZE - sizeof(struct intel_gsc_mtl_header)) 47 + 48 + /* FW-defined proxy header */ 49 + struct intel_gsc_proxy_header { 50 + /* 51 + * hdr: 52 + * Bits 0-7: type of the proxy message (see enum intel_gsc_proxy_type) 53 + * Bits 8-15: rsvd 54 + * Bits 16-31: length in bytes of the payload following the proxy header 55 + */ 56 + u32 hdr; 57 + #define GSC_PROXY_TYPE GENMASK(7, 0) 58 + #define GSC_PROXY_PAYLOAD_LENGTH GENMASK(31, 16) 59 + 60 + u32 source; /* Source of the Proxy message */ 61 + u32 destination; /* Destination of the Proxy message */ 62 + #define GSC_PROXY_ADDRESSING_KMD 0x10000 63 + #define GSC_PROXY_ADDRESSING_GSC 0x20000 64 + #define GSC_PROXY_ADDRESSING_CSME 0x30000 65 + 66 + u32 status; /* Command status */ 67 + } __packed; 68 + 69 + /* FW-defined proxy types */ 70 + enum intel_gsc_proxy_type { 71 + GSC_PROXY_MSG_TYPE_PROXY_INVALID = 0, 72 + GSC_PROXY_MSG_TYPE_PROXY_QUERY = 1, 73 + GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD = 2, 74 + GSC_PROXY_MSG_TYPE_PROXY_END = 3, 75 + GSC_PROXY_MSG_TYPE_PROXY_NOTIFICATION = 4, 76 + }; 77 + 78 + struct gsc_proxy_msg { 79 + struct intel_gsc_mtl_header header; 80 + struct intel_gsc_proxy_header proxy_header; 81 + } __packed; 82 + 83 + static int proxy_send_to_csme(struct intel_gsc_uc *gsc) 84 + { 85 + struct intel_gt *gt = gsc_uc_to_gt(gsc); 86 + struct i915_gsc_proxy_component *comp = gsc->proxy.component; 87 + struct intel_gsc_mtl_header *hdr; 88 + void *in = gsc->proxy.to_csme; 89 + void *out = gsc->proxy.to_gsc; 90 + u32 in_size; 91 + int ret; 92 + 93 + /* CSME msg only includes the proxy */ 94 + hdr = in; 95 + in += sizeof(struct intel_gsc_mtl_header); 96 + out += sizeof(struct 
intel_gsc_mtl_header); 97 + 98 + in_size = hdr->message_size - sizeof(struct intel_gsc_mtl_header); 99 + 100 + /* the message must contain at least the proxy header */ 101 + if (in_size < sizeof(struct intel_gsc_proxy_header) || 102 + in_size > GSC_PROXY_MAX_MSG_SIZE) { 103 + gt_err(gt, "Invalid CSME message size: %u\n", in_size); 104 + return -EINVAL; 105 + } 106 + 107 + ret = comp->ops->send(comp->mei_dev, in, in_size); 108 + if (ret < 0) { 109 + gt_err(gt, "Failed to send CSME message\n"); 110 + return ret; 111 + } 112 + 113 + ret = comp->ops->recv(comp->mei_dev, out, GSC_PROXY_MAX_MSG_SIZE); 114 + if (ret < 0) { 115 + gt_err(gt, "Failed to receive CSME message\n"); 116 + return ret; 117 + } 118 + 119 + return ret; 120 + } 121 + 122 + static int proxy_send_to_gsc(struct intel_gsc_uc *gsc) 123 + { 124 + struct intel_gt *gt = gsc_uc_to_gt(gsc); 125 + u32 *marker = gsc->proxy.to_csme; /* first dw of the reply header */ 126 + u64 addr_in = i915_ggtt_offset(gsc->proxy.vma); 127 + u64 addr_out = addr_in + GSC_PROXY_BUFFER_SIZE; 128 + u32 size = ((struct gsc_proxy_msg *)gsc->proxy.to_gsc)->header.message_size; 129 + int err; 130 + 131 + /* the message must contain at least the gsc and proxy headers */ 132 + if (size < sizeof(struct gsc_proxy_msg) || size > GSC_PROXY_BUFFER_SIZE) { 133 + gt_err(gt, "Invalid GSC proxy message size: %u\n", size); 134 + return -EINVAL; 135 + } 136 + 137 + /* clear the message marker */ 138 + *marker = 0; 139 + 140 + /* make sure the marker write is flushed */ 141 + wmb(); 142 + 143 + /* send the request */ 144 + err = intel_gsc_uc_heci_cmd_submit_packet(gsc, addr_in, size, 145 + addr_out, GSC_PROXY_BUFFER_SIZE); 146 + 147 + if (!err) { 148 + /* wait for the reply to show up */ 149 + err = wait_for(*marker != 0, 300); 150 + if (err) 151 + gt_err(gt, "Failed to get a proxy reply from gsc\n"); 152 + } 153 + 154 + return err; 155 + } 156 + 157 + static int validate_proxy_header(struct intel_gsc_proxy_header *header, 158 + u32 source, u32 dest) 
159 + { 160 + u32 type = FIELD_GET(GSC_PROXY_TYPE, header->hdr); 161 + u32 length = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, header->hdr); 162 + int ret = 0; 163 + 164 + if (header->destination != dest || header->source != source) { 165 + ret = -ENOEXEC; 166 + goto fail; 167 + } 168 + 169 + switch (type) { 170 + case GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD: 171 + if (length > 0) 172 + break; 173 + fallthrough; 174 + case GSC_PROXY_MSG_TYPE_PROXY_INVALID: 175 + ret = -EIO; 176 + goto fail; 177 + default: 178 + break; 179 + } 180 + 181 + fail: 182 + return ret; 183 + } 184 + 185 + static int proxy_query(struct intel_gsc_uc *gsc) 186 + { 187 + struct intel_gt *gt = gsc_uc_to_gt(gsc); 188 + struct gsc_proxy_msg *to_gsc = gsc->proxy.to_gsc; 189 + struct gsc_proxy_msg *to_csme = gsc->proxy.to_csme; 190 + int ret; 191 + 192 + intel_gsc_uc_heci_cmd_emit_mtl_header(&to_gsc->header, 193 + HECI_MEADDRESS_PROXY, 194 + sizeof(struct gsc_proxy_msg), 195 + 0); 196 + 197 + to_gsc->proxy_header.hdr = 198 + FIELD_PREP(GSC_PROXY_TYPE, GSC_PROXY_MSG_TYPE_PROXY_QUERY) | 199 + FIELD_PREP(GSC_PROXY_PAYLOAD_LENGTH, 0); 200 + 201 + to_gsc->proxy_header.source = GSC_PROXY_ADDRESSING_KMD; 202 + to_gsc->proxy_header.destination = GSC_PROXY_ADDRESSING_GSC; 203 + to_gsc->proxy_header.status = 0; 204 + 205 + while (1) { 206 + /* clear the GSC response header space */ 207 + memset(gsc->proxy.to_csme, 0, sizeof(struct gsc_proxy_msg)); 208 + 209 + /* send proxy message to GSC */ 210 + ret = proxy_send_to_gsc(gsc); 211 + if (ret) { 212 + gt_err(gt, "failed to send proxy message to GSC! 
%d\n", ret); 213 + goto proxy_error; 214 + } 215 + 216 + /* stop if this was the last message */ 217 + if (FIELD_GET(GSC_PROXY_TYPE, to_csme->proxy_header.hdr) == 218 + GSC_PROXY_MSG_TYPE_PROXY_END) 219 + break; 220 + 221 + /* make sure the GSC-to-CSME proxy header is sane */ 222 + ret = validate_proxy_header(&to_csme->proxy_header, 223 + GSC_PROXY_ADDRESSING_GSC, 224 + GSC_PROXY_ADDRESSING_CSME); 225 + if (ret) { 226 + gt_err(gt, "invalid GSC to CSME proxy header! %d\n", ret); 227 + goto proxy_error; 228 + } 229 + 230 + /* send the GSC message to the CSME */ 231 + ret = proxy_send_to_csme(gsc); 232 + if (ret < 0) { 233 + gt_err(gt, "failed to send proxy message to CSME! %d\n", ret); 234 + goto proxy_error; 235 + } 236 + 237 + /* update the GSC message size with the returned value from CSME */ 238 + to_gsc->header.message_size = ret + sizeof(struct intel_gsc_mtl_header); 239 + 240 + /* make sure the CSME-to-GSC proxy header is sane */ 241 + ret = validate_proxy_header(&to_gsc->proxy_header, 242 + GSC_PROXY_ADDRESSING_CSME, 243 + GSC_PROXY_ADDRESSING_GSC); 244 + if (ret) { 245 + gt_err(gt, "invalid CSME to GSC proxy header! %d\n", ret); 246 + goto proxy_error; 247 + } 248 + } 249 + 250 + proxy_error: 251 + return ret < 0 ? 
ret : 0; 252 + } 253 + 254 + int intel_gsc_proxy_request_handler(struct intel_gsc_uc *gsc) 255 + { 256 + struct intel_gt *gt = gsc_uc_to_gt(gsc); 257 + int err; 258 + 259 + if (!gsc->proxy.component_added) 260 + return -ENODEV; 261 + 262 + assert_rpm_wakelock_held(gt->uncore->rpm); 263 + 264 + /* when GSC is loaded, we can queue this before the component is bound */ 265 + err = wait_for(gsc->proxy.component, GSC_PROXY_INIT_TIMEOUT_MS); 266 + if (err) { 267 + gt_err(gt, "GSC proxy component didn't bind within the expected timeout\n"); 268 + return -EIO; 269 + } 270 + 271 + mutex_lock(&gsc->proxy.mutex); 272 + if (!gsc->proxy.component) { 273 + gt_err(gt, "GSC proxy worker called without the component being bound!\n"); 274 + err = -EIO; 275 + } else { 276 + err = proxy_query(gsc); 277 + } 278 + mutex_unlock(&gsc->proxy.mutex); 279 + return err; 280 + } 281 + 282 + static int i915_gsc_proxy_component_bind(struct device *i915_kdev, 283 + struct device *mei_kdev, void *data) 284 + { 285 + struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); 286 + struct intel_gsc_uc *gsc = &i915->media_gt->uc.gsc; 287 + 288 + mutex_lock(&gsc->proxy.mutex); 289 + gsc->proxy.component = data; 290 + gsc->proxy.component->mei_dev = mei_kdev; 291 + mutex_unlock(&gsc->proxy.mutex); 292 + 293 + return 0; 294 + } 295 + 296 + static void i915_gsc_proxy_component_unbind(struct device *i915_kdev, 297 + struct device *mei_kdev, void *data) 298 + { 299 + struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); 300 + struct intel_gsc_uc *gsc = &i915->media_gt->uc.gsc; 301 + 302 + mutex_lock(&gsc->proxy.mutex); 303 + gsc->proxy.component = NULL; 304 + mutex_unlock(&gsc->proxy.mutex); 305 + } 306 + 307 + static const struct component_ops i915_gsc_proxy_component_ops = { 308 + .bind = i915_gsc_proxy_component_bind, 309 + .unbind = i915_gsc_proxy_component_unbind, 310 + }; 311 + 312 + static int proxy_channel_alloc(struct intel_gsc_uc *gsc) 313 + { 314 + struct intel_gt *gt = gsc_uc_to_gt(gsc); 315 + 
struct i915_vma *vma; 316 + void *vaddr; 317 + int err; 318 + 319 + err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_PROXY_CHANNEL_SIZE, 320 + &vma, &vaddr); 321 + if (err) 322 + return err; 323 + 324 + gsc->proxy.vma = vma; 325 + gsc->proxy.to_gsc = vaddr; 326 + gsc->proxy.to_csme = vaddr + GSC_PROXY_BUFFER_SIZE; 327 + 328 + return 0; 329 + } 330 + 331 + static void proxy_channel_free(struct intel_gsc_uc *gsc) 332 + { 333 + if (!gsc->proxy.vma) 334 + return; 335 + 336 + gsc->proxy.to_gsc = NULL; 337 + gsc->proxy.to_csme = NULL; 338 + i915_vma_unpin_and_release(&gsc->proxy.vma, I915_VMA_RELEASE_MAP); 339 + } 340 + 341 + void intel_gsc_proxy_fini(struct intel_gsc_uc *gsc) 342 + { 343 + struct intel_gt *gt = gsc_uc_to_gt(gsc); 344 + struct drm_i915_private *i915 = gt->i915; 345 + 346 + if (fetch_and_zero(&gsc->proxy.component_added)) 347 + component_del(i915->drm.dev, &i915_gsc_proxy_component_ops); 348 + 349 + proxy_channel_free(gsc); 350 + } 351 + 352 + int intel_gsc_proxy_init(struct intel_gsc_uc *gsc) 353 + { 354 + int err; 355 + struct intel_gt *gt = gsc_uc_to_gt(gsc); 356 + struct drm_i915_private *i915 = gt->i915; 357 + 358 + mutex_init(&gsc->proxy.mutex); 359 + 360 + if (!IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY)) { 361 + gt_info(gt, "can't init GSC proxy due to missing mei component\n"); 362 + return -ENODEV; 363 + } 364 + 365 + err = proxy_channel_alloc(gsc); 366 + if (err) 367 + return err; 368 + 369 + err = component_add_typed(i915->drm.dev, &i915_gsc_proxy_component_ops, 370 + I915_COMPONENT_GSC_PROXY); 371 + if (err < 0) { 372 + gt_err(gt, "Failed to add GSC_PROXY component (%d)\n", err); 373 + goto out_free; 374 + } 375 + 376 + gsc->proxy.component_added = true; 377 + 378 + return 0; 379 + 380 + out_free: 381 + proxy_channel_free(gsc); 382 + return err; 383 + } 384 +
+17
drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef _INTEL_GSC_PROXY_H_ 7 + #define _INTEL_GSC_PROXY_H_ 8 + 9 + #include <linux/types.h> 10 + 11 + struct intel_gsc_uc; 12 + 13 + int intel_gsc_proxy_init(struct intel_gsc_uc *gsc); 14 + void intel_gsc_proxy_fini(struct intel_gsc_uc *gsc); 15 + int intel_gsc_proxy_request_handler(struct intel_gsc_uc *gsc); 16 + 17 + #endif
+45 -4
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
··· 10 10 #include "intel_gsc_uc.h" 11 11 #include "intel_gsc_fw.h" 12 12 #include "i915_drv.h" 13 + #include "intel_gsc_proxy.h" 13 14 14 15 static void gsc_work(struct work_struct *work) 15 16 { 16 17 struct intel_gsc_uc *gsc = container_of(work, typeof(*gsc), work); 17 18 struct intel_gt *gt = gsc_uc_to_gt(gsc); 18 19 intel_wakeref_t wakeref; 20 + int ret; 19 21 20 - with_intel_runtime_pm(gt->uncore->rpm, wakeref) 21 - intel_gsc_uc_fw_upload(gsc); 22 + wakeref = intel_runtime_pm_get(gt->uncore->rpm); 23 + 24 + ret = intel_gsc_uc_fw_upload(gsc); 25 + if (ret) 26 + goto out_put; 27 + 28 + ret = intel_gsc_proxy_request_handler(gsc); 29 + if (ret) 30 + goto out_put; 31 + 32 + /* 33 + * If there is a proxy establishment error, the GSC might still 34 + * complete the request handling cleanly, so we need to check the 35 + * status register to check if the proxy init was actually successful 36 + */ 37 + if (intel_gsc_uc_fw_proxy_init_done(gsc)) { 38 + drm_dbg(&gt->i915->drm, "GSC Proxy initialized\n"); 39 + intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_RUNNING); 40 + } else { 41 + drm_err(&gt->i915->drm, "GSC status reports proxy init not complete\n"); 42 + } 43 + 44 + out_put: 45 + intel_runtime_pm_put(gt->uncore->rpm, wakeref); 22 46 } 23 47 24 48 static bool gsc_engine_supported(struct intel_gt *gt) ··· 67 43 68 44 void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc) 69 45 { 46 + struct intel_gt *gt = gsc_uc_to_gt(gsc); 47 + 70 48 intel_uc_fw_init_early(&gsc->fw, INTEL_UC_FW_TYPE_GSC); 71 49 INIT_WORK(&gsc->work, gsc_work); 72 50 ··· 76 50 * GT with it being not fully setup hence check device info's 77 51 * engine mask 78 52 */ 79 - if (!gsc_engine_supported(gsc_uc_to_gt(gsc))) { 53 + if (!gsc_engine_supported(gt)) { 80 54 intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED); 81 55 return; 56 + } 57 + 58 + gsc->wq = alloc_ordered_workqueue("i915_gsc", 0); 59 + if (!gsc->wq) { 60 + gt_err(gt, "failed to allocate WQ for GSC, disabling FW\n"); 
61 + intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED); 82 62 } 83 63 } 84 64 ··· 120 88 121 89 gsc->ce = ce; 122 90 91 + /* if we fail to init proxy we still want to load GSC for PM */ 92 + intel_gsc_proxy_init(gsc); 93 + 123 94 intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOADABLE); 124 95 125 96 return 0; ··· 142 107 return; 143 108 144 109 flush_work(&gsc->work); 110 + if (gsc->wq) { 111 + destroy_workqueue(gsc->wq); 112 + gsc->wq = NULL; 113 + } 114 + 115 + intel_gsc_proxy_fini(gsc); 145 116 146 117 if (gsc->ce) 147 118 intel_engine_destroy_pinned_context(fetch_and_zero(&gsc->ce)); ··· 192 151 if (intel_gsc_uc_fw_init_done(gsc)) 193 152 return; 194 153 195 - queue_work(system_unbound_wq, &gsc->work); 154 + queue_work(gsc->wq, &gsc->work); 196 155 }
+13 -1
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
··· 10 10 11 11 struct i915_vma; 12 12 struct intel_context; 13 + struct i915_gsc_proxy_component; 13 14 14 15 struct intel_gsc_uc { 15 16 /* Generic uC firmware management */ ··· 20 19 struct i915_vma *local; /* private memory for GSC usage */ 21 20 struct intel_context *ce; /* for submission to GSC FW via GSC engine */ 22 21 23 - struct work_struct work; /* for delayed load */ 22 + /* for delayed load and proxy handling */ 23 + struct workqueue_struct *wq; 24 + struct work_struct work; 25 + 26 + struct { 27 + struct i915_gsc_proxy_component *component; 28 + bool component_added; 29 + struct i915_vma *vma; 30 + void *to_gsc; 31 + void *to_csme; 32 + struct mutex mutex; /* protects the tee channel binding */ 33 + } proxy; 24 34 }; 25 35 26 36 void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc);
+1
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
··· 14 14 #define GSC_HECI_VALIDITY_MARKER 0xA578875A 15 15 16 16 u8 heci_client_id; 17 + #define HECI_MEADDRESS_PROXY 10 17 18 #define HECI_MEADDRESS_PXP 17 18 19 #define HECI_MEADDRESS_HDCP 18 19 20