/*
 * Copyright © 2014-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

static void gen8_guc_raise_irq(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);

        I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
        GEM_BUG_ON(!guc->send_regs.base);
        GEM_BUG_ON(!guc->send_regs.count);
        GEM_BUG_ON(i >= guc->send_regs.count);

        return _MMIO(guc->send_regs.base + 4 * i);
}

void intel_guc_init_send_regs(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        enum forcewake_domains fw_domains = 0;
        unsigned int i;

        guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
        guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
        BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);

        for (i = 0; i < guc->send_regs.count; i++) {
                fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
                                        guc_send_reg(guc, i),
                                        FW_REG_READ | FW_REG_WRITE);
        }
        guc->send_regs.fw_domains = fw_domains;
}

void intel_guc_init_early(struct intel_guc *guc)
{
        intel_guc_fw_init_early(guc);
        intel_guc_ct_init_early(&guc->ct);
        intel_guc_log_init_early(&guc->log);

        mutex_init(&guc->send_mutex);
        spin_lock_init(&guc->irq_lock);
        guc->send = intel_guc_send_nop;
        guc->handler = intel_guc_to_host_event_handler_nop;
        guc->notify = gen8_guc_raise_irq;
}

static int guc_init_wq(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);

        /*
         * The GuC log buffer flush work item has to do register access to
         * send the ack to GuC, and this work item, if not synced before
         * suspend, can potentially get executed after the GFX device is
         * suspended.
         * By marking the WQ as freezable, we don't have to worry about
         * flushing this work item from the suspend hooks; a pending work
         * item, if any, will either be executed before the suspend or
         * scheduled later on resume. This way the handling of the work
         * item can be kept the same between system suspend and rpm suspend.
         */
        guc->log.relay.flush_wq =
                alloc_ordered_workqueue("i915-guc_log",
                                        WQ_HIGHPRI | WQ_FREEZABLE);
        if (!guc->log.relay.flush_wq) {
                DRM_ERROR("Couldn't allocate workqueue for GuC log\n");
                return -ENOMEM;
        }

        /*
         * Even though both sending a GuC action and adding a new workitem to
         * the GuC workqueue are serialized (each with its own locking), since
         * we're using multiple engines, it's possible that we're going to
         * issue a preempt request with two (or more - each for a different
         * engine) workitems in the GuC queue. In this situation, GuC may
         * submit all of them, which will make us very confused.
         * Our preemption contexts may even already be complete - before we
         * even had the chance to send the preempt action to GuC! Rather
         * than introducing yet another lock, we can just use an ordered
         * workqueue to make sure we're always sending a single preemption
         * request with a single workitem.
         */
        if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
            USES_GUC_SUBMISSION(dev_priv)) {
                guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
                                                          WQ_HIGHPRI);
                if (!guc->preempt_wq) {
                        destroy_workqueue(guc->log.relay.flush_wq);
                        DRM_ERROR("Couldn't allocate workqueue for GuC preemption\n");
                        return -ENOMEM;
                }
        }

        return 0;
}

static void guc_fini_wq(struct intel_guc *guc)
{
        struct workqueue_struct *wq;

        wq = fetch_and_zero(&guc->preempt_wq);
        if (wq)
                destroy_workqueue(wq);

        wq = fetch_and_zero(&guc->log.relay.flush_wq);
        if (wq)
                destroy_workqueue(wq);
}

int intel_guc_init_misc(struct intel_guc *guc)
{
        struct drm_i915_private *i915 = guc_to_i915(guc);
        int ret;

        ret = guc_init_wq(guc);
        if (ret)
                return ret;

        intel_uc_fw_fetch(i915, &guc->fw);

        return 0;
}

void intel_guc_fini_misc(struct intel_guc *guc)
{
        intel_uc_fw_fini(&guc->fw);
        guc_fini_wq(guc);
}

static int guc_shared_data_create(struct intel_guc *guc)
{
        struct i915_vma *vma;
        void *vaddr;

        vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                i915_vma_unpin_and_release(&vma, 0);
                return PTR_ERR(vaddr);
        }

        guc->shared_data = vma;
        guc->shared_data_vaddr = vaddr;

        return 0;
}

static void guc_shared_data_destroy(struct intel_guc *guc)
{
        i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
}

int intel_guc_init(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        int ret;

        ret = guc_shared_data_create(guc);
        if (ret)
                goto err_fetch;
        GEM_BUG_ON(!guc->shared_data);

        ret = intel_guc_log_create(&guc->log);
        if (ret)
                goto err_shared;

        ret = intel_guc_ads_create(guc);
        if (ret)
                goto err_log;
        GEM_BUG_ON(!guc->ads_vma);

        /* We need to notify the guc whenever we change the GGTT */
        i915_ggtt_enable_guc(dev_priv);

        return 0;

err_log:
        intel_guc_log_destroy(&guc->log);
err_shared:
        guc_shared_data_destroy(guc);
err_fetch:
        intel_uc_fw_fini(&guc->fw);
        return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);

        i915_ggtt_disable_guc(dev_priv);
        intel_guc_ads_destroy(guc);
        intel_guc_log_destroy(&guc->log);
        guc_shared_data_destroy(guc);
        intel_uc_fw_fini(&guc->fw);
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
        u32 level = intel_guc_log_get_level(&guc->log);
        u32 flags;
        u32 ads;

        ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
        flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED;

        if (!GUC_LOG_LEVEL_IS_ENABLED(level))
                flags |= GUC_LOG_DEFAULT_DISABLED;

        if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
                flags |= GUC_LOG_DISABLED;
        else
                flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
                         GUC_LOG_VERBOSITY_SHIFT;

        return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
        u32 flags = 0;

        flags |= GUC_CTL_VCS2_ENABLED;

        if (USES_GUC_SUBMISSION(guc_to_i915(guc)))
                flags |= GUC_CTL_KERNEL_SUBMISSIONS;
        else
                flags |= GUC_CTL_DISABLE_SCHEDULER;

        return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
        u32 flags = 0;

        if (USES_GUC_SUBMISSION(guc_to_i915(guc))) {
                u32 ctxnum, base;

                base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
                ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

                base >>= PAGE_SHIFT;
                flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
                         (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
        }
        return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
        u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
        u32 flags;

        #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
        #define UNIT SZ_1M
        #define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
        #else
        #define UNIT SZ_4K
        #define FLAG 0
        #endif

        BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
        BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
        BUILD_BUG_ON(!DPC_BUFFER_SIZE);
        BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
        BUILD_BUG_ON(!ISR_BUFFER_SIZE);
        BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

        BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
                        (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
        BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
                        (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
        BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
                        (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

        flags = GUC_LOG_VALID |
                GUC_LOG_NOTIFY_ON_HALF_FULL |
                FLAG |
                ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
                ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
                ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
                (offset << GUC_LOG_BUF_ADDR_SHIFT);

        #undef UNIT
        #undef FLAG

        return flags;
}

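/*
 * Worked example of the encoding above (sizes purely illustrative, not the
 * driver's actual defaults): an 8M crash buffer with UNIT resolving to SZ_1M
 * stores 8M / 1M - 1 = 7 in the GUC_LOG_CRASH field. That is, each size field
 * holds "number of units - 1", and FLAG tells the GuC which unit is in use.
 */
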
/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
void intel_guc_init_params(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        u32 params[GUC_CTL_MAX_DWORDS];
        int i;

        memset(params, 0, sizeof(params));

        /*
         * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
         * second. This ARAT value is calculated by:
         * Scheduler-Quantum-in-ns / ARAT-increment-in-ns
         *   = 1000000000 / 10 = 100000000
         */
        params[GUC_CTL_ARAT_HIGH] = 0;
        params[GUC_CTL_ARAT_LOW] = 100000000;

        params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;

        params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
        params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
        params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
        params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);

        for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
                DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);

        /*
         * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
         * they are power context saved so it's ok to release forcewake
         * when we are done here and take it again at xfer time.
         */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);

        I915_WRITE(SOFT_SCRATCH(0), 0);

        for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
                I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);

        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
}

int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
                       u32 *response_buf, u32 response_buf_size)
{
        WARN(1, "Unexpected send: action=%#x\n", *action);
        return -ENODEV;
}

void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
{
        WARN(1, "Unexpected event: no suitable handler\n");
}

/*
 * This function implements the MMIO based host to GuC interface: the action
 * dwords are written to the send registers, the GuC interrupt is raised, and
 * the first send register is then polled for the GuC response message.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
                        u32 *response_buf, u32 response_buf_size)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        u32 status;
        int i;
        int ret;

        GEM_BUG_ON(!len);
        GEM_BUG_ON(len > guc->send_regs.count);

        /* We expect only action code */
        GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

        /* If CT is available, we expect to use MMIO only during init/fini */
        GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
                   *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
                   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

        mutex_lock(&guc->send_mutex);
        intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);

        for (i = 0; i < len; i++)
                I915_WRITE(guc_send_reg(guc, i), action[i]);

        POSTING_READ(guc_send_reg(guc, i - 1));

        intel_guc_notify(guc);

        /*
         * No GuC command should ever take longer than 10ms.
         * Fast commands should still complete in 10us.
         */
        ret = __intel_wait_for_register_fw(dev_priv,
                                           guc_send_reg(guc, 0),
                                           INTEL_GUC_MSG_TYPE_MASK,
                                           INTEL_GUC_MSG_TYPE_RESPONSE <<
                                           INTEL_GUC_MSG_TYPE_SHIFT,
                                           10, 10, &status);
        /* If GuC explicitly returned an error, convert it to -EIO */
        if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
                ret = -EIO;

        if (ret) {
                DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
                          action[0], ret, status);
                goto out;
        }

        if (response_buf) {
                int count = min(response_buf_size, guc->send_regs.count - 1);

                for (i = 0; i < count; i++)
                        response_buf[i] = I915_READ(guc_send_reg(guc, i + 1));
        }

        /* Use data from the GuC response as our return value */
        ret = INTEL_GUC_MSG_TO_DATA(status);

out:
        intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
        mutex_unlock(&guc->send_mutex);

        return ret;
}

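/*
 * A minimal sketch of a caller (hypothetical, not part of the driver): a
 * single-dword action, whose code must fit INTEL_GUC_MSG_CODE_MASK, sent with
 * no response payload. Real users go through intel_guc_send(), as e.g.
 * intel_guc_auth_huc() below does.
 *
 *      static int example_send_action(struct intel_guc *guc, u32 code)
 *      {
 *              u32 action[] = { code };        / dword 0 is the action code /
 *
 *              return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action),
 *                                         NULL, 0);
 *      }
 */
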
void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        u32 msg, val;

        /*
         * Sample the log buffer flush related bits and clear them out
         * immediately from the message identity register to minimize the
         * probability of losing a flush interrupt when there are back to
         * back flush interrupts.
         * There can be a new flush interrupt, for a different log buffer
         * type (like for ISR), whilst the Host is handling one (for DPC).
         * Since the same bit is used in the message register for ISR and
         * DPC, it could happen that GuC sets the bit for the 2nd interrupt
         * but the Host clears out the bit on handling the 1st interrupt.
         */
        disable_rpm_wakeref_asserts(dev_priv);
        spin_lock(&guc->irq_lock);
        val = I915_READ(SOFT_SCRATCH(15));
        msg = val & guc->msg_enabled_mask;
        I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
        spin_unlock(&guc->irq_lock);
        enable_rpm_wakeref_asserts(dev_priv);

        intel_guc_to_host_process_recv_msg(guc, msg);
}

void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg)
{
        /* Make sure to handle only enabled messages */
        msg &= guc->msg_enabled_mask;

        if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
                   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
                intel_guc_log_handle_flush_event(&guc->log);
}

int intel_guc_sample_forcewake(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        u32 action[2];

        action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
        /* WaRsDisableCoarsePowerGating:skl,cnl */
        if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
                action[1] = 0;
        else
                /* bit 0 and 1 are for Render and Media domain separately */
                action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return: non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
        u32 action[] = {
                INTEL_GUC_ACTION_AUTHENTICATE_HUC,
                rsa_offset
        };

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/*
 * The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW
 * and then return, so waiting on the H2G is not enough to guarantee GuC is
 * done. When all the processing is done, GuC writes
 * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll on
 * that. Note that GuC does not ensure that the value in the register is
 * different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in
 * progress so we need to take care of that ourselves as well.
 */
static int guc_sleep_state_action(struct intel_guc *guc,
                                  const u32 *action, u32 len)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        int ret;
        u32 status;

        I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);

        ret = intel_guc_send(guc, action, len);
        if (ret)
                return ret;

        ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14),
                                        INTEL_GUC_SLEEP_STATE_INVALID_MASK,
                                        0, 0, 10, &status);
        if (ret)
                return ret;

        if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
                DRM_ERROR("GuC failed to change sleep state. action=0x%x, err=%u\n",
                          action[0], status);
                return -EIO;
        }

        return 0;
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc: the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
        u32 data[] = {
                INTEL_GUC_ACTION_ENTER_S_STATE,
                GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
                intel_guc_ggtt_offset(guc, guc->shared_data)
        };

        return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc: intel_guc structure
 * @engine: engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
                           struct intel_engine_cs *engine)
{
        u32 data[7];

        GEM_BUG_ON(!guc->execbuf_client);

        data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
        data[1] = engine->guc_id;
        data[2] = 0;
        data[3] = 0;
        data[4] = 0;
        data[5] = guc->execbuf_client->stage_id;
        data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);

        return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc: the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
        u32 data[] = {
                INTEL_GUC_ACTION_EXIT_S_STATE,
                GUC_POWER_D0,
                intel_guc_ggtt_offset(guc, guc->shared_data)
        };

        return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
}

/**
 * DOC: GuC Address Space
 *
 * The layout of GuC address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address  +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^   |                    |
 *     |       |   |                    |
 *     |      GuC  |        GuC         |
 *     |     WOPCM |       WOPCM        |
 *     |      Size |                    |
 *     |       |   |                    |
 *     v       v   |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to the
 * GuC WOPCM, while the upper part of GuC Address Space [ggtt_pin_bias,
 * GUC_GGTT_TOP) is mapped to DRAM. The value of the GuC ggtt_pin_bias is the
 * GuC WOPCM size.
 */

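/*
 * A minimal sketch of the constraint the layout above imposes (the helper is
 * hypothetical, not part of the driver): from the GuC's point of view, only
 * GGTT offsets in [WOPCM size, GUC_GGTT_TOP) map to DRAM, where the WOPCM
 * size is what intel_guc_reserved_gtt_size() at the end of this file returns.
 *
 *      static inline bool guc_offset_maps_to_dram(struct intel_guc *guc,
 *                                                 u32 offset)
 *      {
 *              return offset >= intel_guc_reserved_gtt_size(guc) &&
 *                     offset < GUC_GGTT_TOP;
 *      }
 */
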
/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc: the guc
 * @size: size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its whole lifetime,
 * so we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GuC ggtt_pin_bias)
 * because that range is reserved inside GuC.
 *
 * Return: A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        u64 flags;
        int ret;

        obj = i915_gem_object_create(dev_priv, size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
        if (IS_ERR(vma))
                goto err;

        flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
        ret = i915_vma_pin(vma, 0, 0, flags);
        if (ret) {
                vma = ERR_PTR(ret);
                goto err;
        }

        return vma;

err:
        i915_gem_object_put(obj);
        return vma;
}

/**
 * intel_guc_reserved_gtt_size()
 * @guc: intel_guc structure
 *
 * The GuC WOPCM mapping shadows the lower part of the GGTT, so if we are using
 * GuC we can't have any objects pinned in that region. This function returns
 * the size of the shadowed region.
 *
 * Returns:
 * 0 if GuC is not present or not in use.
 * Otherwise, the GuC WOPCM size.
 */
u32 intel_guc_reserved_gtt_size(struct intel_guc *guc)
{
        return guc_to_i915(guc)->wopcm.guc.size;
}