1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2023-2024 Intel Corporation
4 */
5
6#include <linux/string_choices.h>
7#include <linux/wordpart.h>
8
9#include "abi/guc_actions_sriov_abi.h"
10#include "abi/guc_klvs_abi.h"
11
12#include "regs/xe_guc_regs.h"
13
14#include "xe_bo.h"
15#include "xe_device.h"
16#include "xe_ggtt.h"
17#include "xe_gt.h"
18#include "xe_gt_sriov_pf_config.h"
19#include "xe_gt_sriov_pf_helpers.h"
20#include "xe_gt_sriov_pf_policy.h"
21#include "xe_gt_sriov_printk.h"
22#include "xe_guc.h"
23#include "xe_guc_ct.h"
24#include "xe_guc_db_mgr.h"
25#include "xe_guc_fwif.h"
26#include "xe_guc_id_mgr.h"
27#include "xe_guc_klv_helpers.h"
28#include "xe_guc_klv_thresholds_set.h"
29#include "xe_guc_submit.h"
30#include "xe_lmtt.h"
31#include "xe_map.h"
32#include "xe_migrate.h"
33#include "xe_sriov.h"
34#include "xe_ttm_vram_mgr.h"
35#include "xe_wopcm.h"
36
37/*
38 * Return: number of KLVs that were successfully parsed and saved,
39 * negative error code on failure.
40 */
41static int guc_action_update_vf_cfg(struct xe_guc *guc, u32 vfid,
42 u64 addr, u32 size)
43{
44 u32 request[] = {
45 GUC_ACTION_PF2GUC_UPDATE_VF_CFG,
46 vfid,
47 lower_32_bits(addr),
48 upper_32_bits(addr),
49 size,
50 };
51
52 return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
53}
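/*
 * The request above is laid out as: action id, target VF id, lower and upper
 * 32 bits of the GGTT address of the KLV buffer, and the buffer size in
 * dwords. Passing addr = 0 and size = 0 (as pf_send_vf_cfg_reset() below
 * does) resets the VF configuration held by GuC.
 */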
54
55/*
56 * Return: 0 on success, negative error code on failure.
57 */
58static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
59{
60 struct xe_guc *guc = &gt->uc.guc;
61 int ret;
62
63 ret = guc_action_update_vf_cfg(guc, vfid, 0, 0);
64
65 return ret <= 0 ? ret : -EPROTO;
66}
67
68/*
69 * Return: number of KLVs that were successfully parsed and saved,
70 * negative error code on failure.
71 */
72static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
73{
74 const u32 bytes = num_dwords * sizeof(u32);
75 struct xe_tile *tile = gt_to_tile(gt);
76 struct xe_device *xe = tile_to_xe(tile);
77 struct xe_guc *guc = &gt->uc.guc;
78 struct xe_bo *bo;
79 int ret;
80
81 bo = xe_bo_create_pin_map(xe, tile, NULL,
82 ALIGN(bytes, PAGE_SIZE),
83 ttm_bo_type_kernel,
84 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
85 XE_BO_FLAG_GGTT |
86 XE_BO_FLAG_GGTT_INVALIDATE);
87 if (IS_ERR(bo))
88 return PTR_ERR(bo);
89
90 xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);
91
92 ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);
93
94 xe_bo_unpin_map_no_vm(bo);
95
96 return ret;
97}
98
99/*
100 * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
101 * negative error code on failure.
102 */
103static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
104 const u32 *klvs, u32 num_dwords)
105{
106 int ret;
107
108 xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
109
110 ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);
111
112 if (ret != num_klvs) {
113 int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
114 struct drm_printer p = xe_gt_info_printer(gt);
115 char name[8];
116
117 xe_gt_sriov_notice(gt, "Failed to push %s %u config KLV%s (%pe)\n",
118 xe_sriov_function_name(vfid, name, sizeof(name)),
119 num_klvs, str_plural(num_klvs), ERR_PTR(err));
120 xe_guc_klv_print(klvs, num_dwords, &p);
121 return err;
122 }
123
124 if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
125 struct drm_printer p = xe_gt_info_printer(gt);
126
127 xe_guc_klv_print(klvs, num_dwords, &p);
128 }
129
130 return 0;
131}
132
133static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
134{
135 u32 klv[] = {
136 FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 1),
137 value,
138 };
139
140 return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
141}
142
143static int pf_push_vf_cfg_u64(struct xe_gt *gt, unsigned int vfid, u16 key, u64 value)
144{
145 u32 klv[] = {
146 FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, 2),
147 lower_32_bits(value),
148 upper_32_bits(value),
149 };
150
151 return pf_push_vf_cfg_klvs(gt, vfid, 1, klv, ARRAY_SIZE(klv));
152}
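/*
 * Each KLV consists of a header dword, with the key and the length of the
 * value (expressed in dwords) packed by GUC_KLV_0_KEY/GUC_KLV_0_LEN, followed
 * by that many value dwords: a 32-bit value takes 2 dwords in total, a 64-bit
 * value takes 3, with the lower 32 bits placed first.
 */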
153
154static int pf_push_vf_cfg_ggtt(struct xe_gt *gt, unsigned int vfid, u64 start, u64 size)
155{
156 u32 klvs[] = {
157 PREP_GUC_KLV_TAG(VF_CFG_GGTT_START),
158 lower_32_bits(start),
159 upper_32_bits(start),
160 PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE),
161 lower_32_bits(size),
162 upper_32_bits(size),
163 };
164
165 return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
166}
167
168static int pf_push_vf_cfg_ctxs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
169{
170 u32 klvs[] = {
171 PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID),
172 begin,
173 PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS),
174 num,
175 };
176
177 return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
178}
179
180static int pf_push_vf_cfg_dbs(struct xe_gt *gt, unsigned int vfid, u32 begin, u32 num)
181{
182 u32 klvs[] = {
183 PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID),
184 begin,
185 PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS),
186 num,
187 };
188
189 return pf_push_vf_cfg_klvs(gt, vfid, 2, klvs, ARRAY_SIZE(klvs));
190}
191
192static int pf_push_vf_cfg_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 *exec_quantum)
193{
194 /* GuC will silently clamp values exceeding max */
195 *exec_quantum = min_t(u32, *exec_quantum, GUC_KLV_VF_CFG_EXEC_QUANTUM_MAX_VALUE);
196
197 return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, *exec_quantum);
198}
199
200static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u32 *preempt_timeout)
201{
202 /* GuC will silently clamp values exceeding max */
203 *preempt_timeout = min_t(u32, *preempt_timeout, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_MAX_VALUE);
204
205 return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
206}
207
208static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
209{
210 return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
211}
212
213static int pf_push_vf_cfg_threshold(struct xe_gt *gt, unsigned int vfid,
214 enum xe_guc_klv_threshold_index index, u32 value)
215{
216 u32 key = xe_guc_klv_threshold_index_to_key(index);
217
218 xe_gt_assert(gt, key);
219 return pf_push_vf_cfg_u32(gt, vfid, key, value);
220}
221
222static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned int vfid)
223{
224 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
225 xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
226 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
227
228 return &gt->sriov.pf.vfs[vfid].config;
229}
230
231/* Return: number of configuration dwords written */
232static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
233{
234 u32 n = 0;
235
236 if (xe_ggtt_node_allocated(config->ggtt_region)) {
237 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
238 cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
239 cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
240
241 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
242 cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
243 cfg[n++] = upper_32_bits(config->ggtt_region->base.size);
244 }
245
246 return n;
247}
248
249/* Return: number of configuration dwords written */
250static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
251{
252 u32 n = 0;
253
254 n += encode_config_ggtt(cfg, config);
255
256 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
257 cfg[n++] = config->begin_ctx;
258
259 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
260 cfg[n++] = config->num_ctxs;
261
262 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
263 cfg[n++] = config->begin_db;
264
265 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
266 cfg[n++] = config->num_dbs;
267
268 if (config->lmem_obj) {
269 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE);
270 cfg[n++] = lower_32_bits(config->lmem_obj->size);
271 cfg[n++] = upper_32_bits(config->lmem_obj->size);
272 }
273
274 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM);
275 cfg[n++] = config->exec_quantum;
276
277 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
278 cfg[n++] = config->preempt_timeout;
279
280#define encode_threshold_config(TAG, ...) ({ \
281 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG); \
282 cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)]; \
283});
284
285 MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
286#undef encode_threshold_config
287
288 return n;
289}
290
291static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
292{
293 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
294 u32 max_cfg_dwords = SZ_4K / sizeof(u32);
295 u32 num_dwords;
296 int num_klvs;
297 u32 *cfg;
298 int err;
299
300 cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
301 if (!cfg)
302 return -ENOMEM;
303
304 num_dwords = encode_config(cfg, config);
305 xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
306
307 if (xe_gt_is_media_type(gt)) {
308 struct xe_gt *primary = gt->tile->primary_gt;
309 struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);
310
311 /* media-GT will never include a GGTT config */
312 xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config));
313
314 /* the GGTT config must be taken from the primary-GT instead */
315 num_dwords += encode_config_ggtt(cfg + num_dwords, other);
316 }
317 xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
318
319 num_klvs = xe_guc_klv_count(cfg, num_dwords);
320 err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);
321
322 kfree(cfg);
323 return err;
324}
325
326static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
327{
328 struct xe_device *xe = gt_to_xe(gt);
329
330 return IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
331}
332
333static u64 pf_get_min_spare_ggtt(struct xe_gt *gt)
334{
335 /* XXX: preliminary */
336 return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
337 pf_get_ggtt_alignment(gt) : SZ_64M;
338}
339
340static u64 pf_get_spare_ggtt(struct xe_gt *gt)
341{
342 u64 spare;
343
344 xe_gt_assert(gt, !xe_gt_is_media_type(gt));
345 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
346 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
347
348 spare = gt->sriov.pf.spare.ggtt_size;
349 spare = max_t(u64, spare, pf_get_min_spare_ggtt(gt));
350
351 return spare;
352}
353
354static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size)
355{
356 xe_gt_assert(gt, !xe_gt_is_media_type(gt));
357 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
358 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
359
360 if (size && size < pf_get_min_spare_ggtt(gt))
361 return -EINVAL;
362
363 size = round_up(size, pf_get_ggtt_alignment(gt));
364 gt->sriov.pf.spare.ggtt_size = size;
365
366 return 0;
367}
368
369static int pf_distribute_config_ggtt(struct xe_tile *tile, unsigned int vfid, u64 start, u64 size)
370{
371 int err, err2 = 0;
372
373 err = pf_push_vf_cfg_ggtt(tile->primary_gt, vfid, start, size);
374
375 if (tile->media_gt && !err)
376 err2 = pf_push_vf_cfg_ggtt(tile->media_gt, vfid, start, size);
377
378 return err ?: err2;
379}
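/*
 * The GGTT is a tile-level resource shared by the primary and media GT, so
 * the same range is pushed to both GuCs; the media GT is updated only after
 * the push to the primary GT has succeeded.
 */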
380
381static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
382{
383 if (xe_ggtt_node_allocated(node)) {
384 /*
385 * explicit GGTT PTE assignment to the PF using xe_ggtt_assign()
386 * is redundant, as PTE will be implicitly re-assigned to PF by
387 * the xe_ggtt_clear() called by xe_ggtt_node_remove() below.
388 */
389 xe_ggtt_node_remove(node, false);
390 }
391}
392
393static void pf_release_vf_config_ggtt(struct xe_gt *gt, struct xe_gt_sriov_config *config)
394{
395 pf_release_ggtt(gt_to_tile(gt), config->ggtt_region);
396 config->ggtt_region = NULL;
397}
398
399static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
400{
401 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
402 struct xe_ggtt_node *node;
403 struct xe_tile *tile = gt_to_tile(gt);
404 struct xe_ggtt *ggtt = tile->mem.ggtt;
405 u64 alignment = pf_get_ggtt_alignment(gt);
406 int err;
407
408 xe_gt_assert(gt, vfid);
409 xe_gt_assert(gt, !xe_gt_is_media_type(gt));
410 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
411
412 size = round_up(size, alignment);
413
414 if (xe_ggtt_node_allocated(config->ggtt_region)) {
415 err = pf_distribute_config_ggtt(tile, vfid, 0, 0);
416 if (unlikely(err))
417 return err;
418
419 pf_release_vf_config_ggtt(gt, config);
420 }
421 xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));
422
423 if (!size)
424 return 0;
425
426 node = xe_ggtt_node_init(ggtt);
427 if (IS_ERR(node))
428 return PTR_ERR(node);
429
430 err = xe_ggtt_node_insert(node, size, alignment);
431 if (unlikely(err))
432 goto err;
433
434 xe_ggtt_assign(node, vfid);
435 xe_gt_sriov_dbg_verbose(gt, "VF%u assigned GGTT %llx-%llx\n",
436 vfid, node->base.start, node->base.start + node->base.size - 1);
437
438 err = pf_distribute_config_ggtt(gt->tile, vfid, node->base.start, node->base.size);
439 if (unlikely(err))
440 goto err;
441
442 config->ggtt_region = node;
443 return 0;
444err:
445 xe_ggtt_node_fini(node);
446 return err;
447}
448
449static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid)
450{
451 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
452 struct xe_ggtt_node *node = config->ggtt_region;
453
454 xe_gt_assert(gt, !xe_gt_is_media_type(gt));
455 return xe_ggtt_node_allocated(node) ? node->base.size : 0;
456}
457
458/**
459 * xe_gt_sriov_pf_config_get_ggtt - Query size of GGTT address space of the VF.
460 * @gt: the &xe_gt
461 * @vfid: the VF identifier
462 *
463 * This function can only be called on PF.
464 *
465 * Return: size of the VF's assigned (or PF's spare) GGTT address space.
466 */
467u64 xe_gt_sriov_pf_config_get_ggtt(struct xe_gt *gt, unsigned int vfid)
468{
469 u64 size;
470
471 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
472 if (vfid)
473 size = pf_get_vf_config_ggtt(gt_to_tile(gt)->primary_gt, vfid);
474 else
475 size = pf_get_spare_ggtt(gt_to_tile(gt)->primary_gt);
476 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
477
478 return size;
479}
480
481static int pf_config_set_u64_done(struct xe_gt *gt, unsigned int vfid, u64 value,
482 u64 actual, const char *what, int err)
483{
484 char size[10];
485 char name[8];
486
487 xe_sriov_function_name(vfid, name, sizeof(name));
488
489 if (unlikely(err)) {
490 string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
491 xe_gt_sriov_notice(gt, "Failed to provision %s with %llu (%s) %s (%pe)\n",
492 name, value, size, what, ERR_PTR(err));
493 string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
494 xe_gt_sriov_info(gt, "%s provisioning remains at %llu (%s) %s\n",
495 name, actual, size, what);
496 return err;
497 }
498
499 /* the actual value may have changed during provisioning */
500 string_get_size(actual, 1, STRING_UNITS_2, size, sizeof(size));
501 xe_gt_sriov_info(gt, "%s provisioned with %llu (%s) %s\n",
502 name, actual, size, what);
503 return 0;
504}
505
506/**
507 * xe_gt_sriov_pf_config_set_ggtt - Provision VF with GGTT space.
508 * @gt: the &xe_gt (can't be media)
509 * @vfid: the VF identifier
510 * @size: requested GGTT size
511 *
512 * If &vfid represents the PF, then the function will change the PF's spare GGTT config.
513 *
514 * This function can only be called on PF.
515 *
516 * Return: 0 on success or a negative error code on failure.
517 */
518int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
519{
520 int err;
521
522 xe_gt_assert(gt, !xe_gt_is_media_type(gt));
523
524 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
525 if (vfid)
526 err = pf_provision_vf_ggtt(gt, vfid, size);
527 else
528 err = pf_set_spare_ggtt(gt, size);
529 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
530
531 return pf_config_set_u64_done(gt, vfid, size,
532 xe_gt_sriov_pf_config_get_ggtt(gt, vfid),
533 vfid ? "GGTT" : "spare GGTT", err);
534}
535
536static int pf_config_bulk_set_u64_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
537 u64 value, u64 (*get)(struct xe_gt*, unsigned int),
538 const char *what, unsigned int last, int err)
539{
540 char size[10];
541
542 xe_gt_assert(gt, first);
543 xe_gt_assert(gt, num_vfs);
544 xe_gt_assert(gt, first <= last);
545
546 if (num_vfs == 1)
547 return pf_config_set_u64_done(gt, first, value, get(gt, first), what, err);
548
549 if (unlikely(err)) {
550 xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
551 first, first + num_vfs - 1, what);
552 if (last > first)
553 pf_config_bulk_set_u64_done(gt, first, last - first, value,
554 get, what, last, 0);
555 return pf_config_set_u64_done(gt, last, value, get(gt, last), what, err);
556 }
557
558 /* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
559 value = get(gt, first);
560 string_get_size(value, 1, STRING_UNITS_2, size, sizeof(size));
561 xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %llu (%s) %s\n",
562 first, first + num_vfs - 1, value, size, what);
563 return 0;
564}
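/*
 * Note: on a partial bulk failure the helper above first recurses to report
 * the VFs that were provisioned successfully (first..last-1) and then
 * reports the failing VF (last) together with the error.
 */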
565
566/**
567 * xe_gt_sriov_pf_config_bulk_set_ggtt - Provision many VFs with GGTT.
568 * @gt: the &xe_gt (can't be media)
569 * @vfid: starting VF identifier (can't be 0)
570 * @num_vfs: number of VFs to provision
571 * @size: requested GGTT size
572 *
573 * This function can only be called on PF.
574 *
575 * Return: 0 on success or a negative error code on failure.
576 */
577int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid,
578 unsigned int num_vfs, u64 size)
579{
580 unsigned int n;
581 int err = 0;
582
583 xe_gt_assert(gt, vfid);
584 xe_gt_assert(gt, !xe_gt_is_media_type(gt));
585
586 if (!num_vfs)
587 return 0;
588
589 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
590 for (n = vfid; n < vfid + num_vfs; n++) {
591 err = pf_provision_vf_ggtt(gt, n, size);
592 if (err)
593 break;
594 }
595 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
596
597 return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
598 xe_gt_sriov_pf_config_get_ggtt,
599 "GGTT", n, err);
600}
601
602/* Return: size of the largest contiguous GGTT region */
603static u64 pf_get_max_ggtt(struct xe_gt *gt)
604{
605 struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
606 u64 alignment = pf_get_ggtt_alignment(gt);
607 u64 spare = pf_get_spare_ggtt(gt);
608 u64 max_hole;
609
610 max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare);
611
612 xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n",
613 max_hole / SZ_1K, spare / SZ_1K);
614 return max_hole > spare ? max_hole - spare : 0;
615}
616
617static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
618{
619 u64 available = pf_get_max_ggtt(gt);
620 u64 alignment = pf_get_ggtt_alignment(gt);
621 u64 fair;
622
623 /*
624 * To simplify the logic we only look at the single largest GGTT region,
625 * as that will always be the best fit for the 1 VF case, and will most
626 * likely also nicely cover other cases where VFs are provisioned on a
627 * fresh and idle PF driver, without any stale GGTT allocations spread
628 * in the middle of the full GGTT range.
629 */
630
631 fair = div_u64(available, num_vfs);
632 fair = ALIGN_DOWN(fair, alignment);
633 xe_gt_sriov_dbg_verbose(gt, "GGTT available(%lluK) fair(%u x %lluK)\n",
634 available / SZ_1K, num_vfs, fair / SZ_1K);
635 return fair;
636}
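/*
 * Example (illustrative numbers only): with 1 GiB available in the largest
 * hole and num_vfs = 2, the estimated fair share is 512 MiB per VF, which
 * already satisfies the 4K/64K GGTT alignment used above.
 */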
637
638/**
639 * xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
640 * @gt: the &xe_gt (can't be media)
641 * @vfid: starting VF identifier (can't be 0)
642 * @num_vfs: number of VFs to provision
643 *
644 * This function can only be called on PF.
645 *
646 * Return: 0 on success or a negative error code on failure.
647 */
648int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
649 unsigned int num_vfs)
650{
651 u64 fair;
652
653 xe_gt_assert(gt, vfid);
654 xe_gt_assert(gt, num_vfs);
655 xe_gt_assert(gt, !xe_gt_is_media_type(gt));
656
657 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
658 fair = pf_estimate_fair_ggtt(gt, num_vfs);
659 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
660
661 if (!fair)
662 return -ENOSPC;
663
664 return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
665}
666
667static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
668{
669 /* XXX: preliminary */
670 return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ?
671 hweight64(gt->info.engine_mask) : SZ_256;
672}
673
674static u32 pf_get_spare_ctxs(struct xe_gt *gt)
675{
676 u32 spare;
677
678 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
679 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
680
681 spare = gt->sriov.pf.spare.num_ctxs;
682 spare = max_t(u32, spare, pf_get_min_spare_ctxs(gt));
683
684 return spare;
685}
686
687static int pf_set_spare_ctxs(struct xe_gt *gt, u32 spare)
688{
689 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
690 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
691
692 if (spare > GUC_ID_MAX)
693 return -EINVAL;
694
695 if (spare && spare < pf_get_min_spare_ctxs(gt))
696 return -EINVAL;
697
698 gt->sriov.pf.spare.num_ctxs = spare;
699
700 return 0;
701}
702
703/* Return: start ID or negative error code on failure */
704static int pf_reserve_ctxs(struct xe_gt *gt, u32 num)
705{
706 struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
707 unsigned int spare = pf_get_spare_ctxs(gt);
708
709 return xe_guc_id_mgr_reserve(idm, num, spare);
710}
711
712static void pf_release_ctxs(struct xe_gt *gt, u32 start, u32 num)
713{
714 struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
715
716 if (num)
717 xe_guc_id_mgr_release(idm, start, num);
718}
719
720static void pf_release_config_ctxs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
721{
722 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
723
724 pf_release_ctxs(gt, config->begin_ctx, config->num_ctxs);
725 config->begin_ctx = 0;
726 config->num_ctxs = 0;
727}
728
729static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
730{
731 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
732 int ret;
733
734 xe_gt_assert(gt, vfid);
735
736 if (num_ctxs > GUC_ID_MAX)
737 return -EINVAL;
738
739 if (config->num_ctxs) {
740 ret = pf_push_vf_cfg_ctxs(gt, vfid, 0, 0);
741 if (unlikely(ret))
742 return ret;
743
744 pf_release_config_ctxs(gt, config);
745 }
746
747 if (!num_ctxs)
748 return 0;
749
750 ret = pf_reserve_ctxs(gt, num_ctxs);
751 if (unlikely(ret < 0))
752 return ret;
753
754 config->begin_ctx = ret;
755 config->num_ctxs = num_ctxs;
756
757 ret = pf_push_vf_cfg_ctxs(gt, vfid, config->begin_ctx, config->num_ctxs);
758 if (unlikely(ret)) {
759 pf_release_config_ctxs(gt, config);
760 return ret;
761 }
762
763 xe_gt_sriov_dbg_verbose(gt, "VF%u contexts %u-%u\n",
764 vfid, config->begin_ctx, config->begin_ctx + config->num_ctxs - 1);
765 return 0;
766}
767
768static u32 pf_get_vf_config_ctxs(struct xe_gt *gt, unsigned int vfid)
769{
770 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
771
772 return config->num_ctxs;
773}
774
775/**
776 * xe_gt_sriov_pf_config_get_ctxs - Get VF's GuC context IDs quota.
777 * @gt: the &xe_gt
778 * @vfid: the VF identifier
779 *
780 * This function can only be called on PF.
781 * If &vfid represents the PF, then the number of PF's spare GuC context IDs is returned.
782 *
783 * Return: VF's quota (or PF's spare).
784 */
785u32 xe_gt_sriov_pf_config_get_ctxs(struct xe_gt *gt, unsigned int vfid)
786{
787 u32 num_ctxs;
788
789 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
790 if (vfid)
791 num_ctxs = pf_get_vf_config_ctxs(gt, vfid);
792 else
793 num_ctxs = pf_get_spare_ctxs(gt);
794 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
795
796 return num_ctxs;
797}
798
799static const char *no_unit(u32 unused)
800{
801 return "";
802}
803
804static const char *spare_unit(u32 unused)
805{
806 return " spare";
807}
808
809static int pf_config_set_u32_done(struct xe_gt *gt, unsigned int vfid, u32 value, u32 actual,
810 const char *what, const char *(*unit)(u32), int err)
811{
812 char name[8];
813
814 xe_sriov_function_name(vfid, name, sizeof(name));
815
816 if (unlikely(err)) {
817 xe_gt_sriov_notice(gt, "Failed to provision %s with %u%s %s (%pe)\n",
818 name, value, unit(value), what, ERR_PTR(err));
819 xe_gt_sriov_info(gt, "%s provisioning remains at %u%s %s\n",
820 name, actual, unit(actual), what);
821 return err;
822 }
823
824 /* the actual value may have changed during provisioning */
825 xe_gt_sriov_info(gt, "%s provisioned with %u%s %s\n",
826 name, actual, unit(actual), what);
827 return 0;
828}
829
830/**
831 * xe_gt_sriov_pf_config_set_ctxs - Configure GuC context IDs quota for the VF.
832 * @gt: the &xe_gt
833 * @vfid: the VF identifier
834 * @num_ctxs: requested number of GuC context IDs (0 to release)
835 *
836 * This function can only be called on PF.
837 *
838 * Return: 0 on success or a negative error code on failure.
839 */
840int xe_gt_sriov_pf_config_set_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctxs)
841{
842 int err;
843
844 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
845 if (vfid)
846 err = pf_provision_vf_ctxs(gt, vfid, num_ctxs);
847 else
848 err = pf_set_spare_ctxs(gt, num_ctxs);
849 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
850
851 return pf_config_set_u32_done(gt, vfid, num_ctxs,
852 xe_gt_sriov_pf_config_get_ctxs(gt, vfid),
853 "GuC context IDs", vfid ? no_unit : spare_unit, err);
854}
855
856static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, unsigned int num_vfs,
857 u32 value, u32 (*get)(struct xe_gt*, unsigned int),
858 const char *what, const char *(*unit)(u32),
859 unsigned int last, int err)
860{
861 xe_gt_assert(gt, first);
862 xe_gt_assert(gt, num_vfs);
863 xe_gt_assert(gt, first <= last);
864
865 if (num_vfs == 1)
866 return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);
867
868 if (unlikely(err)) {
869 xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
870 first, first + num_vfs - 1, what);
871 if (last > first)
872 pf_config_bulk_set_u32_done(gt, first, last - first, value,
873 get, what, unit, last, 0);
874 return pf_config_set_u32_done(gt, last, value, get(gt, last), what, unit, err);
875 }
876
877 /* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
878 value = get(gt, first);
879 xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
880 first, first + num_vfs - 1, value, unit(value), what);
881 return 0;
882}
883
884/**
885 * xe_gt_sriov_pf_config_bulk_set_ctxs - Provision many VFs with GuC context IDs.
886 * @gt: the &xe_gt
887 * @vfid: starting VF identifier
888 * @num_vfs: number of VFs to provision
889 * @num_ctxs: requested number of GuC context IDs (0 to release)
890 *
891 * This function can only be called on PF.
892 *
893 * Return: 0 on success or a negative error code on failure.
894 */
895int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
896 unsigned int num_vfs, u32 num_ctxs)
897{
898 unsigned int n;
899 int err = 0;
900
901 xe_gt_assert(gt, vfid);
902
903 if (!num_vfs)
904 return 0;
905
906 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
907 for (n = vfid; n < vfid + num_vfs; n++) {
908 err = pf_provision_vf_ctxs(gt, n, num_ctxs);
909 if (err)
910 break;
911 }
912 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
913
914 return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_ctxs,
915 xe_gt_sriov_pf_config_get_ctxs,
916 "GuC context IDs", no_unit, n, err);
917}
918
919static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
920{
921 struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
922 u32 spare = pf_get_spare_ctxs(gt);
923 u32 fair = (idm->total - spare) / num_vfs;
924 int ret;
925
926 for (; fair; --fair) {
927 ret = xe_guc_id_mgr_reserve(idm, fair * num_vfs, spare);
928 if (ret < 0)
929 continue;
930 xe_guc_id_mgr_release(idm, ret, fair * num_vfs);
931 break;
932 }
933
934 xe_gt_sriov_dbg_verbose(gt, "contexts fair(%u x %u)\n", num_vfs, fair);
935 return fair;
936}
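/*
 * The loop above starts from the ideal share (total IDs minus spare, divided
 * by num_vfs) and keeps shrinking it until a trial reservation of
 * fair * num_vfs context IDs succeeds, to account for fragmentation of the
 * ID space; the trial reservation is released immediately.
 */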
937
938/**
939 * xe_gt_sriov_pf_config_set_fair_ctxs - Provision many VFs with fair GuC context IDs.
940 * @gt: the &xe_gt
941 * @vfid: starting VF identifier (can't be 0)
942 * @num_vfs: number of VFs to provision (can't be 0)
943 *
944 * This function can only be called on PF.
945 *
946 * Return: 0 on success or a negative error code on failure.
947 */
948int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
949 unsigned int num_vfs)
950{
951 u32 fair;
952
953 xe_gt_assert(gt, vfid);
954 xe_gt_assert(gt, num_vfs);
955
956 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
957 fair = pf_estimate_fair_ctxs(gt, num_vfs);
958 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
959
960 if (!fair)
961 return -ENOSPC;
962
963 return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
964}
965
966static u32 pf_get_min_spare_dbs(struct xe_gt *gt)
967{
968 /* XXX: preliminary, we don't use doorbells yet! */
969 return IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 1 : 0;
970}
971
972static u32 pf_get_spare_dbs(struct xe_gt *gt)
973{
974 u32 spare;
975
976 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
977 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
978
979 spare = gt->sriov.pf.spare.num_dbs;
980 spare = max_t(u32, spare, pf_get_min_spare_dbs(gt));
981
982 return spare;
983}
984
985static int pf_set_spare_dbs(struct xe_gt *gt, u32 spare)
986{
987 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
988 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
989
990 if (spare > GUC_NUM_DOORBELLS)
991 return -EINVAL;
992
993 if (spare && spare < pf_get_min_spare_dbs(gt))
994 return -EINVAL;
995
996 gt->sriov.pf.spare.num_dbs = spare;
997 return 0;
998}
999
1000/* Return: start ID or negative error code on failure */
1001static int pf_reserve_dbs(struct xe_gt *gt, u32 num)
1002{
1003 struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
1004 unsigned int spare = pf_get_spare_dbs(gt);
1005
1006 return xe_guc_db_mgr_reserve_range(dbm, num, spare);
1007}
1008
1009static void pf_release_dbs(struct xe_gt *gt, u32 start, u32 num)
1010{
1011 struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
1012
1013 if (num)
1014 xe_guc_db_mgr_release_range(dbm, start, num);
1015}
1016
1017static void pf_release_config_dbs(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1018{
1019 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1020
1021 pf_release_dbs(gt, config->begin_db, config->num_dbs);
1022 config->begin_db = 0;
1023 config->num_dbs = 0;
1024}
1025
1026static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
1027{
1028 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1029 int ret;
1030
1031 xe_gt_assert(gt, vfid);
1032
1033 if (num_dbs > GUC_NUM_DOORBELLS)
1034 return -EINVAL;
1035
1036 if (config->num_dbs) {
1037 ret = pf_push_vf_cfg_dbs(gt, vfid, 0, 0);
1038 if (unlikely(ret))
1039 return ret;
1040
1041 pf_release_config_dbs(gt, config);
1042 }
1043
1044 if (!num_dbs)
1045 return 0;
1046
1047 ret = pf_reserve_dbs(gt, num_dbs);
1048 if (unlikely(ret < 0))
1049 return ret;
1050
1051 config->begin_db = ret;
1052 config->num_dbs = num_dbs;
1053
1054 ret = pf_push_vf_cfg_dbs(gt, vfid, config->begin_db, config->num_dbs);
1055 if (unlikely(ret)) {
1056 pf_release_config_dbs(gt, config);
1057 return ret;
1058 }
1059
1060 xe_gt_sriov_dbg_verbose(gt, "VF%u doorbells %u-%u\n",
1061 vfid, config->begin_db, config->begin_db + config->num_dbs - 1);
1062 return 0;
1063}
1064
1065static u32 pf_get_vf_config_dbs(struct xe_gt *gt, unsigned int vfid)
1066{
1067 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1068
1069 return config->num_dbs;
1070}
1071
1072/**
1073 * xe_gt_sriov_pf_config_get_dbs - Get VF's GuC doorbell IDs quota.
1074 * @gt: the &xe_gt
1075 * @vfid: the VF identifier
1076 *
1077 * This function can only be called on PF.
1078 * If &vfid represents the PF, then the number of PF's spare GuC doorbell IDs is returned.
1079 *
1080 * Return: VF's quota (or PF's spare).
1081 */
1082u32 xe_gt_sriov_pf_config_get_dbs(struct xe_gt *gt, unsigned int vfid)
1083{
1084 u32 num_dbs;
1085
1086 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1087 xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
1088
1089 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1090 if (vfid)
1091 num_dbs = pf_get_vf_config_dbs(gt, vfid);
1092 else
1093 num_dbs = pf_get_spare_dbs(gt);
1094 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1095
1096 return num_dbs;
1097}
1098
1099/**
1100 * xe_gt_sriov_pf_config_set_dbs - Configure GuC doorbell IDs quota for the VF.
1101 * @gt: the &xe_gt
1102 * @vfid: the VF identifier
1103 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
1104 *
1105 * This function can only be called on PF.
1106 *
1107 * Return: 0 on success or a negative error code on failure.
1108 */
1109int xe_gt_sriov_pf_config_set_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
1110{
1111 int err;
1112
1113 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1114 xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
1115
1116 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1117 if (vfid)
1118 err = pf_provision_vf_dbs(gt, vfid, num_dbs);
1119 else
1120 err = pf_set_spare_dbs(gt, num_dbs);
1121 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1122
1123 return pf_config_set_u32_done(gt, vfid, num_dbs,
1124 xe_gt_sriov_pf_config_get_dbs(gt, vfid),
1125 "GuC doorbell IDs", vfid ? no_unit : spare_unit, err);
1126}
1127
1128/**
1129 * xe_gt_sriov_pf_config_bulk_set_dbs - Provision many VFs with GuC doorbell IDs.
1130 * @gt: the &xe_gt
1131 * @vfid: starting VF identifier (can't be 0)
1132 * @num_vfs: number of VFs to provision
1133 * @num_dbs: requested number of GuC doorbell IDs (0 to release)
1134 *
1135 * This function can only be called on PF.
1136 *
1137 * Return: 0 on success or a negative error code on failure.
1138 */
1139int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
1140 unsigned int num_vfs, u32 num_dbs)
1141{
1142 unsigned int n;
1143 int err = 0;
1144
1145 xe_gt_assert(gt, vfid);
1146
1147 if (!num_vfs)
1148 return 0;
1149
1150 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1151 for (n = vfid; n < vfid + num_vfs; n++) {
1152 err = pf_provision_vf_dbs(gt, n, num_dbs);
1153 if (err)
1154 break;
1155 }
1156 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1157
1158 return pf_config_bulk_set_u32_done(gt, vfid, num_vfs, num_dbs,
1159 xe_gt_sriov_pf_config_get_dbs,
1160 "GuC doorbell IDs", no_unit, n, err);
1161}
1162
1163static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
1164{
1165 struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
1166 u32 spare = pf_get_spare_dbs(gt);
1167 u32 fair = (GUC_NUM_DOORBELLS - spare) / num_vfs;
1168 int ret;
1169
1170 for (; fair; --fair) {
1171 ret = xe_guc_db_mgr_reserve_range(dbm, fair * num_vfs, spare);
1172 if (ret < 0)
1173 continue;
1174 xe_guc_db_mgr_release_range(dbm, ret, fair * num_vfs);
1175 break;
1176 }
1177
1178 xe_gt_sriov_dbg_verbose(gt, "doorbells fair(%u x %u)\n", num_vfs, fair);
1179 return fair;
1180}
1181
1182/**
1183 * xe_gt_sriov_pf_config_set_fair_dbs - Provision many VFs with fair GuC doorbell IDs.
1184 * @gt: the &xe_gt
1185 * @vfid: starting VF identifier (can't be 0)
1186 * @num_vfs: number of VFs to provision (can't be 0)
1187 *
1188 * This function can only be called on PF.
1189 *
1190 * Return: 0 on success or a negative error code on failure.
1191 */
1192int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
1193 unsigned int num_vfs)
1194{
1195 u32 fair;
1196
1197 xe_gt_assert(gt, vfid);
1198 xe_gt_assert(gt, num_vfs);
1199
1200 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1201 fair = pf_estimate_fair_dbs(gt, num_vfs);
1202 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1203
1204 if (!fair)
1205 return -ENOSPC;
1206
1207 return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
1208}
1209
1210static u64 pf_get_lmem_alignment(struct xe_gt *gt)
1211{
1212 /* this might be platform dependent */
1213 return SZ_2M;
1214}
1215
1216static u64 pf_get_min_spare_lmem(struct xe_gt *gt)
1217{
1218 /* this might be platform dependent */
1219 return SZ_128M; /* XXX: preliminary */
1220}
1221
1222static u64 pf_get_spare_lmem(struct xe_gt *gt)
1223{
1224 u64 spare;
1225
1226 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1227 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1228
1229 spare = gt->sriov.pf.spare.lmem_size;
1230 spare = max_t(u64, spare, pf_get_min_spare_lmem(gt));
1231
1232 return spare;
1233}
1234
1235static int pf_set_spare_lmem(struct xe_gt *gt, u64 size)
1236{
1237 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
1238 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1239
1240 if (size && size < pf_get_min_spare_lmem(gt))
1241 return -EINVAL;
1242
1243 gt->sriov.pf.spare.lmem_size = size;
1244 return 0;
1245}
1246
1247static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid)
1248{
1249 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1250 struct xe_bo *bo;
1251
1252 bo = config->lmem_obj;
1253 return bo ? bo->size : 0;
1254}
1255
1256static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1257{
1258 struct xe_device *xe = gt_to_xe(gt);
1259 struct xe_tile *tile;
1260 unsigned int tid;
1261 int err;
1262
1263 for_each_tile(tile, xe, tid) {
1264 if (tile->primary_gt == gt) {
1265 err = pf_push_vf_cfg_lmem(gt, vfid, size);
1266 } else {
1267 u64 lmem = pf_get_vf_config_lmem(tile->primary_gt, vfid);
1268
1269 if (!lmem)
1270 continue;
1271 err = pf_push_vf_cfg_lmem(gt, vfid, lmem);
1272 }
1273 if (unlikely(err))
1274 return err;
1275 }
1276 return 0;
1277}
1278
1279static void pf_force_lmtt_invalidate(struct xe_device *xe)
1280{
1281 /* TODO */
1282}
1283
1284static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
1285{
1286 struct xe_lmtt *lmtt;
1287 struct xe_tile *tile;
1288 unsigned int tid;
1289
1290 xe_assert(xe, IS_DGFX(xe));
1291 xe_assert(xe, IS_SRIOV_PF(xe));
1292
1293 for_each_tile(tile, xe, tid) {
1294 lmtt = &tile->sriov.pf.lmtt;
1295 xe_lmtt_drop_pages(lmtt, vfid);
1296 }
1297}
1298
1299static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
1300{
1301 struct xe_gt_sriov_config *config;
1302 struct xe_tile *tile;
1303 struct xe_lmtt *lmtt;
1304 struct xe_bo *bo;
1305 struct xe_gt *gt;
1306 u64 total, offset;
1307 unsigned int gtid;
1308 unsigned int tid;
1309 int err;
1310
1311 xe_assert(xe, IS_DGFX(xe));
1312 xe_assert(xe, IS_SRIOV_PF(xe));
1313
1314 total = 0;
1315 for_each_tile(tile, xe, tid)
1316 total += pf_get_vf_config_lmem(tile->primary_gt, vfid);
1317
1318 for_each_tile(tile, xe, tid) {
1319 lmtt = &tile->sriov.pf.lmtt;
1320
1321 xe_lmtt_drop_pages(lmtt, vfid);
1322 if (!total)
1323 continue;
1324
1325 err = xe_lmtt_prepare_pages(lmtt, vfid, total);
1326 if (err)
1327 goto fail;
1328
1329 offset = 0;
1330 for_each_gt(gt, xe, gtid) {
1331 if (xe_gt_is_media_type(gt))
1332 continue;
1333
1334 config = pf_pick_vf_config(gt, vfid);
1335 bo = config->lmem_obj;
1336 if (!bo)
1337 continue;
1338
1339 err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset);
1340 if (err)
1341 goto fail;
1342 offset += bo->size;
1343 }
1344 }
1345
1346 pf_force_lmtt_invalidate(xe);
1347 return 0;
1348
1349fail:
1350 for_each_tile(tile, xe, tid) {
1351 lmtt = &tile->sriov.pf.lmtt;
1352 xe_lmtt_drop_pages(lmtt, vfid);
1353 }
1354 return err;
1355}
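/*
 * The update above sums the VF's LMEM quota across all tiles, rebuilds the
 * LMTT of every tile for that total, and then maps each primary-GT LMEM
 * object at increasing offsets; on any failure the partially built pages are
 * dropped on all tiles.
 */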
1356
1357static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1358{
1359 xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
1360 xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1361 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1362
1363 if (config->lmem_obj) {
1364 xe_bo_unpin_map_no_vm(config->lmem_obj);
1365 config->lmem_obj = NULL;
1366 }
1367}
1368
1369static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1370{
1371 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1372 struct xe_device *xe = gt_to_xe(gt);
1373 struct xe_tile *tile = gt_to_tile(gt);
1374 struct xe_bo *bo;
1375 int err;
1376
1377 xe_gt_assert(gt, vfid);
1378 xe_gt_assert(gt, IS_DGFX(xe));
1379 xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1380
1381 size = round_up(size, pf_get_lmem_alignment(gt));
1382
1383 if (config->lmem_obj) {
1384 err = pf_distribute_config_lmem(gt, vfid, 0);
1385 if (unlikely(err))
1386 return err;
1387
1388 pf_reset_vf_lmtt(xe, vfid);
1389 pf_release_vf_config_lmem(gt, config);
1390 }
1391 xe_gt_assert(gt, !config->lmem_obj);
1392
1393 if (!size)
1394 return 0;
1395
1396 xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
1397 bo = xe_bo_create_pin_map(xe, tile, NULL,
1398 ALIGN(size, PAGE_SIZE),
1399 ttm_bo_type_kernel,
1400 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
1401 XE_BO_FLAG_NEEDS_2M |
1402 XE_BO_FLAG_PINNED);
1403 if (IS_ERR(bo))
1404 return PTR_ERR(bo);
1405
1406 config->lmem_obj = bo;
1407
1408 err = pf_update_vf_lmtt(xe, vfid);
1409 if (unlikely(err))
1410 goto release;
1411
1412 err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
1413 if (unlikely(err))
1414 goto reset_lmtt;
1415
1416 xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n",
1417 vfid, bo->size, bo->size / SZ_1M);
1418 return 0;
1419
1420reset_lmtt:
1421 pf_reset_vf_lmtt(xe, vfid);
1422release:
1423 pf_release_vf_config_lmem(gt, config);
1424 return err;
1425}
1426
1427/**
1428 * xe_gt_sriov_pf_config_get_lmem - Get VF's LMEM quota.
1429 * @gt: the &xe_gt
1430 * @vfid: the VF identifier
1431 *
1432 * This function can only be called on PF.
1433 *
1434 * Return: VF's (or PF's spare) LMEM quota.
1435 */
1436u64 xe_gt_sriov_pf_config_get_lmem(struct xe_gt *gt, unsigned int vfid)
1437{
1438 u64 size;
1439
1440 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1441 if (vfid)
1442 size = pf_get_vf_config_lmem(gt, vfid);
1443 else
1444 size = pf_get_spare_lmem(gt);
1445 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1446
1447 return size;
1448}
1449
1450/**
1451 * xe_gt_sriov_pf_config_set_lmem - Provision VF with LMEM.
1452 * @gt: the &xe_gt (can't be media)
1453 * @vfid: the VF identifier
1454 * @size: requested LMEM size
1455 *
1456 * This function can only be called on PF.
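 *
 * Return: 0 on success or a negative error code on failure.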
1457 */
1458int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
1459{
1460 int err;
1461
1462 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1463 if (vfid)
1464 err = pf_provision_vf_lmem(gt, vfid, size);
1465 else
1466 err = pf_set_spare_lmem(gt, size);
1467 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1468
1469 return pf_config_set_u64_done(gt, vfid, size,
1470 xe_gt_sriov_pf_config_get_lmem(gt, vfid),
1471 vfid ? "LMEM" : "spare LMEM", err);
1472}
1473
1474/**
1475 * xe_gt_sriov_pf_config_bulk_set_lmem - Provision many VFs with LMEM.
1476 * @gt: the &xe_gt (can't be media)
1477 * @vfid: starting VF identifier (can't be 0)
1478 * @num_vfs: number of VFs to provision
1479 * @size: requested LMEM size
1480 *
1481 * This function can only be called on PF.
1482 *
1483 * Return: 0 on success or a negative error code on failure.
1484 */
1485int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
1486 unsigned int num_vfs, u64 size)
1487{
1488 unsigned int n;
1489 int err = 0;
1490
1491 xe_gt_assert(gt, vfid);
1492 xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1493
1494 if (!num_vfs)
1495 return 0;
1496
1497 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1498 for (n = vfid; n < vfid + num_vfs; n++) {
1499 err = pf_provision_vf_lmem(gt, n, size);
1500 if (err)
1501 break;
1502 }
1503 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1504
1505 return pf_config_bulk_set_u64_done(gt, vfid, num_vfs, size,
1506 xe_gt_sriov_pf_config_get_lmem,
1507 "LMEM", n, err);
1508}
1509
1510static u64 pf_query_free_lmem(struct xe_gt *gt)
1511{
1512 struct xe_tile *tile = gt->tile;
1513
1514 return xe_ttm_vram_get_avail(&tile->mem.vram_mgr->manager);
1515}
1516
1517static u64 pf_query_max_lmem(struct xe_gt *gt)
1518{
1519 u64 alignment = pf_get_lmem_alignment(gt);
1520 u64 spare = pf_get_spare_lmem(gt);
1521 u64 free = pf_query_free_lmem(gt);
1522 u64 avail;
1523
1524 /* XXX: need to account for 2MB blocks only */
1525 avail = free > spare ? free - spare : 0;
1526 avail = round_down(avail, alignment);
1527
1528 return avail;
1529}
1530
1531#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
1532#define MAX_FAIR_LMEM SZ_128M /* XXX: make it small for the driver bringup */
1533#else
1534#define MAX_FAIR_LMEM SZ_2G /* XXX: known issue with allocating BO over 2GiB */
1535#endif
1536
1537static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
1538{
1539 u64 available = pf_query_max_lmem(gt);
1540 u64 alignment = pf_get_lmem_alignment(gt);
1541 u64 fair;
1542
1543 fair = div_u64(available, num_vfs);
1544 fair = rounddown_pow_of_two(fair); /* XXX: ttm_vram_mgr & drm_buddy limitation */
1545 fair = ALIGN_DOWN(fair, alignment);
1546#ifdef MAX_FAIR_LMEM
1547 fair = min_t(u64, MAX_FAIR_LMEM, fair);
1548#endif
1549 xe_gt_sriov_dbg_verbose(gt, "LMEM available(%lluM) fair(%u x %lluM)\n",
1550 available / SZ_1M, num_vfs, fair / SZ_1M);
1551 return fair;
1552}
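/*
 * Example (illustrative numbers, non-debug build): with 10 GiB available and
 * num_vfs = 3, the raw share of ~3.3 GiB is rounded down to the power of two
 * 2 GiB, which is already 2M aligned and equal to MAX_FAIR_LMEM, so each VF
 * would be offered 2 GiB.
 */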
1553
1554/**
1555 * xe_gt_sriov_pf_config_set_fair_lmem - Provision many VFs with fair LMEM.
1556 * @gt: the &xe_gt (can't be media)
1557 * @vfid: starting VF identifier (can't be 0)
1558 * @num_vfs: number of VFs to provision (can't be 0)
1559 *
1560 * This function can only be called on PF.
1561 *
1562 * Return: 0 on success or a negative error code on failure.
1563 */
1564int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
1565 unsigned int num_vfs)
1566{
1567 u64 fair;
1568
1569 xe_gt_assert(gt, vfid);
1570 xe_gt_assert(gt, num_vfs);
1571 xe_gt_assert(gt, !xe_gt_is_media_type(gt));
1572
1573 if (!IS_DGFX(gt_to_xe(gt)))
1574 return 0;
1575
1576 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1577 fair = pf_estimate_fair_lmem(gt, num_vfs);
1578 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1579
1580 if (!fair)
1581 return -ENOSPC;
1582
1583 return xe_gt_sriov_pf_config_bulk_set_lmem(gt, vfid, num_vfs, fair);
1584}
1585
1586/**
1587 * xe_gt_sriov_pf_config_set_fair - Provision many VFs with fair resources.
1588 * @gt: the &xe_gt
1589 * @vfid: starting VF identifier (can't be 0)
1590 * @num_vfs: number of VFs to provision (can't be 0)
1591 *
1592 * This function can only be called on PF.
1593 *
1594 * Return: 0 on success or a negative error code on failure.
1595 */
1596int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid,
1597 unsigned int num_vfs)
1598{
1599 int result = 0;
1600 int err;
1601
1602 xe_gt_assert(gt, vfid);
1603 xe_gt_assert(gt, num_vfs);
1604
1605 if (!xe_gt_is_media_type(gt)) {
1606 err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs);
1607 result = result ?: err;
1608 err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs);
1609 result = result ?: err;
1610 }
1611 err = xe_gt_sriov_pf_config_set_fair_ctxs(gt, vfid, num_vfs);
1612 result = result ?: err;
1613 err = xe_gt_sriov_pf_config_set_fair_dbs(gt, vfid, num_vfs);
1614 result = result ?: err;
1615
1616 return result;
1617}
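/*
 * Minimal usage sketch (hypothetical caller, for illustration only): once the
 * number of VFs to enable is known, fair provisioning would typically be
 * requested for every GT, e.g.:
 *
 *	for_each_gt(gt, xe, id) {
 *		err = xe_gt_sriov_pf_config_set_fair(gt, 1, num_vfs);
 *		if (err)
 *			break;
 *	}
 */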
1618
1619static const char *exec_quantum_unit(u32 exec_quantum)
1620{
1621 return exec_quantum ? "ms" : "(infinity)";
1622}
1623
1624static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1625 u32 exec_quantum)
1626{
1627 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1628 int err;
1629
1630 err = pf_push_vf_cfg_exec_quantum(gt, vfid, &exec_quantum);
1631 if (unlikely(err))
1632 return err;
1633
1634 config->exec_quantum = exec_quantum;
1635 return 0;
1636}
1637
1638static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1639{
1640 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1641
1642 return config->exec_quantum;
1643}
1644
1645/**
1646 * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
1647 * @gt: the &xe_gt
1648 * @vfid: the VF identifier
1649 * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
1650 *
1651 * This function can only be called on PF.
1652 *
1653 * Return: 0 on success or a negative error code on failure.
1654 */
1655int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
1656 u32 exec_quantum)
1657{
1658 int err;
1659
1660 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1661 err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
1662 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1663
1664 return pf_config_set_u32_done(gt, vfid, exec_quantum,
1665 xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
1666 "execution quantum", exec_quantum_unit, err);
1667}
1668
1669/**
1670 * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
1671 * @gt: the &xe_gt
1672 * @vfid: the VF identifier
1673 *
1674 * This function can only be called on PF.
1675 *
1676 * Return: VF's (or PF's) execution quantum in milliseconds.
1677 */
1678u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
1679{
1680 u32 exec_quantum;
1681
1682 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1683 exec_quantum = pf_get_exec_quantum(gt, vfid);
1684 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1685
1686 return exec_quantum;
1687}
1688
1689static const char *preempt_timeout_unit(u32 preempt_timeout)
1690{
1691 return preempt_timeout ? "us" : "(infinity)";
1692}
1693
1694static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1695 u32 preempt_timeout)
1696{
1697 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1698 int err;
1699
1700 err = pf_push_vf_cfg_preempt_timeout(gt, vfid, &preempt_timeout);
1701 if (unlikely(err))
1702 return err;
1703
1704 config->preempt_timeout = preempt_timeout;
1705
1706 return 0;
1707}
1708
1709static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1710{
1711 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1712
1713 return config->preempt_timeout;
1714}
1715
1716/**
1717 * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
1718 * @gt: the &xe_gt
1719 * @vfid: the VF identifier
1720 * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
1721 *
1722 * This function can only be called on PF.
1723 *
1724 * Return: 0 on success or a negative error code on failure.
1725 */
1726int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
1727 u32 preempt_timeout)
1728{
1729 int err;
1730
1731 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1732 err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
1733 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1734
1735 return pf_config_set_u32_done(gt, vfid, preempt_timeout,
1736 xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
1737 "preemption timeout", preempt_timeout_unit, err);
1738}
1739
1740/**
1741 * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
1742 * @gt: the &xe_gt
1743 * @vfid: the VF identifier
1744 *
1745 * This function can only be called on PF.
1746 *
1747 * Return: VF's (or PF's) preemption timeout in microseconds.
1748 */
1749u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
1750{
1751 u32 preempt_timeout;
1752
1753 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1754 preempt_timeout = pf_get_preempt_timeout(gt, vfid);
1755 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1756
1757 return preempt_timeout;
1758}
1759
1760static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1761{
1762 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1763
1764 config->exec_quantum = 0;
1765 config->preempt_timeout = 0;
1766}
1767
1768static int pf_provision_threshold(struct xe_gt *gt, unsigned int vfid,
1769 enum xe_guc_klv_threshold_index index, u32 value)
1770{
1771 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1772 int err;
1773
1774 err = pf_push_vf_cfg_threshold(gt, vfid, index, value);
1775 if (unlikely(err))
1776 return err;
1777
1778 config->thresholds[index] = value;
1779
1780 return 0;
1781}
1782
1783static int pf_get_threshold(struct xe_gt *gt, unsigned int vfid,
1784 enum xe_guc_klv_threshold_index index)
1785{
1786 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1787
1788 return config->thresholds[index];
1789}
1790
1791static const char *threshold_unit(u32 threshold)
1792{
1793 return threshold ? "" : "(disabled)";
1794}
1795
1796/**
1797 * xe_gt_sriov_pf_config_set_threshold - Configure threshold for the VF.
1798 * @gt: the &xe_gt
1799 * @vfid: the VF identifier
1800 * @index: the threshold index
1801 * @value: requested value (0 means disabled)
1802 *
1803 * This function can only be called on PF.
1804 *
1805 * Return: 0 on success or a negative error code on failure.
1806 */
1807int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
1808 enum xe_guc_klv_threshold_index index, u32 value)
1809{
1810 u32 key = xe_guc_klv_threshold_index_to_key(index);
1811 const char *name = xe_guc_klv_key_to_string(key);
1812 int err;
1813
1814 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1815 err = pf_provision_threshold(gt, vfid, index, value);
1816 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1817
1818 return pf_config_set_u32_done(gt, vfid, value,
1819 xe_gt_sriov_pf_config_get_threshold(gt, vfid, index),
1820 name, threshold_unit, err);
1821}
1822
1823/**
1824 * xe_gt_sriov_pf_config_get_threshold - Get VF's threshold.
1825 * @gt: the &xe_gt
1826 * @vfid: the VF identifier
1827 * @index: the threshold index
1828 *
1829 * This function can only be called on PF.
1830 *
1831 * Return: value of VF's (or PF's) threshold.
1832 */
1833u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
1834 enum xe_guc_klv_threshold_index index)
1835{
1836 u32 value;
1837
1838 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1839 value = pf_get_threshold(gt, vfid, index);
1840 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1841
1842 return value;
1843}
1844
1845static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
1846{
1847 lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
1848
1849#define reset_threshold_config(TAG, ...) ({ \
1850 config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0; \
1851});
1852
1853 MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
1854#undef reset_threshold_config
1855}
1856
1857static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
1858{
1859 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1860 struct xe_device *xe = gt_to_xe(gt);
1861
1862 if (!xe_gt_is_media_type(gt)) {
1863 pf_release_vf_config_ggtt(gt, config);
1864 if (IS_DGFX(xe)) {
1865 pf_release_vf_config_lmem(gt, config);
1866 pf_update_vf_lmtt(xe, vfid);
1867 }
1868 }
1869 pf_release_config_ctxs(gt, config);
1870 pf_release_config_dbs(gt, config);
1871 pf_reset_config_sched(gt, config);
1872 pf_reset_config_thresholds(gt, config);
1873}
1874
1875/**
1876 * xe_gt_sriov_pf_config_release - Release and reset VF configuration.
1877 * @gt: the &xe_gt
1878 * @vfid: the VF identifier (can't be PF)
1879 * @force: force configuration release
1880 *
1881 * This function can only be called on PF.
1882 *
1883 * Return: 0 on success or a negative error code on failure.
1884 */
1885int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force)
1886{
1887 int err;
1888
1889 xe_gt_assert(gt, vfid);
1890
1891 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1892 err = pf_send_vf_cfg_reset(gt, vfid);
1893 if (!err || force)
1894 pf_release_vf_config(gt, vfid);
1895 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1896
1897 if (unlikely(err)) {
1898 xe_gt_sriov_notice(gt, "VF%u unprovisioning failed with error (%pe)%s\n",
1899 vfid, ERR_PTR(err),
1900 force ? " but all resources were released anyway!" : "");
1901 }
1902
1903 return force ? 0 : err;
1904}
1905
1906static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
1907{
1908 if (xe_ggtt_node_allocated(ggtt_region))
1909 xe_ggtt_assign(ggtt_region, vfid);
1910}
1911
1912static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
1913{
1914 struct xe_migrate *m = tile->migrate;
1915 struct dma_fence *fence;
1916 int err;
1917
1918 if (!bo)
1919 return 0;
1920
1921 xe_bo_lock(bo, false);
1922 fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
1923 if (IS_ERR(fence)) {
1924 err = PTR_ERR(fence);
1925 } else if (!fence) {
1926 err = -ENOMEM;
1927 } else {
1928 long ret = dma_fence_wait_timeout(fence, false, timeout);
1929
1930 err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
1931 dma_fence_put(fence);
1932 if (!err)
1933 xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %dms\n",
1934 jiffies_to_msecs(timeout - ret));
1935 }
1936 xe_bo_unlock(bo);
1937
1938 return err;
1939}
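/*
 * dma_fence_wait_timeout() returns the remaining jiffies on success, 0 on
 * timeout, or a negative error code, which the ternary above maps to 0,
 * -ETIMEDOUT and that error respectively.
 */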
1940
1941static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
1942{
1943 struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
1944 struct xe_tile *tile = gt_to_tile(gt);
1945 struct xe_device *xe = gt_to_xe(gt);
1946 int err = 0;
1947
1948 /*
1949	 * Only GGTT and LMEM need to be cleared by the PF.
1950 * GuC doorbell IDs and context IDs do not need any clearing.
1951 */
1952 if (!xe_gt_is_media_type(gt)) {
1953 pf_sanitize_ggtt(config->ggtt_region, vfid);
1954 if (IS_DGFX(xe))
1955 err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
1956 }
1957
1958 return err;
1959}
1960
1961/**
1962 * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
1963 * @gt: the &xe_gt
1964 * @vfid: the VF identifier (can't be PF)
1965 * @timeout: maximum timeout to wait for completion in jiffies
1966 *
1967 * This function can only be called on PF.
1968 *
1969 * Return: 0 on success or a negative error code on failure.
1970 */
1971int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
1972{
1973 int err;
1974
1975 xe_gt_assert(gt, vfid != PFID);
1976
1977 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
1978 err = pf_sanitize_vf_resources(gt, vfid, timeout);
1979 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
1980
1981 if (unlikely(err))
1982 xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
1983 vfid, ERR_PTR(err));
1984 return err;
1985}
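
/*
 * Illustration only: callers pass @timeout in jiffies, so a hypothetical
 * call such as xe_gt_sriov_pf_config_sanitize(gt, vfid, HZ) would wait
 * up to roughly one second for the LMEM clear to complete.
 */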
1986
1987/**
1988 * xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
1989 * @gt: the &xe_gt
1990 * @vfid: the VF identifier (can't be PF)
1991 * @refresh: explicit refresh
1992 *
1993 * This function can only be called on PF.
1994 *
1995 * Return: 0 on success or a negative error code on failure.
1996 */
1997int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh)
1998{
1999 int err = 0;
2000
2001 xe_gt_assert(gt, vfid);
2002
2003 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2004 if (refresh)
2005 err = pf_send_vf_cfg_reset(gt, vfid);
2006 if (!err)
2007 err = pf_push_full_vf_config(gt, vfid);
2008 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2009
2010 if (unlikely(err)) {
2011 xe_gt_sriov_notice(gt, "Failed to %s VF%u configuration (%pe)\n",
2012 refresh ? "refresh" : "push", vfid, ERR_PTR(err));
2013 }
2014
2015 return err;
2016}
2017
2018static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
2019{
2020 struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
2021 struct xe_device *xe = gt_to_xe(gt);
2022 bool is_primary = !xe_gt_is_media_type(gt);
2023 bool valid_ggtt, valid_ctxs, valid_dbs;
2024 bool valid_any, valid_all;
2025
2026 valid_ggtt = pf_get_vf_config_ggtt(primary_gt, vfid);
2027 valid_ctxs = pf_get_vf_config_ctxs(gt, vfid);
2028 valid_dbs = pf_get_vf_config_dbs(gt, vfid);
2029
2030 /* note that GuC doorbells are optional */
2031 valid_any = valid_ctxs || valid_dbs;
2032 valid_all = valid_ctxs;
2033
2034 /* and GGTT/LMEM is configured on primary GT only */
2035 valid_all = valid_all && valid_ggtt;
2036 valid_any = valid_any || (valid_ggtt && is_primary);
2037
2038 if (IS_DGFX(xe)) {
2039		bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
2040
2041 valid_any = valid_any || (valid_lmem && is_primary);
2042 valid_all = valid_all && valid_lmem;
2043 }
2044
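	/*
	 * 1        - all mandatory resources are provisioned,
	 * -ENOKEY  - the configuration is only partial,
	 * -ENODATA - no resources are provisioned at all.
	 */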
2045 return valid_all ? 1 : valid_any ? -ENOKEY : -ENODATA;
2046}
2047
2048/**
2049 * xe_gt_sriov_pf_config_is_empty - Check VF's configuration.
2050 * @gt: the &xe_gt
2051 * @vfid: the VF identifier (can't be PF)
2052 *
2053 * This function can only be called on PF.
2054 *
2055 * Return: true if VF mandatory configuration (GGTT, LMEM, ...) is empty.
2056 */
2057bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
2058{
2059 bool empty;
2060
2061 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2062 xe_gt_assert(gt, vfid);
2063
2064 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2065 empty = pf_validate_vf_config(gt, vfid) == -ENODATA;
2066 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2067
2068 return empty;
2069}
2070
2071/**
2072 * xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
2073 * @gt: the &xe_gt
2074 *
2075 * Any prior configurations pushed to GuC are lost when the GT is reset.
2076 * Push all non-empty VF configurations to the GuC again.
2077 *
2078 * This function can only be called on PF.
2079 */
2080void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
2081{
2082 unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2083 unsigned int fail = 0, skip = 0;
2084
2085 for (n = 1; n <= total_vfs; n++) {
2086 if (xe_gt_sriov_pf_config_is_empty(gt, n))
2087 skip++;
2088 else if (xe_gt_sriov_pf_config_push(gt, n, false))
2089 fail++;
2090 }
2091
2092 if (fail)
2093 xe_gt_sriov_notice(gt, "Failed to push %u of %u VF%s configurations\n",
2094 fail, total_vfs - skip, str_plural(total_vfs));
2095
2096 if (fail != total_vfs)
2097 xe_gt_sriov_dbg(gt, "pushed %u skip %u of %u VF%s configurations\n",
2098 total_vfs - skip - fail, skip, total_vfs, str_plural(total_vfs));
2099}
2100
2101/**
2102 * xe_gt_sriov_pf_config_print_ggtt - Print GGTT configurations.
2103 * @gt: the &xe_gt
2104 * @p: the &drm_printer
2105 *
2106 * Print GGTT configuration data for all VFs.
2107 * VFs without provisioned GGTT are ignored.
2108 *
2109 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2110 */
2111int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p)
2112{
2113 unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2114 const struct xe_gt_sriov_config *config;
2115 char buf[10];
2116
2117 for (n = 1; n <= total_vfs; n++) {
2118		config = &gt->sriov.pf.vfs[n].config;
2119 if (!xe_ggtt_node_allocated(config->ggtt_region))
2120 continue;
2121
2122 string_get_size(config->ggtt_region->base.size, 1, STRING_UNITS_2,
2123 buf, sizeof(buf));
2124 drm_printf(p, "VF%u:\t%#0llx-%#llx\t(%s)\n",
2125 n, config->ggtt_region->base.start,
2126 config->ggtt_region->base.start + config->ggtt_region->base.size - 1,
2127 buf);
2128 }
2129
2130 return 0;
2131}
2132
2133/**
2134 * xe_gt_sriov_pf_config_print_ctxs - Print GuC context IDs configurations.
2135 * @gt: the &xe_gt
2136 * @p: the &drm_printer
2137 *
2138 * Print GuC context ID allocations across all VFs.
2139 * VFs without GuC context IDs are skipped.
2140 *
2141 * This function can only be called on PF.
2142 * Return: 0 on success or a negative error code on failure.
2143 */
2144int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p)
2145{
2146 unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2147 const struct xe_gt_sriov_config *config;
2148
2149 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2150 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2151
2152 for (n = 1; n <= total_vfs; n++) {
2153		config = &gt->sriov.pf.vfs[n].config;
2154 if (!config->num_ctxs)
2155 continue;
2156
2157 drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2158 n,
2159 config->begin_ctx,
2160 config->begin_ctx + config->num_ctxs - 1,
2161 config->num_ctxs);
2162 }
2163
2164 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2165 return 0;
2166}
2167
2168/**
2169 * xe_gt_sriov_pf_config_print_dbs - Print GuC doorbell ID configurations.
2170 * @gt: the &xe_gt
2171 * @p: the &drm_printer
2172 *
2173 * Print GuC doorbell ID allocations across all VFs.
2174 * VFs without GuC doorbell IDs are skipped.
2175 *
2176 * This function can only be called on PF.
2177 * Return: 0 on success or a negative error code on failure.
2178 */
2179int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
2180{
2181 unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
2182 const struct xe_gt_sriov_config *config;
2183
2184 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2185 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2186
2187 for (n = 1; n <= total_vfs; n++) {
2188		config = &gt->sriov.pf.vfs[n].config;
2189 if (!config->num_dbs)
2190 continue;
2191
2192 drm_printf(p, "VF%u:\t%u-%u\t(%u)\n",
2193 n,
2194 config->begin_db,
2195 config->begin_db + config->num_dbs - 1,
2196 config->num_dbs);
2197 }
2198
2199 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2200 return 0;
2201}
2202
2203/**
2204 * xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
2205 * @gt: the &xe_gt
2206 * @p: the &drm_printer
2207 *
2208 * Print GGTT ranges that are available for provisioning.
2209 *
2210 * This function can only be called on PF.
 * Return: 0 on success or a negative error code on failure.
2211 */
2212int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p)
2213{
2214 struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt;
2215 u64 alignment = pf_get_ggtt_alignment(gt);
2216 u64 spare, avail, total;
2217 char buf[10];
2218
2219 xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
2220
2221 mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
2222
2223 spare = pf_get_spare_ggtt(gt);
2224 total = xe_ggtt_print_holes(ggtt, alignment, p);
2225
2226 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
2227
2228 string_get_size(total, 1, STRING_UNITS_2, buf, sizeof(buf));
2229 drm_printf(p, "total:\t%llu\t(%s)\n", total, buf);
2230
2231 string_get_size(spare, 1, STRING_UNITS_2, buf, sizeof(buf));
2232 drm_printf(p, "spare:\t%llu\t(%s)\n", spare, buf);
2233
2234 avail = total > spare ? total - spare : 0;
2235
2236 string_get_size(avail, 1, STRING_UNITS_2, buf, sizeof(buf));
2237 drm_printf(p, "avail:\t%llu\t(%s)\n", avail, buf);
2238
2239 return 0;
2240}