/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "amdgpu_drv.h"

#include <drm/drm_drv.h>
#include "../amdxcp/amdgpu_xcp_drv.h"

static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr);
static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr);

static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
			    struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
	int (*run_func)(void *handle, uint32_t inst_mask);
	int ret = 0;

	if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
		return 0;

	run_func = NULL;

	switch (xcp_state) {
	case AMDGPU_XCP_PREPARE_SUSPEND:
		run_func = xcp_ip->ip_funcs->prepare_suspend;
		break;
	case AMDGPU_XCP_SUSPEND:
		run_func = xcp_ip->ip_funcs->suspend;
		break;
	case AMDGPU_XCP_PREPARE_RESUME:
		run_func = xcp_ip->ip_funcs->prepare_resume;
		break;
	case AMDGPU_XCP_RESUME:
		run_func = xcp_ip->ip_funcs->resume;
		break;
	}

	if (run_func)
		ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);

	return ret;
}

static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				     int state)
{
	struct amdgpu_xcp_ip *xcp_ip;
	struct amdgpu_xcp *xcp;
	int i, ret;

	if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
		return -EINVAL;

	xcp = &xcp_mgr->xcp[xcp_id];
	for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
		xcp_ip = &xcp->ip[i];
		ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
		if (ret)
			break;
	}

	return ret;
}

int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_SUSPEND);
}

int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
}

int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_RESUME);
}

int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
}

static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_xcp *xcp;

	if (!ip)
		return;

	xcp = &xcp_mgr->xcp[xcp_id];
	xcp->ip[ip->ip_id] = *ip;
	xcp->ip[ip->ip_id].valid = true;

	xcp->valid = true;
}

static void __amdgpu_xcp_set_unique_id(struct amdgpu_xcp_mgr *xcp_mgr,
				       int xcp_id)
{
	struct amdgpu_xcp *xcp = &xcp_mgr->xcp[xcp_id];
	struct amdgpu_device *adev = xcp_mgr->adev;
	uint32_t inst_mask;
	uint64_t uid;
	int i;

	if (!amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask) &&
	    inst_mask) {
		i = GET_INST(GC, (ffs(inst_mask) - 1));
		uid = amdgpu_device_get_uid(adev->uid_info,
					    AMDGPU_UID_TYPE_XCD, i);
		if (uid)
			xcp->unique_id = uid;
	}
}

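/**
 * amdgpu_xcp_init - Initialize XCP instances for a partition mode
 * @xcp_mgr: XCP manager handle
 * @num_xcps: number of partitions to create
 * @mode: compute partition mode being configured
 *
 * Fills in the IP block details, memory id and unique id of each partition,
 * then rebuilds the partition-aware scheduler lists.
 *
 * Return: 0 on success, -EINVAL for an invalid partition count.
 */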
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	struct amdgpu_xcp_ip ip;
	uint8_t mem_id;
	int i, j, ret;

	if (!num_xcps || num_xcps > MAX_XCP)
		return -EINVAL;

	xcp_mgr->mode = mode;

	for (i = 0; i < MAX_XCP; ++i)
		xcp_mgr->xcp[i].valid = false;

	/* This is needed for figuring out the memory id of an xcp */
	xcp_mgr->num_xcp_per_mem_partition = num_xcps / adev->gmc.num_mem_partitions;

	for (i = 0; i < num_xcps; ++i) {
		for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
			ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
							     &ip);
			if (ret)
				continue;

			__amdgpu_xcp_add_block(xcp_mgr, i, &ip);
		}

		xcp_mgr->xcp[i].id = i;

		if (xcp_mgr->funcs->get_xcp_mem_id) {
			ret = xcp_mgr->funcs->get_xcp_mem_id(
				xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
			if (ret)
				continue;
			else
				xcp_mgr->xcp[i].mem_id = mem_id;
		}
		__amdgpu_xcp_set_unique_id(xcp_mgr, i);
	}

	xcp_mgr->num_xcps = num_xcps;
	amdgpu_xcp_update_partition_sched_list(adev);

	return 0;
}

static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					      int mode)
{
	int ret, curr_mode, num_xcps = 0;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode)
		return 0;

	mutex_lock(&xcp_mgr->xcp_lock);

	curr_mode = xcp_mgr->mode;
	/* State set to transient mode */
	xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS;

	ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);

	if (ret) {
		/* Failed, get whatever mode it's at now */
		if (xcp_mgr->funcs->query_partition_mode)
			xcp_mgr->mode = amdgpu_xcp_query_partition_mode(
				xcp_mgr, AMDGPU_XCP_FL_LOCKED);
		else
			xcp_mgr->mode = curr_mode;

		goto out;
	}
	amdgpu_xcp_sysfs_entries_update(xcp_mgr);
out:
	mutex_unlock(&xcp_mgr->xcp_lock);

	return ret;
}

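/**
 * amdgpu_xcp_switch_partition_mode - Switch to a new compute partition mode
 * @xcp_mgr: XCP manager handle
 * @mode: requested partition mode
 *
 * Return: 0 on success (or if the mode is already active), error code on
 * failure.
 */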
int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
		return -EINVAL;

	if (xcp_mgr->mode == mode)
		return 0;

	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, mode);
}

int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	if (!xcp_mgr || xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
}

static bool __amdgpu_xcp_is_cached_mode_valid(struct amdgpu_xcp_mgr *xcp_mgr)
{
	if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
		return true;

	if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
	    xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return true;

	if (xcp_mgr->mode != AMDGPU_XCP_MODE_NONE &&
	    xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS)
		return true;

	return false;
}

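/**
 * amdgpu_xcp_query_partition_mode - Query the current partition mode
 * @xcp_mgr: XCP manager handle
 * @flags: pass AMDGPU_XCP_FL_LOCKED if the caller already holds xcp_lock
 *
 * Returns the cached mode when it is known to be valid; otherwise asks the
 * hardware-specific backend and warns if the two disagree.
 */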
int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int mode;

	if (__amdgpu_xcp_is_cached_mode_valid(xcp_mgr))
		return xcp_mgr->mode;

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_lock(&xcp_mgr->xcp_lock);
	mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);

	/* First time query for VF, set the mode here */
	if (amdgpu_sriov_vf(xcp_mgr->adev) &&
	    xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		xcp_mgr->mode = mode;

	if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
		dev_WARN(xcp_mgr->adev->dev,
			 "Cached partition mode %d does not match device mode %d",
			 xcp_mgr->mode, mode);

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_unlock(&xcp_mgr->xcp_lock);

	return mode;
}

static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	struct drm_device *ddev;
	int i, ret;

	ddev = adev_to_drm(adev);

	/* xcp #0 shares the drm device setting with adev */
	adev->xcp_mgr->xcp->ddev = ddev;

	for (i = 1; i < MAX_XCP; i++) {
		ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
		if (ret == -ENOSPC) {
			dev_warn(adev->dev,
				 "Skipping xcp node #%d: out of drm node resources.", i);
			ret = 0;
			goto out;
		} else if (ret) {
			goto out;
		}

		/* Redirect all IOCTLs to the primary device */
		adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
		adev->xcp_mgr->xcp[i].pdev = p_ddev->primary->dev;
		adev->xcp_mgr->xcp[i].driver = (struct drm_driver *)p_ddev->driver;
		adev->xcp_mgr->xcp[i].vma_offset_manager = p_ddev->vma_offset_manager;
		p_ddev->render->dev = ddev;
		p_ddev->primary->dev = ddev;
		p_ddev->vma_offset_manager = ddev->vma_offset_manager;
		p_ddev->driver = &amdgpu_partition_driver;
		adev->xcp_mgr->xcp[i].ddev = p_ddev;

		dev_set_drvdata(p_ddev->dev, &adev->xcp_mgr->xcp[i]);
	}
	ret = 0;
out:
	amdgpu_xcp_sysfs_entries_init(adev->xcp_mgr);

	return ret;
}

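/**
 * amdgpu_xcp_mgr_init - Allocate and initialize the XCP manager
 * @adev: amdgpu device
 * @init_mode: initial partition mode
 * @init_num_xcps: initial number of partitions
 * @xcp_funcs: hardware-specific XCP callbacks; get_ip_details is mandatory
 *
 * Also allocates one drm device node per possible partition.
 *
 * Return: 0 on success, error code on failure.
 */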
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
			int init_num_xcps,
			struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
	struct amdgpu_xcp_mgr *xcp_mgr;
	int i;

	if (!xcp_funcs || !xcp_funcs->get_ip_details)
		return -EINVAL;

	xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);

	if (!xcp_mgr)
		return -ENOMEM;

	xcp_mgr->adev = adev;
	xcp_mgr->funcs = xcp_funcs;
	xcp_mgr->mode = init_mode;
	mutex_init(&xcp_mgr->xcp_lock);

	if (init_mode != AMDGPU_XCP_MODE_NONE)
		amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);

	adev->xcp_mgr = xcp_mgr;
	for (i = 0; i < MAX_XCP; ++i)
		xcp_mgr->xcp[i].xcp_mgr = xcp_mgr;

	return amdgpu_xcp_dev_alloc(adev);
}

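/**
 * amdgpu_xcp_get_partition - Find the partition(s) owning an IP instance
 * @xcp_mgr: XCP manager handle
 * @ip: IP block type
 * @instance: instance of the IP block
 *
 * Return: a bitmask of matching partition ids, or -ENXIO if the instance
 * does not belong to any partition.
 */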
int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
			     enum AMDGPU_XCP_IP_BLOCK ip, int instance)
{
	struct amdgpu_xcp *xcp;
	int i, id_mask = 0;

	if (ip >= AMDGPU_XCP_MAX_BLOCKS)
		return -EINVAL;

	for (i = 0; i < xcp_mgr->num_xcps; ++i) {
		xcp = &xcp_mgr->xcp[i];
		if ((xcp->valid) && (xcp->ip[ip].valid) &&
		    (xcp->ip[ip].inst_mask & BIT(instance)))
			id_mask |= BIT(i);
	}

	if (!id_mask)
		id_mask = -ENXIO;

	return id_mask;
}

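/**
 * amdgpu_xcp_get_inst_details - Get the instance mask of an IP block
 * @xcp: XCP instance
 * @ip: IP block type
 * @inst_mask: filled with the mask of instances assigned to @xcp
 *
 * Return: 0 on success, -EINVAL if the partition or IP block is invalid.
 */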
int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
				enum AMDGPU_XCP_IP_BLOCK ip,
				uint32_t *inst_mask)
{
	if (!xcp->valid || !inst_mask || !(xcp->ip[ip].valid))
		return -EINVAL;

	*inst_mask = xcp->ip[ip].inst_mask;

	return 0;
}

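/**
 * amdgpu_xcp_dev_register - Register the per-partition drm devices
 * @adev: amdgpu device
 * @ent: PCI device id entry used for the registration
 *
 * Return: 0 on success, error code on failure.
 */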
int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
			    const struct pci_device_id *ent)
{
	int i, ret;

	if (!adev->xcp_mgr)
		return 0;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
		if (ret)
			return ret;
	}

	return 0;
}

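/**
 * amdgpu_xcp_dev_unplug - Unplug and free the per-partition drm devices
 * @adev: amdgpu device
 *
 * Restores the minor/driver pointers that were redirected in
 * amdgpu_xcp_dev_alloc() before freeing each drm device.
 */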
void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	int i;

	if (!adev->xcp_mgr)
		return;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		p_ddev = adev->xcp_mgr->xcp[i].ddev;
		drm_dev_unplug(p_ddev);
		p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
		p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
		p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
		p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
		amdgpu_xcp_drm_dev_free(p_ddev);
	}
}

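/**
 * amdgpu_xcp_open_device - Bind a drm file to the partition it opened
 * @adev: amdgpu device
 * @fpriv: amdgpu private data of the drm file
 * @file_priv: drm file
 *
 * Maps the render node being opened to its partition id and memory id.
 *
 * Return: 0 on success, -ENOENT if the partition is not valid.
 */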
int amdgpu_xcp_open_device(struct amdgpu_device *adev,
			   struct amdgpu_fpriv *fpriv,
			   struct drm_file *file_priv)
{
	int i;

	if (!adev->xcp_mgr)
		return 0;

	fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
	for (i = 0; i < MAX_XCP; ++i) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) {
			if (!adev->xcp_mgr->xcp[i].valid) {
				dev_err(adev->dev, "renderD%d partition %d not valid!",
					file_priv->minor->index, i);
				return -ENOENT;
			}
			dev_dbg(adev->dev, "renderD%d partition %d opened!",
				file_priv->minor->index, i);
			fpriv->xcp_id = i;
			break;
		}
	}

	fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
				adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
	return 0;
}

void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
			      struct amdgpu_ctx_entity *entity)
{
	struct drm_gpu_scheduler *sched;
	struct amdgpu_ring *ring;

	if (!adev->xcp_mgr)
		return;

	sched = entity->entity.rq->sched;
	if (drm_sched_wqueue_ready(sched)) {
		ring = to_amdgpu_ring(sched);
		atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
	}
}

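/**
 * amdgpu_xcp_select_scheds - Select the scheduler list for a job submission
 * @adev: amdgpu device
 * @hw_ip: hardware IP type of the submission
 * @hw_prio: hardware priority of the submission
 * @fpriv: amdgpu private data of the drm file
 * @num_scheds: filled with the number of schedulers
 * @scheds: filled with the scheduler list of the selected partition
 *
 * Files not yet bound to a partition are assigned to the partition with the
 * lowest reference count.
 *
 * Return: 0 on success, -ENOENT if the partition has no matching scheduler.
 */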
int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
			     u32 hw_ip, u32 hw_prio,
			     struct amdgpu_fpriv *fpriv,
			     unsigned int *num_scheds,
			     struct drm_gpu_scheduler ***scheds)
{
	u32 sel_xcp_id;
	int i;
	struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;

	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
		u32 least_ref_cnt = ~0;

		fpriv->xcp_id = 0;
		for (i = 0; i < xcp_mgr->num_xcps; i++) {
			u32 total_ref_cnt;

			total_ref_cnt = atomic_read(&xcp_mgr->xcp[i].ref_cnt);
			if (total_ref_cnt < least_ref_cnt) {
				fpriv->xcp_id = i;
				least_ref_cnt = total_ref_cnt;
			}
		}
	}
	sel_xcp_id = fpriv->xcp_id;

	if (xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
		*num_scheds =
			xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
		*scheds =
			xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].sched;
		atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
		dev_dbg(adev->dev, "Selected partition #%d", sel_xcp_id);
	} else {
		dev_err(adev->dev, "Failed to schedule partition #%d.", sel_xcp_id);
		return -ENOENT;
	}

	return 0;
}

static void amdgpu_set_xcp_id(struct amdgpu_device *adev,
			      uint32_t inst_idx,
			      struct amdgpu_ring *ring)
{
	int xcp_id;
	enum AMDGPU_XCP_IP_BLOCK ip_blk;
	uint32_t inst_mask;

	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
		adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
	if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
	    (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
		return;

	inst_mask = 1 << inst_idx;

	switch (ring->funcs->type) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_RING_TYPE_COMPUTE:
	case AMDGPU_RING_TYPE_KIQ:
		ip_blk = AMDGPU_XCP_GFX;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ip_blk = AMDGPU_XCP_SDMA;
		break;
	case AMDGPU_RING_TYPE_VCN_ENC:
	case AMDGPU_RING_TYPE_VCN_JPEG:
		ip_blk = AMDGPU_XCP_VCN;
		break;
	default:
		dev_err(adev->dev, "Unsupported ring type %d!", ring->funcs->type);
		return;
	}

	for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
		if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
			ring->xcp_id = xcp_id;
			dev_dbg(adev->dev, "ring:%s xcp_id:%u", ring->name,
				ring->xcp_id);
			if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
				adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
			break;
		}
	}
}

static void amdgpu_xcp_gpu_sched_update(struct amdgpu_device *adev,
					struct amdgpu_ring *ring,
					unsigned int sel_xcp_id)
{
	unsigned int *num_gpu_sched;

	num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
			.gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
	adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
		.sched[(*num_gpu_sched)++] = &ring->sched;
	dev_dbg(adev->dev, "%s :[%d] gpu_sched[%d][%d] = %d",
		ring->name, sel_xcp_id, ring->funcs->type,
		ring->hw_prio, *num_gpu_sched);
}

static int amdgpu_xcp_sched_list_update(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0,
		       sizeof(adev->xcp_mgr->xcp[i].gpu_sched));
	}

	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		ring = adev->rings[i];
		if (!ring || !ring->sched.ready || ring->no_scheduler)
			continue;

		amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id);

		/* VCN may be shared by two partitions under CPX mode in
		 * certain configs.
		 */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
		    (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst))
			amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
	}

	return 0;
}

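/**
 * amdgpu_xcp_update_partition_sched_list - Rebuild per-partition scheduler lists
 * @adev: amdgpu device
 *
 * Assigns each ring to its partition, then regenerates the gpu_sched arrays
 * consumed by amdgpu_xcp_select_scheds().
 *
 * Return: 0 on success.
 */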
int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
		    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			amdgpu_set_xcp_id(adev, ring->xcc_id, ring);
		else
			amdgpu_set_xcp_id(adev, ring->me, ring);
	}

	return amdgpu_xcp_sched_list_update(adev);
}

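/**
 * amdgpu_xcp_update_supported_modes - Derive the supported partition modes
 * @xcp_mgr: XCP manager handle
 *
 * Sets supp_xcp_modes to a bitmask of partition modes based on the number
 * of XCCs present on the device.
 */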
void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_device *adev = xcp_mgr->adev;

	xcp_mgr->supp_xcp_modes = 0;

	switch (NUM_XCC(adev->gfx.xcc_mask)) {
	case 8:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_DPX_PARTITION_MODE) |
					  BIT(AMDGPU_QPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 6:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_TPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 4:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_DPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 2:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;
	case 1:
		xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
					  BIT(AMDGPU_CPX_PARTITION_MODE);
		break;

	default:
		break;
	}
}

int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	/* TODO:
	 * Stop user queues and threads, and make sure GPU is empty of work.
	 */

	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

	return 0;
}

int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int ret = 0;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
		amdgpu_amdkfd_device_init(xcp_mgr->adev);
		/* If KFD init failed, return failure */
		if (!xcp_mgr->adev->kfd.init_complete)
			ret = -EIO;
	}

	return ret;
}

/*====================== xcp sysfs - configuration ======================*/
#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name)                                \
	static ssize_t amdgpu_xcp_res_sysfs_##_name##_show(               \
		struct amdgpu_xcp_res_details *xcp_res, char *buf)        \
	{                                                                 \
		return sysfs_emit(buf, "%d\n", xcp_res->_name);           \
	}

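/*
 * For example, XCP_CFG_SYSFS_RES_ATTR_SHOW(num_inst) expands to a show
 * handler equivalent to:
 *
 *	static ssize_t amdgpu_xcp_res_sysfs_num_inst_show(
 *		struct amdgpu_xcp_res_details *xcp_res, char *buf)
 *	{
 *		return sysfs_emit(buf, "%d\n", xcp_res->num_inst);
 *	}
 */
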
struct amdgpu_xcp_res_sysfs_attribute {
	struct attribute attr;
	ssize_t (*show)(struct amdgpu_xcp_res_details *xcp_res, char *buf);
};

#define XCP_CFG_SYSFS_RES_ATTR(_name)                                        \
	struct amdgpu_xcp_res_sysfs_attribute xcp_res_sysfs_attr_##_name = { \
		.attr = { .name = __stringify(_name), .mode = 0400 },        \
		.show = amdgpu_xcp_res_sysfs_##_name##_show,                 \
	}

XCP_CFG_SYSFS_RES_ATTR_SHOW(num_inst)
XCP_CFG_SYSFS_RES_ATTR(num_inst);
XCP_CFG_SYSFS_RES_ATTR_SHOW(num_shared)
XCP_CFG_SYSFS_RES_ATTR(num_shared);

#define XCP_CFG_SYSFS_RES_ATTR_PTR(_name) xcp_res_sysfs_attr_##_name.attr

static struct attribute *xcp_cfg_res_sysfs_attrs[] = {
	&XCP_CFG_SYSFS_RES_ATTR_PTR(num_inst),
	&XCP_CFG_SYSFS_RES_ATTR_PTR(num_shared), NULL
};

static const char *xcp_desc[] = {
	[AMDGPU_SPX_PARTITION_MODE] = "SPX",
	[AMDGPU_DPX_PARTITION_MODE] = "DPX",
	[AMDGPU_TPX_PARTITION_MODE] = "TPX",
	[AMDGPU_QPX_PARTITION_MODE] = "QPX",
	[AMDGPU_CPX_PARTITION_MODE] = "CPX",
};

static const char *nps_desc[] = {
	[UNKNOWN_MEMORY_PARTITION_MODE] = "UNKNOWN",
	[AMDGPU_NPS1_PARTITION_MODE] = "NPS1",
	[AMDGPU_NPS2_PARTITION_MODE] = "NPS2",
	[AMDGPU_NPS3_PARTITION_MODE] = "NPS3",
	[AMDGPU_NPS4_PARTITION_MODE] = "NPS4",
	[AMDGPU_NPS6_PARTITION_MODE] = "NPS6",
	[AMDGPU_NPS8_PARTITION_MODE] = "NPS8",
};

ATTRIBUTE_GROUPS(xcp_cfg_res_sysfs);

#define to_xcp_attr(x) \
	container_of(x, struct amdgpu_xcp_res_sysfs_attribute, attr)
#define to_xcp_res(x) container_of(x, struct amdgpu_xcp_res_details, kobj)

static ssize_t xcp_cfg_res_sysfs_attr_show(struct kobject *kobj,
					   struct attribute *attr, char *buf)
{
	struct amdgpu_xcp_res_sysfs_attribute *attribute;
	struct amdgpu_xcp_res_details *xcp_res;

	attribute = to_xcp_attr(attr);
	xcp_res = to_xcp_res(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(xcp_res, buf);
}

static const struct sysfs_ops xcp_cfg_res_sysfs_ops = {
	.show = xcp_cfg_res_sysfs_attr_show,
};

static const struct kobj_type xcp_cfg_res_sysfs_ktype = {
	.sysfs_ops = &xcp_cfg_res_sysfs_ops,
	.default_groups = xcp_cfg_res_sysfs_groups,
};

const char *xcp_res_names[] = {
	[AMDGPU_XCP_RES_XCC] = "xcc",
	[AMDGPU_XCP_RES_DMA] = "dma",
	[AMDGPU_XCP_RES_DEC] = "dec",
	[AMDGPU_XCP_RES_JPEG] = "jpeg",
};

static int amdgpu_xcp_get_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
				   int mode,
				   struct amdgpu_xcp_cfg *xcp_cfg)
{
	if (xcp_mgr->funcs && xcp_mgr->funcs->get_xcp_res_info)
		return xcp_mgr->funcs->get_xcp_res_info(xcp_mgr, mode, xcp_cfg);

	return -EOPNOTSUPP;
}

#define to_xcp_cfg(x) container_of(x, struct amdgpu_xcp_cfg, kobj)
static ssize_t supported_xcp_configs_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
	struct amdgpu_xcp_mgr *xcp_mgr = xcp_cfg->xcp_mgr;
	int size = 0, mode;
	char *sep = "";

	if (!xcp_mgr || !xcp_mgr->supp_xcp_modes)
		return sysfs_emit(buf, "Not supported\n");

	for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
		size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
		sep = ", ";
	}

	size += sysfs_emit_at(buf, size, "\n");

	return size;
}

static ssize_t supported_nps_configs_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
	int size = 0, mode;
	char *sep = "";

	if (!xcp_cfg || !xcp_cfg->compatible_nps_modes)
		return sysfs_emit(buf, "Not supported\n");

	for_each_inst(mode, xcp_cfg->compatible_nps_modes) {
		size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]);
		sep = ", ";
	}

	size += sysfs_emit_at(buf, size, "\n");

	return size;
}

static ssize_t xcp_config_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);

	return sysfs_emit(buf, "%s\n",
			  amdgpu_gfx_compute_mode_desc(xcp_cfg->mode));
}

static ssize_t xcp_config_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
	int mode, r;

	if (!strncasecmp("SPX", buf, strlen("SPX")))
		mode = AMDGPU_SPX_PARTITION_MODE;
	else if (!strncasecmp("DPX", buf, strlen("DPX")))
		mode = AMDGPU_DPX_PARTITION_MODE;
	else if (!strncasecmp("TPX", buf, strlen("TPX")))
		mode = AMDGPU_TPX_PARTITION_MODE;
	else if (!strncasecmp("QPX", buf, strlen("QPX")))
		mode = AMDGPU_QPX_PARTITION_MODE;
	else if (!strncasecmp("CPX", buf, strlen("CPX")))
		mode = AMDGPU_CPX_PARTITION_MODE;
	else
		return -EINVAL;

	r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);
	if (r)
		return r;

	xcp_cfg->mode = mode;
	return size;
}

static struct kobj_attribute xcp_cfg_sysfs_mode =
	__ATTR_RW_MODE(xcp_config, 0644);

static void xcp_cfg_sysfs_release(struct kobject *kobj)
{
	struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);

	kfree(xcp_cfg);
}

static const struct kobj_type xcp_cfg_sysfs_ktype = {
	.release = xcp_cfg_sysfs_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static struct kobj_attribute supp_part_sysfs_mode =
	__ATTR_RO(supported_xcp_configs);

static struct kobj_attribute supp_nps_sysfs_mode =
	__ATTR_RO(supported_nps_configs);

static const struct attribute *xcp_attrs[] = {
	&supp_part_sysfs_mode.attr,
	&xcp_cfg_sysfs_mode.attr,
	NULL,
};

static void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
{
	struct amdgpu_xcp_res_details *xcp_res;
	struct amdgpu_xcp_cfg *xcp_cfg;
	int i, r, j, rid, mode;

	if (!adev->xcp_mgr)
		return;

	xcp_cfg = kzalloc(sizeof(*xcp_cfg), GFP_KERNEL);
	if (!xcp_cfg)
		return;
	xcp_cfg->xcp_mgr = adev->xcp_mgr;

	r = kobject_init_and_add(&xcp_cfg->kobj, &xcp_cfg_sysfs_ktype,
				 &adev->dev->kobj, "compute_partition_config");
	if (r)
		goto err1;

	r = sysfs_create_files(&xcp_cfg->kobj, xcp_attrs);
	if (r)
		goto err1;

	if (adev->gmc.supported_nps_modes != 0) {
		r = sysfs_create_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
		if (r) {
			sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
			goto err1;
		}
	}

	mode = (xcp_cfg->xcp_mgr->mode ==
		AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) ?
		       AMDGPU_SPX_PARTITION_MODE :
		       xcp_cfg->xcp_mgr->mode;
	r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);
	if (r) {
		sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
		sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
		goto err1;
	}

	xcp_cfg->mode = mode;
	for (i = 0; i < xcp_cfg->num_res; i++) {
		xcp_res = &xcp_cfg->xcp_res[i];
		rid = xcp_res->id;
		r = kobject_init_and_add(&xcp_res->kobj,
					 &xcp_cfg_res_sysfs_ktype,
					 &xcp_cfg->kobj, "%s",
					 xcp_res_names[rid]);
		if (r)
			goto err;
	}

	adev->xcp_mgr->xcp_cfg = xcp_cfg;
	return;
err:
	for (j = 0; j < i; j++) {
		xcp_res = &xcp_cfg->xcp_res[j];
		kobject_put(&xcp_res->kobj);
	}

	sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
	sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
err1:
	kobject_put(&xcp_cfg->kobj);
}

static void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
{
	struct amdgpu_xcp_res_details *xcp_res;
	struct amdgpu_xcp_cfg *xcp_cfg;
	int i;

	if (!adev->xcp_mgr || !adev->xcp_mgr->xcp_cfg)
		return;

	xcp_cfg = adev->xcp_mgr->xcp_cfg;
	for (i = 0; i < xcp_cfg->num_res; i++) {
		xcp_res = &xcp_cfg->xcp_res[i];
		kobject_put(&xcp_res->kobj);
	}

	sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
	sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
	kobject_put(&xcp_cfg->kobj);
}

/*====================== xcp sysfs - data entries ======================*/

#define to_xcp(x) container_of(x, struct amdgpu_xcp, kobj)

static ssize_t xcp_metrics_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct amdgpu_xcp *xcp = to_xcp(kobj);
	struct amdgpu_xcp_mgr *xcp_mgr;
	ssize_t size;

	xcp_mgr = xcp->xcp_mgr;
	size = amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, NULL);
	if (size <= 0)
		return size;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	return amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, buf);
}

static umode_t amdgpu_xcp_attrs_is_visible(struct kobject *kobj,
					   struct attribute *attr, int n)
{
	struct amdgpu_xcp *xcp = to_xcp(kobj);

	if (!xcp || !xcp->valid)
		return 0;

	return attr->mode;
}

static struct kobj_attribute xcp_sysfs_metrics = __ATTR_RO(xcp_metrics);

static struct attribute *amdgpu_xcp_attrs[] = {
	&xcp_sysfs_metrics.attr,
	NULL,
};

static const struct attribute_group amdgpu_xcp_attrs_group = {
	.attrs = amdgpu_xcp_attrs,
	.is_visible = amdgpu_xcp_attrs_is_visible
};

static const struct kobj_type xcp_sysfs_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
};

static void amdgpu_xcp_sysfs_entries_fini(struct amdgpu_xcp_mgr *xcp_mgr, int n)
{
	struct amdgpu_xcp *xcp;

	for (n--; n >= 0; n--) {
		xcp = &xcp_mgr->xcp[n];
		if (!xcp->ddev || !xcp->valid)
			continue;
		sysfs_remove_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
		kobject_put(&xcp->kobj);
	}
}

static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_xcp *xcp;
	int i, r;

	for (i = 0; i < MAX_XCP; i++) {
		/* Create an "xcp" entry under each partition's device node */
		xcp = &xcp_mgr->xcp[i];
		if (!xcp->ddev)
			break;
		r = kobject_init_and_add(&xcp->kobj, &xcp_sysfs_ktype,
					 &xcp->ddev->dev->kobj, "xcp");
		if (r)
			goto out;

		r = sysfs_create_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
		if (r)
			goto out;
	}

	return;
out:
	kobject_put(&xcp->kobj);
}

static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_xcp *xcp;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		xcp = &xcp_mgr->xcp[i];
		if (!xcp->ddev)
			continue;
		sysfs_update_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
	}
}

void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev)
{
	if (!adev->xcp_mgr)
		return;

	amdgpu_xcp_cfg_sysfs_init(adev);
}

void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev)
{
	if (!adev->xcp_mgr)
		return;
	amdgpu_xcp_sysfs_entries_fini(adev->xcp_mgr, MAX_XCP);
	amdgpu_xcp_cfg_sysfs_fini(adev);
}