/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/module.h>

#include <drm/drm_drv.h>
#include <xen/xen.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"

#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
        do { \
                vf2pf_info->ucode_info[ucode].id = ucode; \
                vf2pf_info->ucode_info[ucode].version = ver; \
        } while (0)

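/*
 * For illustration: with ucode AMD_SRIOV_UCODE_ID_SMC and the SMC call made
 * in amdgpu_virt_populate_vf2pf_ucode_info() below, the macro expands to the
 * two assignments
 *
 *   vf2pf_info->ucode_info[AMD_SRIOV_UCODE_ID_SMC].id = AMD_SRIOV_UCODE_ID_SMC;
 *   vf2pf_info->ucode_info[AMD_SRIOV_UCODE_ID_SMC].version = adev->pm.fw_version;
 *
 * so each firmware id indexes its own slot in the ucode_info array.
 */
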
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
        /*
         * By now all MMIO pages except the mailbox are blocked if blocking
         * is enabled in the hypervisor. Choose SCRATCH_REG0 to test.
         */
        return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev_to_drm(adev);

        /* enable virtual display */
        if (adev->asic_type != CHIP_ALDEBARAN &&
            adev->asic_type != CHIP_ARCTURUS) {
                if (adev->mode_info.num_crtc == 0)
                        adev->mode_info.num_crtc = 1;
                adev->enable_virtual_display = true;
        }
        ddev->driver_features &= ~DRIVER_ATOMIC;
        adev->cg_flags = 0;
        adev->pg_flags = 0;
}

void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
                                        uint32_t reg0, uint32_t reg1,
                                        uint32_t ref, uint32_t mask)
{
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
                                            ref, mask);
        r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
        if (r)
                goto failed_undo;

        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* don't wait anymore for an IRQ context */
        if (r < 1 && in_interrupt())
                goto failed_kiq;

        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq;

        return;

failed_undo:
        amdgpu_ring_undo(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
        dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
}

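/*
 * Illustrative caller sketch (not taken from this file): on an SR-IOV VF a
 * GMC TLB flush can emit the invalidate request and the ack poll as a single
 * atomic KIQ packet, e.g.:
 *
 *   amdgpu_virt_kiq_reg_write_reg_wait(adev, req_reg, ack_reg,
 *                                      1 << vmid, 1 << vmid);
 *
 * req_reg/ack_reg/vmid stand in for the ASIC-specific VM invalidate
 * request/acknowledge register offsets and the VM id being flushed.
 */
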
/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 *
 * When starting driver init/fini, full gpu access must be requested first.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->req_full_gpu) {
                r = virt->ops->req_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 *
 * When driver init/fini finishes, full gpu access must be released.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->rel_full_gpu) {
                r = virt->ops->rel_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
        }
        return 0;
}

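/*
 * Typical pairing, sketched for illustration only: driver init first requests
 * exclusive gpu access, does its work, then releases it so the host can
 * resume world switching:
 *
 *   r = amdgpu_virt_request_full_gpu(adev, true);
 *   if (r)
 *           return r;
 *   ... hardware init ...
 *   amdgpu_virt_release_full_gpu(adev, true);
 */
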
/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 *
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->reset_gpu) {
                r = virt->ops->reset_gpu(adev);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;

        if (virt->ops && virt->ops->req_init_data)
                virt->ops->req_init_data(adev);

        if (adev->virt.req_init_data_ver > 0)
                DRM_INFO("host supports REQ_INIT_DATA handshake\n");
        else
                DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev: amdgpu device.
 *
 * Wait for the GPU reset to complete.
 * Return: Zero on success, error code otherwise.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;

        if (!virt->ops || !virt->ops->wait_reset)
                return -EINVAL;

        return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev: amdgpu device.
 *
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on allocation success, error code otherwise.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
        int r;

        if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
                return 0;

        r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->virt.mm_table.bo,
                                    &adev->virt.mm_table.gpu_addr,
                                    (void *)&adev->virt.mm_table.cpu_addr);
        if (r) {
                DRM_ERROR("failed to alloc mm table, error = %d.\n", r);
                return r;
        }

        memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
        DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
                 adev->virt.mm_table.gpu_addr,
                 adev->virt.mm_table.cpu_addr);
        return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 *
 * Free MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
        if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
                return;

        amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
                              &adev->virt.mm_table.gpu_addr,
                              (void *)&adev->virt.mm_table.cpu_addr);
        adev->virt.mm_table.gpu_addr = 0;
}

unsigned int amd_sriov_msg_checksum(void *obj,
                                    unsigned long obj_size,
                                    unsigned int key,
                                    unsigned int checksum)
{
        unsigned int ret = key;
        unsigned long i = 0;
        unsigned char *pos;

        pos = (unsigned char *)obj;
        /* calculate checksum */
        for (i = 0; i < obj_size; ++i)
                ret += *(pos + i);
        /* minus the checksum itself */
        pos = (unsigned char *)&checksum;
        for (i = 0; i < sizeof(checksum); ++i)
                ret -= *(pos + i);
        return ret;
}

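/*
 * A property worth noting: because the bytes of the passed-in checksum are
 * subtracted back out, a verifier can feed the received message and its
 * embedded checksum straight in, as amdgpu_virt_read_pf2vf_data() does
 * below. A minimal check (msg and key are placeholders for the mapped pf2vf
 * buffer and the host-published key):
 *
 *   checkval = amd_sriov_msg_checksum(msg, msg->size, key, msg->checksum);
 *   valid = (checkval == msg->checksum);
 */
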
static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
        /* GPU will be marked bad on the host if the bad page count is more
         * than 10, so allocating 512 is enough.
         */
        unsigned int align_space = 512;
        void *bps = NULL;
        struct amdgpu_bo **bps_bo = NULL;

        *data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
        if (!*data)
                goto data_failure;

        bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
        if (!bps)
                goto bps_failure;

        bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
        if (!bps_bo)
                goto bps_bo_failure;

        (*data)->bps = bps;
        (*data)->bps_bo = bps_bo;
        (*data)->count = 0;
        (*data)->last_reserved = 0;

        virt->ras_init_done = true;

        return 0;

bps_bo_failure:
        kfree(bps);
bps_failure:
        kfree(*data);
data_failure:
        return -ENOMEM;
}

static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
        struct amdgpu_bo *bo;
        int i;

        if (!data)
                return;

        for (i = data->last_reserved - 1; i >= 0; i--) {
                bo = data->bps_bo[i];
                amdgpu_bo_free_kernel(&bo, NULL, NULL);
                data->bps_bo[i] = bo;
                data->last_reserved = i;
        }
}

void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

        virt->ras_init_done = false;

        if (!data)
                return;

        amdgpu_virt_ras_release_bp(adev);

        kfree(data->bps);
        kfree(data->bps_bo);
        kfree(data);
        virt->virt_eh_data = NULL;
}

static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
                                    struct eeprom_table_record *bps, int pages)
{
        struct amdgpu_virt *virt = &adev->virt;
        struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

        if (!data)
                return;

        memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
        data->count += pages;
}

static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
        struct amdgpu_bo *bo = NULL;
        uint64_t bp;
        int i;

        if (!data)
                return;

        for (i = data->last_reserved; i < data->count; i++) {
                bp = data->bps[i].retired_page;

                /* Two reserve-error cases should be ignored:
                 * 1) a ras bad page has been allocated (used by someone);
                 * 2) a ras bad page has been reserved (duplicate error
                 *    injection for one page);
                 */
                if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
                                               AMDGPU_GPU_PAGE_SIZE,
                                               AMDGPU_GEM_DOMAIN_VRAM,
                                               &bo, NULL))
                        DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);

                data->bps_bo[i] = bo;
                data->last_reserved = i + 1;
                bo = NULL;
        }
}

static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
                                           uint64_t retired_page)
{
        struct amdgpu_virt *virt = &adev->virt;
        struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
        int i;

        if (!data)
                return true;

        for (i = 0; i < data->count; i++)
                if (retired_page == data->bps[i].retired_page)
                        return true;

        return false;
}

static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
                                     uint64_t bp_block_offset, uint32_t bp_block_size)
{
        struct eeprom_table_record bp;
        uint64_t retired_page;
        uint32_t bp_idx, bp_cnt;

        if (!bp_block_size)
                return;

        bp_cnt = bp_block_size / sizeof(uint64_t);
        for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
                retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va +
                                bp_block_offset + bp_idx * sizeof(uint64_t));
                bp.retired_page = retired_page;

                if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
                        continue;

                amdgpu_virt_ras_add_bps(adev, &bp, 1);

                amdgpu_virt_ras_reserve_bps(adev);
        }
}

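/*
 * Layout assumed by the parser above: the bad-page block is a packed array
 * of 64-bit retired page numbers located bp_block_offset bytes into the
 * reserved FW VRAM region. For two retired pages (values illustrative only):
 *
 *   uint64_t bp_block[] = { 0x1a0, 0x3f7 };
 *   bp_block_size = sizeof(bp_block);   16 bytes, so bp_cnt = 2
 */
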
static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
{
        struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
        struct amd_sriov_msg_pf2vf_info *pf2vf_v2;
        uint32_t checksum;
        uint32_t checkval;
        uint32_t i;
        uint32_t tmp;

        if (adev->virt.fw_reserve.p_pf2vf == NULL)
                return -EINVAL;

        if (pf2vf_info->size > 1024) {
                DRM_ERROR("invalid pf2vf message size\n");
                return -EINVAL;
        }

        switch (pf2vf_info->version) {
        case 1:
                checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
                checkval = amd_sriov_msg_checksum(
                        adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
                        adev->virt.fw_reserve.checksum_key, checksum);
                if (checksum != checkval) {
                        DRM_ERROR("invalid pf2vf message\n");
                        return -EINVAL;
                }

                adev->virt.gim_feature =
                        ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
                break;
        case 2:
                /* TODO: missing key, need to add it later */
                pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)pf2vf_info;
                checksum = pf2vf_v2->checksum;
                checkval = amd_sriov_msg_checksum(
                        adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
                        0, checksum);
                if (checksum != checkval) {
                        DRM_ERROR("invalid pf2vf message\n");
                        return -EINVAL;
                }

                adev->virt.vf2pf_update_interval_ms =
                        pf2vf_v2->vf2pf_update_interval_ms;
                adev->virt.gim_feature = pf2vf_v2->feature_flags.all;
                adev->virt.reg_access = pf2vf_v2->reg_access_flags.all;

                adev->virt.decode_max_dimension_pixels = 0;
                adev->virt.decode_max_frame_pixels = 0;
                adev->virt.encode_max_dimension_pixels = 0;
                adev->virt.encode_max_frame_pixels = 0;
                adev->virt.is_mm_bw_enabled = false;
                for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
                        tmp = pf2vf_v2->mm_bw_management[i].decode_max_dimension_pixels;
                        adev->virt.decode_max_dimension_pixels =
                                max(tmp, adev->virt.decode_max_dimension_pixels);

                        tmp = pf2vf_v2->mm_bw_management[i].decode_max_frame_pixels;
                        adev->virt.decode_max_frame_pixels =
                                max(tmp, adev->virt.decode_max_frame_pixels);

                        tmp = pf2vf_v2->mm_bw_management[i].encode_max_dimension_pixels;
                        adev->virt.encode_max_dimension_pixels =
                                max(tmp, adev->virt.encode_max_dimension_pixels);

                        tmp = pf2vf_v2->mm_bw_management[i].encode_max_frame_pixels;
                        adev->virt.encode_max_frame_pixels =
                                max(tmp, adev->virt.encode_max_frame_pixels);
                }
                if ((adev->virt.decode_max_dimension_pixels > 0) ||
                    (adev->virt.encode_max_dimension_pixels > 0))
                        adev->virt.is_mm_bw_enabled = true;

                adev->unique_id = pf2vf_v2->uuid;
                break;
        default:
                DRM_ERROR("invalid pf2vf version\n");
                return -EINVAL;
        }

        /* clamp a too large or too small update interval */
        if (adev->virt.vf2pf_update_interval_ms < 200 ||
            adev->virt.vf2pf_update_interval_ms > 10000)
                adev->virt.vf2pf_update_interval_ms = 2000;

        return 0;
}

static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
{
        struct amd_sriov_msg_vf2pf_info *vf2pf_info;

        vf2pf_info = (struct amd_sriov_msg_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf;

        if (adev->virt.fw_reserve.p_vf2pf == NULL)
                return;

        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE, adev->vce.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD, adev->uvd.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC, adev->gmc.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME, adev->gfx.me_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP, adev->gfx.pfp_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE, adev->gfx.ce_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC, adev->gfx.rlc_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
                            adev->psp.asd_context.bin_desc.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
                            adev->psp.ras_context.context.bin_desc.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
                            adev->psp.xgmi_context.context.bin_desc.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN, adev->vcn.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU, adev->dm.dmcu_fw_version);
}

static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
{
        struct amd_sriov_msg_vf2pf_info *vf2pf_info;

        vf2pf_info = (struct amd_sriov_msg_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf;

        if (adev->virt.fw_reserve.p_vf2pf == NULL)
                return -EINVAL;

        memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));

        vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
        vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;

#ifdef MODULE
        if (THIS_MODULE->version != NULL)
                strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
        else
#endif
                strcpy(vf2pf_info->driver_version, "N/A");

        vf2pf_info->pf2vf_version_required = 0; /* no requirement, guest understands all */
        vf2pf_info->driver_cert = 0;
        vf2pf_info->os_info.all = 0;

        vf2pf_info->fb_usage =
                ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
        vf2pf_info->fb_vis_usage =
                amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
        vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
        vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;

        amdgpu_virt_populate_vf2pf_ucode_info(adev);

        /* TODO: read dynamic info */
        vf2pf_info->gfx_usage = 0;
        vf2pf_info->compute_usage = 0;
        vf2pf_info->encode_usage = 0;
        vf2pf_info->decode_usage = 0;

        vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
        vf2pf_info->checksum =
                amd_sriov_msg_checksum(vf2pf_info, vf2pf_info->header.size, 0, 0);

        return 0;
}

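/*
 * Note on the checksum written above: the earlier memset() leaves
 * vf2pf_info->checksum at zero while the byte sum is taken, and both the key
 * and the subtracted checksum argument are 0. A host-side verifier (sketched
 * here for illustration only) can therefore reuse the same helper:
 *
 *   amd_sriov_msg_checksum(vf2pf_info, vf2pf_info->header.size,
 *                          0, vf2pf_info->checksum)
 *
 * and expect the result to equal vf2pf_info->checksum.
 */
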
static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
{
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
        int ret;

        ret = amdgpu_virt_read_pf2vf_data(adev);
        if (ret)
                goto out;
        amdgpu_virt_write_vf2pf_data(adev);

out:
        /* the interval is kept in milliseconds, so convert to jiffies here */
        schedule_delayed_work(&(adev->virt.vf2pf_work),
                              msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
}

void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
        if (adev->virt.vf2pf_update_interval_ms != 0) {
                DRM_INFO("clean up the vf2pf work item\n");
                cancel_delayed_work_sync(&adev->virt.vf2pf_work);
                adev->virt.vf2pf_update_interval_ms = 0;
        }
}

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
        adev->virt.fw_reserve.p_pf2vf = NULL;
        adev->virt.fw_reserve.p_vf2pf = NULL;
        adev->virt.vf2pf_update_interval_ms = 0;

        if (adev->mman.fw_vram_usage_va != NULL) {
                /* go through this logic in ip_init and reset to init the workqueue */
                amdgpu_virt_exchange_data(adev);

                INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
                schedule_delayed_work(&(adev->virt.vf2pf_work),
                                      msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
        } else if (adev->bios != NULL) {
                /* go through this logic in the early init stage to get necessary
                 * flags, e.g. rlcg_acc related
                 */
                adev->virt.fw_reserve.p_pf2vf =
                        (struct amd_sriov_msg_pf2vf_info_header *)
                        (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));

                amdgpu_virt_read_pf2vf_data(adev);
        }
}

void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
{
        uint64_t bp_block_offset = 0;
        uint32_t bp_block_size = 0;
        struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;

        if (adev->mman.fw_vram_usage_va == NULL)
                return;

        adev->virt.fw_reserve.p_pf2vf =
                (struct amd_sriov_msg_pf2vf_info_header *)
                (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
        adev->virt.fw_reserve.p_vf2pf =
                (struct amd_sriov_msg_vf2pf_info_header *)
                (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));

        amdgpu_virt_read_pf2vf_data(adev);
        amdgpu_virt_write_vf2pf_data(adev);

        /* bad page handling for version 2 */
        if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
                pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;

                bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
                                  ((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
                bp_block_size = pf2vf_v2->bp_block_size;

                if (bp_block_size && !adev->virt.ras_init_done)
                        amdgpu_virt_init_ras_err_handler_data(adev);

                if (adev->virt.ras_init_done)
                        amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
        }
}

void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
        uint32_t reg;

        switch (adev->asic_type) {
        case CHIP_TONGA:
        case CHIP_FIJI:
                reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
                break;
        case CHIP_VEGA10:
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI12:
        case CHIP_SIENNA_CICHLID:
        case CHIP_ARCTURUS:
        case CHIP_ALDEBARAN:
                reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
                break;
        default: /* other chips don't support SRIOV */
                reg = 0;
                break;
        }

        if (reg & 1)
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

        if (reg & 0x80000000)
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

        if (!reg) {
                /* passthrough mode is mutually exclusive with sriov mode */
                if (is_virtual_machine() && !xen_initial_domain())
                        adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
        }

        /* we have the ability to check now */
        if (amdgpu_sriov_vf(adev)) {
                switch (adev->asic_type) {
                case CHIP_TONGA:
                case CHIP_FIJI:
                        vi_set_virt_ops(adev);
                        break;
                case CHIP_VEGA10:
                        soc15_set_virt_ops(adev);
                        /* send a dummy GPU_INIT_DATA request to host on vega10 */
                        amdgpu_virt_request_init_data(adev);
                        break;
                case CHIP_VEGA20:
                case CHIP_ARCTURUS:
                case CHIP_ALDEBARAN:
                        soc15_set_virt_ops(adev);
                        break;
                case CHIP_NAVI10:
                case CHIP_NAVI12:
                case CHIP_SIENNA_CICHLID:
                        nv_set_virt_ops(adev);
                        /* try to send a GPU_INIT_DATA request to the host */
                        amdgpu_virt_request_init_data(adev);
                        break;
                default: /* other chips don't support SRIOV */
                        DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
                        break;
                }
        }
}

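/*
 * The caps bits set above drive VF-specific paths throughout the driver.
 * A minimal sketch of how callers branch on them, assuming the
 * amdgpu_sriov_vf() helper from amdgpu_virt.h that tests
 * AMDGPU_SRIOV_CAPS_IS_VF:
 *
 *   if (amdgpu_sriov_vf(adev))
 *           use the mailbox/virt-ops path;
 *   else
 *           program the hardware directly;
 */
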
static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
        return amdgpu_sriov_is_debug(adev);
}

static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
{
        return amdgpu_sriov_is_normal(adev);
}

int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
        if (!amdgpu_sriov_vf(adev) ||
            amdgpu_virt_access_debugfs_is_kiq(adev))
                return 0;

        if (amdgpu_virt_access_debugfs_is_mmio(adev))
                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        else
                return -EPERM;

        return 0;
}

void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
{
        if (amdgpu_sriov_vf(adev))
                adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
}

enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
{
        enum amdgpu_sriov_vf_mode mode;

        if (amdgpu_sriov_vf(adev)) {
                if (amdgpu_sriov_is_pp_one_vf(adev))
                        mode = SRIOV_VF_MODE_ONE_VF;
                else
                        mode = SRIOV_VF_MODE_MULTI_VF;
        } else {
                mode = SRIOV_VF_MODE_BARE_METAL;
        }

        return mode;
}

void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
                                          struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
                                          struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
{
        uint32_t i;

        if (!adev->virt.is_mm_bw_enabled)
                return;

        if (encode) {
                for (i = 0; i < encode_array_size; i++) {
                        encode[i].max_width = adev->virt.encode_max_dimension_pixels;
                        encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
                        if (encode[i].max_width > 0)
                                encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
                        else
                                encode[i].max_height = 0;
                }
        }

        if (decode) {
                for (i = 0; i < decode_array_size; i++) {
                        decode[i].max_width = adev->virt.decode_max_dimension_pixels;
                        decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
                        if (decode[i].max_width > 0)
                                decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
                        else
                                decode[i].max_height = 0;
                }
        }
}

static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
                                                 u32 acc_flags, u32 hwip,
                                                 bool write, u32 *rlcg_flag)
{
        bool ret = false;

        switch (hwip) {
        case GC_HWIP:
                if (amdgpu_sriov_reg_indirect_gc(adev)) {
                        *rlcg_flag =
                                write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
                        ret = true;
                /* only in the new version are AMDGPU_REGS_NO_KIQ and
                 * AMDGPU_REGS_RLC enabled simultaneously
                 */
                } else if ((acc_flags & AMDGPU_REGS_RLC) &&
                           !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
                        *rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
                        ret = true;
                }
                break;
        case MMHUB_HWIP:
                if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
                    (acc_flags & AMDGPU_REGS_RLC) && write) {
                        *rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
                        ret = true;
                }
                break;
        default:
                break;
        }
        return ret;
}

static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
{
        struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
        uint32_t timeout = 50000;
        uint32_t i, tmp;
        uint32_t ret = 0;
        void __iomem *scratch_reg0;
        void __iomem *scratch_reg1;
        void __iomem *scratch_reg2;
        void __iomem *scratch_reg3;
        void __iomem *spare_int = NULL;

        if (!adev->gfx.rlc.rlcg_reg_access_supported) {
                dev_err(adev->dev,
                        "indirect register access through rlcg is not available\n");
                return 0;
        }

        reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
        scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
        scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
        scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
        scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
        if (reg_access_ctrl->spare_int)
                spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;

        if (offset == reg_access_ctrl->grbm_cntl) {
                /* if the target reg offset is grbm_cntl, write to scratch_reg2 */
                writel(v, scratch_reg2);
                writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
        } else if (offset == reg_access_ctrl->grbm_idx) {
                /* if the target reg offset is grbm_idx, write to scratch_reg3 */
                writel(v, scratch_reg3);
                writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
        } else {
                /*
                 * SCRATCH_REG0 = read/write value
                 * SCRATCH_REG1[30:28] = command
                 * SCRATCH_REG1[19:0] = address in dword
                 * SCRATCH_REG1[26:24] = error reporting
                 */
                writel(v, scratch_reg0);
                writel((offset | flag), scratch_reg1);
                if (reg_access_ctrl->spare_int)
                        writel(1, spare_int);

                for (i = 0; i < timeout; i++) {
                        tmp = readl(scratch_reg1);
                        if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
                                break;
                        udelay(10);
                }

                if (i >= timeout) {
                        if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
                                if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
                                        dev_err(adev->dev,
                                                "vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
                                } else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
                                        dev_err(adev->dev,
                                                "wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
                                } else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
                                        dev_err(adev->dev,
                                                "register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
                                } else {
                                        dev_err(adev->dev,
                                                "unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
                                }
                        } else {
                                dev_err(adev->dev,
                                        "timeout: rlcg failed to program reg: 0x%05x\n", offset);
                        }
                }
        }

        ret = readl(scratch_reg0);
        return ret;
}

void amdgpu_sriov_wreg(struct amdgpu_device *adev,
                       u32 offset, u32 value,
                       u32 acc_flags, u32 hwip)
{
        u32 rlcg_flag;

        if (!amdgpu_sriov_runtime(adev) &&
            amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
                amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
                return;
        }

        if (acc_flags & AMDGPU_REGS_NO_KIQ)
                WREG32_NO_KIQ(offset, value);
        else
                WREG32(offset, value);
}

u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
                      u32 offset, u32 acc_flags, u32 hwip)
{
        u32 rlcg_flag;

        if (!amdgpu_sriov_runtime(adev) &&
            amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
                return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);

        if (acc_flags & AMDGPU_REGS_NO_KIQ)
                return RREG32_NO_KIQ(offset);
        else
                return RREG32(offset);
}
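
/*
 * Usage sketch (illustrative only): the driver's register access macros
 * route VF accesses through the two helpers above so that RLCG-protected
 * ranges are programmed indirectly, e.g.:
 *
 *   amdgpu_sriov_wreg(adev, offset, value, AMDGPU_REGS_RLC, GC_HWIP);
 *   value = amdgpu_sriov_rreg(adev, offset, AMDGPU_REGS_RLC, GC_HWIP);
 *
 * offset/value here are placeholders for a real GC register offset and its
 * payload.
 */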