Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu/vce1: Ensure VCPU BO is in lower 32-bit address space (v3)

Based on research and ideas by Alexandre and Christian.

VCE1 actually executes its code from the VCPU BO.
Due to various hardware limitations, the VCE1 requires
the VCPU BO to be in the low 32 bit address range.
However, VRAM is typically mapped at the high address range,
which means the VCPU can't access VRAM through the FB aperture.

To solve this, we write a few page table entries to
map the VCPU BO in the GART address range. And we make sure
that the GART is located at the low address range.
That way the VCE1 can access the VCPU BO.

v2:
- Adjust to v2 of the GART helper commit.
- Add empty line to multi-line comment.

v3:
- Instead of relying on gmc_v6 to set the GART space before GTT,
add a new function amdgpu_vce_required_gart_pages() which is
called from amdgpu_gtt_mgr_init() directly.

Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
Co-developed-by: Alexandre Demers <alexandre.f.demers@gmail.com>
Signed-off-by: Alexandre Demers <alexandre.f.demers@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Timur Kristóf and committed by Alex Deucher
221cadb9 baf75a08

+75
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
··· 284 284 ttm_resource_manager_init(man, &adev->mman.bdev, gtt_size); 285 285 286 286 start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; 287 + start += amdgpu_vce_required_gart_pages(adev); 287 288 size = (adev->gmc.gart_size >> PAGE_SHIFT) - start; 288 289 drm_mm_init(&mgr->mm, start, size); 289 290 spin_lock_init(&mgr->lock);
+18
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
··· 451 451 } 452 452 453 453 /** 454 + * amdgpu_vce_required_gart_pages() - gets number of GART pages required by VCE 455 + * 456 + * @adev: amdgpu_device pointer 457 + * 458 + * Returns how many GART pages we need before GTT for the VCE IP block. 459 + * For VCE1, see vce_v1_0_ensure_vcpu_bo_32bit_addr for details. 460 + * For VCE2+, this is not needed so return zero. 461 + */ 462 + u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev) 463 + { 464 + /* VCE IP block not added yet, so can't use amdgpu_ip_version */ 465 + if (adev->family == AMDGPU_FAMILY_SI) 466 + return 512; 467 + 468 + return 0; 469 + } 470 + 471 + /** 454 472 * amdgpu_vce_get_create_msg - generate a VCE create msg 455 473 * 456 474 * @ring: ring we should submit the msg to
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
··· 61 61 int amdgpu_vce_suspend(struct amdgpu_device *adev); 62 62 int amdgpu_vce_resume(struct amdgpu_device *adev); 63 63 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); 64 + u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev); 64 65 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, struct amdgpu_job *job, 65 66 struct amdgpu_ib *ib); 66 67 int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
+55
drivers/gpu/drm/amd/amdgpu/vce_v1_0.c
··· 34 34 35 35 #include "amdgpu.h" 36 36 #include "amdgpu_vce.h" 37 + #include "amdgpu_gart.h" 37 38 #include "sid.h" 38 39 #include "vce_v1_0.h" 39 40 #include "vce/vce_1_0_d.h" ··· 46 45 #define VCE_V1_0_STACK_SIZE (64 * 1024) 47 46 #define VCE_V1_0_DATA_SIZE (7808 * (AMDGPU_MAX_VCE_HANDLES + 1)) 48 47 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 48 + 49 + #define VCE_V1_0_GART_PAGE_START \ 50 + (AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS) 51 + #define VCE_V1_0_GART_ADDR_START \ 52 + (VCE_V1_0_GART_PAGE_START * AMDGPU_GPU_PAGE_SIZE) 49 53 50 54 static void vce_v1_0_set_ring_funcs(struct amdgpu_device *adev); 51 55 static void vce_v1_0_set_irq_funcs(struct amdgpu_device *adev); ··· 519 513 return 0; 520 514 } 521 515 516 + /** 517 + * vce_v1_0_ensure_vcpu_bo_32bit_addr() - ensure the VCPU BO has a 32-bit address 518 + * 519 + * @adev: amdgpu_device pointer 520 + * 521 + * Due to various hardware limitations, the VCE1 requires 522 + * the VCPU BO to be in the low 32 bit address range. 523 + * Ensure that the VCPU BO has a 32-bit GPU address, 524 + * or return an error code when that isn't possible. 525 + * 526 + * To accommodate that, we put GART to the LOW address range 527 + * and reserve some GART pages where we map the VCPU BO, 528 + * so that it gets a 32-bit address. 529 + */ 530 + static int vce_v1_0_ensure_vcpu_bo_32bit_addr(struct amdgpu_device *adev) 531 + { 532 + u64 gpu_addr = amdgpu_bo_gpu_offset(adev->vce.vcpu_bo); 533 + u64 bo_size = amdgpu_bo_size(adev->vce.vcpu_bo); 534 + u64 max_vcpu_bo_addr = 0xffffffff - bo_size; 535 + u64 num_pages = ALIGN(bo_size, AMDGPU_GPU_PAGE_SIZE) / AMDGPU_GPU_PAGE_SIZE; 536 + u64 pa = amdgpu_gmc_vram_pa(adev, adev->vce.vcpu_bo); 537 + u64 flags = AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_VALID; 538 + 539 + /* 540 + * Check if the VCPU BO already has a 32-bit address. 541 + * Eg. if MC is configured to put VRAM in the low address range. 
542 + */ 543 + if (gpu_addr <= max_vcpu_bo_addr) 544 + return 0; 545 + 546 + /* Check if we can map the VCPU BO in GART to a 32-bit address. */ 547 + if (adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START > max_vcpu_bo_addr) 548 + return -EINVAL; 549 + 550 + amdgpu_gart_map_vram_range(adev, pa, VCE_V1_0_GART_PAGE_START, 551 + num_pages, flags, adev->gart.ptr); 552 + adev->vce.gpu_addr = adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START; 553 + if (adev->vce.gpu_addr > max_vcpu_bo_addr) 554 + return -EINVAL; 555 + 556 + return 0; 557 + } 558 + 522 559 static int vce_v1_0_sw_init(struct amdgpu_ip_block *ip_block) 523 560 { 524 561 struct amdgpu_device *adev = ip_block->adev; ··· 581 532 if (r) 582 533 return r; 583 534 r = vce_v1_0_load_fw_signature(adev); 535 + if (r) 536 + return r; 537 + r = vce_v1_0_ensure_vcpu_bo_32bit_addr(adev); 584 538 if (r) 585 539 return r; 586 540 ··· 699 647 if (r) 700 648 return r; 701 649 r = vce_v1_0_load_fw_signature(adev); 650 + if (r) 651 + return r; 652 + r = vce_v1_0_ensure_vcpu_bo_32bit_addr(adev); 702 653 if (r) 703 654 return r; 704 655