Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'amd-drm-next-5.14-2021-06-16' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-5.14-2021-06-16:

amdgpu:
- Aldebaran fixes
- Expose asic independent throttler status
- BACO fixes for navi1x
- Smartshift fixes
- Misc code cleanups
- RAS fixes for Sienna Cichlid
- Gamma verification fixes
- DC LTTPR fixes
- DP AUX timeout handling fixes
- GFX9, 10 powergating fixes

amdkfd:
- TLB flush fixes when using SDMA
- Locking fixes
- SVM fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210617031719.4013-1-alexander.deucher@amd.com

+2365 -891
+18 -31
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 47 47 spinlock_t mem_limit_lock; 48 48 } kfd_mem_limit; 49 49 50 - /* Struct used for amdgpu_amdkfd_bo_validate */ 51 - struct amdgpu_vm_parser { 52 - uint32_t domain; 53 - bool wait; 54 - }; 55 - 56 50 static const char * const domain_bit_to_string[] = { 57 51 "CPU", 58 52 "GTT", ··· 342 348 return ret; 343 349 } 344 350 345 - static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo) 351 + static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo) 346 352 { 347 - struct amdgpu_vm_parser *p = param; 348 - 349 - return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait); 353 + return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false); 350 354 } 351 355 352 356 /* vm_validate_pt_pd_bos - Validate page table and directory BOs ··· 356 364 */ 357 365 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) 358 366 { 359 - struct amdgpu_bo *pd = vm->root.base.bo; 367 + struct amdgpu_bo *pd = vm->root.bo; 360 368 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 361 - struct amdgpu_vm_parser param; 362 369 int ret; 363 370 364 - param.domain = AMDGPU_GEM_DOMAIN_VRAM; 365 - param.wait = false; 366 - 367 - ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate, 368 - &param); 371 + ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL); 369 372 if (ret) { 370 373 pr_err("failed to validate PT BOs\n"); 371 374 return ret; 372 375 } 373 376 374 - ret = amdgpu_amdkfd_validate(&param, pd); 377 + ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd); 375 378 if (ret) { 376 379 pr_err("failed to validate PD\n"); 377 380 return ret; 378 381 } 379 382 380 - vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); 383 + vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo); 381 384 382 385 if (vm->use_cpu_for_update) { 383 386 ret = amdgpu_bo_kmap(pd, NULL); ··· 387 400 388 401 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) 389 402 { 390 - struct amdgpu_bo *pd = vm->root.base.bo; 403 + 
struct amdgpu_bo *pd = vm->root.bo; 391 404 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 392 405 int ret; 393 406 ··· 639 652 } 640 653 } 641 654 642 - gobj = amdgpu_gem_prime_import(&adev->ddev, mem->dmabuf); 655 + gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf); 643 656 if (IS_ERR(gobj)) 644 657 return PTR_ERR(gobj); 645 658 ··· 1153 1166 1154 1167 list_for_each_entry(peer_vm, &process_info->vm_list_head, 1155 1168 vm_list_node) { 1156 - struct amdgpu_bo *pd = peer_vm->root.base.bo; 1169 + struct amdgpu_bo *pd = peer_vm->root.bo; 1157 1170 1158 1171 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv, 1159 1172 AMDGPU_SYNC_NE_OWNER, ··· 1220 1233 vm->process_info = *process_info; 1221 1234 1222 1235 /* Validate page directory and attach eviction fence */ 1223 - ret = amdgpu_bo_reserve(vm->root.base.bo, true); 1236 + ret = amdgpu_bo_reserve(vm->root.bo, true); 1224 1237 if (ret) 1225 1238 goto reserve_pd_fail; 1226 1239 ret = vm_validate_pt_pd_bos(vm); ··· 1228 1241 pr_err("validate_pt_pd_bos() failed\n"); 1229 1242 goto validate_pd_fail; 1230 1243 } 1231 - ret = amdgpu_bo_sync_wait(vm->root.base.bo, 1244 + ret = amdgpu_bo_sync_wait(vm->root.bo, 1232 1245 AMDGPU_FENCE_OWNER_KFD, false); 1233 1246 if (ret) 1234 1247 goto wait_pd_fail; 1235 - ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1); 1248 + ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1); 1236 1249 if (ret) 1237 1250 goto reserve_shared_fail; 1238 - amdgpu_bo_fence(vm->root.base.bo, 1251 + amdgpu_bo_fence(vm->root.bo, 1239 1252 &vm->process_info->eviction_fence->base, true); 1240 - amdgpu_bo_unreserve(vm->root.base.bo); 1253 + amdgpu_bo_unreserve(vm->root.bo); 1241 1254 1242 1255 /* Update process info */ 1243 1256 mutex_lock(&vm->process_info->lock); ··· 1251 1264 reserve_shared_fail: 1252 1265 wait_pd_fail: 1253 1266 validate_pd_fail: 1254 - amdgpu_bo_unreserve(vm->root.base.bo); 1267 + amdgpu_bo_unreserve(vm->root.bo); 1255 1268 reserve_pd_fail: 
1256 1269 vm->process_info = NULL; 1257 1270 if (info) { ··· 1306 1319 struct amdgpu_vm *vm) 1307 1320 { 1308 1321 struct amdkfd_process_info *process_info = vm->process_info; 1309 - struct amdgpu_bo *pd = vm->root.base.bo; 1322 + struct amdgpu_bo *pd = vm->root.bo; 1310 1323 1311 1324 if (!process_info) 1312 1325 return; ··· 1362 1375 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv) 1363 1376 { 1364 1377 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 1365 - struct amdgpu_bo *pd = avm->root.base.bo; 1378 + struct amdgpu_bo *pd = avm->root.bo; 1366 1379 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 1367 1380 1368 1381 if (adev->asic_type < CHIP_VEGA10) ··· 2389 2402 /* Attach eviction fence to PD / PT BOs */ 2390 2403 list_for_each_entry(peer_vm, &process_info->vm_list_head, 2391 2404 vm_list_node) { 2392 - struct amdgpu_bo *bo = peer_vm->root.base.bo; 2405 + struct amdgpu_bo *bo = peer_vm->root.bo; 2393 2406 2394 2407 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true); 2395 2408 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 832 832 if (r) 833 833 return r; 834 834 835 - p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); 835 + p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo); 836 836 837 837 if (amdgpu_vm_debug) { 838 838 /* Invalidate all BOs to test for userspace bugs */
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
··· 1304 1304 1305 1305 seq_printf(m, "pid:%d\tProcess:%s ----------\n", 1306 1306 vm->task_info.pid, vm->task_info.process_name); 1307 - r = amdgpu_bo_reserve(vm->root.base.bo, true); 1307 + r = amdgpu_bo_reserve(vm->root.bo, true); 1308 1308 if (r) 1309 1309 break; 1310 1310 amdgpu_debugfs_vm_bo_info(vm, m); 1311 - amdgpu_bo_unreserve(vm->root.base.bo); 1311 + amdgpu_bo_unreserve(vm->root.bo); 1312 1312 } 1313 1313 1314 1314 mutex_unlock(&dev->filelist_mutex);
+3 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 4124 4124 { 4125 4125 struct dma_fence *fence = NULL, *next = NULL; 4126 4126 struct amdgpu_bo *shadow; 4127 + struct amdgpu_bo_vm *vmbo; 4127 4128 long r = 1, tmo; 4128 4129 4129 4130 if (amdgpu_sriov_runtime(adev)) ··· 4134 4133 4135 4134 dev_info(adev->dev, "recover vram bo from shadow start\n"); 4136 4135 mutex_lock(&adev->shadow_list_lock); 4137 - list_for_each_entry(shadow, &adev->shadow_list, shadow_list) { 4138 - 4136 + list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) { 4137 + shadow = &vmbo->bo; 4139 4138 /* No need to recover an evicted BO */ 4140 4139 if (shadow->tbo.resource->mem_type != TTM_PL_TT || 4141 4140 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
··· 325 325 return 0; 326 326 } 327 327 328 - int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, 328 + int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance, 329 329 int *major, int *minor, int *revision) 330 330 { 331 331 struct binary_header *bhdr; ··· 357 357 for (j = 0; j < num_ips; j++) { 358 358 ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); 359 359 360 - if (le16_to_cpu(ip->hw_id) == hw_id) { 360 + if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) { 361 361 if (major) 362 362 *major = ip->major; 363 363 if (minor)
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
··· 30 30 void amdgpu_discovery_fini(struct amdgpu_device *adev); 31 31 int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev); 32 32 void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev); 33 - int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, 33 + int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance, 34 34 int *major, int *minor, int *revision); 35 35 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev); 36 36
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
··· 448 448 449 449 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { 450 450 struct amdgpu_vm *vm = bo_base->vm; 451 - struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; 451 + struct dma_resv *resv = vm->root.bo->tbo.base.resv; 452 452 453 453 if (ticket) { 454 454 /* When we get an error here it means that somebody
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
··· 69 69 dev = PCI_SLOT(adev->pdev->devfn); 70 70 fn = PCI_FUNC(adev->pdev->devfn); 71 71 72 - ret = amdgpu_bo_reserve(fpriv->vm.root.base.bo, false); 72 + ret = amdgpu_bo_reserve(fpriv->vm.root.bo, false); 73 73 if (ret) { 74 74 DRM_ERROR("Fail to reserve bo\n"); 75 75 return; 76 76 } 77 77 amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gtt_mem, &cpu_mem); 78 - amdgpu_bo_unreserve(fpriv->vm.root.base.bo); 78 + amdgpu_bo_unreserve(fpriv->vm.root.bo); 79 79 seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus, 80 80 dev, fn, fpriv->vm.pasid); 81 81 seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL);
+6 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 170 170 return -EPERM; 171 171 172 172 if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID && 173 - abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv) 173 + abo->tbo.base.resv != vm->root.bo->tbo.base.resv) 174 174 return -EPERM; 175 175 176 176 r = amdgpu_bo_reserve(abo, false); ··· 320 320 } 321 321 322 322 if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { 323 - r = amdgpu_bo_reserve(vm->root.base.bo, false); 323 + r = amdgpu_bo_reserve(vm->root.bo, false); 324 324 if (r) 325 325 return r; 326 326 327 - resv = vm->root.base.bo->tbo.base.resv; 327 + resv = vm->root.bo->tbo.base.resv; 328 328 } 329 329 330 330 initial_domain = (u32)(0xffffffff & args->in.domains); ··· 353 353 if (!r) { 354 354 struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); 355 355 356 - abo->parent = amdgpu_bo_ref(vm->root.base.bo); 356 + abo->parent = amdgpu_bo_ref(vm->root.bo); 357 357 } 358 - amdgpu_bo_unreserve(vm->root.base.bo); 358 + amdgpu_bo_unreserve(vm->root.bo); 359 359 } 360 360 if (r) 361 361 return r; ··· 841 841 } 842 842 for (base = robj->vm_bo; base; base = base->next) 843 843 if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev), 844 - amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) { 844 + amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) { 845 845 r = -EINVAL; 846 846 amdgpu_bo_unreserve(robj); 847 847 goto out;
+43 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 124 124 mutex_unlock(&mgpu_info.mutex); 125 125 } 126 126 127 + static void amdgpu_get_audio_func(struct amdgpu_device *adev) 128 + { 129 + struct pci_dev *p = NULL; 130 + 131 + p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), 132 + adev->pdev->bus->number, 1); 133 + if (p) { 134 + pm_runtime_get_sync(&p->dev); 135 + 136 + pm_runtime_mark_last_busy(&p->dev); 137 + pm_runtime_put_autosuspend(&p->dev); 138 + 139 + pci_dev_put(p); 140 + } 141 + } 142 + 127 143 /** 128 144 * amdgpu_driver_load_kms - Main load function for KMS. 129 145 * ··· 229 213 DPM_FLAG_MAY_SKIP_RESUME); 230 214 pm_runtime_use_autosuspend(dev->dev); 231 215 pm_runtime_set_autosuspend_delay(dev->dev, 5000); 216 + 232 217 pm_runtime_allow(dev->dev); 218 + 233 219 pm_runtime_mark_last_busy(dev->dev); 234 220 pm_runtime_put_autosuspend(dev->dev); 221 + 222 + /* 223 + * For runpm implemented via BACO, PMFW will handle the 224 + * timing for BACO in and out: 225 + * - put ASIC into BACO state only when both video and 226 + * audio functions are in D3 state. 227 + * - pull ASIC out of BACO state when either video or 228 + * audio function is in D0 state. 229 + * Also, at startup, PMFW assumes both functions are in 230 + * D0 state. 231 + * 232 + * So if snd driver was loaded prior to amdgpu driver 233 + * and audio function was put into D3 state, there will 234 + * be no PMFW-aware D-state transition(D0->D3) on runpm 235 + * suspend. Thus the BACO will be not correctly kicked in. 236 + * 237 + * Via amdgpu_get_audio_func(), the audio dev is put 238 + * into D0 state. Then there will be a PMFW-aware D-state 239 + * transition(D0->D3) on runpm suspend. 
240 + */ 241 + if (amdgpu_device_supports_baco(dev) && 242 + !(adev->flags & AMD_IS_APU) && 243 + (adev->asic_type >= CHIP_NAVI10)) 244 + amdgpu_get_audio_func(adev); 235 245 } 236 246 237 247 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD)) ··· 1262 1220 } 1263 1221 1264 1222 pasid = fpriv->vm.pasid; 1265 - pd = amdgpu_bo_ref(fpriv->vm.root.base.bo); 1223 + pd = amdgpu_bo_ref(fpriv->vm.root.bo); 1266 1224 1267 1225 amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); 1268 1226 amdgpu_vm_fini(adev, &fpriv->vm);
+38 -17
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 54 54 55 55 static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo) 56 56 { 57 - struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); 58 57 struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); 59 - struct amdgpu_bo_user *ubo; 60 58 61 59 amdgpu_bo_kunmap(bo); 62 60 63 61 if (bo->tbo.base.import_attach) 64 62 drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg); 65 63 drm_gem_object_release(&bo->tbo.base); 64 + amdgpu_bo_unref(&bo->parent); 65 + kvfree(bo); 66 + } 67 + 68 + static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo) 69 + { 70 + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); 71 + struct amdgpu_bo_user *ubo; 72 + 73 + ubo = to_amdgpu_bo_user(bo); 74 + kfree(ubo->metadata); 75 + amdgpu_bo_destroy(tbo); 76 + } 77 + 78 + static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo) 79 + { 80 + struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); 81 + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); 82 + struct amdgpu_bo_vm *vmbo; 83 + 84 + vmbo = to_amdgpu_bo_vm(bo); 66 85 /* in case amdgpu_device_recover_vram got NULL of bo->parent */ 67 - if (!list_empty(&bo->shadow_list)) { 86 + if (!list_empty(&vmbo->shadow_list)) { 68 87 mutex_lock(&adev->shadow_list_lock); 69 - list_del_init(&bo->shadow_list); 88 + list_del_init(&vmbo->shadow_list); 70 89 mutex_unlock(&adev->shadow_list_lock); 71 90 } 72 - amdgpu_bo_unref(&bo->parent); 73 91 74 - if (bo->tbo.type != ttm_bo_type_kernel) { 75 - ubo = to_amdgpu_bo_user(bo); 76 - kfree(ubo->metadata); 77 - } 78 - 79 - kvfree(bo); 92 + amdgpu_bo_destroy(tbo); 80 93 } 81 94 82 95 /** ··· 104 91 */ 105 92 bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo) 106 93 { 107 - if (bo->destroy == &amdgpu_bo_destroy) 94 + if (bo->destroy == &amdgpu_bo_destroy || 95 + bo->destroy == &amdgpu_bo_user_destroy || 96 + bo->destroy == &amdgpu_bo_vm_destroy) 108 97 return true; 98 + 109 99 return false; 110 100 } 111 101 ··· 561 545 if (bo == NULL) 562 546 return -ENOMEM; 563 547 
drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size); 564 - INIT_LIST_HEAD(&bo->shadow_list); 565 548 bo->vm_bo = NULL; 566 549 bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain : 567 550 bp->domain; ··· 583 568 if (bp->type == ttm_bo_type_kernel) 584 569 bo->tbo.priority = 1; 585 570 571 + if (!bp->destroy) 572 + bp->destroy = &amdgpu_bo_destroy; 573 + 586 574 r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type, 587 575 &bo->placement, page_align, &ctx, NULL, 588 - bp->resv, &amdgpu_bo_destroy); 576 + bp->resv, bp->destroy); 589 577 if (unlikely(r != 0)) 590 578 return r; 591 579 ··· 652 634 int r; 653 635 654 636 bp->bo_ptr_size = sizeof(struct amdgpu_bo_user); 637 + bp->destroy = &amdgpu_bo_user_destroy; 655 638 r = amdgpu_bo_create(adev, bp, &bo_ptr); 656 639 if (r) 657 640 return r; ··· 684 665 * num of amdgpu_vm_pt entries. 685 666 */ 686 667 BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm)); 668 + bp->destroy = &amdgpu_bo_vm_destroy; 687 669 r = amdgpu_bo_create(adev, bp, &bo_ptr); 688 670 if (r) 689 671 return r; 690 672 691 673 *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr); 674 + INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list); 692 675 return r; 693 676 } 694 677 ··· 735 714 * 736 715 * Insert a BO to the shadow list. 737 716 */ 738 - void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo *bo) 717 + void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo) 739 718 { 740 - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 719 + struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev); 741 720 742 721 mutex_lock(&adev->shadow_list_lock); 743 - list_add_tail(&bo->shadow_list, &adev->shadow_list); 722 + list_add_tail(&vmbo->shadow_list, &adev->shadow_list); 744 723 mutex_unlock(&adev->shadow_list_lock); 745 724 } 746 725
+5 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
··· 55 55 u64 flags; 56 56 enum ttm_bo_type type; 57 57 bool no_wait_gpu; 58 - struct dma_resv *resv; 58 + struct dma_resv *resv; 59 + void (*destroy)(struct ttm_buffer_object *bo); 59 60 }; 60 61 61 62 /* bo virtual addresses in a vm */ ··· 109 108 #ifdef CONFIG_MMU_NOTIFIER 110 109 struct mmu_interval_notifier notifier; 111 110 #endif 112 - 113 - struct list_head shadow_list; 114 - 115 111 struct kgd_mem *kfd_bo; 116 112 }; 117 113 ··· 124 126 struct amdgpu_bo_vm { 125 127 struct amdgpu_bo bo; 126 128 struct amdgpu_bo *shadow; 127 - struct amdgpu_vm_pt entries[]; 129 + struct list_head shadow_list; 130 + struct amdgpu_vm_bo_base entries[]; 128 131 }; 129 132 130 133 static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo) ··· 331 332 int amdgpu_bo_validate(struct amdgpu_bo *bo); 332 333 void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem, 333 334 uint64_t *gtt_mem, uint64_t *cpu_mem); 334 - void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo *bo); 335 + void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo); 335 336 int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, 336 337 struct dma_fence **fence); 337 338 uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
+253 -57
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 171 171 return ret; 172 172 } 173 173 174 + /* 175 + * Helper funciton to query psp runtime database entry 176 + * 177 + * @adev: amdgpu_device pointer 178 + * @entry_type: the type of psp runtime database entry 179 + * @db_entry: runtime database entry pointer 180 + * 181 + * Return false if runtime database doesn't exit or entry is invalid 182 + * or true if the specific database entry is found, and copy to @db_entry 183 + */ 184 + static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, 185 + enum psp_runtime_entry_type entry_type, 186 + void *db_entry) 187 + { 188 + uint64_t db_header_pos, db_dir_pos; 189 + struct psp_runtime_data_header db_header = {0}; 190 + struct psp_runtime_data_directory db_dir = {0}; 191 + bool ret = false; 192 + int i; 193 + 194 + db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET; 195 + db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header); 196 + 197 + /* read runtime db header from vram */ 198 + amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header, 199 + sizeof(struct psp_runtime_data_header), false); 200 + 201 + if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) { 202 + /* runtime db doesn't exist, exit */ 203 + dev_warn(adev->dev, "PSP runtime database doesn't exist\n"); 204 + return false; 205 + } 206 + 207 + /* read runtime database entry from vram */ 208 + amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir, 209 + sizeof(struct psp_runtime_data_directory), false); 210 + 211 + if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) { 212 + /* invalid db entry count, exit */ 213 + dev_warn(adev->dev, "Invalid PSP runtime database entry count\n"); 214 + return false; 215 + } 216 + 217 + /* look up for requested entry type */ 218 + for (i = 0; i < db_dir.entry_count && !ret; i++) { 219 + if (db_dir.entry_list[i].entry_type == entry_type) { 220 + switch (entry_type) { 221 + case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG: 222 + if (db_dir.entry_list[i].size < 
sizeof(struct psp_runtime_boot_cfg_entry)) { 223 + /* invalid db entry size */ 224 + dev_warn(adev->dev, "Invalid PSP runtime database entry size\n"); 225 + return false; 226 + } 227 + /* read runtime database entry */ 228 + amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset, 229 + (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false); 230 + ret = true; 231 + break; 232 + default: 233 + ret = false; 234 + break; 235 + } 236 + } 237 + } 238 + 239 + return ret; 240 + } 241 + 174 242 static int psp_sw_init(void *handle) 175 243 { 176 244 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 177 245 struct psp_context *psp = &adev->psp; 178 246 int ret; 247 + struct psp_runtime_boot_cfg_entry boot_cfg_entry; 248 + struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx; 179 249 180 250 if (!amdgpu_sriov_vf(adev)) { 181 251 ret = psp_init_microcode(psp); ··· 261 191 } 262 192 } 263 193 264 - ret = psp_memory_training_init(psp); 265 - if (ret) { 266 - DRM_ERROR("Failed to initialize memory training!\n"); 267 - return ret; 194 + memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry)); 195 + if (psp_get_runtime_db_entry(adev, 196 + PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG, 197 + &boot_cfg_entry)) { 198 + psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask; 199 + if ((psp->boot_cfg_bitmask) & 200 + BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) { 201 + /* If psp runtime database exists, then 202 + * only enable two stage memory training 203 + * when TWO_STAGE_DRAM_TRAINING bit is set 204 + * in runtime database */ 205 + mem_training_ctx->enable_mem_training = true; 206 + } 207 + 208 + } else { 209 + /* If psp runtime database doesn't exist or 210 + * is invalid, force enable two stage memory 211 + * training */ 212 + mem_training_ctx->enable_mem_training = true; 268 213 } 269 - ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); 270 - if (ret) { 271 - DRM_ERROR("Failed to process memory training!\n"); 272 - return 
ret; 214 + 215 + if (mem_training_ctx->enable_mem_training) { 216 + ret = psp_memory_training_init(psp); 217 + if (ret) { 218 + DRM_ERROR("Failed to initialize memory training!\n"); 219 + return ret; 220 + } 221 + 222 + ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); 223 + if (ret) { 224 + DRM_ERROR("Failed to process memory training!\n"); 225 + return ret; 226 + } 273 227 } 274 228 275 229 if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) { ··· 645 551 return ret; 646 552 } 647 553 648 - static int psp_boot_config_set(struct amdgpu_device *adev) 554 + static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg) 555 + { 556 + struct psp_context *psp = &adev->psp; 557 + struct psp_gfx_cmd_resp *cmd = psp->cmd; 558 + int ret; 559 + 560 + if (amdgpu_sriov_vf(adev)) 561 + return 0; 562 + 563 + memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); 564 + 565 + cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; 566 + cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET; 567 + 568 + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 569 + if (!ret) { 570 + *boot_cfg = 571 + (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 
1 : 0; 572 + } 573 + 574 + return ret; 575 + } 576 + 577 + static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg) 649 578 { 650 579 struct psp_context *psp = &adev->psp; 651 580 struct psp_gfx_cmd_resp *cmd = psp->cmd; ··· 680 563 681 564 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; 682 565 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET; 683 - cmd->cmd.boot_cfg.boot_config = BOOT_CONFIG_GECC; 684 - cmd->cmd.boot_cfg.boot_config_valid = BOOT_CONFIG_GECC; 566 + cmd->cmd.boot_cfg.boot_config = boot_cfg; 567 + cmd->cmd.boot_cfg.boot_config_valid = boot_cfg; 685 568 686 569 return psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 687 570 } ··· 1329 1212 static int psp_ras_initialize(struct psp_context *psp) 1330 1213 { 1331 1214 int ret; 1215 + uint32_t boot_cfg = 0xFF; 1216 + struct amdgpu_device *adev = psp->adev; 1332 1217 1333 1218 /* 1334 1219 * TODO: bypass the initialize in sriov for now 1335 1220 */ 1336 - if (amdgpu_sriov_vf(psp->adev)) 1221 + if (amdgpu_sriov_vf(adev)) 1337 1222 return 0; 1338 1223 1339 - if (!psp->adev->psp.ta_ras_ucode_size || 1340 - !psp->adev->psp.ta_ras_start_addr) { 1341 - dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n"); 1224 + if (!adev->psp.ta_ras_ucode_size || 1225 + !adev->psp.ta_ras_start_addr) { 1226 + dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n"); 1342 1227 return 0; 1228 + } 1229 + 1230 + if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) { 1231 + /* query GECC enablement status from boot config 1232 + * boot_cfg: 1: GECC is enabled or 0: GECC is disabled 1233 + */ 1234 + ret = psp_boot_config_get(adev, &boot_cfg); 1235 + if (ret) 1236 + dev_warn(adev->dev, "PSP get boot config failed\n"); 1237 + 1238 + if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) { 1239 + if (!boot_cfg) { 1240 + dev_info(adev->dev, "GECC is disabled\n"); 1241 + } else { 1242 + /* disable GECC in next boot cycle if ras is 1243 + * disabled by module parameter 
amdgpu_ras_enable 1244 + * and/or amdgpu_ras_mask, or boot_config_get call 1245 + * is failed 1246 + */ 1247 + ret = psp_boot_config_set(adev, 0); 1248 + if (ret) 1249 + dev_warn(adev->dev, "PSP set boot config failed\n"); 1250 + else 1251 + dev_warn(adev->dev, "GECC will be disabled in next boot cycle " 1252 + "if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n"); 1253 + } 1254 + } else { 1255 + if (1 == boot_cfg) { 1256 + dev_info(adev->dev, "GECC is enabled\n"); 1257 + } else { 1258 + /* enable GECC in next boot cycle if it is disabled 1259 + * in boot config, or force enable GECC if failed to 1260 + * get boot configuration 1261 + */ 1262 + ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC); 1263 + if (ret) 1264 + dev_warn(adev->dev, "PSP set boot config failed\n"); 1265 + else 1266 + dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n"); 1267 + } 1268 + } 1343 1269 } 1344 1270 1345 1271 if (!psp->ras.ras_initialized) { ··· 2105 1945 return ret; 2106 1946 } 2107 1947 2108 - if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) { 2109 - ret = psp_boot_config_set(adev); 2110 - if (ret) 2111 - dev_warn(adev->dev, "PSP set boot config failed\n"); 2112 - } 2113 - 2114 1948 ret = psp_tmr_init(psp); 2115 1949 if (ret) { 2116 1950 DRM_ERROR("PSP tmr init failed!\n"); ··· 2342 2188 if ((amdgpu_in_reset(adev) && 2343 2189 ras && adev->ras_enabled && 2344 2190 (adev->asic_type == CHIP_ARCTURUS || 2345 - adev->asic_type == CHIP_VEGA20)) || 2346 - (adev->in_runpm && 2347 - adev->asic_type >= CHIP_NAVI10 && 2348 - adev->asic_type <= CHIP_NAVI12)) { 2191 + adev->asic_type == CHIP_VEGA20))) { 2349 2192 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 2350 2193 if (ret) { 2351 2194 DRM_WARN("Failed to set MP1 state prepare for reload\n"); ··· 2713 2562 2714 2563 DRM_INFO("PSP is resuming...\n"); 2715 2564 2716 - ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 2717 - if (ret) { 2718 - DRM_ERROR("Failed to process memory training!\n"); 2719 - 
return ret; 2565 + if (psp->mem_train_ctx.enable_mem_training) { 2566 + ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); 2567 + if (ret) { 2568 + DRM_ERROR("Failed to process memory training!\n"); 2569 + return ret; 2570 + } 2720 2571 } 2721 2572 2722 2573 mutex_lock(&adev->firmware.mutex); ··· 2902 2749 2903 2750 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 2904 2751 adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 2905 - adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version); 2752 + adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->sos.fw_version); 2906 2753 adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 2907 2754 adev->psp.asd_start_addr = (uint8_t *)asd_hdr + 2908 2755 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); ··· 2938 2785 2939 2786 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; 2940 2787 adev->psp.toc_fw_version = le32_to_cpu(toc_hdr->header.ucode_version); 2941 - adev->psp.toc_feature_version = le32_to_cpu(toc_hdr->ucode_feature_version); 2788 + adev->psp.toc_feature_version = le32_to_cpu(toc_hdr->sos.fw_version); 2942 2789 adev->psp.toc_bin_size = le32_to_cpu(toc_hdr->header.ucode_size_bytes); 2943 2790 adev->psp.toc_start_addr = (uint8_t *)toc_hdr + 2944 2791 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); ··· 2948 2795 release_firmware(adev->psp.toc_fw); 2949 2796 adev->psp.toc_fw = NULL; 2950 2797 return err; 2798 + } 2799 + 2800 + static int psp_init_sos_base_fw(struct amdgpu_device *adev) 2801 + { 2802 + const struct psp_firmware_header_v1_0 *sos_hdr; 2803 + const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 2804 + uint8_t *ucode_array_start_addr; 2805 + 2806 + sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 2807 + ucode_array_start_addr = (uint8_t *)sos_hdr + 2808 + le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 2809 + 2810 + if (adev->gmc.xgmi.connected_to_cpu || 
(adev->asic_type != CHIP_ALDEBARAN)) { 2811 + adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 2812 + adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->sos.fw_version); 2813 + 2814 + adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos.offset_bytes); 2815 + adev->psp.sys_start_addr = ucode_array_start_addr; 2816 + 2817 + adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos.size_bytes); 2818 + adev->psp.sos_start_addr = ucode_array_start_addr + 2819 + le32_to_cpu(sos_hdr->sos.offset_bytes); 2820 + } else { 2821 + /* Load alternate PSP SOS FW */ 2822 + sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 2823 + 2824 + adev->psp.sos_fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 2825 + adev->psp.sos_feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); 2826 + 2827 + adev->psp.sys_bin_size = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); 2828 + adev->psp.sys_start_addr = ucode_array_start_addr + 2829 + le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); 2830 + 2831 + adev->psp.sos_bin_size = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); 2832 + adev->psp.sos_start_addr = ucode_array_start_addr + 2833 + le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); 2834 + } 2835 + 2836 + if ((adev->psp.sys_bin_size == 0) || (adev->psp.sos_bin_size == 0)) { 2837 + dev_warn(adev->dev, "PSP SOS FW not available"); 2838 + return -EINVAL; 2839 + } 2840 + 2841 + return 0; 2951 2842 } 2952 2843 2953 2844 int psp_init_sos_microcode(struct psp_context *psp, ··· 3004 2807 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; 3005 2808 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; 3006 2809 int err = 0; 2810 + uint8_t *ucode_array_start_addr; 3007 2811 3008 2812 if (!chip_name) { 3009 2813 dev_err(adev->dev, "invalid chip name for sos microcode\n"); ··· 3021 2823 goto out; 3022 2824 3023 2825 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; 2826 + ucode_array_start_addr = (uint8_t 
*)sos_hdr + 2827 + le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3024 2828 amdgpu_ucode_print_psp_hdr(&sos_hdr->header); 3025 2829 3026 2830 switch (sos_hdr->header.header_version_major) { 3027 2831 case 1: 3028 - adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 3029 - adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version); 3030 - adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes); 3031 - adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes); 3032 - adev->psp.sys_start_addr = (uint8_t *)sos_hdr + 3033 - le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3034 - adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr + 3035 - le32_to_cpu(sos_hdr->sos_offset_bytes); 2832 + err = psp_init_sos_base_fw(adev); 2833 + if (err) 2834 + goto out; 2835 + 3036 2836 if (sos_hdr->header.header_version_minor == 1) { 3037 2837 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; 3038 - adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes); 2838 + adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); 3039 2839 adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr + 3040 - le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes); 3041 - adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes); 2840 + le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); 2841 + adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); 3042 2842 adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + 3043 - le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes); 2843 + le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); 3044 2844 } 3045 2845 if (sos_hdr->header.header_version_minor == 2) { 3046 2846 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; 3047 - adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes); 2847 + adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); 3048 2848 adev->psp.kdb_start_addr = 
(uint8_t *)adev->psp.sys_start_addr + 3049 - le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes); 2849 + le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); 3050 2850 } 3051 2851 if (sos_hdr->header.header_version_minor == 3) { 3052 2852 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; 3053 - adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.toc_size_bytes); 3054 - adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr + 3055 - le32_to_cpu(sos_hdr_v1_3->v1_1.toc_offset_bytes); 3056 - adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_size_bytes); 3057 - adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + 3058 - le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_offset_bytes); 3059 - adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl_size_bytes); 3060 - adev->psp.spl_start_addr = (uint8_t *)adev->psp.sys_start_addr + 3061 - le32_to_cpu(sos_hdr_v1_3->spl_offset_bytes); 3062 - adev->psp.rl_bin_size = le32_to_cpu(sos_hdr_v1_3->rl_size_bytes); 3063 - adev->psp.rl_start_addr = (uint8_t *)adev->psp.sys_start_addr + 3064 - le32_to_cpu(sos_hdr_v1_3->rl_offset_bytes); 2853 + adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); 2854 + adev->psp.toc_start_addr = ucode_array_start_addr + 2855 + le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); 2856 + adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); 2857 + adev->psp.kdb_start_addr = ucode_array_start_addr + 2858 + le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); 2859 + adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); 2860 + adev->psp.spl_start_addr = ucode_array_start_addr + 2861 + le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); 2862 + adev->psp.rl_bin_size = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); 2863 + adev->psp.rl_start_addr = ucode_array_start_addr + 2864 + le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); 3065 2865 } 3066 2866 break; 3067 2867 default:
+57
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
··· 225 225 226 226 enum psp_memory_training_init_flag init; 227 227 u32 training_cnt; 228 + bool enable_mem_training; 229 + }; 230 + 231 + /** PSP runtime DB **/ 232 + #define PSP_RUNTIME_DB_SIZE_IN_BYTES 0x10000 233 + #define PSP_RUNTIME_DB_OFFSET 0x100000 234 + #define PSP_RUNTIME_DB_COOKIE_ID 0x0ed5 235 + #define PSP_RUNTIME_DB_VER_1 0x0100 236 + #define PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT 0x40 237 + 238 + enum psp_runtime_entry_type { 239 + PSP_RUNTIME_ENTRY_TYPE_INVALID = 0x0, 240 + PSP_RUNTIME_ENTRY_TYPE_TEST = 0x1, 241 + PSP_RUNTIME_ENTRY_TYPE_MGPU_COMMON = 0x2, /* Common mGPU runtime data */ 242 + PSP_RUNTIME_ENTRY_TYPE_MGPU_WAFL = 0x3, /* WAFL runtime data */ 243 + PSP_RUNTIME_ENTRY_TYPE_MGPU_XGMI = 0x4, /* XGMI runtime data */ 244 + PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG = 0x5, /* Boot Config runtime data */ 245 + }; 246 + 247 + /* PSP runtime DB header */ 248 + struct psp_runtime_data_header { 249 + /* determine the existence of runtime db */ 250 + uint16_t cookie; 251 + /* version of runtime db */ 252 + uint16_t version; 253 + }; 254 + 255 + /* PSP runtime DB entry */ 256 + struct psp_runtime_entry { 257 + /* type of runtime db entry */ 258 + uint32_t entry_type; 259 + /* offset of entry in bytes */ 260 + uint16_t offset; 261 + /* size of entry in bytes */ 262 + uint16_t size; 263 + }; 264 + 265 + /* PSP runtime DB directory */ 266 + struct psp_runtime_data_directory { 267 + /* number of valid entries */ 268 + uint16_t entry_count; 269 + /* db entries*/ 270 + struct psp_runtime_entry entry_list[PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT]; 271 + }; 272 + 273 + /* PSP runtime DB boot config feature bitmask */ 274 + enum psp_runtime_boot_cfg_feature { 275 + BOOT_CFG_FEATURE_GECC = 0x1, 276 + BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING = 0x2, 277 + }; 278 + 279 + /* PSP runtime DB boot config entry */ 280 + struct psp_runtime_boot_cfg_entry { 281 + uint32_t boot_cfg_bitmask; 282 + uint32_t reserved; 228 283 }; 229 284 230 285 struct psp_context ··· 380 325 struct 
psp_securedisplay_context securedisplay_context; 381 326 struct mutex mutex; 382 327 struct psp_memory_training_context mem_train_ctx; 328 + 329 + uint32_t boot_cfg_bitmask; 383 330 }; 384 331 385 332 struct amdgpu_psp_funcs {
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 2122 2122 struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, 2123 2123 ras_counte_delay_work.work); 2124 2124 struct amdgpu_device *adev = con->adev; 2125 - struct drm_device *dev = &adev->ddev; 2125 + struct drm_device *dev = adev_to_drm(adev); 2126 2126 unsigned long ce_count, ue_count; 2127 2127 int res; 2128 2128
+21 -21
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
··· 257 257 container_of(hdr, struct psp_firmware_header_v1_0, header); 258 258 259 259 DRM_DEBUG("ucode_feature_version: %u\n", 260 - le32_to_cpu(psp_hdr->ucode_feature_version)); 260 + le32_to_cpu(psp_hdr->sos.fw_version)); 261 261 DRM_DEBUG("sos_offset_bytes: %u\n", 262 - le32_to_cpu(psp_hdr->sos_offset_bytes)); 262 + le32_to_cpu(psp_hdr->sos.offset_bytes)); 263 263 DRM_DEBUG("sos_size_bytes: %u\n", 264 - le32_to_cpu(psp_hdr->sos_size_bytes)); 264 + le32_to_cpu(psp_hdr->sos.size_bytes)); 265 265 if (version_minor == 1) { 266 266 const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 = 267 267 container_of(psp_hdr, struct psp_firmware_header_v1_1, v1_0); 268 268 DRM_DEBUG("toc_header_version: %u\n", 269 - le32_to_cpu(psp_hdr_v1_1->toc_header_version)); 269 + le32_to_cpu(psp_hdr_v1_1->toc.fw_version)); 270 270 DRM_DEBUG("toc_offset_bytes: %u\n", 271 - le32_to_cpu(psp_hdr_v1_1->toc_offset_bytes)); 271 + le32_to_cpu(psp_hdr_v1_1->toc.offset_bytes)); 272 272 DRM_DEBUG("toc_size_bytes: %u\n", 273 - le32_to_cpu(psp_hdr_v1_1->toc_size_bytes)); 273 + le32_to_cpu(psp_hdr_v1_1->toc.size_bytes)); 274 274 DRM_DEBUG("kdb_header_version: %u\n", 275 - le32_to_cpu(psp_hdr_v1_1->kdb_header_version)); 275 + le32_to_cpu(psp_hdr_v1_1->kdb.fw_version)); 276 276 DRM_DEBUG("kdb_offset_bytes: %u\n", 277 - le32_to_cpu(psp_hdr_v1_1->kdb_offset_bytes)); 277 + le32_to_cpu(psp_hdr_v1_1->kdb.offset_bytes)); 278 278 DRM_DEBUG("kdb_size_bytes: %u\n", 279 - le32_to_cpu(psp_hdr_v1_1->kdb_size_bytes)); 279 + le32_to_cpu(psp_hdr_v1_1->kdb.size_bytes)); 280 280 } 281 281 if (version_minor == 2) { 282 282 const struct psp_firmware_header_v1_2 *psp_hdr_v1_2 = 283 283 container_of(psp_hdr, struct psp_firmware_header_v1_2, v1_0); 284 284 DRM_DEBUG("kdb_header_version: %u\n", 285 - le32_to_cpu(psp_hdr_v1_2->kdb_header_version)); 285 + le32_to_cpu(psp_hdr_v1_2->kdb.fw_version)); 286 286 DRM_DEBUG("kdb_offset_bytes: %u\n", 287 - le32_to_cpu(psp_hdr_v1_2->kdb_offset_bytes)); 287 + 
le32_to_cpu(psp_hdr_v1_2->kdb.offset_bytes)); 288 288 DRM_DEBUG("kdb_size_bytes: %u\n", 289 - le32_to_cpu(psp_hdr_v1_2->kdb_size_bytes)); 289 + le32_to_cpu(psp_hdr_v1_2->kdb.size_bytes)); 290 290 } 291 291 if (version_minor == 3) { 292 292 const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 = ··· 294 294 const struct psp_firmware_header_v1_3 *psp_hdr_v1_3 = 295 295 container_of(psp_hdr_v1_1, struct psp_firmware_header_v1_3, v1_1); 296 296 DRM_DEBUG("toc_header_version: %u\n", 297 - le32_to_cpu(psp_hdr_v1_3->v1_1.toc_header_version)); 297 + le32_to_cpu(psp_hdr_v1_3->v1_1.toc.fw_version)); 298 298 DRM_DEBUG("toc_offset_bytes: %u\n", 299 - le32_to_cpu(psp_hdr_v1_3->v1_1.toc_offset_bytes)); 299 + le32_to_cpu(psp_hdr_v1_3->v1_1.toc.offset_bytes)); 300 300 DRM_DEBUG("toc_size_bytes: %u\n", 301 - le32_to_cpu(psp_hdr_v1_3->v1_1.toc_size_bytes)); 301 + le32_to_cpu(psp_hdr_v1_3->v1_1.toc.size_bytes)); 302 302 DRM_DEBUG("kdb_header_version: %u\n", 303 - le32_to_cpu(psp_hdr_v1_3->v1_1.kdb_header_version)); 303 + le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.fw_version)); 304 304 DRM_DEBUG("kdb_offset_bytes: %u\n", 305 - le32_to_cpu(psp_hdr_v1_3->v1_1.kdb_offset_bytes)); 305 + le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.offset_bytes)); 306 306 DRM_DEBUG("kdb_size_bytes: %u\n", 307 - le32_to_cpu(psp_hdr_v1_3->v1_1.kdb_size_bytes)); 307 + le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.size_bytes)); 308 308 DRM_DEBUG("spl_header_version: %u\n", 309 - le32_to_cpu(psp_hdr_v1_3->spl_header_version)); 309 + le32_to_cpu(psp_hdr_v1_3->spl.fw_version)); 310 310 DRM_DEBUG("spl_offset_bytes: %u\n", 311 - le32_to_cpu(psp_hdr_v1_3->spl_offset_bytes)); 311 + le32_to_cpu(psp_hdr_v1_3->spl.offset_bytes)); 312 312 DRM_DEBUG("spl_size_bytes: %u\n", 313 - le32_to_cpu(psp_hdr_v1_3->spl_size_bytes)); 313 + le32_to_cpu(psp_hdr_v1_3->spl.size_bytes)); 314 314 } 315 315 } else { 316 316 DRM_ERROR("Unknown PSP ucode version: %u.%u\n",
+15 -19
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
··· 71 71 uint32_t pptable_entry_offset; 72 72 }; 73 73 74 + struct psp_fw_bin_desc { 75 + uint32_t fw_version; 76 + uint32_t offset_bytes; 77 + uint32_t size_bytes; 78 + }; 79 + 74 80 /* version_major=1, version_minor=0 */ 75 81 struct psp_firmware_header_v1_0 { 76 82 struct common_firmware_header header; 77 - uint32_t ucode_feature_version; 78 - uint32_t sos_offset_bytes; 79 - uint32_t sos_size_bytes; 83 + struct psp_fw_bin_desc sos; 80 84 }; 81 85 82 86 /* version_major=1, version_minor=1 */ 83 87 struct psp_firmware_header_v1_1 { 84 88 struct psp_firmware_header_v1_0 v1_0; 85 - uint32_t toc_header_version; 86 - uint32_t toc_offset_bytes; 87 - uint32_t toc_size_bytes; 88 - uint32_t kdb_header_version; 89 - uint32_t kdb_offset_bytes; 90 - uint32_t kdb_size_bytes; 89 + struct psp_fw_bin_desc toc; 90 + struct psp_fw_bin_desc kdb; 91 91 }; 92 92 93 93 /* version_major=1, version_minor=2 */ 94 94 struct psp_firmware_header_v1_2 { 95 95 struct psp_firmware_header_v1_0 v1_0; 96 - uint32_t reserve[3]; 97 - uint32_t kdb_header_version; 98 - uint32_t kdb_offset_bytes; 99 - uint32_t kdb_size_bytes; 96 + struct psp_fw_bin_desc res; 97 + struct psp_fw_bin_desc kdb; 100 98 }; 101 99 102 100 /* version_major=1, version_minor=3 */ 103 101 struct psp_firmware_header_v1_3 { 104 102 struct psp_firmware_header_v1_1 v1_1; 105 - uint32_t spl_header_version; 106 - uint32_t spl_offset_bytes; 107 - uint32_t spl_size_bytes; 108 - uint32_t rl_header_version; 109 - uint32_t rl_offset_bytes; 110 - uint32_t rl_size_bytes; 103 + struct psp_fw_bin_desc spl; 104 + struct psp_fw_bin_desc rl; 105 + struct psp_fw_bin_desc sys_drv_aux; 106 + struct psp_fw_bin_desc sos_aux; 111 107 }; 112 108 113 109 /* version_major=1, version_minor=0 */
+81 -94
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 332 332 base->next = bo->vm_bo; 333 333 bo->vm_bo = base; 334 334 335 - if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv) 335 + if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv) 336 336 return; 337 337 338 338 vm->bulk_moveable = false; ··· 361 361 * Helper to get the parent entry for the child page table. NULL if we are at 362 362 * the root page directory. 363 363 */ 364 - static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt) 364 + static struct amdgpu_vm_bo_base *amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt) 365 365 { 366 - struct amdgpu_bo *parent = pt->base.bo->parent; 366 + struct amdgpu_bo *parent = pt->bo->parent; 367 367 368 368 if (!parent) 369 369 return NULL; 370 370 371 - return container_of(parent->vm_bo, struct amdgpu_vm_pt, base); 371 + return parent->vm_bo; 372 372 } 373 373 374 374 /* ··· 376 376 */ 377 377 struct amdgpu_vm_pt_cursor { 378 378 uint64_t pfn; 379 - struct amdgpu_vm_pt *parent; 380 - struct amdgpu_vm_pt *entry; 379 + struct amdgpu_vm_bo_base *parent; 380 + struct amdgpu_vm_bo_base *entry; 381 381 unsigned level; 382 382 }; 383 383 ··· 416 416 { 417 417 unsigned mask, shift, idx; 418 418 419 - if (!cursor->entry->entries) 419 + if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry || 420 + !cursor->entry->bo) 420 421 return false; 421 422 422 - BUG_ON(!cursor->entry->base.bo); 423 423 mask = amdgpu_vm_entries_mask(adev, cursor->level); 424 424 shift = amdgpu_vm_level_shift(adev, cursor->level); 425 425 426 426 ++cursor->level; 427 427 idx = (cursor->pfn >> shift) & mask; 428 428 cursor->parent = cursor->entry; 429 - cursor->entry = &cursor->entry->entries[idx]; 429 + cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx]; 430 430 return true; 431 431 } 432 432 ··· 453 453 shift = amdgpu_vm_level_shift(adev, cursor->level - 1); 454 454 num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1); 455 455 456 - if (cursor->entry == &cursor->parent->entries[num_entries - 1]) 456 + if 
(cursor->entry == &to_amdgpu_bo_vm(cursor->parent->bo)->entries[num_entries - 1]) 457 457 return false; 458 458 459 459 cursor->pfn += 1ULL << shift; ··· 539 539 * True when the search should continue, false otherwise. 540 540 */ 541 541 static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start, 542 - struct amdgpu_vm_pt *entry) 542 + struct amdgpu_vm_bo_base *entry) 543 543 { 544 544 return entry && (!start || entry != start->entry); 545 545 } ··· 590 590 struct amdgpu_bo_list_entry *entry) 591 591 { 592 592 entry->priority = 0; 593 - entry->tv.bo = &vm->root.base.bo->tbo; 593 + entry->tv.bo = &vm->root.bo->tbo; 594 594 /* Two for VM updates, one for TTM and one for the CS job */ 595 595 entry->tv.num_shared = 4; 596 596 entry->user_pages = NULL; ··· 622 622 for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) { 623 623 struct amdgpu_vm *vm = bo_base->vm; 624 624 625 - if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) 625 + if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv) 626 626 vm->bulk_moveable = false; 627 627 } 628 628 ··· 781 781 entries -= ats_entries; 782 782 783 783 } else { 784 - struct amdgpu_vm_pt *pt; 784 + struct amdgpu_vm_bo_base *pt; 785 785 786 - pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base); 786 + pt = ancestor->vm_bo; 787 787 ats_entries = amdgpu_vm_num_ats_entries(adev); 788 - if ((pt - vm->root.entries) >= ats_entries) { 788 + if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >= ats_entries) { 789 789 ats_entries = 0; 790 790 } else { 791 791 ats_entries = entries; ··· 902 902 903 903 bp.type = ttm_bo_type_kernel; 904 904 bp.no_wait_gpu = immediate; 905 - if (vm->root.base.bo) 906 - bp.resv = vm->root.base.bo->tbo.base.resv; 905 + if (vm->root.bo) 906 + bp.resv = vm->root.bo->tbo.base.resv; 907 907 908 908 r = amdgpu_bo_create_vm(adev, &bp, vmbo); 909 909 if (r) ··· 938 938 } 939 939 940 940 (*vmbo)->shadow->parent = amdgpu_bo_ref(bo); 941 - amdgpu_bo_add_to_shadow_list((*vmbo)->shadow); 941 + 
amdgpu_bo_add_to_shadow_list(*vmbo); 942 942 943 943 return 0; 944 944 } ··· 962 962 struct amdgpu_vm_pt_cursor *cursor, 963 963 bool immediate) 964 964 { 965 - struct amdgpu_vm_pt *entry = cursor->entry; 965 + struct amdgpu_vm_bo_base *entry = cursor->entry; 966 966 struct amdgpu_bo *pt_bo; 967 967 struct amdgpu_bo_vm *pt; 968 968 int r; 969 969 970 - if (entry->base.bo) { 971 - if (cursor->level < AMDGPU_VM_PTB) 972 - entry->entries = 973 - to_amdgpu_bo_vm(entry->base.bo)->entries; 974 - else 975 - entry->entries = NULL; 970 + if (entry->bo) 976 971 return 0; 977 - } 978 972 979 973 r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt); 980 974 if (r) ··· 978 984 * freeing them up in the wrong order. 979 985 */ 980 986 pt_bo = &pt->bo; 981 - pt_bo->parent = amdgpu_bo_ref(cursor->parent->base.bo); 982 - amdgpu_vm_bo_base_init(&entry->base, vm, pt_bo); 983 - if (cursor->level < AMDGPU_VM_PTB) 984 - entry->entries = pt->entries; 985 - else 986 - entry->entries = NULL; 987 - 987 + pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo); 988 + amdgpu_vm_bo_base_init(entry, vm, pt_bo); 988 989 r = amdgpu_vm_clear_bo(adev, vm, pt, immediate); 989 990 if (r) 990 991 goto error_free_pt; ··· 997 1008 * 998 1009 * @entry: PDE to free 999 1010 */ 1000 - static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry) 1011 + static void amdgpu_vm_free_table(struct amdgpu_vm_bo_base *entry) 1001 1012 { 1002 1013 struct amdgpu_bo *shadow; 1003 1014 1004 - if (entry->base.bo) { 1005 - shadow = amdgpu_bo_shadowed(entry->base.bo); 1006 - entry->base.bo->vm_bo = NULL; 1007 - list_del(&entry->base.vm_status); 1008 - amdgpu_bo_unref(&shadow); 1009 - amdgpu_bo_unref(&entry->base.bo); 1010 - } 1011 - entry->entries = NULL; 1015 + if (!entry->bo) 1016 + return; 1017 + shadow = amdgpu_bo_shadowed(entry->bo); 1018 + entry->bo->vm_bo = NULL; 1019 + list_del(&entry->vm_status); 1020 + amdgpu_bo_unref(&shadow); 1021 + amdgpu_bo_unref(&entry->bo); 1012 1022 } 1013 1023 1014 1024 /** ··· 1024 
1036 struct amdgpu_vm_pt_cursor *start) 1025 1037 { 1026 1038 struct amdgpu_vm_pt_cursor cursor; 1027 - struct amdgpu_vm_pt *entry; 1039 + struct amdgpu_vm_bo_base *entry; 1028 1040 1029 1041 vm->bulk_moveable = false; 1030 1042 ··· 1292 1304 */ 1293 1305 static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params, 1294 1306 struct amdgpu_vm *vm, 1295 - struct amdgpu_vm_pt *entry) 1307 + struct amdgpu_vm_bo_base *entry) 1296 1308 { 1297 - struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry); 1298 - struct amdgpu_bo *bo = parent->base.bo, *pbo; 1309 + struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry); 1310 + struct amdgpu_bo *bo = parent->bo, *pbo; 1299 1311 uint64_t pde, pt, flags; 1300 1312 unsigned level; 1301 1313 ··· 1303 1315 pbo = pbo->parent; 1304 1316 1305 1317 level += params->adev->vm_manager.root_level; 1306 - amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags); 1307 - pde = (entry - parent->entries) * 8; 1318 + amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags); 1319 + pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8; 1308 1320 return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt, 1309 1321 1, 0, flags); 1310 1322 } ··· 1321 1333 struct amdgpu_vm *vm) 1322 1334 { 1323 1335 struct amdgpu_vm_pt_cursor cursor; 1324 - struct amdgpu_vm_pt *entry; 1336 + struct amdgpu_vm_bo_base *entry; 1325 1337 1326 1338 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) 1327 - if (entry->base.bo && !entry->base.moved) 1328 - amdgpu_vm_bo_relocated(&entry->base); 1339 + if (entry->bo && !entry->moved) 1340 + amdgpu_vm_bo_relocated(entry); 1329 1341 } 1330 1342 1331 1343 /** ··· 1359 1371 return r; 1360 1372 1361 1373 while (!list_empty(&vm->relocated)) { 1362 - struct amdgpu_vm_pt *entry; 1374 + struct amdgpu_vm_bo_base *entry; 1363 1375 1364 - entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt, 1365 - base.vm_status); 1366 - amdgpu_vm_bo_idle(&entry->base); 1376 + entry = 
list_first_entry(&vm->relocated, 1377 + struct amdgpu_vm_bo_base, 1378 + vm_status); 1379 + amdgpu_vm_bo_idle(entry); 1367 1380 1368 1381 r = amdgpu_vm_update_pde(&params, vm, entry); 1369 1382 if (r) ··· 1544 1555 continue; 1545 1556 } 1546 1557 1547 - pt = cursor.entry->base.bo; 1558 + pt = cursor.entry->bo; 1548 1559 if (!pt) { 1549 1560 /* We need all PDs and PTs for mapping something, */ 1550 1561 if (flags & AMDGPU_PTE_VALID) ··· 1556 1567 if (!amdgpu_vm_pt_ancestor(&cursor)) 1557 1568 return -EINVAL; 1558 1569 1559 - pt = cursor.entry->base.bo; 1570 + pt = cursor.entry->bo; 1560 1571 shift = parent_shift; 1561 1572 frag_end = max(frag_end, ALIGN(frag_start + 1, 1562 1573 1ULL << shift)); ··· 1611 1622 */ 1612 1623 while (cursor.pfn < frag_start) { 1613 1624 /* Make sure previous mapping is freed */ 1614 - if (cursor.entry->base.bo) { 1625 + if (cursor.entry->bo) { 1615 1626 params->table_freed = true; 1616 1627 amdgpu_vm_free_pts(adev, params->vm, &cursor); 1617 1628 } ··· 1693 1704 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) { 1694 1705 struct dma_fence *tmp = dma_fence_get_stub(); 1695 1706 1696 - amdgpu_bo_fence(vm->root.base.bo, vm->last_unlocked, true); 1707 + amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true); 1697 1708 swap(vm->last_unlocked, tmp); 1698 1709 dma_fence_put(tmp); 1699 1710 } ··· 1839 1850 1840 1851 if (clear || !bo) { 1841 1852 mem = NULL; 1842 - resv = vm->root.base.bo->tbo.base.resv; 1853 + resv = vm->root.bo->tbo.base.resv; 1843 1854 } else { 1844 1855 struct drm_gem_object *obj = &bo->tbo.base; 1845 1856 ··· 1870 1881 } 1871 1882 1872 1883 if (clear || (bo && bo->tbo.base.resv == 1873 - vm->root.base.bo->tbo.base.resv)) 1884 + vm->root.bo->tbo.base.resv)) 1874 1885 last_update = &vm->last_update; 1875 1886 else 1876 1887 last_update = &bo_va->last_pt_update; ··· 1912 1923 * the evicted list so that it gets validated again on the 1913 1924 * next command submission. 
1914 1925 */ 1915 - if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { 1926 + if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { 1916 1927 uint32_t mem_type = bo->tbo.resource->mem_type; 1917 1928 1918 1929 if (!(bo->preferred_domains & ··· 2049 2060 */ 2050 2061 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) 2051 2062 { 2052 - struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; 2063 + struct dma_resv *resv = vm->root.bo->tbo.base.resv; 2053 2064 struct dma_fence *excl, **shared; 2054 2065 unsigned i, shared_count; 2055 2066 int r; ··· 2095 2106 struct amdgpu_vm *vm, 2096 2107 struct dma_fence **fence) 2097 2108 { 2098 - struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; 2109 + struct dma_resv *resv = vm->root.bo->tbo.base.resv; 2099 2110 struct amdgpu_bo_va_mapping *mapping; 2100 2111 uint64_t init_pte_value = 0; 2101 2112 struct dma_fence *f = NULL; ··· 2254 2265 if (mapping->flags & AMDGPU_PTE_PRT) 2255 2266 amdgpu_vm_prt_get(adev); 2256 2267 2257 - if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv && 2268 + if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && 2258 2269 !bo_va->base.moved) { 2259 2270 list_move(&bo_va->base.vm_status, &vm->moved); 2260 2271 } ··· 2616 2627 struct amdgpu_vm_bo_base **base; 2617 2628 2618 2629 if (bo) { 2619 - if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) 2630 + if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) 2620 2631 vm->bulk_moveable = false; 2621 2632 2622 2633 for (base = &bo_va->base.bo->vm_bo; *base; ··· 2710 2721 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { 2711 2722 struct amdgpu_vm *vm = bo_base->vm; 2712 2723 2713 - if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { 2724 + if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { 2714 2725 amdgpu_vm_bo_evicted(bo_base); 2715 2726 continue; 2716 2727 } ··· 2721 2732 2722 2733 if (bo->tbo.type == ttm_bo_type_kernel) 2723 
2734 amdgpu_vm_bo_relocated(bo_base); 2724 - else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) 2735 + else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) 2725 2736 amdgpu_vm_bo_moved(bo_base); 2726 2737 else 2727 2738 amdgpu_vm_bo_invalidated(bo_base); ··· 2851 2862 */ 2852 2863 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) 2853 2864 { 2854 - timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true, 2865 + timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true, 2855 2866 true, timeout); 2856 2867 if (timeout <= 0) 2857 2868 return timeout; ··· 2937 2948 if (r) 2938 2949 goto error_unreserve; 2939 2950 2940 - amdgpu_vm_bo_base_init(&vm->root.base, vm, root_bo); 2951 + amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); 2941 2952 2942 2953 r = amdgpu_vm_clear_bo(adev, vm, root, false); 2943 2954 if (r) 2944 2955 goto error_unreserve; 2945 2956 2946 - amdgpu_bo_unreserve(vm->root.base.bo); 2957 + amdgpu_bo_unreserve(vm->root.bo); 2947 2958 2948 2959 if (pasid) { 2949 2960 unsigned long flags; ··· 2963 2974 return 0; 2964 2975 2965 2976 error_unreserve: 2966 - amdgpu_bo_unreserve(vm->root.base.bo); 2977 + amdgpu_bo_unreserve(vm->root.bo); 2967 2978 2968 2979 error_free_root: 2969 2980 amdgpu_bo_unref(&root->shadow); 2970 2981 amdgpu_bo_unref(&root_bo); 2971 - vm->root.base.bo = NULL; 2982 + vm->root.bo = NULL; 2972 2983 2973 2984 error_free_delayed: 2974 2985 dma_fence_put(vm->last_unlocked); ··· 2994 3005 * 0 if this VM is clean 2995 3006 */ 2996 3007 static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev, 2997 - struct amdgpu_vm *vm) 3008 + struct amdgpu_vm *vm) 2998 3009 { 2999 3010 enum amdgpu_vm_level root = adev->vm_manager.root_level; 3000 3011 unsigned int entries = amdgpu_vm_num_entries(adev, root); 3001 3012 unsigned int i = 0; 3002 3013 3003 - if (!(vm->root.entries)) 3004 - return 0; 3005 - 3006 3014 for (i = 0; i < entries; i++) { 3007 - if (vm->root.entries[i].base.bo) 3015 + if 
(to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo) 3008 3016 return -EINVAL; 3009 3017 } 3010 3018 ··· 3035 3049 bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); 3036 3050 int r; 3037 3051 3038 - r = amdgpu_bo_reserve(vm->root.base.bo, true); 3052 + r = amdgpu_bo_reserve(vm->root.bo, true); 3039 3053 if (r) 3040 3054 return r; 3041 3055 ··· 3063 3077 if (pte_support_ats != vm->pte_support_ats) { 3064 3078 vm->pte_support_ats = pte_support_ats; 3065 3079 r = amdgpu_vm_clear_bo(adev, vm, 3066 - to_amdgpu_bo_vm(vm->root.base.bo), 3080 + to_amdgpu_bo_vm(vm->root.bo), 3067 3081 false); 3068 3082 if (r) 3069 3083 goto free_idr; ··· 3080 3094 3081 3095 if (vm->use_cpu_for_update) { 3082 3096 /* Sync with last SDMA update/clear before switching to CPU */ 3083 - r = amdgpu_bo_sync_wait(vm->root.base.bo, 3097 + r = amdgpu_bo_sync_wait(vm->root.bo, 3084 3098 AMDGPU_FENCE_OWNER_UNDEFINED, true); 3085 3099 if (r) 3086 3100 goto free_idr; ··· 3108 3122 } 3109 3123 3110 3124 /* Free the shadow bo for compute VM */ 3111 - amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.base.bo)->shadow); 3125 + amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow); 3112 3126 3113 3127 if (pasid) 3114 3128 vm->pasid = pasid; ··· 3124 3138 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 3125 3139 } 3126 3140 unreserve_bo: 3127 - amdgpu_bo_unreserve(vm->root.base.bo); 3141 + amdgpu_bo_unreserve(vm->root.bo); 3128 3142 return r; 3129 3143 } 3130 3144 ··· 3167 3181 3168 3182 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); 3169 3183 3170 - root = amdgpu_bo_ref(vm->root.base.bo); 3184 + root = amdgpu_bo_ref(vm->root.bo); 3171 3185 amdgpu_bo_reserve(root, true); 3172 3186 if (vm->pasid) { 3173 3187 unsigned long flags; ··· 3194 3208 amdgpu_vm_free_pts(adev, vm, NULL); 3195 3209 amdgpu_bo_unreserve(root); 3196 3210 amdgpu_bo_unref(&root); 3197 - WARN_ON(vm->root.base.bo); 3211 + WARN_ON(vm->root.bo); 3198 3212 3199 3213 drm_sched_entity_destroy(&vm->immediate); 3200 3214 
drm_sched_entity_destroy(&vm->delayed); ··· 3311 3325 /* Wait vm idle to make sure the vmid set in SPM_VMID is 3312 3326 * not referenced anymore. 3313 3327 */ 3314 - r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true); 3328 + r = amdgpu_bo_reserve(fpriv->vm.root.bo, true); 3315 3329 if (r) 3316 3330 return r; 3317 3331 ··· 3319 3333 if (r < 0) 3320 3334 return r; 3321 3335 3322 - amdgpu_bo_unreserve(fpriv->vm.root.base.bo); 3336 + amdgpu_bo_unreserve(fpriv->vm.root.bo); 3323 3337 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); 3324 3338 break; 3325 3339 default: ··· 3385 3399 { 3386 3400 bool is_compute_context = false; 3387 3401 struct amdgpu_bo *root; 3402 + unsigned long irqflags; 3388 3403 uint64_t value, flags; 3389 3404 struct amdgpu_vm *vm; 3390 3405 int r; 3391 3406 3392 - spin_lock(&adev->vm_manager.pasid_lock); 3407 + spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags); 3393 3408 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3394 3409 if (vm) { 3395 - root = amdgpu_bo_ref(vm->root.base.bo); 3410 + root = amdgpu_bo_ref(vm->root.bo); 3396 3411 is_compute_context = vm->is_compute_context; 3397 3412 } else { 3398 3413 root = NULL; 3399 3414 } 3400 - spin_unlock(&adev->vm_manager.pasid_lock); 3415 + spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags); 3401 3416 3402 3417 if (!root) 3403 3418 return false; ··· 3416 3429 goto error_unref; 3417 3430 3418 3431 /* Double check that the VM still exists */ 3419 - spin_lock(&adev->vm_manager.pasid_lock); 3432 + spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags); 3420 3433 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3421 - if (vm && vm->root.base.bo != root) 3434 + if (vm && vm->root.bo != root) 3422 3435 vm = NULL; 3423 - spin_unlock(&adev->vm_manager.pasid_lock); 3436 + spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags); 3424 3437 if (!vm) 3425 3438 goto error_unlock; 3426 3439
+1 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
··· 152 152 bool moved; 153 153 }; 154 154 155 - struct amdgpu_vm_pt { 156 - struct amdgpu_vm_bo_base base; 157 - 158 - /* array of page tables, one for each directory entry */ 159 - struct amdgpu_vm_pt *entries; 160 - }; 161 - 162 155 /* provided by hw blocks that can write ptes, e.g., sdma */ 163 156 struct amdgpu_vm_pte_funcs { 164 157 /* number of dw to reserve per operation */ ··· 277 284 struct list_head done; 278 285 279 286 /* contains the page directory */ 280 - struct amdgpu_vm_pt root; 287 + struct amdgpu_vm_bo_base root; 281 288 struct dma_fence *last_update; 282 289 283 290 /* Scheduler entities for page table updates */
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
··· 112 112 swap(p->vm->last_unlocked, f); 113 113 dma_fence_put(tmp); 114 114 } else { 115 - amdgpu_bo_fence(p->vm->root.base.bo, f, true); 115 + amdgpu_bo_fence(p->vm->root.bo, f, true); 116 116 } 117 117 118 118 if (fence && !p->immediate)
+5 -1
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 6970 6970 if (ring->use_doorbell) { 6971 6971 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER, 6972 6972 (adev->doorbell_index.kiq * 2) << 2); 6973 + /* If GC has entered CGPG, ringing doorbell > first page doesn't 6974 + * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround 6975 + * this issue. 6976 + */ 6973 6977 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER, 6974 - (adev->doorbell_index.userqueue_end * 2) << 2); 6978 + (adev->doorbell.size - 4)); 6975 6979 } 6976 6980 6977 6981 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
+5 -1
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 3675 3675 if (ring->use_doorbell) { 3676 3676 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER, 3677 3677 (adev->doorbell_index.kiq * 2) << 2); 3678 + /* If GC has entered CGPG, ringing doorbell > first page doesn't 3679 + * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround 3680 + * this issue. 3681 + */ 3678 3682 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER, 3679 - (adev->doorbell_index.userqueue_end * 2) << 2); 3683 + (adev->doorbell.size - 4)); 3680 3684 } 3681 3685 3682 3686 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
+9 -4
drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
··· 332 332 uint32_t fwar_db_addr_hi; 333 333 }; 334 334 335 + /* Command-specific response for boot config. */ 336 + struct psp_gfx_uresp_bootcfg { 337 + uint32_t boot_cfg; /* boot config data */ 338 + }; 339 + 335 340 /* Union of command-specific responses for GPCOM ring. */ 336 - union psp_gfx_uresp 337 - { 338 - struct psp_gfx_uresp_reserved reserved; 339 - struct psp_gfx_uresp_fwar_db_info fwar_db_info; 341 + union psp_gfx_uresp { 342 + struct psp_gfx_uresp_reserved reserved; 343 + struct psp_gfx_uresp_bootcfg boot_cfg; 344 + struct psp_gfx_uresp_fwar_db_info fwar_db_info; 340 345 }; 341 346 342 347 /* Structure of GFX Response buffer.
+1
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
··· 461 461 struct amdgpu_device *adev = psp->adev; 462 462 463 463 if (amdgpu_sriov_vf(adev)) { 464 + ring->ring_wptr = 0; 464 465 ret = psp_v11_0_ring_stop(psp, ring_type); 465 466 if (ret) { 466 467 DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n");
+1
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
··· 227 227 psp_v3_1_reroute_ih(psp); 228 228 229 229 if (amdgpu_sriov_vf(adev)) { 230 + ring->ring_wptr = 0; 230 231 ret = psp_v3_1_ring_stop(psp, ring_type); 231 232 if (ret) { 232 233 DRM_ERROR("psp_v3_1_ring_stop_sriov failed!\n");
+20 -6
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 1566 1566 i, args->n_devices); 1567 1567 goto unmap_memory_from_gpu_failed; 1568 1568 } 1569 - kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT); 1570 1569 args->n_success = i+1; 1571 1570 } 1572 - kfree(devices_arr); 1573 - 1574 1571 mutex_unlock(&p->mutex); 1572 + 1573 + err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true); 1574 + if (err) { 1575 + pr_debug("Sync memory failed, wait interrupted by user signal\n"); 1576 + goto sync_memory_failed; 1577 + } 1578 + 1579 + /* Flush TLBs after waiting for the page table updates to complete */ 1580 + for (i = 0; i < args->n_devices; i++) { 1581 + peer = kfd_device_by_id(devices_arr[i]); 1582 + if (WARN_ON_ONCE(!peer)) 1583 + continue; 1584 + peer_pdd = kfd_get_process_device_data(peer, p); 1585 + if (WARN_ON_ONCE(!peer_pdd)) 1586 + continue; 1587 + kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT); 1588 + } 1589 + 1590 + kfree(devices_arr); 1575 1591 1576 1592 return 0; 1577 1593 ··· 1596 1580 unmap_memory_from_gpu_failed: 1597 1581 mutex_unlock(&p->mutex); 1598 1582 copy_from_user_failed: 1583 + sync_memory_failed: 1599 1584 kfree(devices_arr); 1600 1585 return err; 1601 1586 } ··· 1796 1779 { 1797 1780 struct kfd_ioctl_svm_args *args = data; 1798 1781 int r = 0; 1799 - 1800 - if (p->svm_disabled) 1801 - return -EPERM; 1802 1782 1803 1783 pr_debug("start 0x%llx size 0x%llx op 0x%x nattr 0x%x\n", 1804 1784 args->start_addr, args->size, args->op, args->nattr);
+26 -20
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
··· 486 486 if (retval == -ETIME) 487 487 qpd->reset_wavefronts = true; 488 488 489 - 490 - mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); 491 - 492 489 list_del(&q->list); 493 490 if (list_empty(&qpd->queues_list)) { 494 491 if (qpd->reset_wavefronts) { ··· 520 523 int retval; 521 524 uint64_t sdma_val = 0; 522 525 struct kfd_process_device *pdd = qpd_to_pdd(qpd); 526 + struct mqd_manager *mqd_mgr = 527 + dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)]; 523 528 524 529 /* Get the SDMA queue stats */ 525 530 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) || ··· 538 539 if (!retval) 539 540 pdd->sdma_past_activity_counter += sdma_val; 540 541 dqm_unlock(dqm); 542 + 543 + mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); 541 544 542 545 return retval; 543 546 } ··· 1630 1629 static int process_termination_nocpsch(struct device_queue_manager *dqm, 1631 1630 struct qcm_process_device *qpd) 1632 1631 { 1633 - struct queue *q, *next; 1632 + struct queue *q; 1634 1633 struct device_process_node *cur, *next_dpn; 1635 1634 int retval = 0; 1636 1635 bool found = false; ··· 1638 1637 dqm_lock(dqm); 1639 1638 1640 1639 /* Clear all user mode queues */ 1641 - list_for_each_entry_safe(q, next, &qpd->queues_list, list) { 1640 + while (!list_empty(&qpd->queues_list)) { 1641 + struct mqd_manager *mqd_mgr; 1642 1642 int ret; 1643 1643 1644 + q = list_first_entry(&qpd->queues_list, struct queue, list); 1645 + mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( 1646 + q->properties.type)]; 1644 1647 ret = destroy_queue_nocpsch_locked(dqm, qpd, q); 1645 1648 if (ret) 1646 1649 retval = ret; 1650 + dqm_unlock(dqm); 1651 + mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); 1652 + dqm_lock(dqm); 1647 1653 } 1648 1654 1649 1655 /* Unregister process */ ··· 1682 1674 u32 *save_area_used_size) 1683 1675 { 1684 1676 struct mqd_manager *mqd_mgr; 1685 - int r; 1686 1677 1687 1678 dqm_lock(dqm); 1688 1679 1689 - if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE || 1690 
- q->properties.is_active || !q->device->cwsr_enabled) { 1691 - r = -EINVAL; 1692 - goto dqm_unlock; 1693 - } 1694 - 1695 1680 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; 1696 1681 1697 - if (!mqd_mgr->get_wave_state) { 1698 - r = -EINVAL; 1699 - goto dqm_unlock; 1682 + if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE || 1683 + q->properties.is_active || !q->device->cwsr_enabled || 1684 + !mqd_mgr->get_wave_state) { 1685 + dqm_unlock(dqm); 1686 + return -EINVAL; 1700 1687 } 1701 1688 1702 - r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack, 1703 - ctl_stack_used_size, save_area_used_size); 1704 - 1705 - dqm_unlock: 1706 1689 dqm_unlock(dqm); 1707 - return r; 1690 + 1691 + /* 1692 + * get_wave_state is outside the dqm lock to prevent circular locking 1693 + * and the queue should be protected against destruction by the process 1694 + * lock. 1695 + */ 1696 + return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack, 1697 + ctl_stack_used_size, save_area_used_size); 1708 1698 } 1709 1699 1710 1700 static int process_termination_cpsch(struct device_queue_manager *dqm,
-4
drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
··· 405 405 case CHIP_POLARIS12: 406 406 case CHIP_VEGAM: 407 407 kfd_init_apertures_vi(pdd, id); 408 - /* VI GPUs cannot support SVM with only 409 - * 40 bits of virtual address space. 410 - */ 411 - process->svm_disabled = true; 412 408 break; 413 409 case CHIP_VEGA10: 414 410 case CHIP_VEGA12:
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
··· 743 743 spinlock_t deferred_list_lock; 744 744 atomic_t evicted_ranges; 745 745 struct delayed_work restore_work; 746 + DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE); 746 747 }; 747 748 748 749 /* Process data */ ··· 827 826 828 827 /* shared virtual memory registered by this process */ 829 828 struct svm_range_list svms; 830 - bool svm_disabled; 831 829 832 830 bool xnack_enabled; 833 831 };
-1
drivers/gpu/drm/amd/amdkfd/kfd_process.c
··· 1260 1260 process->mm = thread->mm; 1261 1261 process->lead_thread = thread->group_leader; 1262 1262 process->n_pdds = 0; 1263 - process->svm_disabled = false; 1264 1263 INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker); 1265 1264 INIT_DELAYED_WORK(&process->restore_work, restore_process_worker); 1266 1265 process->last_restore_timestamp = get_jiffies_64();
+36 -26
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
··· 281 281 282 282 p = container_of(svms, struct kfd_process, svms); 283 283 if (p->xnack_enabled) 284 - bitmap_fill(prange->bitmap_access, MAX_GPU_INSTANCE); 284 + bitmap_copy(prange->bitmap_access, svms->bitmap_supported, 285 + MAX_GPU_INSTANCE); 285 286 286 287 svm_range_set_default_attributes(&prange->preferred_loc, 287 288 &prange->prefetch_loc, ··· 578 577 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs) 579 578 { 580 579 uint32_t i; 581 - int gpuidx; 582 580 583 581 for (i = 0; i < nattr; i++) { 582 + uint32_t val = attrs[i].value; 583 + int gpuidx = MAX_GPU_INSTANCE; 584 + 584 585 switch (attrs[i].type) { 585 586 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: 586 - if (attrs[i].value != KFD_IOCTL_SVM_LOCATION_SYSMEM && 587 - attrs[i].value != KFD_IOCTL_SVM_LOCATION_UNDEFINED && 588 - kfd_process_gpuidx_from_gpuid(p, 589 - attrs[i].value) < 0) { 590 - pr_debug("no GPU 0x%x found\n", attrs[i].value); 591 - return -EINVAL; 592 - } 587 + if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM && 588 + val != KFD_IOCTL_SVM_LOCATION_UNDEFINED) 589 + gpuidx = kfd_process_gpuidx_from_gpuid(p, val); 593 590 break; 594 591 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: 595 - if (attrs[i].value != KFD_IOCTL_SVM_LOCATION_SYSMEM && 596 - kfd_process_gpuidx_from_gpuid(p, 597 - attrs[i].value) < 0) { 598 - pr_debug("no GPU 0x%x found\n", attrs[i].value); 599 - return -EINVAL; 600 - } 592 + if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM) 593 + gpuidx = kfd_process_gpuidx_from_gpuid(p, val); 601 594 break; 602 595 case KFD_IOCTL_SVM_ATTR_ACCESS: 603 596 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: 604 597 case KFD_IOCTL_SVM_ATTR_NO_ACCESS: 605 - gpuidx = kfd_process_gpuidx_from_gpuid(p, 606 - attrs[i].value); 607 - if (gpuidx < 0) { 608 - pr_debug("no GPU 0x%x found\n", attrs[i].value); 609 - return -EINVAL; 610 - } 598 + gpuidx = kfd_process_gpuidx_from_gpuid(p, val); 611 599 break; 612 600 case KFD_IOCTL_SVM_ATTR_SET_FLAGS: 613 601 break; ··· 606 616 break; 607 617 default: 608 618 pr_debug("unknown attr 
type 0x%x\n", attrs[i].type); 619 + return -EINVAL; 620 + } 621 + 622 + if (gpuidx < 0) { 623 + pr_debug("no GPU 0x%x found\n", val); 624 + return -EINVAL; 625 + } else if (gpuidx < MAX_GPU_INSTANCE && 626 + !test_bit(gpuidx, p->svms.bitmap_supported)) { 627 + pr_debug("GPU 0x%x not supported\n", val); 609 628 return -EINVAL; 610 629 } 611 630 } ··· 1273 1274 adev = (struct amdgpu_device *)pdd->dev->kgd; 1274 1275 vm = drm_priv_to_vm(pdd->drm_priv); 1275 1276 1276 - ctx->tv[gpuidx].bo = &vm->root.base.bo->tbo; 1277 + ctx->tv[gpuidx].bo = &vm->root.bo->tbo; 1277 1278 ctx->tv[gpuidx].num_shared = 4; 1278 1279 list_add(&ctx->tv[gpuidx].head, &ctx->validate_list); 1279 1280 } ··· 1854 1855 1855 1856 p = container_of(svms, struct kfd_process, svms); 1856 1857 1857 - for (i = 0; i < p->n_pdds; i++) { 1858 + for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) { 1858 1859 pdd = p->pdds[i]; 1859 1860 if (!pdd) 1860 1861 continue; ··· 2324 2325 bool write_locked = false; 2325 2326 int r = 0; 2326 2327 2328 + if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) { 2329 + pr_debug("device does not support SVM\n"); 2330 + return -EFAULT; 2331 + } 2332 + 2327 2333 p = kfd_lookup_process_by_pasid(pasid); 2328 2334 if (!p) { 2329 2335 pr_debug("kfd process not founded pasid 0x%x\n", pasid); ··· 2476 2472 int svm_range_list_init(struct kfd_process *p) 2477 2473 { 2478 2474 struct svm_range_list *svms = &p->svms; 2475 + int i; 2479 2476 2480 2477 svms->objects = RB_ROOT_CACHED; 2481 2478 mutex_init(&svms->lock); ··· 2486 2481 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work); 2487 2482 INIT_LIST_HEAD(&svms->deferred_range_list); 2488 2483 spin_lock_init(&svms->deferred_list_lock); 2484 + 2485 + for (i = 0; i < p->n_pdds; i++) 2486 + if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev)) 2487 + bitmap_set(svms->bitmap_supported, i, 1); 2489 2488 2490 2489 return 0; 2491 2490 } ··· 2987 2978 svm_range_set_default_attributes(&location, &prefetch_loc, 2988 2979 &granularity, 
&flags); 2989 2980 if (p->xnack_enabled) 2990 - bitmap_fill(bitmap_access, MAX_GPU_INSTANCE); 2981 + bitmap_copy(bitmap_access, svms->bitmap_supported, 2982 + MAX_GPU_INSTANCE); 2991 2983 else 2992 2984 bitmap_zero(bitmap_access, MAX_GPU_INSTANCE); 2993 2985 bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE); 2994 2986 goto fill_values; 2995 2987 } 2996 - bitmap_fill(bitmap_access, MAX_GPU_INSTANCE); 2997 - bitmap_fill(bitmap_aip, MAX_GPU_INSTANCE); 2988 + bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE); 2989 + bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE); 2998 2990 2999 2991 while (node) { 3000 2992 struct interval_tree_node *next;
+7
drivers/gpu/drm/amd/amdkfd/kfd_svm.h
··· 175 175 void svm_range_free_dma_mappings(struct svm_range *prange); 176 176 void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm); 177 177 178 + /* SVM API and HMM page migration work together, device memory type 179 + * is initialized to not 0 when page migration register device memory. 180 + */ 181 + #define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0) 182 + 178 183 #else 179 184 180 185 struct kfd_process; ··· 205 200 WARN_ONCE(1, "SVM eviction fence triggered, but SVM is disabled"); 206 201 return -EINVAL; 207 202 } 203 + 204 + #define KFD_IS_SVM_API_SUPPORTED(dev) false 208 205 209 206 #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */ 210 207
+2 -4
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
··· 36 36 #include "kfd_topology.h" 37 37 #include "kfd_device_queue_manager.h" 38 38 #include "kfd_iommu.h" 39 + #include "kfd_svm.h" 39 40 #include "amdgpu_amdkfd.h" 40 41 #include "amdgpu_ras.h" 41 42 ··· 1442 1441 dev->node_props.capability |= (adev->ras_enabled != 0) ? 1443 1442 HSA_CAP_RASEVENTNOTIFY : 0; 1444 1443 1445 - /* SVM API and HMM page migration work together, device memory type 1446 - * is initialized to not 0 when page migration register device memory. 1447 - */ 1448 - if (adev->kfd.dev->pgmap.type != 0) 1444 + if (KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) 1449 1445 dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED; 1450 1446 1451 1447 kfd_debug_print_topology();
+3 -4
drivers/gpu/drm/amd/amdkfd/kfd_topology.h
··· 54 54 #define HSA_CAP_ASIC_REVISION_SHIFT 22 55 55 #define HSA_CAP_SRAM_EDCSUPPORTED 0x04000000 56 56 #define HSA_CAP_SVMAPI_SUPPORTED 0x08000000 57 - 58 - #define HSA_CAP_RESERVED 0xf00f8000 57 + #define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000 58 + #define HSA_CAP_RESERVED 0xe00f8000 59 59 60 60 struct kfd_node_properties { 61 61 uint64_t hive_id; ··· 101 101 102 102 #define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001 103 103 #define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002 104 - #define HSA_MEM_FLAGS_COHERENTHOSTACCESS 0x00000004 105 - #define HSA_MEM_FLAGS_RESERVED 0xfffffff8 104 + #define HSA_MEM_FLAGS_RESERVED 0xfffffffc 106 105 107 106 struct kfd_mem_properties { 108 107 struct list_head list;
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
··· 28 28 AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o 29 29 30 30 ifneq ($(CONFIG_DRM_AMD_DC),) 31 - AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o 31 + AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o 32 32 endif 33 33 34 34 ifdef CONFIG_DRM_AMD_DC_HDCP
+5 -136
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 58 58 #if defined(CONFIG_DEBUG_FS) 59 59 #include "amdgpu_dm_debugfs.h" 60 60 #endif 61 + #include "amdgpu_dm_psr.h" 61 62 62 63 #include "ivsrcid/ivsrcid_vislands30.h" 63 64 ··· 213 212 214 213 static void handle_cursor_update(struct drm_plane *plane, 215 214 struct drm_plane_state *old_plane_state); 216 - 217 - static void amdgpu_dm_set_psr_caps(struct dc_link *link); 218 - static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream); 219 - static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream); 220 - static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream); 221 - static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm); 222 215 223 216 static const struct drm_format_info * 224 217 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd); ··· 10240 10245 dm_old_crtc_state->dsc_force_changed == false) 10241 10246 continue; 10242 10247 10248 + ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); 10249 + if (ret) 10250 + goto fail; 10251 + 10243 10252 if (!new_crtc_state->enable) 10244 10253 continue; 10245 10254 ··· 10722 10723 if (connector->vrr_capable_property) 10723 10724 drm_connector_set_vrr_capable_property(connector, 10724 10725 freesync_capable); 10725 - } 10726 - 10727 - static void amdgpu_dm_set_psr_caps(struct dc_link *link) 10728 - { 10729 - uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE]; 10730 - 10731 - if (!(link->connector_signal & SIGNAL_TYPE_EDP)) 10732 - return; 10733 - if (link->type == dc_connection_none) 10734 - return; 10735 - if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT, 10736 - dpcd_data, sizeof(dpcd_data))) { 10737 - link->dpcd_caps.psr_caps.psr_version = dpcd_data[0]; 10738 - 10739 - if (dpcd_data[0] == 0) { 10740 - link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; 10741 - link->psr_settings.psr_feature_enabled = false; 10742 - } else { 10743 - link->psr_settings.psr_version = DC_PSR_VERSION_1; 10744 - link->psr_settings.psr_feature_enabled = true; 10745 - } 10746 - 10747 - 
DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled); 10748 - } 10749 - } 10750 - 10751 - /* 10752 - * amdgpu_dm_link_setup_psr() - configure psr link 10753 - * @stream: stream state 10754 - * 10755 - * Return: true if success 10756 - */ 10757 - static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) 10758 - { 10759 - struct dc_link *link = NULL; 10760 - struct psr_config psr_config = {0}; 10761 - struct psr_context psr_context = {0}; 10762 - bool ret = false; 10763 - 10764 - if (stream == NULL) 10765 - return false; 10766 - 10767 - link = stream->link; 10768 - 10769 - psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version; 10770 - 10771 - if (psr_config.psr_version > 0) { 10772 - psr_config.psr_exit_link_training_required = 0x1; 10773 - psr_config.psr_frame_capture_indication_req = 0; 10774 - psr_config.psr_rfb_setup_time = 0x37; 10775 - psr_config.psr_sdp_transmit_line_num_deadline = 0x20; 10776 - psr_config.allow_smu_optimizations = 0x0; 10777 - 10778 - ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); 10779 - 10780 - } 10781 - DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled); 10782 - 10783 - return ret; 10784 - } 10785 - 10786 - /* 10787 - * amdgpu_dm_psr_enable() - enable psr f/w 10788 - * @stream: stream state 10789 - * 10790 - * Return: true if success 10791 - */ 10792 - bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) 10793 - { 10794 - struct dc_link *link = stream->link; 10795 - unsigned int vsync_rate_hz = 0; 10796 - struct dc_static_screen_params params = {0}; 10797 - /* Calculate number of static frames before generating interrupt to 10798 - * enter PSR. 
10799 - */ 10800 - // Init fail safe of 2 frames static 10801 - unsigned int num_frames_static = 2; 10802 - 10803 - DRM_DEBUG_DRIVER("Enabling psr...\n"); 10804 - 10805 - vsync_rate_hz = div64_u64(div64_u64(( 10806 - stream->timing.pix_clk_100hz * 100), 10807 - stream->timing.v_total), 10808 - stream->timing.h_total); 10809 - 10810 - /* Round up 10811 - * Calculate number of frames such that at least 30 ms of time has 10812 - * passed. 10813 - */ 10814 - if (vsync_rate_hz != 0) { 10815 - unsigned int frame_time_microsec = 1000000 / vsync_rate_hz; 10816 - num_frames_static = (30000 / frame_time_microsec) + 1; 10817 - } 10818 - 10819 - params.triggers.cursor_update = true; 10820 - params.triggers.overlay_update = true; 10821 - params.triggers.surface_update = true; 10822 - params.num_frames = num_frames_static; 10823 - 10824 - dc_stream_set_static_screen_params(link->ctx->dc, 10825 - &stream, 1, 10826 - &params); 10827 - 10828 - return dc_link_set_psr_allow_active(link, true, false, false); 10829 - } 10830 - 10831 - /* 10832 - * amdgpu_dm_psr_disable() - disable psr f/w 10833 - * @stream: stream state 10834 - * 10835 - * Return: true if success 10836 - */ 10837 - static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) 10838 - { 10839 - 10840 - DRM_DEBUG_DRIVER("Disabling psr...\n"); 10841 - 10842 - return dc_link_set_psr_allow_active(stream->link, false, true, false); 10843 - } 10844 - 10845 - /* 10846 - * amdgpu_dm_psr_disable() - disable psr f/w 10847 - * if psr is enabled on any stream 10848 - * 10849 - * Return: true if success 10850 - */ 10851 - static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm) 10852 - { 10853 - DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n"); 10854 - return dc_set_psr_allow_active(dm->dc, false); 10855 10726 } 10856 10727 10857 10728 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
+1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 617 617 #define MAX_COLOR_LEGACY_LUT_ENTRIES 256 618 618 619 619 void amdgpu_dm_init_color_mod(void); 620 + int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state); 620 621 int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc); 621 622 int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, 622 623 struct dc_plane_state *dc_plane_state);
+35 -6
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
··· 285 285 } 286 286 287 287 /** 288 + * Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of 289 + * the expected size. 290 + * Returns 0 on success. 291 + */ 292 + int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state) 293 + { 294 + const struct drm_color_lut *lut = NULL; 295 + uint32_t size = 0; 296 + 297 + lut = __extract_blob_lut(crtc_state->degamma_lut, &size); 298 + if (lut && size != MAX_COLOR_LUT_ENTRIES) { 299 + DRM_DEBUG_DRIVER( 300 + "Invalid Degamma LUT size. Should be %u but got %u.\n", 301 + MAX_COLOR_LUT_ENTRIES, size); 302 + return -EINVAL; 303 + } 304 + 305 + lut = __extract_blob_lut(crtc_state->gamma_lut, &size); 306 + if (lut && size != MAX_COLOR_LUT_ENTRIES && 307 + size != MAX_COLOR_LEGACY_LUT_ENTRIES) { 308 + DRM_DEBUG_DRIVER( 309 + "Invalid Gamma LUT size. Should be %u (or %u for legacy) but got %u.\n", 310 + MAX_COLOR_LUT_ENTRIES, MAX_COLOR_LEGACY_LUT_ENTRIES, 311 + size); 312 + return -EINVAL; 313 + } 314 + 315 + return 0; 316 + } 317 + 318 + /** 288 319 * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream. 289 320 * @crtc: amdgpu_dm crtc state 290 321 * ··· 348 317 bool is_legacy; 349 318 int r; 350 319 351 - degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size); 352 - if (degamma_lut && degamma_size != MAX_COLOR_LUT_ENTRIES) 353 - return -EINVAL; 320 + r = amdgpu_dm_verify_lut_sizes(&crtc->base); 321 + if (r) 322 + return r; 354 323 324 + degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size); 355 325 regamma_lut = __extract_blob_lut(crtc->base.gamma_lut, &regamma_size); 356 - if (regamma_lut && regamma_size != MAX_COLOR_LUT_ENTRIES && 357 - regamma_size != MAX_COLOR_LEGACY_LUT_ENTRIES) 358 - return -EINVAL; 359 326 360 327 has_degamma = 361 328 degamma_lut && !__is_lut_linear(degamma_lut, degamma_size);
+166
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
··· 1 + /* 2 + * Copyright 2021 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + * Authors: AMD 23 + * 24 + */ 25 + 26 + #include "amdgpu_dm_psr.h" 27 + #include "dc.h" 28 + #include "dm_helpers.h" 29 + 30 + /* 31 + * amdgpu_dm_set_psr_caps() - set link psr capabilities 32 + * @link: link 33 + * 34 + */ 35 + void amdgpu_dm_set_psr_caps(struct dc_link *link) 36 + { 37 + uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE]; 38 + 39 + if (!(link->connector_signal & SIGNAL_TYPE_EDP)) 40 + return; 41 + if (link->type == dc_connection_none) 42 + return; 43 + if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT, 44 + dpcd_data, sizeof(dpcd_data))) { 45 + link->dpcd_caps.psr_caps.psr_version = dpcd_data[0]; 46 + 47 + if (dpcd_data[0] == 0) { 48 + link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; 49 + link->psr_settings.psr_feature_enabled = false; 50 + } else { 51 + link->psr_settings.psr_version = DC_PSR_VERSION_1; 52 + link->psr_settings.psr_feature_enabled = true; 53 + } 54 + 55 + DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled); 56 + } 57 + } 58 + 59 + /* 60 + * amdgpu_dm_link_setup_psr() - configure psr link 61 + * @stream: stream state 62 + * 63 + * Return: true if success 64 + */ 65 + bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) 66 + { 67 + struct dc_link *link = NULL; 68 + struct psr_config psr_config = {0}; 69 + struct psr_context psr_context = {0}; 70 + struct dc *dc = NULL; 71 + bool ret = false; 72 + 73 + if (stream == NULL) 74 + return false; 75 + 76 + link = stream->link; 77 + dc = link->ctx->dc; 78 + 79 + psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version; 80 + 81 + if (psr_config.psr_version > 0) { 82 + psr_config.psr_exit_link_training_required = 0x1; 83 + psr_config.psr_frame_capture_indication_req = 0; 84 + psr_config.psr_rfb_setup_time = 0x37; 85 + psr_config.psr_sdp_transmit_line_num_deadline = 0x20; 86 + psr_config.allow_smu_optimizations = 0x0; 87 + 88 + ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); 89 + 90 + } 91 + DRM_DEBUG_DRIVER("PSR link: 
%d\n", link->psr_settings.psr_feature_enabled); 92 + 93 + return ret; 94 + } 95 + 96 + /* 97 + * amdgpu_dm_psr_enable() - enable psr f/w 98 + * @stream: stream state 99 + * 100 + * Return: true if success 101 + */ 102 + bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) 103 + { 104 + struct dc_link *link = stream->link; 105 + unsigned int vsync_rate_hz = 0; 106 + struct dc_static_screen_params params = {0}; 107 + /* Calculate number of static frames before generating interrupt to 108 + * enter PSR. 109 + */ 110 + // Init fail safe of 2 frames static 111 + unsigned int num_frames_static = 2; 112 + 113 + DRM_DEBUG_DRIVER("Enabling psr...\n"); 114 + 115 + vsync_rate_hz = div64_u64(div64_u64(( 116 + stream->timing.pix_clk_100hz * 100), 117 + stream->timing.v_total), 118 + stream->timing.h_total); 119 + 120 + /* Round up 121 + * Calculate number of frames such that at least 30 ms of time has 122 + * passed. 123 + */ 124 + if (vsync_rate_hz != 0) { 125 + unsigned int frame_time_microsec = 1000000 / vsync_rate_hz; 126 + num_frames_static = (30000 / frame_time_microsec) + 1; 127 + } 128 + 129 + params.triggers.cursor_update = true; 130 + params.triggers.overlay_update = true; 131 + params.triggers.surface_update = true; 132 + params.num_frames = num_frames_static; 133 + 134 + dc_stream_set_static_screen_params(link->ctx->dc, 135 + &stream, 1, 136 + &params); 137 + 138 + return dc_link_set_psr_allow_active(link, true, false, false); 139 + } 140 + 141 + /* 142 + * amdgpu_dm_psr_disable() - disable psr f/w 143 + * @stream: stream state 144 + * 145 + * Return: true if success 146 + */ 147 + bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) 148 + { 149 + 150 + DRM_DEBUG_DRIVER("Disabling psr...\n"); 151 + 152 + return dc_link_set_psr_allow_active(stream->link, false, true, false); 153 + } 154 + 155 + /* 156 + * amdgpu_dm_psr_disable() - disable psr f/w 157 + * if psr is enabled on any stream 158 + * 159 + * Return: true if success 160 + */ 161 + bool 
amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm) 162 + { 163 + DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n"); 164 + return dc_set_psr_allow_active(dm->dc, false); 165 + } 166 +
+37
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
··· 1 + /* 2 + * Copyright 2021 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + * Authors: AMD 23 + * 24 + */ 25 + 26 + #ifndef AMDGPU_DM_AMDGPU_DM_PSR_H_ 27 + #define AMDGPU_DM_AMDGPU_DM_PSR_H_ 28 + 29 + #include "amdgpu.h" 30 + 31 + void amdgpu_dm_set_psr_caps(struct dc_link *link); 32 + bool amdgpu_dm_psr_enable(struct dc_stream_state *stream); 33 + bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream); 34 + bool amdgpu_dm_psr_disable(struct dc_stream_state *stream); 35 + bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm); 36 + 37 + #endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */
+1 -1
drivers/gpu/drm/amd/display/dc/Makefile
··· 60 60 61 61 DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ 62 62 dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \ 63 - dc_link_enc_cfg.o 63 + dc_link_enc_cfg.o dc_link_dpcd.o 64 64 65 65 ifdef CONFIG_DRM_AMD_DC_DCN 66 66 DISPLAY_CORE += dc_vm_helper.o
+65 -3
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
··· 123 123 } 124 124 } 125 125 126 - void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr) 126 + void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct dc_state *context) 127 127 { 128 128 int dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR 129 129 * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz; ··· 132 132 133 133 uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider); 134 134 uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider); 135 + uint32_t current_dispclk_wdivider; 136 + uint32_t i; 137 + 138 + REG_GET(DENTIST_DISPCLK_CNTL, 139 + DENTIST_DISPCLK_WDIVIDER, &current_dispclk_wdivider); 140 + 141 + /* When changing divider to or from 127, some extra programming is required to prevent corruption */ 142 + if (current_dispclk_wdivider == 127 && dispclk_wdivider != 127) { 143 + for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { 144 + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 145 + uint32_t fifo_level; 146 + struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg; 147 + struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; 148 + int32_t N; 149 + int32_t j; 150 + 151 + if (!pipe_ctx->stream) 152 + continue; 153 + /* Virtual encoders don't have this function */ 154 + if (!stream_enc->funcs->get_fifo_cal_average_level) 155 + continue; 156 + fifo_level = stream_enc->funcs->get_fifo_cal_average_level( 157 + stream_enc); 158 + N = fifo_level / 4; 159 + dccg->funcs->set_fifo_errdet_ovr_en( 160 + dccg, 161 + true); 162 + for (j = 0; j < N - 4; j++) 163 + dccg->funcs->otg_drop_pixel( 164 + dccg, 165 + pipe_ctx->stream_res.tg->inst); 166 + dccg->funcs->set_fifo_errdet_ovr_en( 167 + dccg, 168 + false); 169 + } 170 + } else if (dispclk_wdivider == 127 && current_dispclk_wdivider != 127) { 171 + REG_UPDATE(DENTIST_DISPCLK_CNTL, 172 + DENTIST_DISPCLK_WDIVIDER, 126); 173 + REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 
50, 100); 174 + for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { 175 + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 176 + struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg; 177 + struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; 178 + uint32_t fifo_level; 179 + int32_t N; 180 + int32_t j; 181 + 182 + if (!pipe_ctx->stream) 183 + continue; 184 + /* Virtual encoders don't have this function */ 185 + if (!stream_enc->funcs->get_fifo_cal_average_level) 186 + continue; 187 + fifo_level = stream_enc->funcs->get_fifo_cal_average_level( 188 + stream_enc); 189 + N = fifo_level / 4; 190 + dccg->funcs->set_fifo_errdet_ovr_en(dccg, true); 191 + for (j = 0; j < 12 - N; j++) 192 + dccg->funcs->otg_add_pixel(dccg, 193 + pipe_ctx->stream_res.tg->inst); 194 + dccg->funcs->set_fifo_errdet_ovr_en(dccg, false); 195 + } 196 + } 135 197 136 198 REG_UPDATE(DENTIST_DISPCLK_CNTL, 137 199 DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider); ··· 313 251 if (dpp_clock_lowered) { 314 252 // if clock is being lowered, increase DTO before lowering refclk 315 253 dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); 316 - dcn20_update_clocks_update_dentist(clk_mgr); 254 + dcn20_update_clocks_update_dentist(clk_mgr, context); 317 255 } else { 318 256 // if clock is being raised, increase refclk before lowering DTO 319 257 if (update_dppclk || update_dispclk) 320 - dcn20_update_clocks_update_dentist(clk_mgr); 258 + dcn20_update_clocks_update_dentist(clk_mgr, context); 321 259 // always update dtos unless clock is lowered and not safe to lower 322 260 dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); 323 261 }
+2 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
··· 50 50 enum dc_clock_type clock_type, 51 51 struct dc_clock_config *clock_cfg); 52 52 53 - void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr); 53 + void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, 54 + struct dc_state *context); 54 55 55 56 void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base); 56 57
+2 -2
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
··· 334 334 if (dpp_clock_lowered) { 335 335 /* if clock is being lowered, increase DTO before lowering refclk */ 336 336 dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); 337 - dcn20_update_clocks_update_dentist(clk_mgr); 337 + dcn20_update_clocks_update_dentist(clk_mgr, context); 338 338 } else { 339 339 /* if clock is being raised, increase refclk before lowering DTO */ 340 340 if (update_dppclk || update_dispclk) 341 - dcn20_update_clocks_update_dentist(clk_mgr); 341 + dcn20_update_clocks_update_dentist(clk_mgr, context); 342 342 /* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures 343 343 * that we do not lower dto when it is not safe to lower. We do not need to 344 344 * compare the current and new dppclk before calling this function.*/
+5 -5
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
··· 409 409 struct tg_color *color) 410 410 { 411 411 uint32_t color_value = MAX_TG_COLOR_VALUE; 412 - /* Determine the overscan color based on the top-most (desktop) plane's context */ 413 - struct pipe_ctx *top_pipe_ctx = pipe_ctx; 412 + /* Determine the overscan color based on the bottom-most plane's context */ 413 + struct pipe_ctx *bottom_pipe_ctx = pipe_ctx; 414 414 415 - while (top_pipe_ctx->top_pipe != NULL) 416 - top_pipe_ctx = top_pipe_ctx->top_pipe; 415 + while (bottom_pipe_ctx->bottom_pipe != NULL) 416 + bottom_pipe_ctx = bottom_pipe_ctx->bottom_pipe; 417 417 418 - switch (top_pipe_ctx->plane_state->tiling_info.gfx9.swizzle) { 418 + switch (bottom_pipe_ctx->plane_state->tiling_info.gfx9.swizzle) { 419 419 case DC_SW_LINEAR: 420 420 /* LINEAR Surface - set border color to red */ 421 421 color->color_r_cr = color_value;
+71 -102
drivers/gpu/drm/amd/display/dc/core/dc_link.c
··· 49 49 #include "dmub/dmub_srv.h" 50 50 #include "inc/hw/panel_cntl.h" 51 51 #include "inc/link_enc_cfg.h" 52 + #include "inc/link_dpcd.h" 52 53 53 54 #define DC_LOGGER_INIT(logger) 54 55 ··· 60 59 #define RETIMER_REDRIVER_INFO(...) \ 61 60 DC_LOG_RETIMER_REDRIVER( \ 62 61 __VA_ARGS__) 63 - /******************************************************************************* 64 - * Private structures 65 - ******************************************************************************/ 66 - 67 - enum { 68 - PEAK_FACTOR_X1000 = 1006, 69 - /* 70 - * Some receivers fail to train on first try and are good 71 - * on subsequent tries. 2 retries should be plenty. If we 72 - * don't have a successful training then we don't expect to 73 - * ever get one. 74 - */ 75 - LINK_TRAINING_MAX_VERIFY_RETRY = 2 76 - }; 77 62 78 63 /******************************************************************************* 79 64 * Private functions ··· 705 718 706 719 static bool detect_dp(struct dc_link *link, 707 720 struct display_sink_capability *sink_caps, 708 - bool *converter_disable_audio, 709 - struct audio_support *audio_support, 710 721 enum dc_detect_reason reason) 711 722 { 712 - bool boot = false; 723 + struct audio_support *audio_support = &link->dc->res_pool->audio_support; 713 724 714 725 sink_caps->signal = link_detect_sink(link, reason); 715 726 sink_caps->transaction_type = ··· 730 745 * of this function). */ 731 746 query_hdcp_capability(SIGNAL_TYPE_DISPLAY_PORT_MST, link); 732 747 #endif 733 - /* 734 - * This call will initiate MST topology discovery. Which 735 - * will detect MST ports and add new DRM connector DRM 736 - * framework. Then read EDID via remote i2c over aux. In 737 - * the end, will notify DRM detect result and save EDID 738 - * into DRM framework. 739 - * 740 - * .detect is called by .fill_modes. 741 - * .fill_modes is called by user mode ioctl 742 - * DRM_IOCTL_MODE_GETCONNECTOR. 743 - * 744 - * .get_modes is called by .fill_modes. 
745 - * 746 - * call .get_modes, AMDGPU DM implementation will create 747 - * new dc_sink and add to dc_link. For long HPD plug 748 - * in/out, MST has its own handle. 749 - * 750 - * Therefore, just after dc_create, link->sink is not 751 - * created for MST until user mode app calls 752 - * DRM_IOCTL_MODE_GETCONNECTOR. 753 - * 754 - * Need check ->sink usages in case ->sink = NULL 755 - * TODO: s3 resume check 756 - */ 757 - if (reason == DETECT_REASON_BOOT) 758 - boot = true; 759 - 760 - dm_helpers_dp_update_branch_info(link->ctx, link); 761 - 762 - if (!dm_helpers_dp_mst_start_top_mgr(link->ctx, 763 - link, boot)) { 764 - /* MST not supported */ 765 - link->type = dc_connection_single; 766 - sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; 767 - } 768 748 } 769 749 770 750 if (link->type != dc_connection_mst_branch && 771 - is_dp_branch_device(link)) { 751 + is_dp_branch_device(link)) 772 752 /* DP SST branch */ 773 753 link->type = dc_connection_sst_branch; 774 - if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) { 775 - /* 776 - * SST branch unplug processing for short irq 777 - */ 778 - link_disconnect_sink(link); 779 - return true; 780 - } 781 - 782 - if (is_dp_active_dongle(link) && 783 - (link->dpcd_caps.dongle_type != 784 - DISPLAY_DONGLE_DP_HDMI_CONVERTER)) 785 - *converter_disable_audio = true; 786 - } 787 754 } else { 788 755 /* DP passive dongles */ 789 756 sink_caps->signal = dp_passive_dongle_detection(link->ddc, ··· 830 893 struct dc_sink *sink = NULL; 831 894 struct dc_sink *prev_sink = NULL; 832 895 struct dpcd_caps prev_dpcd_caps; 833 - bool same_dpcd = true; 834 896 enum dc_connection_type new_connection_type = dc_connection_none; 835 897 enum dc_connection_type pre_connection_type = dc_connection_none; 836 898 bool perform_dp_seamless_boot = false; ··· 840 904 if (dc_is_virtual_signal(link->connector_signal)) 841 905 return false; 842 906 843 - if ((link->connector_signal == SIGNAL_TYPE_LVDS || 844 - link->connector_signal == SIGNAL_TYPE_EDP) && 
845 - link->local_sink) { 907 + if (((link->connector_signal == SIGNAL_TYPE_LVDS || 908 + link->connector_signal == SIGNAL_TYPE_EDP) && 909 + (!link->dc->config.allow_edp_hotplug_detection)) && 910 + link->local_sink) { 846 911 // need to re-write OUI and brightness in resume case 847 912 if (link->connector_signal == SIGNAL_TYPE_EDP) { 848 913 dpcd_set_source_specific_data(link); ··· 920 983 return false; 921 984 } 922 985 923 - if (!detect_dp(link, &sink_caps, 924 - &converter_disable_audio, 925 - aud_support, reason)) { 986 + if (!detect_dp(link, &sink_caps, reason)) { 926 987 if (prev_sink) 927 988 dc_sink_release(prev_sink); 928 989 return false; 929 - } 930 - 931 - // Check if dpcp block is the same 932 - if (prev_sink) { 933 - if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, 934 - sizeof(struct dpcd_caps))) 935 - same_dpcd = false; 936 - } 937 - /* Active SST downstream branch device unplug*/ 938 - if (link->type == dc_connection_sst_branch && 939 - link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { 940 - if (prev_sink) 941 - /* Downstream unplug */ 942 - dc_sink_release(prev_sink); 943 - return true; 944 - } 945 - 946 - // link switch from MST to non-MST stop topology manager 947 - if (pre_connection_type == dc_connection_mst_branch && 948 - link->type != dc_connection_mst_branch) { 949 - dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); 950 990 } 951 991 952 992 if (link->type == dc_connection_mst_branch) { ··· 936 1022 */ 937 1023 dp_verify_mst_link_cap(link); 938 1024 939 - if (prev_sink) 940 - dc_sink_release(prev_sink); 941 - return false; 1025 + /* 1026 + * This call will initiate MST topology discovery. Which 1027 + * will detect MST ports and add new DRM connector DRM 1028 + * framework. Then read EDID via remote i2c over aux. In 1029 + * the end, will notify DRM detect result and save EDID 1030 + * into DRM framework. 1031 + * 1032 + * .detect is called by .fill_modes. 
1033 + * .fill_modes is called by user mode ioctl 1034 + * DRM_IOCTL_MODE_GETCONNECTOR. 1035 + * 1036 + * .get_modes is called by .fill_modes. 1037 + * 1038 + * call .get_modes, AMDGPU DM implementation will create 1039 + * new dc_sink and add to dc_link. For long HPD plug 1040 + * in/out, MST has its own handle. 1041 + * 1042 + * Therefore, just after dc_create, link->sink is not 1043 + * created for MST until user mode app calls 1044 + * DRM_IOCTL_MODE_GETCONNECTOR. 1045 + * 1046 + * Need check ->sink usages in case ->sink = NULL 1047 + * TODO: s3 resume check 1048 + */ 1049 + 1050 + dm_helpers_dp_update_branch_info(link->ctx, link); 1051 + if (dm_helpers_dp_mst_start_top_mgr(link->ctx, 1052 + link, reason == DETECT_REASON_BOOT)) { 1053 + if (prev_sink) 1054 + dc_sink_release(prev_sink); 1055 + return false; 1056 + } else { 1057 + link->type = dc_connection_sst_branch; 1058 + sink_caps.signal = SIGNAL_TYPE_DISPLAY_PORT; 1059 + } 942 1060 } 1061 + 1062 + /* Active SST downstream branch device unplug*/ 1063 + if (link->type == dc_connection_sst_branch && 1064 + link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { 1065 + if (prev_sink) 1066 + /* Downstream unplug */ 1067 + dc_sink_release(prev_sink); 1068 + return true; 1069 + } 1070 + 1071 + /* disable audio for non DP to HDMI active sst converter */ 1072 + if (link->type == dc_connection_sst_branch && 1073 + is_dp_active_dongle(link) && 1074 + (link->dpcd_caps.dongle_type != 1075 + DISPLAY_DONGLE_DP_HDMI_CONVERTER)) 1076 + converter_disable_audio = true; 1077 + 1078 + // link switch from MST to non-MST stop topology manager 1079 + if (pre_connection_type == dc_connection_mst_branch && 1080 + link->type != dc_connection_mst_branch) 1081 + dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); 1082 + 943 1083 944 1084 // For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified. 
945 1085 if (reason == DETECT_REASON_BOOT && 946 - !dc_ctx->dc->config.power_down_display_on_boot && 947 - link->link_status.link_active) 1086 + !dc_ctx->dc->config.power_down_display_on_boot && 1087 + link->link_status.link_active) 948 1088 perform_dp_seamless_boot = true; 949 1089 950 1090 if (perform_dp_seamless_boot) { ··· 1181 1213 link->dongle_max_pix_clk = 0; 1182 1214 } 1183 1215 1184 - LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n", 1216 + LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n", 1185 1217 link->link_index, sink, 1186 1218 (sink_caps.signal == 1187 1219 SIGNAL_TYPE_NONE ? "Disconnected" : "Connected"), 1188 - prev_sink, same_dpcd, same_edid); 1220 + prev_sink, same_edid); 1189 1221 1190 1222 if (prev_sink) 1191 1223 dc_sink_release(prev_sink); ··· 1469 1501 link->connector_signal = SIGNAL_TYPE_EDP; 1470 1502 1471 1503 if (link->hpd_gpio) { 1472 - link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; 1504 + if (!link->dc->config.allow_edp_hotplug_detection) 1505 + link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; 1473 1506 link->irq_source_hpd_rx = 1474 1507 dal_irq_get_rx_source(link->hpd_gpio); 1475 1508 }
+1 -1
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 445 445 { 446 446 uint32_t base60_refresh_rates[] = {10, 20, 5}; 447 447 uint8_t i; 448 - uint8_t rr_count = sizeof(base60_refresh_rates)/sizeof(base60_refresh_rates[0]); 448 + uint8_t rr_count = ARRAY_SIZE(base60_refresh_rates); 449 449 uint64_t frame_time_diff; 450 450 451 451 if (stream1->ctx->dc->config.vblank_alignment_dto_params &&
+2 -1
drivers/gpu/drm/amd/display/dc/dc.h
··· 45 45 /* forward declaration */ 46 46 struct aux_payload; 47 47 48 - #define DC_VER "3.2.139" 48 + #define DC_VER "3.2.140" 49 49 50 50 #define MAX_SURFACES 3 51 51 #define MAX_PLANES 6 ··· 303 303 bool multi_mon_pp_mclk_switch; 304 304 bool disable_dmcu; 305 305 bool enable_4to1MPC; 306 + bool allow_edp_hotplug_detection; 306 307 #if defined(CONFIG_DRM_AMD_DC_DCN) 307 308 bool clamp_min_dcfclk; 308 309 #endif
+98 -2
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
··· 86 86 87 87 error: 88 88 DC_ERROR("Error queuing DMUB command: status=%d\n", status); 89 + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 89 90 } 90 91 91 92 void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv) ··· 96 95 enum dmub_status status; 97 96 98 97 status = dmub_srv_cmd_execute(dmub); 99 - if (status != DMUB_STATUS_OK) 98 + if (status != DMUB_STATUS_OK) { 100 99 DC_ERROR("Error starting DMUB execution: status=%d\n", status); 100 + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 101 + } 101 102 } 102 103 103 104 void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) ··· 109 106 enum dmub_status status; 110 107 111 108 status = dmub_srv_wait_for_idle(dmub, 100000); 112 - if (status != DMUB_STATUS_OK) 109 + if (status != DMUB_STATUS_OK) { 113 110 DC_ERROR("Error waiting for DMUB idle: status=%d\n", status); 111 + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 112 + } 114 113 } 115 114 116 115 void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, ··· 218 213 void dc_dmub_trace_event_control(struct dc *dc, bool enable) 219 214 { 220 215 dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable); 216 + } 217 + 218 + bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data) 219 + { 220 + if (!dc_dmub_srv || !dc_dmub_srv->dmub || !diag_data) 221 + return false; 222 + return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub, diag_data); 223 + } 224 + 225 + void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv) 226 + { 227 + struct dmub_diagnostic_data diag_data = {0}; 228 + 229 + if (!dc_dmub_srv || !dc_dmub_srv->dmub) { 230 + DC_LOG_ERROR("%s: invalid parameters.", __func__); 231 + return; 232 + } 233 + 234 + if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) { 235 + DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__); 236 + return; 237 + } 238 + 239 + DC_LOG_DEBUG( 240 + "DMCUB STATE\n" 241 + " dmcub_version : %08x\n" 242 + " scratch [0] : %08x\n" 
243 + " scratch [1] : %08x\n" 244 + " scratch [2] : %08x\n" 245 + " scratch [3] : %08x\n" 246 + " scratch [4] : %08x\n" 247 + " scratch [5] : %08x\n" 248 + " scratch [6] : %08x\n" 249 + " scratch [7] : %08x\n" 250 + " scratch [8] : %08x\n" 251 + " scratch [9] : %08x\n" 252 + " scratch [10] : %08x\n" 253 + " scratch [11] : %08x\n" 254 + " scratch [12] : %08x\n" 255 + " scratch [13] : %08x\n" 256 + " scratch [14] : %08x\n" 257 + " scratch [15] : %08x\n" 258 + " pc : %08x\n" 259 + " unk_fault_addr : %08x\n" 260 + " inst_fault_addr : %08x\n" 261 + " data_fault_addr : %08x\n" 262 + " inbox1_rptr : %08x\n" 263 + " inbox1_wptr : %08x\n" 264 + " inbox1_size : %08x\n" 265 + " inbox0_rptr : %08x\n" 266 + " inbox0_wptr : %08x\n" 267 + " inbox0_size : %08x\n" 268 + " is_enabled : %d\n" 269 + " is_soft_reset : %d\n" 270 + " is_secure_reset : %d\n" 271 + " is_traceport_en : %d\n" 272 + " is_cw0_en : %d\n" 273 + " is_cw6_en : %d\n", 274 + diag_data.dmcub_version, 275 + diag_data.scratch[0], 276 + diag_data.scratch[1], 277 + diag_data.scratch[2], 278 + diag_data.scratch[3], 279 + diag_data.scratch[4], 280 + diag_data.scratch[5], 281 + diag_data.scratch[6], 282 + diag_data.scratch[7], 283 + diag_data.scratch[8], 284 + diag_data.scratch[9], 285 + diag_data.scratch[10], 286 + diag_data.scratch[11], 287 + diag_data.scratch[12], 288 + diag_data.scratch[13], 289 + diag_data.scratch[14], 290 + diag_data.scratch[15], 291 + diag_data.pc, 292 + diag_data.undefined_address_fault_addr, 293 + diag_data.inst_fetch_fault_addr, 294 + diag_data.data_write_fault_addr, 295 + diag_data.inbox1_rptr, 296 + diag_data.inbox1_wptr, 297 + diag_data.inbox1_size, 298 + diag_data.inbox0_rptr, 299 + diag_data.inbox0_wptr, 300 + diag_data.inbox0_size, 301 + diag_data.is_dmcub_enabled, 302 + diag_data.is_dmcub_soft_reset, 303 + diag_data.is_dmcub_secure_reset, 304 + diag_data.is_traceport_en, 305 + diag_data.is_cw0_enabled, 306 + diag_data.is_cw6_enabled); 221 307 }
+4
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
··· 71 71 72 72 void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data); 73 73 74 + bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca); 75 + 76 + void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv); 77 + 74 78 #endif /* _DMUB_DC_SRV_H_ */
+18 -5
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
··· 615 615 } 616 616 617 617 #define AUX_MAX_RETRIES 7 618 - #define AUX_MAX_DEFER_RETRIES 7 618 + #define AUX_MIN_DEFER_RETRIES 7 619 + #define AUX_MAX_DEFER_TIMEOUT_MS 50 619 620 #define AUX_MAX_I2C_DEFER_RETRIES 7 620 621 #define AUX_MAX_INVALID_REPLY_RETRIES 2 621 622 #define AUX_MAX_TIMEOUT_RETRIES 3 ··· 629 628 bool payload_reply = true; 630 629 enum aux_return_code_type operation_result; 631 630 bool retry_on_defer = false; 631 + struct ddc *ddc_pin = ddc->ddc_pin; 632 + struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; 633 + struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine); 634 + uint32_t defer_time_in_ms = 0; 632 635 633 636 int aux_ack_retries = 0, 634 637 aux_defer_retries = 0, ··· 665 660 break; 666 661 667 662 case AUX_TRANSACTION_REPLY_AUX_DEFER: 663 + /* polling_timeout_period is in us */ 664 + defer_time_in_ms += aux110->polling_timeout_period / 1000; 665 + ++aux_defer_retries; 666 + /* fall through */ 668 667 case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER: 669 668 retry_on_defer = true; 670 669 fallthrough; 671 670 case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK: 672 - if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) { 671 + if (aux_defer_retries >= AUX_MIN_DEFER_RETRIES 672 + && defer_time_in_ms >= AUX_MAX_DEFER_TIMEOUT_MS) { 673 673 goto fail; 674 674 } else { 675 675 if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) || 676 676 (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) { 677 - if (payload->defer_delay > 1) 677 + if (payload->defer_delay > 1) { 678 678 msleep(payload->defer_delay); 679 - else if (payload->defer_delay <= 1) 679 + defer_time_in_ms += payload->defer_delay; 680 + } else if (payload->defer_delay <= 1) { 680 681 udelay(payload->defer_delay * 1000); 682 + defer_time_in_ms += payload->defer_delay; 683 + } 681 684 } 682 685 } 683 686 break; ··· 714 701 // Check whether a DEFER had occurred before the timeout. 715 702 // If so, treat timeout as a DEFER. 
716 703 if (retry_on_defer) { 717 - if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) 704 + if (++aux_defer_retries >= AUX_MIN_DEFER_RETRIES) 718 705 goto fail; 719 706 else if (payload->defer_delay > 0) 720 707 msleep(payload->defer_delay);
-1
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
··· 65 65 66 66 #include "atomfirmware.h" 67 67 68 - #include "dce110_hw_sequencer.h" 69 68 #include "dcn10/dcn10_hw_sequencer.h" 70 69 71 70 #define GAMMA_HW_POINTS_NUM 256
+5
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
··· 54 54 #include "dce/dmub_hw_lock_mgr.h" 55 55 #include "dc_trace.h" 56 56 #include "dce/dmub_outbox.h" 57 + #include "inc/dc_link_dp.h" 58 + #include "inc/link_dpcd.h" 57 59 58 60 #define DC_LOGGER_INIT(logger) 59 61 ··· 1404 1402 for (i = 0; i < dc->link_count; i++) { 1405 1403 if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) 1406 1404 continue; 1405 + 1406 + /* DP 2.0 requires that LTTPR Caps be read first */ 1407 + dp_retrieve_lttpr_cap(dc->links[i]); 1407 1408 1408 1409 /* 1409 1410 * If any of the displays are lit up turn them off.
+24
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
··· 52 52 SRI(AFMT_60958_1, DIG, id), \ 53 53 SRI(AFMT_60958_2, DIG, id), \ 54 54 SRI(DIG_FE_CNTL, DIG, id), \ 55 + SRI(DIG_FIFO_STATUS, DIG, id), \ 55 56 SRI(HDMI_CONTROL, DIG, id), \ 56 57 SRI(HDMI_DB_CONTROL, DIG, id), \ 57 58 SRI(HDMI_GC, DIG, id), \ ··· 125 124 uint32_t AFMT_60958_2; 126 125 uint32_t DIG_FE_CNTL; 127 126 uint32_t DIG_FE_CNTL2; 127 + uint32_t DIG_FIFO_STATUS; 128 128 uint32_t DP_MSE_RATE_CNTL; 129 129 uint32_t DP_MSE_RATE_UPDATE; 130 130 uint32_t DP_PIXEL_FORMAT; ··· 268 266 SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\ 269 267 SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\ 270 268 SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\ 269 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_LEVEL_ERROR, mask_sh),\ 270 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_USE_OVERWRITE_LEVEL, mask_sh),\ 271 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_OVERWRITE_LEVEL, mask_sh),\ 272 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_ERROR_ACK, mask_sh),\ 273 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_CAL_AVERAGE_LEVEL, mask_sh),\ 274 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_MAXIMUM_LEVEL, mask_sh),\ 275 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_MINIMUM_LEVEL, mask_sh),\ 276 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_READ_CLOCK_SRC, mask_sh),\ 277 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_CALIBRATED, mask_sh),\ 278 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_FORCE_RECAL_AVERAGE, mask_sh),\ 279 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_FORCE_RECOMP_MINMAX, mask_sh),\ 271 280 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, mask_sh),\ 272 281 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT, mask_sh),\ 273 282 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, mask_sh),\ ··· 501 488 type DP_VID_N_MUL;\ 502 489 type DP_VID_M_DOUBLE_VALUE_EN;\ 503 490 type DIG_SOURCE_SELECT;\ 491 + type DIG_FIFO_LEVEL_ERROR;\ 492 + type DIG_FIFO_USE_OVERWRITE_LEVEL;\ 493 + type DIG_FIFO_OVERWRITE_LEVEL;\ 494 + type DIG_FIFO_ERROR_ACK;\ 495 + type DIG_FIFO_CAL_AVERAGE_LEVEL;\ 496 
+ type DIG_FIFO_MAXIMUM_LEVEL;\ 497 + type DIG_FIFO_MINIMUM_LEVEL;\ 498 + type DIG_FIFO_READ_CLOCK_SRC;\ 499 + type DIG_FIFO_CALIBRATED;\ 500 + type DIG_FIFO_FORCE_RECAL_AVERAGE;\ 501 + type DIG_FIFO_FORCE_RECOMP_MINMAX;\ 504 502 type DIG_CLOCK_PATTERN 505 503 506 504 #define SE_REG_FIELD_LIST_DCN2_0(type) \
+12
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
··· 552 552 DP_SST_SDP_SPLITTING, enable_sdp_splitting); 553 553 } 554 554 555 + uint32_t enc2_get_fifo_cal_average_level( 556 + struct stream_encoder *enc) 557 + { 558 + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); 559 + uint32_t fifo_level; 560 + 561 + REG_GET(DIG_FIFO_STATUS, 562 + DIG_FIFO_CAL_AVERAGE_LEVEL, &fifo_level); 563 + return fifo_level; 564 + } 565 + 555 566 static const struct stream_encoder_funcs dcn20_str_enc_funcs = { 556 567 .dp_set_odm_combine = 557 568 enc2_dp_set_odm_combine, ··· 609 598 .dp_set_dsc_pps_info_packet = enc2_dp_set_dsc_pps_info_packet, 610 599 .set_dynamic_metadata = enc2_set_dynamic_metadata, 611 600 .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute, 601 + .get_fifo_cal_average_level = enc2_get_fifo_cal_average_level, 612 602 }; 613 603 614 604 void dcn20_stream_encoder_construct(
+3
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
··· 112 112 uint32_t hubp_requestor_id, 113 113 enum dynamic_metadata_mode dmdata_mode); 114 114 115 + uint32_t enc2_get_fifo_cal_average_level( 116 + struct stream_encoder *enc); 117 + 115 118 #endif /* __DC_STREAM_ENCODER_DCN20_H__ */
+2
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
··· 823 823 .dp_set_dsc_pps_info_packet = enc3_dp_set_dsc_pps_info_packet, 824 824 .set_dynamic_metadata = enc2_set_dynamic_metadata, 825 825 .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute, 826 + 827 + .get_fifo_cal_average_level = enc2_get_fifo_cal_average_level, 826 828 }; 827 829 828 830 void dcn30_dio_stream_encoder_construct(
+12
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
··· 106 106 SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \ 107 107 SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \ 108 108 SRI(DIG_FE_CNTL, DIG, id), \ 109 + SRI(DIG_FIFO_STATUS, DIG, id), \ 109 110 SRI(DIG_CLOCK_PATTERN, DIG, id) 110 111 111 112 ··· 168 167 SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\ 169 168 SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\ 170 169 SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\ 170 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_LEVEL_ERROR, mask_sh),\ 171 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_USE_OVERWRITE_LEVEL, mask_sh),\ 172 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_OVERWRITE_LEVEL, mask_sh),\ 173 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_ERROR_ACK, mask_sh),\ 174 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_CAL_AVERAGE_LEVEL, mask_sh),\ 175 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_MAXIMUM_LEVEL, mask_sh),\ 176 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_MINIMUM_LEVEL, mask_sh),\ 177 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_READ_CLOCK_SRC, mask_sh),\ 178 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_CALIBRATED, mask_sh),\ 179 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_FORCE_RECAL_AVERAGE, mask_sh),\ 180 + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_FORCE_RECOMP_MINMAX, mask_sh),\ 171 181 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, mask_sh),\ 172 182 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\ 173 183 SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\
+4
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
··· 48 48 #include "dc_dmub_srv.h" 49 49 #include "link_hwss.h" 50 50 #include "dpcd_defs.h" 51 + #include "inc/dc_link_dp.h" 52 + #include "inc/link_dpcd.h" 51 53 52 54 53 55 ··· 531 529 for (i = 0; i < dc->link_count; i++) { 532 530 if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) 533 531 continue; 532 + /* DP 2.0 states that LTTPR regs must be read first */ 533 + dp_retrieve_lttpr_cap(dc->links[i]); 534 534 535 535 /* if any of the displays are lit up turn them off */ 536 536 status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+5
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.c
··· 38 38 { 39 39 /*DCN303 removes PG registers*/ 40 40 } 41 + 42 + void dcn303_enable_power_gating_plane(struct dce_hwseq *hws, bool enable) 43 + { 44 + /*DCN303 removes PG registers*/ 45 + }
+1
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.h
··· 13 13 void dcn303_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on); 14 14 void dcn303_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on); 15 15 void dcn303_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on); 16 + void dcn303_enable_power_gating_plane(struct dce_hwseq *hws, bool enable); 16 17 17 18 #endif /* __DC_HWSS_DCN303_H__ */
+1
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c
··· 16 16 dc->hwseq->funcs.dpp_pg_control = dcn303_dpp_pg_control; 17 17 dc->hwseq->funcs.hubp_pg_control = dcn303_hubp_pg_control; 18 18 dc->hwseq->funcs.dsc_pg_control = dcn303_dsc_pg_control; 19 + dc->hwseq->funcs.enable_power_gating_plane = dcn303_enable_power_gating_plane; 19 20 }
+1 -1
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
··· 33 33 #include "clk_mgr.h" 34 34 #include "reg_helper.h" 35 35 #include "abm.h" 36 - #include "clk_mgr.h" 37 36 #include "hubp.h" 38 37 #include "dchubbub.h" 39 38 #include "timing_generator.h" ··· 46 47 #include "dpcd_defs.h" 47 48 #include "dce/dmub_outbox.h" 48 49 #include "dc_link_dp.h" 50 + #include "inc/link_dpcd.h" 49 51 50 52 #define DC_LOGGER_INIT(logger) 51 53
-1
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
··· 934 934 .dmub_command_table = true, 935 935 .pstate_enabled = true, 936 936 .use_max_lb = true, 937 - .pstate_enabled = true, 938 937 .enable_mem_low_power = { 939 938 .bits = { 940 939 .vga = false,
+2 -2
drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
··· 3536 3536 *BytePerPixelDETC = 0; 3537 3537 *BytePerPixelY = 4; 3538 3538 *BytePerPixelC = 0; 3539 - } else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) { 3539 + } else if (SourcePixelFormat == dm_444_16) { 3540 3540 *BytePerPixelDETY = 2; 3541 3541 *BytePerPixelDETC = 0; 3542 3542 *BytePerPixelY = 2; ··· 5674 5674 for (k = 0; k < v->NumberOfActivePlanes; k++) { 5675 5675 if (v->ViewportWidth[k] > v->SurfaceWidthY[k] || v->ViewportHeight[k] > v->SurfaceHeightY[k]) { 5676 5676 ViewportExceedsSurface = true; 5677 - if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16 5677 + if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 5678 5678 && v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_444_8 5679 5679 && v->SourcePixelFormat[k] != dm_rgbe) { 5680 5680 if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k]
-2
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
··· 159 159 dml_get_pipe_attr_func(refcyc_per_meta_chunk_vblank_c_in_us, mode_lib->vba.TimePerChromaMetaChunkVBlank); 160 160 dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_l_in_us, mode_lib->vba.TimePerMetaChunkFlip); 161 161 dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_c_in_us, mode_lib->vba.TimePerChromaMetaChunkFlip); 162 - 163 162 dml_get_pipe_attr_func(vstartup, mode_lib->vba.VStartup); 164 163 dml_get_pipe_attr_func(vupdate_offset, mode_lib->vba.VUpdateOffsetPix); 165 164 dml_get_pipe_attr_func(vupdate_width, mode_lib->vba.VUpdateWidthPix); ··· 418 419 visited[j] = true; 419 420 420 421 mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes; 421 - 422 422 mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1; 423 423 mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] = 424 424 (enum scan_direction_class) (src->source_scan);
+1
drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
··· 33 33 #include "core_types.h" 34 34 #include "dc_link_ddc.h" 35 35 #include "link_hwss.h" 36 + #include "inc/link_dpcd.h" 36 37 37 38 #define DC_LOGGER \ 38 39 link->ctx->logger
+3
drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
··· 237 237 void (*dp_set_odm_combine)( 238 238 struct stream_encoder *enc, 239 239 bool odm_combine); 240 + 241 + uint32_t (*get_fifo_cal_average_level)( 242 + struct stream_encoder *enc); 240 243 }; 241 244 242 245 #endif /* STREAM_ENCODER_H_ */
+29
drivers/gpu/drm/amd/display/dmub/dmub_srv.h
··· 244 244 }; 245 245 246 246 /** 247 + * struct dmub_diagnostic_data - Diagnostic data retrieved from DMCUB for 248 + * debugging purposes, including logging, crash analysis, etc. 249 + */ 250 + struct dmub_diagnostic_data { 251 + uint32_t dmcub_version; 252 + uint32_t scratch[16]; 253 + uint32_t pc; 254 + uint32_t undefined_address_fault_addr; 255 + uint32_t inst_fetch_fault_addr; 256 + uint32_t data_write_fault_addr; 257 + uint32_t inbox1_rptr; 258 + uint32_t inbox1_wptr; 259 + uint32_t inbox1_size; 260 + uint32_t inbox0_rptr; 261 + uint32_t inbox0_wptr; 262 + uint32_t inbox0_size; 263 + uint8_t is_dmcub_enabled : 1; 264 + uint8_t is_dmcub_soft_reset : 1; 265 + uint8_t is_dmcub_secure_reset : 1; 266 + uint8_t is_traceport_en : 1; 267 + uint8_t is_cw0_enabled : 1; 268 + uint8_t is_cw6_enabled : 1; 269 + }; 270 + 271 + /** 247 272 * struct dmub_srv_base_funcs - Driver specific base callbacks 248 273 */ 249 274 struct dmub_srv_base_funcs { ··· 360 335 361 336 void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data); 362 337 uint32_t (*get_current_time)(struct dmub_srv *dmub); 338 + 339 + void (*get_diagnostic_data)(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca); 363 340 }; 364 341 365 342 /** ··· 711 684 union dmub_rb_cmd *cmd); 712 685 713 686 bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry); 687 + 688 + bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data); 714 689 715 690 #if defined(__cplusplus) 716 691 }
+2 -2
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
··· 47 47 48 48 /* Firmware versioning. */ 49 49 #ifdef DMUB_EXPOSE_VERSION 50 - #define DMUB_FW_VERSION_GIT_HASH 0xefd666c1 50 + #define DMUB_FW_VERSION_GIT_HASH 0x5cac099d3 51 51 #define DMUB_FW_VERSION_MAJOR 0 52 52 #define DMUB_FW_VERSION_MINOR 0 53 - #define DMUB_FW_VERSION_REVISION 69 53 + #define DMUB_FW_VERSION_REVISION 70 54 54 #define DMUB_FW_VERSION_TEST 0 55 55 #define DMUB_FW_VERSION_VBIOS 0 56 56 #define DMUB_FW_VERSION_HOTFIX 0
+64 -1
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
··· 40 40 41 41 const struct dmub_srv_common_regs dmub_srv_dcn20_regs = { 42 42 #define DMUB_SR(reg) REG_OFFSET(reg), 43 - { DMUB_COMMON_REGS() }, 43 + { 44 + DMUB_COMMON_REGS() 45 + DMCUB_INTERNAL_REGS() 46 + }, 44 47 #undef DMUB_SR 45 48 46 49 #define DMUB_SF(reg, field) FD_MASK(reg, field), ··· 406 403 uint32_t dmub_dcn20_get_current_time(struct dmub_srv *dmub) 407 404 { 408 405 return REG_READ(DMCUB_TIMER_CURRENT); 406 + } 407 + 408 + void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data) 409 + { 410 + uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; 411 + uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled; 412 + 413 + if (!dmub || !diag_data) 414 + return; 415 + 416 + memset(diag_data, 0, sizeof(*diag_data)); 417 + 418 + diag_data->dmcub_version = dmub->fw_version; 419 + 420 + diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0); 421 + diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1); 422 + diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2); 423 + diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3); 424 + diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4); 425 + diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5); 426 + diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6); 427 + diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7); 428 + diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8); 429 + diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9); 430 + diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10); 431 + diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11); 432 + diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12); 433 + diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13); 434 + diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14); 435 + diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15); 436 + 437 + diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR); 438 + diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR); 439 + diag_data->data_write_fault_addr = 
REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR); 440 + 441 + diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR); 442 + diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR); 443 + diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE); 444 + 445 + diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR); 446 + diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR); 447 + diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); 448 + 449 + REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); 450 + diag_data->is_dmcub_enabled = is_dmub_enabled; 451 + 452 + REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &is_soft_reset); 453 + diag_data->is_dmcub_soft_reset = is_soft_reset; 454 + 455 + REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); 456 + diag_data->is_dmcub_secure_reset = is_sec_reset; 457 + 458 + REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); 459 + diag_data->is_traceport_en = is_traceport_enabled; 460 + 461 + REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled); 462 + diag_data->is_cw0_enabled = is_cw0_enabled; 463 + 464 + REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); 465 + diag_data->is_cw6_enabled = is_cw6_enabled; 409 466 }
+13 -1
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
··· 36 36 DMUB_SR(DMCUB_CNTL) \ 37 37 DMUB_SR(DMCUB_MEM_CNTL) \ 38 38 DMUB_SR(DMCUB_SEC_CNTL) \ 39 + DMUB_SR(DMCUB_INBOX0_SIZE) \ 40 + DMUB_SR(DMCUB_INBOX0_RPTR) \ 41 + DMUB_SR(DMCUB_INBOX0_WPTR) \ 39 42 DMUB_SR(DMCUB_INBOX1_BASE_ADDRESS) \ 40 43 DMUB_SR(DMCUB_INBOX1_SIZE) \ 41 44 DMUB_SR(DMCUB_INBOX1_RPTR) \ ··· 111 108 DMUB_SR(DCN_VM_FB_LOCATION_BASE) \ 112 109 DMUB_SR(DCN_VM_FB_OFFSET) \ 113 110 DMUB_SR(DMCUB_INTERRUPT_ACK) \ 114 - DMUB_SR(DMCUB_TIMER_CURRENT) 111 + DMUB_SR(DMCUB_TIMER_CURRENT) \ 112 + DMUB_SR(DMCUB_INST_FETCH_FAULT_ADDR) \ 113 + DMUB_SR(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR) \ 114 + DMUB_SR(DMCUB_DATA_WRITE_FAULT_ADDR) 115 + 116 + #define DMCUB_INTERNAL_REGS() 115 117 116 118 #define DMUB_COMMON_FIELDS() \ 117 119 DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \ ··· 126 118 DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_WRITE_SPACE) \ 127 119 DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET) \ 128 120 DMUB_SF(DMCUB_SEC_CNTL, DMCUB_MEM_UNIT_ID) \ 121 + DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS) \ 129 122 DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_TOP_ADDRESS) \ 130 123 DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE) \ 131 124 DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_TOP_ADDRESS) \ ··· 156 147 struct dmub_srv_common_reg_offset { 157 148 #define DMUB_SR(reg) uint32_t reg; 158 149 DMUB_COMMON_REGS() 150 + DMCUB_INTERNAL_REGS() 159 151 #undef DMUB_SR 160 152 }; 161 153 ··· 243 233 bool dmub_dcn20_use_cached_trace_buffer(struct dmub_srv *dmub); 244 234 245 235 uint32_t dmub_dcn20_get_current_time(struct dmub_srv *dmub); 236 + 237 + void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca); 246 238 247 239 #endif /* _DMUB_DCN20_H_ */
+4 -1
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
··· 39 39 40 40 const struct dmub_srv_common_regs dmub_srv_dcn21_regs = { 41 41 #define DMUB_SR(reg) REG_OFFSET(reg), 42 - { DMUB_COMMON_REGS() }, 42 + { 43 + DMUB_COMMON_REGS() 44 + DMCUB_INTERNAL_REGS() 45 + }, 43 46 #undef DMUB_SR 44 47 45 48 #define DMUB_SF(reg, field) FD_MASK(reg, field),
+4 -1
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
··· 40 40 41 41 const struct dmub_srv_common_regs dmub_srv_dcn30_regs = { 42 42 #define DMUB_SR(reg) REG_OFFSET(reg), 43 - { DMUB_COMMON_REGS() }, 43 + { 44 + DMUB_COMMON_REGS() 45 + DMCUB_INTERNAL_REGS() 46 + }, 44 47 #undef DMUB_SR 45 48 46 49 #define DMUB_SF(reg, field) FD_MASK(reg, field),
+4 -1
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn301.c
··· 39 39 40 40 const struct dmub_srv_common_regs dmub_srv_dcn301_regs = { 41 41 #define DMUB_SR(reg) REG_OFFSET(reg), 42 - { DMUB_COMMON_REGS() }, 42 + { 43 + DMUB_COMMON_REGS() 44 + DMCUB_INTERNAL_REGS() 45 + }, 43 46 #undef DMUB_SR 44 47 45 48 #define DMUB_SF(reg, field) FD_MASK(reg, field),
+4 -1
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn302.c
··· 39 39 40 40 const struct dmub_srv_common_regs dmub_srv_dcn302_regs = { 41 41 #define DMUB_SR(reg) REG_OFFSET(reg), 42 - { DMUB_COMMON_REGS() }, 42 + { 43 + DMUB_COMMON_REGS() 44 + DMCUB_INTERNAL_REGS() 45 + }, 43 46 #undef DMUB_SR 44 47 45 48 #define DMUB_SF(reg, field) FD_MASK(reg, field),
+4 -1
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.c
··· 21 21 22 22 const struct dmub_srv_common_regs dmub_srv_dcn303_regs = { 23 23 #define DMUB_SR(reg) REG_OFFSET(reg), 24 - { DMUB_COMMON_REGS() }, 24 + { 25 + DMUB_COMMON_REGS() 26 + DMCUB_INTERNAL_REGS() 27 + }, 25 28 #undef DMUB_SR 26 29 27 30 #define DMUB_SF(reg, field) FD_MASK(reg, field),
+10
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
··· 176 176 funcs->get_outbox0_wptr = dmub_dcn20_get_outbox0_wptr; 177 177 funcs->set_outbox0_rptr = dmub_dcn20_set_outbox0_rptr; 178 178 179 + funcs->get_diagnostic_data = dmub_dcn20_get_diagnostic_data; 180 + 179 181 if (asic == DMUB_ASIC_DCN21) { 180 182 dmub->regs = &dmub_srv_dcn21_regs; 181 183 ··· 795 793 dmub->outbox0_rb.wrpt = dmub->hw_funcs.get_outbox0_wptr(dmub); 796 794 797 795 return dmub_rb_out_trace_buffer_front(&dmub->outbox0_rb, (void *)entry); 796 + } 797 + 798 + bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data) 799 + { 800 + if (!dmub || !dmub->hw_funcs.get_diagnostic_data || !diag_data) 801 + return false; 802 + dmub->hw_funcs.get_diagnostic_data(dmub, diag_data); 803 + return true; 798 804 }
+10 -10
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
··· 87 87 }; 88 88 89 89 static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = { 90 - // min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blStart blRed 91 - {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0, 0xCCCC, 0xCCCC}, 92 - {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf, 0xCCCC, 0xCCCC}, 93 - {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0, 0xCCCC, 0xCCCC}, 94 - {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC}, 90 + // min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blRed blStart 91 + {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0, 0xf777, 0xcccc}, 92 + {0xde, 0x85, 0x20, 0x00, 0xe0, 0x90, 0xa8, 0x40, 0xc8, 0xf777, 0xcccc}, 93 + {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0, 0xeeee, 0x9999}, 94 + {0x82, 0x40, 0x20, 0x00, 0x00, 0xb8, 0xb3, 0x70, 0x70, 0xe333, 0xb333}, 95 95 }; 96 96 97 97 static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = { 98 - // min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blStart blRed 99 - {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC}, 100 - {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC}, 101 - {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC}, 102 - {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC}, 98 + // min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blRed blStart 99 + {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xcccc, 0xcccc}, 100 + {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xcccc, 0xcccc}, 101 + {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xcccc, 0xcccc}, 102 + {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xcccc, 0xcccc}, 103 103 }; 104 104 105 105 static const struct abm_parameters * const abm_settings[] = {
+2
drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_1_sh_mask.h
··· 29292 29292 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL 29293 29293 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L 29294 29294 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L 29295 + #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L 29295 29296 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L 29296 29297 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L 29297 29298 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L ··· 34432 34431 #define DIG3_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2 34433 34432 #define DIG3_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8 34434 34433 #define DIG3_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa 34434 + #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10 34435 34435 #define DIG3_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16 34436 34436 #define DIG3_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a 34437 34437 #define DIG3_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+2
drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h
··· 33869 33869 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2 33870 33870 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8 33871 33871 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa 33872 + #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10 33872 33873 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16 33873 33874 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a 33874 33875 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d ··· 33880 33879 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL 33881 33880 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L 33882 33881 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L 33882 + #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L 33883 33883 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L 33884 33884 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L 33885 33885 #define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+57 -1
drivers/gpu/drm/amd/include/kgd_pp_interface.h
··· 560 560 uint16_t current_vclk1; 561 561 uint16_t current_dclk1; 562 562 563 - /* Throttle status */ 563 + /* Throttle status (ASIC dependent) */ 564 564 uint32_t throttle_status; 565 565 566 566 /* Fans */ ··· 648 648 uint16_t voltage_mem; 649 649 650 650 uint16_t padding1; 651 + 652 + /* Throttle status (ASIC independent) */ 653 + uint64_t indep_throttle_status; 651 654 }; 652 655 653 656 /* ··· 755 752 uint16_t fan_pwm; 756 753 757 754 uint16_t padding[3]; 755 + }; 756 + 757 + struct gpu_metrics_v2_2 { 758 + struct metrics_table_header common_header; 759 + 760 + /* Temperature */ 761 + uint16_t temperature_gfx; // gfx temperature on APUs 762 + uint16_t temperature_soc; // soc temperature on APUs 763 + uint16_t temperature_core[8]; // CPU core temperature on APUs 764 + uint16_t temperature_l3[2]; 765 + 766 + /* Utilization */ 767 + uint16_t average_gfx_activity; 768 + uint16_t average_mm_activity; // UVD or VCN 769 + 770 + /* Driver attached timestamp (in ns) */ 771 + uint64_t system_clock_counter; 772 + 773 + /* Power/Energy */ 774 + uint16_t average_socket_power; // dGPU + APU power on A + A platform 775 + uint16_t average_cpu_power; 776 + uint16_t average_soc_power; 777 + uint16_t average_gfx_power; 778 + uint16_t average_core_power[8]; // CPU core power on APUs 779 + 780 + /* Average clocks */ 781 + uint16_t average_gfxclk_frequency; 782 + uint16_t average_socclk_frequency; 783 + uint16_t average_uclk_frequency; 784 + uint16_t average_fclk_frequency; 785 + uint16_t average_vclk_frequency; 786 + uint16_t average_dclk_frequency; 787 + 788 + /* Current clocks */ 789 + uint16_t current_gfxclk; 790 + uint16_t current_socclk; 791 + uint16_t current_uclk; 792 + uint16_t current_fclk; 793 + uint16_t current_vclk; 794 + uint16_t current_dclk; 795 + uint16_t current_coreclk[8]; // CPU core clocks 796 + uint16_t current_l3clk[2]; 797 + 798 + /* Throttle status (ASIC dependent) */ 799 + uint32_t throttle_status; 800 + 801 + /* Fans */ 802 + uint16_t fan_pwm; 803 + 804 
+ uint16_t padding[3]; 805 + 806 + /* Throttle status (ASIC independent) */ 807 + uint64_t indep_throttle_status; 758 808 }; 759 809 760 810 #endif
+49 -2
drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
··· 35 35 36 36 #define SMU_DPM_USER_PROFILE_RESTORE (1 << 0) 37 37 38 + // Power Throttlers 39 + #define SMU_THROTTLER_PPT0_BIT 0 40 + #define SMU_THROTTLER_PPT1_BIT 1 41 + #define SMU_THROTTLER_PPT2_BIT 2 42 + #define SMU_THROTTLER_PPT3_BIT 3 43 + #define SMU_THROTTLER_SPL_BIT 4 44 + #define SMU_THROTTLER_FPPT_BIT 5 45 + #define SMU_THROTTLER_SPPT_BIT 6 46 + #define SMU_THROTTLER_SPPT_APU_BIT 7 47 + 48 + // Current Throttlers 49 + #define SMU_THROTTLER_TDC_GFX_BIT 16 50 + #define SMU_THROTTLER_TDC_SOC_BIT 17 51 + #define SMU_THROTTLER_TDC_MEM_BIT 18 52 + #define SMU_THROTTLER_TDC_VDD_BIT 19 53 + #define SMU_THROTTLER_TDC_CVIP_BIT 20 54 + #define SMU_THROTTLER_EDC_CPU_BIT 21 55 + #define SMU_THROTTLER_EDC_GFX_BIT 22 56 + #define SMU_THROTTLER_APCC_BIT 23 57 + 58 + // Temperature 59 + #define SMU_THROTTLER_TEMP_GPU_BIT 32 60 + #define SMU_THROTTLER_TEMP_CORE_BIT 33 61 + #define SMU_THROTTLER_TEMP_MEM_BIT 34 62 + #define SMU_THROTTLER_TEMP_EDGE_BIT 35 63 + #define SMU_THROTTLER_TEMP_HOTSPOT_BIT 36 64 + #define SMU_THROTTLER_TEMP_SOC_BIT 37 65 + #define SMU_THROTTLER_TEMP_VR_GFX_BIT 38 66 + #define SMU_THROTTLER_TEMP_VR_SOC_BIT 39 67 + #define SMU_THROTTLER_TEMP_VR_MEM0_BIT 40 68 + #define SMU_THROTTLER_TEMP_VR_MEM1_BIT 41 69 + #define SMU_THROTTLER_TEMP_LIQUID0_BIT 42 70 + #define SMU_THROTTLER_TEMP_LIQUID1_BIT 43 71 + #define SMU_THROTTLER_VRHOT0_BIT 44 72 + #define SMU_THROTTLER_VRHOT1_BIT 45 73 + #define SMU_THROTTLER_PROCHOT_CPU_BIT 46 74 + #define SMU_THROTTLER_PROCHOT_GFX_BIT 47 75 + 76 + // Other 77 + #define SMU_THROTTLER_PPM_BIT 56 78 + #define SMU_THROTTLER_FIT_BIT 57 79 + 38 80 struct smu_hw_power_state { 39 81 unsigned int magic; 40 82 }; ··· 765 723 /** 766 724 * @get_power_limit: Get the device's power limits. 
767 725 */ 768 - int (*get_power_limit)(struct smu_context *smu); 726 + int (*get_power_limit)(struct smu_context *smu, 727 + uint32_t *current_power_limit, 728 + uint32_t *default_power_limit, 729 + uint32_t *max_power_limit); 769 730 770 731 /** 771 732 * @get_ppt_limit: Get the device's ppt limits. ··· 977 932 * @disable_all_features_with_exception: Disable all features with 978 933 * exception to those in &mask. 979 934 */ 980 - int (*disable_all_features_with_exception)(struct smu_context *smu, enum smu_feature_mask mask); 935 + int (*disable_all_features_with_exception)(struct smu_context *smu, 936 + bool no_hw_disablement, 937 + enum smu_feature_mask mask); 981 938 982 939 /** 983 940 * @notify_display_change: Enable fast memory clock switching.
+3
drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
··· 244 244 int smu_v11_0_baco_enter(struct smu_context *smu); 245 245 int smu_v11_0_baco_exit(struct smu_context *smu); 246 246 247 + int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, 248 + enum smu_v11_0_baco_seq baco_seq); 249 + 247 250 int smu_v11_0_mode1_reset(struct smu_context *smu); 248 251 249 252 int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
+22 -4
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 688 688 return ret; 689 689 } 690 690 691 - ret = smu_get_asic_power_limits(smu); 691 + ret = smu_get_asic_power_limits(smu, 692 + &smu->current_power_limit, 693 + &smu->default_power_limit, 694 + &smu->max_power_limit); 692 695 if (ret) { 693 696 dev_err(adev->dev, "Failed to get asic power limits!\n"); 694 697 return ret; ··· 1382 1379 if (smu->uploading_custom_pp_table && 1383 1380 (adev->asic_type >= CHIP_NAVI10) && 1384 1381 (adev->asic_type <= CHIP_DIMGREY_CAVEFISH)) 1385 - return 0; 1382 + return smu_disable_all_features_with_exception(smu, 1383 + true, 1384 + SMU_FEATURE_COUNT); 1386 1385 1387 1386 /* 1388 1387 * For Sienna_Cichlid, PMFW will handle the features disablement properly 1389 1388 * on BACO in. Driver involvement is unnecessary. 1390 1389 */ 1391 - if ((adev->asic_type == CHIP_SIENNA_CICHLID) && 1390 + if (((adev->asic_type == CHIP_SIENNA_CICHLID) || 1391 + ((adev->asic_type >= CHIP_NAVI10) && (adev->asic_type <= CHIP_NAVI12))) && 1392 1392 use_baco) 1393 - return 0; 1393 + return smu_disable_all_features_with_exception(smu, 1394 + true, 1395 + SMU_FEATURE_BACO_BIT); 1394 1396 1395 1397 /* 1396 1398 * For gpu reset, runpm and hibernation through BACO, ··· 1403 1395 */ 1404 1396 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) { 1405 1397 ret = smu_disable_all_features_with_exception(smu, 1398 + false, 1406 1399 SMU_FEATURE_BACO_BIT); 1407 1400 if (ret) 1408 1401 dev_err(adev->dev, "Failed to disable smu features except BACO.\n"); ··· 2241 2232 } else { 2242 2233 switch (limit_level) { 2243 2234 case SMU_PPT_LIMIT_CURRENT: 2235 + if ((smu->adev->asic_type == CHIP_ALDEBARAN) || 2236 + (smu->adev->asic_type == CHIP_SIENNA_CICHLID) || 2237 + (smu->adev->asic_type == CHIP_NAVY_FLOUNDER) || 2238 + (smu->adev->asic_type == CHIP_DIMGREY_CAVEFISH) || 2239 + (smu->adev->asic_type == CHIP_BEIGE_GOBY)) 2240 + ret = smu_get_asic_power_limits(smu, 2241 + &smu->current_power_limit, 2242 + NULL, 2243 + NULL); 2244 2244 *limit = 
smu->current_power_limit; 2245 2245 break; 2246 2246 case SMU_PPT_LIMIT_DEFAULT:
+46 -13
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
··· 211 211 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 212 212 }; 213 213 214 + static const uint8_t arcturus_throttler_map[] = { 215 + [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT), 216 + [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT), 217 + [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT), 218 + [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT), 219 + [THROTTLER_TEMP_VR_MEM_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT), 220 + [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT), 221 + [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT), 222 + [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT), 223 + [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT), 224 + [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT), 225 + [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT), 226 + [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT), 227 + [THROTTLER_PPM_BIT] = (SMU_THROTTLER_PPM_BIT), 228 + [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT), 229 + [THROTTLER_APCC_BIT] = (SMU_THROTTLER_APCC_BIT), 230 + [THROTTLER_VRHOT0_BIT] = (SMU_THROTTLER_VRHOT0_BIT), 231 + [THROTTLER_VRHOT1_BIT] = (SMU_THROTTLER_VRHOT1_BIT), 232 + }; 233 + 214 234 static int arcturus_tables_init(struct smu_context *smu) 215 235 { 216 236 struct smu_table_context *smu_table = &smu->smu_table; ··· 257 237 return -ENOMEM; 258 238 smu_table->metrics_time = 0; 259 239 260 - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1); 240 + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3); 261 241 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 262 242 if (!smu_table->gpu_metrics_table) { 263 243 kfree(smu_table->metrics_table); ··· 1194 1174 return 0; 1195 1175 } 1196 1176 1197 - static int arcturus_get_power_limit(struct smu_context *smu) 1177 + static int arcturus_get_power_limit(struct smu_context *smu, 1178 + uint32_t *current_power_limit, 1179 + uint32_t *default_power_limit, 
1180 + uint32_t *max_power_limit) 1198 1181 { 1199 1182 struct smu_11_0_powerplay_table *powerplay_table = 1200 1183 (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table; ··· 1213 1190 power_limit = 1214 1191 pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0]; 1215 1192 } 1216 - smu->current_power_limit = smu->default_power_limit = power_limit; 1217 1193 1218 - if (smu->od_enabled) { 1219 - od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); 1194 + if (current_power_limit) 1195 + *current_power_limit = power_limit; 1196 + if (default_power_limit) 1197 + *default_power_limit = power_limit; 1220 1198 1221 - dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit); 1199 + if (max_power_limit) { 1200 + if (smu->od_enabled) { 1201 + od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); 1222 1202 1223 - power_limit *= (100 + od_percent); 1224 - power_limit /= 100; 1203 + dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit); 1204 + 1205 + power_limit *= (100 + od_percent); 1206 + power_limit /= 100; 1207 + } 1208 + 1209 + *max_power_limit = power_limit; 1225 1210 } 1226 - smu->max_power_limit = power_limit; 1227 1211 1228 1212 return 0; 1229 1213 } ··· 2308 2278 void **table) 2309 2279 { 2310 2280 struct smu_table_context *smu_table = &smu->smu_table; 2311 - struct gpu_metrics_v1_1 *gpu_metrics = 2312 - (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table; 2281 + struct gpu_metrics_v1_3 *gpu_metrics = 2282 + (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table; 2313 2283 SmuMetrics_t metrics; 2314 2284 int ret = 0; 2315 2285 ··· 2319 2289 if (ret) 2320 2290 return ret; 2321 2291 2322 - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1); 2292 + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); 2323 2293 2324 2294 gpu_metrics->temperature_edge = metrics.TemperatureEdge; 2325 2295 
gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; ··· 2348 2318 gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK]; 2349 2319 2350 2320 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 2321 + gpu_metrics->indep_throttle_status = 2322 + smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 2323 + arcturus_throttler_map); 2351 2324 2352 2325 gpu_metrics->current_fan_speed = metrics.CurrFanSpeed; 2353 2326 ··· 2363 2330 2364 2331 *table = (void *)gpu_metrics; 2365 2332 2366 - return sizeof(struct gpu_metrics_v1_1); 2333 + return sizeof(struct gpu_metrics_v1_3); 2367 2334 } 2368 2335 2369 2336 static const struct pptable_funcs arcturus_ppt_funcs = {
+79 -36
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
··· 238 238 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 239 239 }; 240 240 241 + static const uint8_t navi1x_throttler_map[] = { 242 + [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT), 243 + [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT), 244 + [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT), 245 + [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT), 246 + [THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT), 247 + [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT), 248 + [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT), 249 + [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT), 250 + [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT), 251 + [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT), 252 + [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT), 253 + [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT), 254 + [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT), 255 + [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT), 256 + [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT), 257 + [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT), 258 + [THROTTLER_PPM_BIT] = (SMU_THROTTLER_PPM_BIT), 259 + [THROTTLER_APCC_BIT] = (SMU_THROTTLER_APCC_BIT), 260 + }; 261 + 262 + 241 263 static bool is_asic_secure(struct smu_context *smu) 242 264 { 243 265 struct amdgpu_device *adev = smu->adev; ··· 464 442 465 443 memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable, 466 444 sizeof(PPTable_t)); 467 - 468 - return 0; 469 - } 470 - 471 - static int navi10_set_mp1_state(struct smu_context *smu, 472 - enum pp_mp1_state mp1_state) 473 - { 474 - struct amdgpu_device *adev = smu->adev; 475 - uint32_t mp1_fw_flags; 476 - int ret = 0; 477 - 478 - ret = smu_cmn_set_mp1_state(smu, mp1_state); 479 - if (ret) 480 - return ret; 481 - 482 - if (mp1_state == PP_MP1_STATE_UNLOAD) { 483 - mp1_fw_flags = RREG32_PCIE(MP1_Public | 484 - (smnMP1_FIRMWARE_FLAGS & 
0xffffffff)); 485 - 486 - mp1_fw_flags &= ~MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK; 487 - 488 - WREG32_PCIE(MP1_Public | 489 - (smnMP1_FIRMWARE_FLAGS & 0xffffffff), mp1_fw_flags); 490 - } 491 445 492 446 return 0; 493 447 } ··· 2136 2138 return ret; 2137 2139 } 2138 2140 2139 - static int navi10_get_power_limit(struct smu_context *smu) 2141 + static int navi10_get_power_limit(struct smu_context *smu, 2142 + uint32_t *current_power_limit, 2143 + uint32_t *default_power_limit, 2144 + uint32_t *max_power_limit) 2140 2145 { 2141 2146 struct smu_11_0_powerplay_table *powerplay_table = 2142 2147 (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table; ··· 2156 2155 power_limit = 2157 2156 pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0]; 2158 2157 } 2159 - smu->current_power_limit = smu->default_power_limit = power_limit; 2160 2158 2161 - if (smu->od_enabled && 2162 - navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) { 2163 - od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); 2159 + if (current_power_limit) 2160 + *current_power_limit = power_limit; 2161 + if (default_power_limit) 2162 + *default_power_limit = power_limit; 2164 2163 2165 - dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit); 2164 + if (max_power_limit) { 2165 + if (smu->od_enabled && 2166 + navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) { 2167 + od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); 2166 2168 2167 - power_limit *= (100 + od_percent); 2168 - power_limit /= 100; 2169 + dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit); 2170 + 2171 + power_limit *= (100 + od_percent); 2172 + power_limit /= 100; 2173 + } 2174 + 2175 + *max_power_limit = power_limit; 2169 2176 } 2170 - smu->max_power_limit = power_limit; 2171 2177 2172 2178 return 0; 2173 2179 } 
··· 2263 2255 *voltage = (uint16_t)value; 2264 2256 2265 2257 return 0; 2258 + } 2259 + 2260 + static int navi10_baco_enter(struct smu_context *smu) 2261 + { 2262 + struct amdgpu_device *adev = smu->adev; 2263 + 2264 + if (adev->in_runpm) 2265 + return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); 2266 + else 2267 + return smu_v11_0_baco_enter(smu); 2268 + } 2269 + 2270 + static int navi10_baco_exit(struct smu_context *smu) 2271 + { 2272 + struct amdgpu_device *adev = smu->adev; 2273 + 2274 + if (adev->in_runpm) { 2275 + /* Wait for PMFW handling for the Dstate change */ 2276 + msleep(10); 2277 + return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); 2278 + } else { 2279 + return smu_v11_0_baco_exit(smu); 2280 + } 2266 2281 } 2267 2282 2268 2283 static int navi10_set_default_od_settings(struct smu_context *smu) ··· 2707 2676 gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK]; 2708 2677 2709 2678 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 2679 + gpu_metrics->indep_throttle_status = 2680 + smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 2681 + navi1x_throttler_map); 2710 2682 2711 2683 gpu_metrics->current_fan_speed = metrics.CurrFanSpeed; 2712 2684 ··· 2787 2753 gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK]; 2788 2754 2789 2755 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 2756 + gpu_metrics->indep_throttle_status = 2757 + smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 2758 + navi1x_throttler_map); 2790 2759 2791 2760 gpu_metrics->current_fan_speed = metrics.CurrFanSpeed; 2792 2761 ··· 2866 2829 gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK]; 2867 2830 2868 2831 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 2832 + gpu_metrics->indep_throttle_status = 2833 + smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 2834 + navi1x_throttler_map); 2869 2835 2870 2836 gpu_metrics->current_fan_speed = metrics.CurrFanSpeed; 2871 2837 ··· 2951 2911 
gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK]; 2952 2912 2953 2913 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 2914 + gpu_metrics->indep_throttle_status = 2915 + smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 2916 + navi1x_throttler_map); 2954 2917 2955 2918 gpu_metrics->current_fan_speed = metrics.CurrFanSpeed; 2956 2919 ··· 3138 3095 .baco_is_support = smu_v11_0_baco_is_support, 3139 3096 .baco_get_state = smu_v11_0_baco_get_state, 3140 3097 .baco_set_state = smu_v11_0_baco_set_state, 3141 - .baco_enter = smu_v11_0_baco_enter, 3142 - .baco_exit = smu_v11_0_baco_exit, 3098 + .baco_enter = navi10_baco_enter, 3099 + .baco_exit = navi10_baco_exit, 3143 3100 .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, 3144 3101 .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, 3145 3102 .set_default_od_settings = navi10_set_default_od_settings, ··· 3155 3112 .get_fan_parameters = navi10_get_fan_parameters, 3156 3113 .post_init = navi10_post_smu_init, 3157 3114 .interrupt_work = smu_v11_0_interrupt_work, 3158 - .set_mp1_state = navi10_set_mp1_state, 3115 + .set_mp1_state = smu_cmn_set_mp1_state, 3159 3116 }; 3160 3117 3161 3118 void navi10_set_ppt_funcs(struct smu_context *smu)
+71 -15
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
··· 239 239 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 240 240 }; 241 241 242 + static const uint8_t sienna_cichlid_throttler_map[] = { 243 + [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT), 244 + [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT), 245 + [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT), 246 + [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT), 247 + [THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT), 248 + [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT), 249 + [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT), 250 + [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT), 251 + [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT), 252 + [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT), 253 + [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT), 254 + [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT), 255 + [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT), 256 + [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT), 257 + [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT), 258 + [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT), 259 + [THROTTLER_PPM_BIT] = (SMU_THROTTLER_PPM_BIT), 260 + [THROTTLER_APCC_BIT] = (SMU_THROTTLER_APCC_BIT), 261 + }; 262 + 242 263 static int 243 264 sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu, 244 265 uint32_t *feature_mask, uint32_t num) ··· 470 449 goto err0_out; 471 450 smu_table->metrics_time = 0; 472 451 473 - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1); 452 + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3); 474 453 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 475 454 if (!smu_table->gpu_metrics_table) 476 455 goto err1_out; ··· 1791 1770 return ret; 1792 1771 } 1793 1772 1794 - static int sienna_cichlid_get_power_limit(struct smu_context *smu) 1773 + static int 
sienna_cichlid_get_power_limit(struct smu_context *smu, 1774 + uint32_t *current_power_limit, 1775 + uint32_t *default_power_limit, 1776 + uint32_t *max_power_limit) 1795 1777 { 1796 1778 struct smu_11_0_7_powerplay_table *powerplay_table = 1797 1779 (struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table; ··· 1807 1783 power_limit = 1808 1784 table_member[PPT_THROTTLER_PPT0]; 1809 1785 } 1810 - smu->current_power_limit = smu->default_power_limit = power_limit; 1811 1786 1812 - if (smu->od_enabled) { 1813 - od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); 1787 + if (current_power_limit) 1788 + *current_power_limit = power_limit; 1789 + if (default_power_limit) 1790 + *default_power_limit = power_limit; 1814 1791 1815 - dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit); 1792 + if (max_power_limit) { 1793 + if (smu->od_enabled) { 1794 + od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]); 1816 1795 1817 - power_limit *= (100 + od_percent); 1818 - power_limit /= 100; 1796 + dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit); 1797 + 1798 + power_limit *= (100 + od_percent); 1799 + power_limit /= 100; 1800 + } 1801 + *max_power_limit = power_limit; 1819 1802 } 1820 - smu->max_power_limit = power_limit; 1821 1803 1822 1804 return 0; 1823 1805 } ··· 2128 2098 static int sienna_cichlid_run_btc(struct smu_context *smu) 2129 2099 { 2130 2100 return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); 2101 + } 2102 + 2103 + static int sienna_cichlid_baco_enter(struct smu_context *smu) 2104 + { 2105 + struct amdgpu_device *adev = smu->adev; 2106 + 2107 + if (adev->in_runpm) 2108 + return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); 2109 + else 2110 + return smu_v11_0_baco_enter(smu); 2111 + } 2112 + 2113 + static int sienna_cichlid_baco_exit(struct smu_context 
*smu) 2114 + { 2115 + struct amdgpu_device *adev = smu->adev; 2116 + 2117 + if (adev->in_runpm) { 2118 + /* Wait for PMFW handling for the Dstate change */ 2119 + msleep(10); 2120 + return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); 2121 + } else { 2122 + return smu_v11_0_baco_exit(smu); 2123 + } 2131 2124 } 2132 2125 2133 2126 static bool sienna_cichlid_is_mode1_reset_supported(struct smu_context *smu) ··· 3673 3620 void **table) 3674 3621 { 3675 3622 struct smu_table_context *smu_table = &smu->smu_table; 3676 - struct gpu_metrics_v1_1 *gpu_metrics = 3677 - (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table; 3623 + struct gpu_metrics_v1_3 *gpu_metrics = 3624 + (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table; 3678 3625 SmuMetricsExternal_t metrics_external; 3679 3626 SmuMetrics_t *metrics = 3680 3627 &(metrics_external.SmuMetrics); ··· 3688 3635 if (ret) 3689 3636 return ret; 3690 3637 3691 - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1); 3638 + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); 3692 3639 3693 3640 gpu_metrics->temperature_edge = metrics->TemperatureEdge; 3694 3641 gpu_metrics->temperature_hotspot = metrics->TemperatureHotspot; ··· 3723 3670 gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1]; 3724 3671 3725 3672 gpu_metrics->throttle_status = metrics->ThrottlerStatus; 3673 + gpu_metrics->indep_throttle_status = 3674 + smu_cmn_get_indep_throttler_status(metrics->ThrottlerStatus, 3675 + sienna_cichlid_throttler_map); 3726 3676 3727 3677 gpu_metrics->current_fan_speed = metrics->CurrFanSpeed; 3728 3678 ··· 3748 3692 3749 3693 *table = (void *)gpu_metrics; 3750 3694 3751 - return sizeof(struct gpu_metrics_v1_1); 3695 + return sizeof(struct gpu_metrics_v1_3); 3752 3696 } 3753 3697 3754 3698 static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu) ··· 3931 3875 .baco_is_support = smu_v11_0_baco_is_support, 3932 3876 .baco_get_state = smu_v11_0_baco_get_state, 3933 3877 .baco_set_state = 
smu_v11_0_baco_set_state, 3934 - .baco_enter = smu_v11_0_baco_enter, 3935 - .baco_exit = smu_v11_0_baco_exit, 3878 + .baco_enter = sienna_cichlid_baco_enter, 3879 + .baco_exit = sienna_cichlid_baco_exit, 3936 3880 .mode1_reset_is_support = sienna_cichlid_is_mode1_reset_supported, 3937 3881 .mode1_reset = smu_v11_0_mode1_reset, 3938 3882 .get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
+2 -9
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
··· 1474 1474 return smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL); 1475 1475 } 1476 1476 1477 - static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq) 1477 + int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, 1478 + enum smu_v11_0_baco_seq baco_seq) 1478 1479 { 1479 1480 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL); 1480 1481 } ··· 1579 1578 1580 1579 int smu_v11_0_baco_enter(struct smu_context *smu) 1581 1580 { 1582 - struct amdgpu_device *adev = smu->adev; 1583 1581 int ret = 0; 1584 - 1585 - /* Arcturus does not need this audio workaround */ 1586 - if (adev->asic_type != CHIP_ARCTURUS) { 1587 - ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); 1588 - if (ret) 1589 - return ret; 1590 - } 1591 1582 1592 1583 ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER); 1593 1584 if (ret)
+39 -12
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
··· 190 190 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 191 191 }; 192 192 193 + static const uint8_t vangogh_throttler_map[] = { 194 + [THROTTLER_STATUS_BIT_SPL] = (SMU_THROTTLER_SPL_BIT), 195 + [THROTTLER_STATUS_BIT_FPPT] = (SMU_THROTTLER_FPPT_BIT), 196 + [THROTTLER_STATUS_BIT_SPPT] = (SMU_THROTTLER_SPPT_BIT), 197 + [THROTTLER_STATUS_BIT_SPPT_APU] = (SMU_THROTTLER_SPPT_APU_BIT), 198 + [THROTTLER_STATUS_BIT_THM_CORE] = (SMU_THROTTLER_TEMP_CORE_BIT), 199 + [THROTTLER_STATUS_BIT_THM_GFX] = (SMU_THROTTLER_TEMP_GPU_BIT), 200 + [THROTTLER_STATUS_BIT_THM_SOC] = (SMU_THROTTLER_TEMP_SOC_BIT), 201 + [THROTTLER_STATUS_BIT_TDC_VDD] = (SMU_THROTTLER_TDC_VDD_BIT), 202 + [THROTTLER_STATUS_BIT_TDC_SOC] = (SMU_THROTTLER_TDC_SOC_BIT), 203 + [THROTTLER_STATUS_BIT_TDC_GFX] = (SMU_THROTTLER_TDC_GFX_BIT), 204 + [THROTTLER_STATUS_BIT_TDC_CVIP] = (SMU_THROTTLER_TDC_CVIP_BIT), 205 + }; 206 + 193 207 static int vangogh_tables_init(struct smu_context *smu) 194 208 { 195 209 struct smu_table_context *smu_table = &smu->smu_table; ··· 240 226 goto err0_out; 241 227 smu_table->metrics_time = 0; 242 228 243 - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1); 229 + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); 244 230 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 245 231 if (!smu_table->gpu_metrics_table) 246 232 goto err1_out; ··· 1646 1632 void **table) 1647 1633 { 1648 1634 struct smu_table_context *smu_table = &smu->smu_table; 1649 - struct gpu_metrics_v2_1 *gpu_metrics = 1650 - (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table; 1635 + struct gpu_metrics_v2_2 *gpu_metrics = 1636 + (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; 1651 1637 SmuMetrics_legacy_t metrics; 1652 1638 int ret = 0; 1653 1639 ··· 1655 1641 if (ret) 1656 1642 return ret; 1657 1643 1658 - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1); 1644 + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); 1659 
1645 1660 1646 gpu_metrics->temperature_gfx = metrics.GfxTemperature; 1661 1647 gpu_metrics->temperature_soc = metrics.SocTemperature; ··· 1688 1674 gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0]; 1689 1675 1690 1676 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 1677 + gpu_metrics->indep_throttle_status = 1678 + smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 1679 + vangogh_throttler_map); 1691 1680 1692 1681 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1693 1682 1694 1683 *table = (void *)gpu_metrics; 1695 1684 1696 - return sizeof(struct gpu_metrics_v2_1); 1685 + return sizeof(struct gpu_metrics_v2_2); 1697 1686 } 1698 1687 1699 1688 static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, 1700 1689 void **table) 1701 1690 { 1702 1691 struct smu_table_context *smu_table = &smu->smu_table; 1703 - struct gpu_metrics_v2_1 *gpu_metrics = 1704 - (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table; 1692 + struct gpu_metrics_v2_2 *gpu_metrics = 1693 + (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; 1705 1694 SmuMetrics_t metrics; 1706 1695 int ret = 0; 1707 1696 ··· 1712 1695 if (ret) 1713 1696 return ret; 1714 1697 1715 - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1); 1698 + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); 1716 1699 1717 1700 gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; 1718 1701 gpu_metrics->temperature_soc = metrics.Current.SocTemperature; ··· 1752 1735 gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; 1753 1736 1754 1737 gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; 1738 + gpu_metrics->indep_throttle_status = 1739 + smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus, 1740 + vangogh_throttler_map); 1755 1741 1756 1742 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1757 1743 1758 1744 *table = (void *)gpu_metrics; 1759 1745 1760 - return sizeof(struct gpu_metrics_v2_1); 1746 + return sizeof(struct 
gpu_metrics_v2_2); 1761 1747 } 1762 1748 1763 1749 static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu, ··· 2071 2051 return vangogh_mode_reset(smu, SMU_RESET_MODE_2); 2072 2052 } 2073 2053 2074 - static int vangogh_get_power_limit(struct smu_context *smu) 2054 + static int vangogh_get_power_limit(struct smu_context *smu, 2055 + uint32_t *current_power_limit, 2056 + uint32_t *default_power_limit, 2057 + uint32_t *max_power_limit) 2075 2058 { 2076 2059 struct smu_11_5_power_context *power_context = 2077 2060 smu->smu_power.power_context; ··· 2090 2067 return ret; 2091 2068 } 2092 2069 /* convert from milliwatt to watt */ 2093 - smu->current_power_limit = smu->default_power_limit = ppt_limit / 1000; 2094 - smu->max_power_limit = 29; 2070 + if (current_power_limit) 2071 + *current_power_limit = ppt_limit / 1000; 2072 + if (default_power_limit) 2073 + *default_power_limit = ppt_limit / 1000; 2074 + if (max_power_limit) 2075 + *max_power_limit = 29; 2095 2076 2096 2077 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit); 2097 2078 if (ret) {
+58 -5
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
··· 128 128 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 129 129 }; 130 130 131 + static const uint8_t renoir_throttler_map[] = { 132 + [THROTTLER_STATUS_BIT_SPL] = (SMU_THROTTLER_SPL_BIT), 133 + [THROTTLER_STATUS_BIT_FPPT] = (SMU_THROTTLER_FPPT_BIT), 134 + [THROTTLER_STATUS_BIT_SPPT] = (SMU_THROTTLER_SPPT_BIT), 135 + [THROTTLER_STATUS_BIT_SPPT_APU] = (SMU_THROTTLER_SPPT_APU_BIT), 136 + [THROTTLER_STATUS_BIT_THM_CORE] = (SMU_THROTTLER_TEMP_CORE_BIT), 137 + [THROTTLER_STATUS_BIT_THM_GFX] = (SMU_THROTTLER_TEMP_GPU_BIT), 138 + [THROTTLER_STATUS_BIT_THM_SOC] = (SMU_THROTTLER_TEMP_SOC_BIT), 139 + [THROTTLER_STATUS_BIT_TDC_VDD] = (SMU_THROTTLER_TDC_VDD_BIT), 140 + [THROTTLER_STATUS_BIT_TDC_SOC] = (SMU_THROTTLER_TDC_SOC_BIT), 141 + [THROTTLER_STATUS_BIT_PROCHOT_CPU] = (SMU_THROTTLER_PROCHOT_CPU_BIT), 142 + [THROTTLER_STATUS_BIT_PROCHOT_GFX] = (SMU_THROTTLER_PROCHOT_GFX_BIT), 143 + [THROTTLER_STATUS_BIT_EDC_CPU] = (SMU_THROTTLER_EDC_CPU_BIT), 144 + [THROTTLER_STATUS_BIT_EDC_GFX] = (SMU_THROTTLER_EDC_GFX_BIT), 145 + }; 146 + 131 147 static int renoir_init_smc_tables(struct smu_context *smu) 132 148 { 133 149 struct smu_table_context *smu_table = &smu->smu_table; ··· 169 153 if (!smu_table->watermarks_table) 170 154 goto err2_out; 171 155 172 - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1); 156 + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); 173 157 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 174 158 if (!smu_table->gpu_metrics_table) 175 159 goto err3_out; ··· 1180 1164 case METRICS_VOLTAGE_VDDSOC: 1181 1165 *value = metrics->Voltage[1]; 1182 1166 break; 1167 + case METRICS_SS_APU_SHARE: 1168 + /* return the percentage of APU power with respect to APU's power limit. 1169 + * percentage is reported, this isn't boost value. Smartshift power 1170 + * boost/shift is only when the percentage is more than 100. 
1171 + */ 1172 + if (metrics->StapmOriginalLimit > 0) 1173 + *value = (metrics->ApuPower * 100) / metrics->StapmOriginalLimit; 1174 + else 1175 + *value = 0; 1176 + break; 1177 + case METRICS_SS_DGPU_SHARE: 1178 + /* return the percentage of dGPU power with respect to dGPU's power limit. 1179 + * percentage is reported, this isn't boost value. Smartshift power 1180 + * boost/shift is only when the percentage is more than 100. 1181 + */ 1182 + if ((metrics->dGpuPower > 0) && 1183 + (metrics->StapmCurrentLimit > metrics->StapmOriginalLimit)) 1184 + *value = (metrics->dGpuPower * 100) / 1185 + (metrics->StapmCurrentLimit - metrics->StapmOriginalLimit); 1186 + else 1187 + *value = 0; 1188 + break; 1183 1189 default: 1184 1190 *value = UINT_MAX; 1185 1191 break; ··· 1273 1235 (uint32_t *)data); 1274 1236 *size = 4; 1275 1237 break; 1238 + case AMDGPU_PP_SENSOR_SS_APU_SHARE: 1239 + ret = renoir_get_smu_metrics_data(smu, 1240 + METRICS_SS_APU_SHARE, 1241 + (uint32_t *)data); 1242 + *size = 4; 1243 + break; 1244 + case AMDGPU_PP_SENSOR_SS_DGPU_SHARE: 1245 + ret = renoir_get_smu_metrics_data(smu, 1246 + METRICS_SS_DGPU_SHARE, 1247 + (uint32_t *)data); 1248 + *size = 4; 1249 + break; 1276 1250 default: 1277 1251 ret = -EOPNOTSUPP; 1278 1252 break; ··· 1314 1264 void **table) 1315 1265 { 1316 1266 struct smu_table_context *smu_table = &smu->smu_table; 1317 - struct gpu_metrics_v2_1 *gpu_metrics = 1318 - (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table; 1267 + struct gpu_metrics_v2_2 *gpu_metrics = 1268 + (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; 1319 1269 SmuMetrics_t metrics; 1320 1270 int ret = 0; 1321 1271 ··· 1323 1273 if (ret) 1324 1274 return ret; 1325 1275 1326 - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1); 1276 + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); 1327 1277 1328 1278 gpu_metrics->temperature_gfx = metrics.GfxTemperature; 1329 1279 gpu_metrics->temperature_soc = metrics.SocTemperature; ··· 1361 1311 
gpu_metrics->current_l3clk[1] = metrics.L3Frequency[1]; 1362 1312 1363 1313 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 1314 + gpu_metrics->indep_throttle_status = 1315 + smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 1316 + renoir_throttler_map); 1364 1317 1365 1318 gpu_metrics->fan_pwm = metrics.FanPwm; 1366 1319 ··· 1371 1318 1372 1319 *table = (void *)gpu_metrics; 1373 1320 1374 - return sizeof(struct gpu_metrics_v2_1); 1321 + return sizeof(struct gpu_metrics_v2_2); 1375 1322 } 1376 1323 1377 1324 static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state)
+81 -20
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
··· 191 191 TAB_MAP(I2C_COMMANDS), 192 192 }; 193 193 194 + static const uint8_t aldebaran_throttler_map[] = { 195 + [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT), 196 + [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT), 197 + [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT), 198 + [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT), 199 + [THROTTLER_TDC_HBM_BIT] = (SMU_THROTTLER_TDC_MEM_BIT), 200 + [THROTTLER_TEMP_GPU_BIT] = (SMU_THROTTLER_TEMP_GPU_BIT), 201 + [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT), 202 + [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT), 203 + [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT), 204 + [THROTTLER_TEMP_VR_MEM_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT), 205 + [THROTTLER_APCC_BIT] = (SMU_THROTTLER_APCC_BIT), 206 + }; 207 + 194 208 static int aldebaran_tables_init(struct smu_context *smu) 195 209 { 196 210 struct smu_table_context *smu_table = &smu->smu_table; ··· 227 213 return -ENOMEM; 228 214 smu_table->metrics_time = 0; 229 215 230 - smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_2); 216 + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3); 231 217 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 232 218 if (!smu_table->gpu_metrics_table) { 233 219 kfree(smu_table->metrics_table); ··· 524 510 return (abs(frequency1 - frequency2) <= EPSILON); 525 511 } 526 512 513 + static bool aldebaran_is_primary(struct smu_context *smu) 514 + { 515 + struct amdgpu_device *adev = smu->adev; 516 + 517 + if (adev->smuio.funcs && adev->smuio.funcs->get_die_id) 518 + return adev->smuio.funcs->get_die_id(adev) == 0; 519 + 520 + return true; 521 + } 522 + 527 523 static int aldebaran_get_smu_metrics_data(struct smu_context *smu, 528 524 MetricsMember_t member, 529 525 uint32_t *value) ··· 587 563 *value = metrics->AverageUclkActivity; 588 564 break; 589 565 case METRICS_AVERAGE_SOCKETPOWER: 590 - *value = metrics->AverageSocketPower << 8; 
566 + /* Valid power data is available only from primary die */ 567 + *value = aldebaran_is_primary(smu) ? 568 + metrics->AverageSocketPower << 8 : 569 + 0; 591 570 break; 592 571 case METRICS_TEMPERATURE_EDGE: 593 572 *value = metrics->TemperatureEdge * ··· 1159 1132 return ret; 1160 1133 } 1161 1134 1162 - static int aldebaran_get_power_limit(struct smu_context *smu) 1135 + static int aldebaran_get_power_limit(struct smu_context *smu, 1136 + uint32_t *current_power_limit, 1137 + uint32_t *default_power_limit, 1138 + uint32_t *max_power_limit) 1163 1139 { 1164 1140 PPTable_t *pptable = smu->smu_table.driver_pptable; 1165 1141 uint32_t power_limit = 0; ··· 1171 1141 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) 1172 1142 return -EINVAL; 1173 1143 1174 - ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit); 1144 + /* Valid power data is available only from primary die. 1145 + * For secondary die show the value as 0. 1146 + */ 1147 + if (aldebaran_is_primary(smu)) { 1148 + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, 1149 + &power_limit); 1175 1150 1176 - if (ret) { 1177 - /* the last hope to figure out the ppt limit */ 1178 - if (!pptable) { 1179 - dev_err(smu->adev->dev, "Cannot get PPT limit due to pptable missing!"); 1180 - return -EINVAL; 1151 + if (ret) { 1152 + /* the last hope to figure out the ppt limit */ 1153 + if (!pptable) { 1154 + dev_err(smu->adev->dev, 1155 + "Cannot get PPT limit due to pptable missing!"); 1156 + return -EINVAL; 1157 + } 1158 + power_limit = pptable->PptLimit; 1181 1159 } 1182 - power_limit = pptable->PptLimit; 1183 1160 } 1184 1161 1185 - smu->current_power_limit = smu->default_power_limit = power_limit; 1186 - if (pptable) 1187 - smu->max_power_limit = pptable->PptLimit; 1162 + if (current_power_limit) 1163 + *current_power_limit = power_limit; 1164 + if (default_power_limit) 1165 + *default_power_limit = power_limit; 1166 + 1167 + if (max_power_limit) { 1168 + if (pptable) 1169 + *max_power_limit 
= pptable->PptLimit; 1170 + } 1188 1171 1189 1172 return 0; 1173 + } 1174 + 1175 + static int aldebaran_set_power_limit(struct smu_context *smu, uint32_t n) 1176 + { 1177 + /* Power limit can be set only through primary die */ 1178 + if (aldebaran_is_primary(smu)) 1179 + return smu_v13_0_set_power_limit(smu, n); 1180 + 1181 + return -EINVAL; 1190 1182 } 1191 1183 1192 1184 static int aldebaran_system_features_control(struct smu_context *smu, bool enable) ··· 1758 1706 void **table) 1759 1707 { 1760 1708 struct smu_table_context *smu_table = &smu->smu_table; 1761 - struct gpu_metrics_v1_2 *gpu_metrics = 1762 - (struct gpu_metrics_v1_2 *)smu_table->gpu_metrics_table; 1709 + struct gpu_metrics_v1_3 *gpu_metrics = 1710 + (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table; 1763 1711 SmuMetrics_t metrics; 1764 1712 int i, ret = 0; 1765 1713 ··· 1769 1717 if (ret) 1770 1718 return ret; 1771 1719 1772 - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 2); 1720 + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); 1773 1721 1774 1722 gpu_metrics->temperature_edge = metrics.TemperatureEdge; 1775 1723 gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; ··· 1782 1730 gpu_metrics->average_umc_activity = metrics.AverageUclkActivity; 1783 1731 gpu_metrics->average_mm_activity = 0; 1784 1732 1785 - gpu_metrics->average_socket_power = metrics.AverageSocketPower; 1786 - gpu_metrics->energy_accumulator = 1733 + /* Valid power data is available only from primary die */ 1734 + if (aldebaran_is_primary(smu)) { 1735 + gpu_metrics->average_socket_power = metrics.AverageSocketPower; 1736 + gpu_metrics->energy_accumulator = 1787 1737 (uint64_t)metrics.EnergyAcc64bitHigh << 32 | 1788 1738 metrics.EnergyAcc64bitLow; 1739 + } else { 1740 + gpu_metrics->average_socket_power = 0; 1741 + gpu_metrics->energy_accumulator = 0; 1742 + } 1789 1743 1790 1744 gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency; 1791 1745 gpu_metrics->average_socclk_frequency = 
metrics.AverageSocclkFrequency; ··· 1806 1748 gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK]; 1807 1749 1808 1750 gpu_metrics->throttle_status = metrics.ThrottlerStatus; 1751 + gpu_metrics->indep_throttle_status = 1752 + smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus, 1753 + aldebaran_throttler_map); 1809 1754 1810 1755 gpu_metrics->current_fan_speed = 0; 1811 1756 ··· 1830 1769 1831 1770 *table = (void *)gpu_metrics; 1832 1771 1833 - return sizeof(struct gpu_metrics_v1_2); 1772 + return sizeof(struct gpu_metrics_v1_3); 1834 1773 } 1835 1774 1836 1775 static int aldebaran_mode2_reset(struct smu_context *smu) ··· 1959 1898 .get_enabled_mask = smu_cmn_get_enabled_mask, 1960 1899 .feature_is_enabled = smu_cmn_feature_is_enabled, 1961 1900 .disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception, 1962 - .set_power_limit = smu_v13_0_set_power_limit, 1901 + .set_power_limit = aldebaran_set_power_limit, 1963 1902 .init_max_sustainable_clocks = smu_v13_0_init_max_sustainable_clocks, 1964 1903 .enable_thermal_alert = smu_v13_0_enable_thermal_alert, 1965 1904 .disable_thermal_alert = smu_v13_0_disable_thermal_alert,
+34
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
··· 356 356 case METRICS_VOLTAGE_VDDSOC: 357 357 *value = metrics->Voltage[1]; 358 358 break; 359 + case METRICS_SS_APU_SHARE: 360 + /* return the percentage of APU power with respect to APU's power limit. 361 + * percentage is reported, this isn't boost value. Smartshift power 362 + * boost/shift is only when the percentage is more than 100. 363 + */ 364 + if (metrics->StapmOpnLimit > 0) 365 + *value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit; 366 + else 367 + *value = 0; 368 + break; 369 + case METRICS_SS_DGPU_SHARE: 370 + /* return the percentage of dGPU power with respect to dGPU's power limit. 371 + * percentage is reported, this isn't boost value. Smartshift power 372 + * boost/shift is only when the percentage is more than 100. 373 + */ 374 + if ((metrics->dGpuPower > 0) && 375 + (metrics->StapmCurrentLimit > metrics->StapmOpnLimit)) 376 + *value = (metrics->dGpuPower * 100) / 377 + (metrics->StapmCurrentLimit - metrics->StapmOpnLimit); 378 + else 379 + *value = 0; 380 + break; 359 381 default: 360 382 *value = UINT_MAX; 361 383 break; ··· 447 425 ret = yellow_carp_get_smu_metrics_data(smu, 448 426 METRICS_VOLTAGE_VDDSOC, 449 427 (uint32_t *)data); 428 + *size = 4; 429 + break; 430 + case AMDGPU_PP_SENSOR_SS_APU_SHARE: 431 + ret = yellow_carp_get_smu_metrics_data(smu, 432 + METRICS_SS_APU_SHARE, 433 + (uint32_t *)data); 434 + *size = 4; 435 + break; 436 + case AMDGPU_PP_SENSOR_SS_DGPU_SHARE: 437 + ret = yellow_carp_get_smu_metrics_data(smu, 438 + METRICS_SS_DGPU_SHARE, 439 + (uint32_t *)data); 450 440 *size = 4; 451 441 break; 452 442 default:
+54 -9
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
··· 398 398 399 399 } 400 400 401 + uint64_t smu_cmn_get_indep_throttler_status( 402 + const unsigned long dep_status, 403 + const uint8_t *throttler_map) 404 + { 405 + uint64_t indep_status = 0; 406 + uint8_t dep_bit = 0; 407 + 408 + for_each_set_bit(dep_bit, &dep_status, 32) 409 + indep_status |= 1ULL << throttler_map[dep_bit]; 410 + 411 + return indep_status; 412 + } 413 + 401 414 int smu_cmn_feature_update_enable_state(struct smu_context *smu, 402 415 uint64_t feature_mask, 403 416 bool enabled) ··· 588 575 return ret; 589 576 } 590 577 578 + /** 579 + * smu_cmn_disable_all_features_with_exception - disable all dpm features 580 + * except this specified by 581 + * @mask 582 + * 583 + * @smu: smu_context pointer 584 + * @no_hw_disablement: whether real dpm disablement should be performed 585 + * true: update the cache(about dpm enablement state) only 586 + * false: real dpm disablement plus cache update 587 + * @mask: the dpm feature which should not be disabled 588 + * SMU_FEATURE_COUNT: no exception, all dpm features 589 + * to disable 590 + * 591 + * Returns: 592 + * 0 on success or a negative error code on failure. 
593 + */ 591 594 int smu_cmn_disable_all_features_with_exception(struct smu_context *smu, 595 + bool no_hw_disablement, 592 596 enum smu_feature_mask mask) 593 597 { 598 + struct smu_feature *feature = &smu->smu_feature; 594 599 uint64_t features_to_disable = U64_MAX; 595 600 int skipped_feature_id; 596 601 597 - skipped_feature_id = smu_cmn_to_asic_specific_index(smu, 598 - CMN2ASIC_MAPPING_FEATURE, 599 - mask); 600 - if (skipped_feature_id < 0) 601 - return -EINVAL; 602 + if (mask != SMU_FEATURE_COUNT) { 603 + skipped_feature_id = smu_cmn_to_asic_specific_index(smu, 604 + CMN2ASIC_MAPPING_FEATURE, 605 + mask); 606 + if (skipped_feature_id < 0) 607 + return -EINVAL; 602 608 603 - features_to_disable &= ~(1ULL << skipped_feature_id); 609 + features_to_disable &= ~(1ULL << skipped_feature_id); 610 + } 604 611 605 - return smu_cmn_feature_update_enable_state(smu, 606 - features_to_disable, 607 - 0); 612 + if (no_hw_disablement) { 613 + mutex_lock(&feature->mutex); 614 + bitmap_andnot(feature->enabled, feature->enabled, 615 + (unsigned long *)(&features_to_disable), SMU_FEATURE_MAX); 616 + mutex_unlock(&feature->mutex); 617 + 618 + return 0; 619 + } else { 620 + return smu_cmn_feature_update_enable_state(smu, 621 + features_to_disable, 622 + 0); 623 + } 608 624 } 609 625 610 626 int smu_cmn_get_smc_version(struct smu_context *smu, ··· 814 772 break; 815 773 case METRICS_VERSION(2, 1): 816 774 structure_size = sizeof(struct gpu_metrics_v2_1); 775 + break; 776 + case METRICS_VERSION(2, 2): 777 + structure_size = sizeof(struct gpu_metrics_v2_2); 817 778 break; 818 779 default: 819 780 return;
+5
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
··· 60 60 uint32_t *feature_mask, 61 61 uint32_t num); 62 62 63 + uint64_t smu_cmn_get_indep_throttler_status( 64 + const unsigned long dep_status, 65 + const uint8_t *throttler_map); 66 + 63 67 int smu_cmn_feature_update_enable_state(struct smu_context *smu, 64 68 uint64_t feature_mask, 65 69 bool enabled); ··· 79 75 uint64_t new_mask); 80 76 81 77 int smu_cmn_disable_all_features_with_exception(struct smu_context *smu, 78 + bool no_hw_disablement, 82 79 enum smu_feature_mask mask); 83 80 84 81 int smu_cmn_get_smc_version(struct smu_context *smu,
+2 -2
drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
··· 57 57 #define smu_feature_set_allowed_mask(smu) smu_ppt_funcs(set_allowed_mask, 0, smu) 58 58 #define smu_feature_get_enabled_mask(smu, mask, num) smu_ppt_funcs(get_enabled_mask, 0, smu, mask, num) 59 59 #define smu_feature_is_enabled(smu, mask) smu_ppt_funcs(feature_is_enabled, 0, smu, mask) 60 - #define smu_disable_all_features_with_exception(smu, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask) 60 + #define smu_disable_all_features_with_exception(smu, no_hw_disablement, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, no_hw_disablement, mask) 61 61 #define smu_is_dpm_running(smu) smu_ppt_funcs(is_dpm_running, 0 , smu) 62 62 #define smu_notify_display_change(smu) smu_ppt_funcs(notify_display_change, 0, smu) 63 63 #define smu_populate_umd_state_clk(smu) smu_ppt_funcs(populate_umd_state_clk, 0, smu) ··· 82 82 #define smu_i2c_fini(smu, control) smu_ppt_funcs(i2c_fini, 0, smu, control) 83 83 #define smu_get_unique_id(smu) smu_ppt_funcs(get_unique_id, 0, smu) 84 84 #define smu_log_thermal_throttling(smu) smu_ppt_funcs(log_thermal_throttling_event, 0, smu) 85 - #define smu_get_asic_power_limits(smu) smu_ppt_funcs(get_power_limit, 0, smu) 85 + #define smu_get_asic_power_limits(smu, current, default, max) smu_ppt_funcs(get_power_limit, 0, smu, current, default, max) 86 86 #define smu_get_pp_feature_mask(smu, buf) smu_ppt_funcs(get_pp_feature_mask, 0, smu, buf) 87 87 #define smu_set_pp_feature_mask(smu, new_mask) smu_ppt_funcs(set_pp_feature_mask, 0, smu, new_mask) 88 88 #define smu_gfx_ulv_control(smu, enablement) smu_ppt_funcs(gfx_ulv_control, 0, smu, enablement)
+17
include/drm/drm_dp_helper.h
··· 1377 1377 #define DP_SYMBOL_ERROR_COUNT_LANE1_PHY_REPEATER1 0xf0037 /* 1.3 */ 1378 1378 #define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */ 1379 1379 #define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */ 1380 + 1381 + #define __DP_FEC1_BASE 0xf0290 /* 1.4 */ 1382 + #define __DP_FEC2_BASE 0xf0298 /* 1.4 */ 1383 + #define DP_FEC_BASE(dp_phy) \ 1384 + (__DP_FEC1_BASE + ((__DP_FEC2_BASE - __DP_FEC1_BASE) * \ 1385 + ((dp_phy) - DP_PHY_LTTPR1))) 1386 + 1387 + #define DP_FEC_REG(dp_phy, fec1_reg) \ 1388 + (DP_FEC_BASE(dp_phy) - DP_FEC_BASE(DP_PHY_LTTPR1) + fec1_reg) 1389 + 1380 1390 #define DP_FEC_STATUS_PHY_REPEATER1 0xf0290 /* 1.4 */ 1391 + #define DP_FEC_STATUS_PHY_REPEATER(dp_phy) \ 1392 + DP_FEC_REG(dp_phy, DP_FEC_STATUS_PHY_REPEATER1) 1393 + 1381 1394 #define DP_FEC_ERROR_COUNT_PHY_REPEATER1 0xf0291 /* 1.4 */ 1382 1395 #define DP_FEC_CAPABILITY_PHY_REPEATER1 0xf0294 /* 1.4a */ 1396 + 1397 + #define DP_LTTPR_MAX_ADD 0xf02ff /* 1.4 */ 1398 + 1399 + #define DP_DPCD_MAX_ADD 0xfffff /* 1.4 */ 1383 1400 1384 1401 /* Repeater modes */ 1385 1402 #define DP_PHY_REPEATER_MODE_TRANSPARENT 0x55 /* 1.3 */