
Merge tag 'amd-drm-fixes-6.16-2025-05-29' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-fixes-6.16-2025-05-29:

amdgpu:
- UserQ fixes
- SMU 13.x fixes
- VCN fixes
- JPEG fixes
- Misc cleanups
- runtime pm fix
- DCN 4.0.1 fixes
- Misc display fixes
- ISP fix
- VRAM manager fix
- RAS fixes

amdkfd:
- SVM fix
- Misc cleanups
- Ref leak fix
- WPTR BO fix

radeon:
- Misc cleanups

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://lore.kernel.org/r/20250529205215.6790-1-alexander.deucher@amd.com

Dave Airlie 1c1df79c 84e2f918

+1534 -348
+1 -1
drivers/gpu/drm/amd/amdgpu/Kconfig
···
 
 config DRM_AMD_ISP
     bool "Enable AMD Image Signal Processor IP support"
-    depends on DRM_AMDGPU
+    depends on DRM_AMDGPU && ACPI
     select MFD_CORE
     select PM_GENERIC_DOMAINS if PM
     help
+4
drivers/gpu/drm/amd/amdgpu/amdgpu.h
···
 static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
 #endif
 
+#if defined(CONFIG_DRM_AMD_ISP)
+int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN]);
+#endif
+
 void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
 void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);
+31 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
···
     return true;
 #endif /* CONFIG_AMD_PMC */
 }
-
 #endif /* CONFIG_SUSPEND */
+
+#if IS_ENABLED(CONFIG_DRM_AMD_ISP)
+static const struct acpi_device_id isp_sensor_ids[] = {
+    { "OMNI5C10" },
+    { }
+};
+
+static int isp_match_acpi_device_ids(struct device *dev, const void *data)
+{
+    return acpi_match_device(data, dev) ? 1 : 0;
+}
+
+int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN])
+{
+    struct device *pdev __free(put_device) = NULL;
+    struct acpi_device *acpi_pdev;
+
+    pdev = bus_find_device(&platform_bus_type, NULL, isp_sensor_ids,
+                           isp_match_acpi_device_ids);
+    if (!pdev)
+        return -EINVAL;
+
+    acpi_pdev = ACPI_COMPANION(pdev);
+    if (!acpi_pdev)
+        return -ENODEV;
+
+    strscpy(*hid, acpi_device_hid(acpi_pdev));
+
+    return 0;
+}
+#endif /* CONFIG_DRM_AMD_ISP */
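The new helper leans on the kernel's scope-based resource management from <linux/cleanup.h>: `__free(put_device)` attaches a cleanup action to the local `pdev`, so the reference taken by bus_find_device() is dropped automatically on every return path. A minimal sketch of the mechanism, with hypothetical demo_* names:

#include <linux/cleanup.h>

struct demo_res { int id; };

struct demo_res *demo_acquire(void);
void demo_release(struct demo_res *res);
int demo_work(struct demo_res *res);

/* Teach the cleanup machinery how to free a demo_res. */
DEFINE_FREE(demo_release, struct demo_res *, if (_T) demo_release(_T))

static int demo_use(void)
{
    /* demo_release() runs when res goes out of scope, on any path. */
    struct demo_res *res __free(demo_release) = demo_acquire();

    if (!res)
        return -ENODEV; /* no manual release needed here either */

    return demo_work(res);
}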
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
···
 {
     struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
 
+    if (!bo || !*bo)
+        return;
+
     (void)amdgpu_bo_reserve(*bo, true);
     amdgpu_bo_kunmap(*bo);
     amdgpu_bo_unpin(*bo);
+1 -13
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
···
     return timeout;
 }
 
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+static void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 {
     struct amdgpu_ctx *ctx;
     struct idr *idp;
···
 
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 {
-    struct amdgpu_ctx *ctx;
-    struct idr *idp;
-    uint32_t id;
-
     amdgpu_ctx_mgr_entity_fini(mgr);
-
-    idp = &mgr->ctx_handles;
-
-    idr_for_each_entry(idp, ctx, id) {
-        if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
-            DRM_ERROR("ctx %p is still alive\n", ctx);
-    }
-
     idr_destroy(&mgr->ctx_handles);
     mutex_destroy(&mgr->lock);
 }
-1
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
···
 
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
              struct amdgpu_device *adev);
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
+5 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
···
         break;
     case CHIP_VEGA10:
         /* enable BACO as runpm mode if noretry=0 */
-        if (!adev->gmc.noretry)
+        if (!adev->gmc.noretry && !amdgpu_passthrough(adev))
             adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
         break;
     default:
         /* enable BACO as runpm mode on CI+ */
-        adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+        if (!amdgpu_passthrough(adev))
+            adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
         break;
     }
 
···
 
     amdgpu_fru_sysfs_init(adev);
     amdgpu_reg_state_sysfs_init(adev);
-    amdgpu_xcp_cfg_sysfs_init(adev);
+    amdgpu_xcp_sysfs_init(adev);
 
     if (IS_ENABLED(CONFIG_PERF_EVENTS))
         r = amdgpu_pmu_init(adev);
···
     amdgpu_fru_sysfs_fini(adev);
 
     amdgpu_reg_state_sysfs_fini(adev);
-    amdgpu_xcp_cfg_sysfs_fini(adev);
+    amdgpu_xcp_sysfs_fini(adev);
 
     /* disable ras feature must before hw fini */
     amdgpu_ras_pre_fini(adev);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
···
 
     if (fpriv) {
         fpriv->evf_mgr.fd_closing = true;
-        amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
         amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
+        amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
     }
 
     return drm_release(inode, filp);
+10 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
···
     struct amdgpu_eviction_fence *ev_fence;
 
     mutex_lock(&uq_mgr->userq_mutex);
+    spin_lock(&evf_mgr->ev_fence_lock);
     ev_fence = evf_mgr->ev_fence;
-    if (!ev_fence)
+    if (ev_fence)
+        dma_fence_get(&ev_fence->base);
+    else
         goto unlock;
+    spin_unlock(&evf_mgr->ev_fence_lock);
 
     amdgpu_userq_evict(uq_mgr, ev_fence);
 
+    mutex_unlock(&uq_mgr->userq_mutex);
+    dma_fence_put(&ev_fence->base);
+    return;
+
 unlock:
+    spin_unlock(&evf_mgr->ev_fence_lock);
     mutex_unlock(&uq_mgr->userq_mutex);
 }
 
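The fix takes a reference on the eviction fence while still under ev_fence_lock, so the fence cannot be freed between the lookup and amdgpu_userq_evict(). The generic shape of that lookup-then-pin pattern, as a sketch with hypothetical names:

#include <linux/kref.h>
#include <linux/spinlock.h>

struct demo_fence { struct kref ref; };

struct demo_mgr {
    spinlock_t lock;
    struct demo_fence *cur;
};

static struct demo_fence *demo_fence_get(struct demo_mgr *mgr)
{
    struct demo_fence *f;

    spin_lock(&mgr->lock);
    f = mgr->cur;
    if (f)
        kref_get(&f->ref); /* pin before dropping the lock */
    spin_unlock(&mgr->lock);

    return f; /* caller drops the reference with kref_put() */
}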
-5
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
···
         amdgpu_bo_unreserve(pd);
     }
 
-    if (!fpriv->evf_mgr.fd_closing) {
-        fpriv->evf_mgr.fd_closing = true;
-        amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
-        amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
-    }
     amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
     amdgpu_vm_fini(adev, &fpriv->vm);
 
+14 -61
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
···
     queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
     queue_input.wptr_addr = ring->wptr_gpu_addr;
 
+    amdgpu_mes_lock(&adev->mes);
     r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
+    amdgpu_mes_unlock(&adev->mes);
     if (r)
         DRM_ERROR("failed to map legacy queue\n");
 
···
     queue_input.trail_fence_addr = gpu_addr;
     queue_input.trail_fence_data = seq;
 
+    amdgpu_mes_lock(&adev->mes);
     r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
+    amdgpu_mes_unlock(&adev->mes);
     if (r)
         DRM_ERROR("failed to unmap legacy queue\n");
 
···
     if (ring->funcs->type == AMDGPU_RING_TYPE_GFX)
         queue_input.legacy_gfx = true;
 
+    amdgpu_mes_lock(&adev->mes);
     r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
+    amdgpu_mes_unlock(&adev->mes);
     if (r)
         DRM_ERROR("failed to reset legacy queue\n");
 
···
         goto error;
     }
 
+    amdgpu_mes_lock(&adev->mes);
     r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+    amdgpu_mes_unlock(&adev->mes);
     if (r)
         dev_err(adev->dev, "failed to read reg (0x%x)\n", reg);
     else
···
         goto error;
     }
 
+    amdgpu_mes_lock(&adev->mes);
     r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+    amdgpu_mes_unlock(&adev->mes);
     if (r)
         dev_err(adev->dev, "failed to write reg (0x%x)\n", reg);
 
···
         goto error;
     }
 
+    amdgpu_mes_lock(&adev->mes);
     r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
-    if (r)
-        dev_err(adev->dev, "failed to reg_write_reg_wait\n");
-
-error:
-    return r;
-}
-
-int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
-            uint32_t val, uint32_t mask)
-{
-    struct mes_misc_op_input op_input;
-    int r;
-
-    op_input.op = MES_MISC_OP_WRM_REG_WAIT;
-    op_input.wrm_reg.reg0 = reg;
-    op_input.wrm_reg.ref = val;
-    op_input.wrm_reg.mask = mask;
-
-    if (!adev->mes.funcs->misc_op) {
-        dev_err(adev->dev, "mes reg wait is not supported!\n");
-        r = -EINVAL;
-        goto error;
-    }
-
-    r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+    amdgpu_mes_unlock(&adev->mes);
     if (r)
         dev_err(adev->dev, "failed to reg_write_reg_wait\n");
 
···
     amdgpu_mes_unlock(&adev->mes);
 
     return r;
-}
-
-#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)            \
-do {                                \
-    if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)            \
-        return offsetof(struct amdgpu_mes_ctx_meta_data,    \
-                _eng[ring->idx].slots[id_offs]);    \
-    else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)        \
-        return offsetof(struct amdgpu_mes_ctx_meta_data,    \
-                _eng[ring->idx].ring);            \
-    else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)        \
-        return offsetof(struct amdgpu_mes_ctx_meta_data,    \
-                _eng[ring->idx].ib);            \
-    else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)    \
-        return offsetof(struct amdgpu_mes_ctx_meta_data,    \
-                _eng[ring->idx].padding);        \
-} while(0)
-
-int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
-{
-    switch (ring->funcs->type) {
-    case AMDGPU_RING_TYPE_GFX:
-        DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
-        break;
-    case AMDGPU_RING_TYPE_COMPUTE:
-        DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
-        break;
-    case AMDGPU_RING_TYPE_SDMA:
-        DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
-        break;
-    default:
-        break;
-    }
-
-    WARN_ON(1);
-    return -EINVAL;
 }
 
 uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
···
         goto error;
     }
 
+    amdgpu_mes_lock(&adev->mes);
     r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+    amdgpu_mes_unlock(&adev->mes);
     if (r)
         dev_err(adev->dev, "failed to change_config.\n");
 
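Every firmware call through adev->mes.funcs->misc_op() is now bracketed by the MES mutex. A hypothetical refactor (not part of this commit) could factor the repeated lock/call/unlock sequence into a single helper:

/* Hypothetical helper; amdgpu_mes_lock()/unlock() are the real driver APIs. */
static int amdgpu_mes_misc_op_locked(struct amdgpu_device *adev,
                                     struct mes_misc_op_input *op_input)
{
    int r;

    amdgpu_mes_lock(&adev->mes);
    r = adev->mes.funcs->misc_op(&adev->mes, op_input);
    amdgpu_mes_unlock(&adev->mes);

    return r;
}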
-4
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
···
 #define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
 #define amdgpu_mes_kiq_hw_fini(adev) (adev)->mes.kiq_hw_fini((adev))
 
-int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
-
 int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
 int amdgpu_mes_init(struct amdgpu_device *adev);
 void amdgpu_mes_fini(struct amdgpu_device *adev);
···
 uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
 int amdgpu_mes_wreg(struct amdgpu_device *adev,
             uint32_t reg, uint32_t val);
-int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
-            uint32_t val, uint32_t mask);
 int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
                   uint32_t reg0, uint32_t reg1,
                   uint32_t ref, uint32_t mask);
+25 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
···
             return -EINVAL;
         }
     } else {
+        if (bps[0].address == 0) {
+            /* for specific old eeprom data, mca address is not stored,
+             * calc it from pa
+             */
+            if (amdgpu_umc_pa2mca(adev, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT,
+                                  &(bps[0].address), AMDGPU_NPS1_PARTITION_MODE))
+                return -EINVAL;
+        }
+
         if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) {
             if (nps == AMDGPU_NPS1_PARTITION_MODE)
                 memcpy(err_data->err_addr, bps,
···
                 bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
             return -EINVAL;
     } else {
-        if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
-            return -EINVAL;
+        if (bps->address) {
+            if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
+                return -EINVAL;
+        } else {
+            /* for specific old eeprom data, mca address is not stored,
+             * calc it from pa
+             */
+            if (amdgpu_umc_pa2mca(adev, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT,
+                                  &(bps->address), AMDGPU_NPS1_PARTITION_MODE))
+                return -EINVAL;
+
+            if (amdgpu_ras_mca2pa(adev, bps, err_data))
+                return -EOPNOTSUPP;
+        }
     }
 
     return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
···
      */
     if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
         amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
-        amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
+        amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3) ||
+        amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(5, 0, 1))
         adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
                      1 << AMDGPU_RAS_BLOCK__JPEG);
     else
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
···
     struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
     struct amdgpu_irq_src trap_irq;
     struct amdgpu_irq_src illegal_inst_irq;
+    struct amdgpu_irq_src fence_irq;
     struct amdgpu_irq_src ecc_irq;
     struct amdgpu_irq_src vm_hole_irq;
     struct amdgpu_irq_src doorbell_invalid_irq;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
···
 
     vm = &fpriv->vm;
 
-    drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+    drm_exec_init(&exec, 0, 0);
     drm_exec_until_all_locked(&exec) {
         r = amdgpu_vm_lock_pd(vm, &exec, 0);
         if (likely(!r))
+5 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
···
 FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
 FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
 FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
+FW_VERSION_ATTR(dmcub_fw_version, 0444, dm.dmcub_fw_version);
 FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK);
 FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK);
 FW_VERSION_ATTR(pldm_fw_version, 0444, firmware.pldm_version);
···
     &dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
     &dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
     &dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
-    &dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
-    &dev_attr_mes_fw_version.attr, &dev_attr_mes_kiq_fw_version.attr,
-    &dev_attr_pldm_fw_version.attr, NULL
+    &dev_attr_dmcu_fw_version.attr, &dev_attr_dmcub_fw_version.attr,
+    &dev_attr_imu_fw_version.attr, &dev_attr_mes_fw_version.attr,
+    &dev_attr_mes_kiq_fw_version.attr, &dev_attr_pldm_fw_version.attr,
+    NULL
 };
 
 #define to_dev_attr(x) container_of(x, struct device_attribute, attr)
+23
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
···
 
     return 0;
 }
+
+int amdgpu_umc_pa2mca(struct amdgpu_device *adev,
+              uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps)
+{
+    struct ta_ras_query_address_input addr_in;
+    struct ta_ras_query_address_output addr_out;
+    int ret;
+
+    /* nps: the pa belongs to */
+    addr_in.pa.pa = pa | ((uint64_t)nps << 58);
+    addr_in.addr_type = TA_RAS_PA_TO_MCA;
+    ret = psp_ras_query_address(&adev->psp, &addr_in, &addr_out);
+    if (ret) {
+        dev_warn(adev->dev, "Failed to query RAS MCA address for 0x%llx",
+             pa);
+
+        return ret;
+    }
+
+    *mca = addr_out.ma.err_addr;
+
+    return 0;
+}
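amdgpu_umc_pa2mca() packs the NPS partition mode into the physical address it hands to the RAS TA, starting at bit 58. Spelled out as a hypothetical helper (not part of the commit):

static inline uint64_t umc_pack_pa_with_nps(uint64_t pa, uint64_t nps)
{
    return pa | (nps << 58); /* NPS mode rides in the high address bits */
}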
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
···
             uint64_t err_addr, uint32_t ch, uint32_t umc,
             uint32_t node, uint32_t socket,
             struct ta_ras_query_address_output *addr_out, bool dump_addr);
+int amdgpu_umc_pa2mca(struct amdgpu_device *adev,
+              uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps);
 #endif
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
···
     int r;
 
     lpfn = (u64)place->lpfn << PAGE_SHIFT;
-    if (!lpfn)
+    if (!lpfn || lpfn > man->size)
         lpfn = man->size;
 
     fpfn = (u64)place->fpfn << PAGE_SHIFT;
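The one-line change guards against a requested last-page-frame limit that lies beyond the managed VRAM size; previously only a zero lpfn fell back to man->size. The guard in isolation, as a hypothetical standalone form:

static u64 vram_clamp_lpfn(u64 lpfn, u64 man_size)
{
    /* zero or out-of-range limits fall back to the full manager size */
    return (!lpfn || lpfn > man_size) ? man_size : lpfn;
}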
+141 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
···
 #include <drm/drm_drv.h>
 #include "../amdxcp/amdgpu_xcp_drv.h"
 
+static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr);
+static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr);
+
 static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
                 struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
 {
···
 
         goto out;
     }
-
+    amdgpu_xcp_sysfs_entries_update(xcp_mgr);
 out:
     mutex_unlock(&xcp_mgr->xcp_lock);
 
···
         if (ret == -ENOSPC) {
             dev_warn(adev->dev,
                  "Skip xcp node #%d when out of drm node resource.", i);
-            return 0;
+            ret = 0;
+            goto out;
         } else if (ret) {
-            return ret;
+            goto out;
         }
 
         /* Redirect all IOCTLs to the primary device */
···
         p_ddev->vma_offset_manager = ddev->vma_offset_manager;
         p_ddev->driver = &amdgpu_partition_driver;
         adev->xcp_mgr->xcp[i].ddev = p_ddev;
-    }
 
-    return 0;
+        dev_set_drvdata(p_ddev->dev, &adev->xcp_mgr->xcp[i]);
+    }
+    ret = 0;
+out:
+    amdgpu_xcp_sysfs_entries_init(adev->xcp_mgr);
+
+    return ret;
 }
 
 int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
···
             struct amdgpu_xcp_mgr_funcs *xcp_funcs)
 {
     struct amdgpu_xcp_mgr *xcp_mgr;
+    int i;
 
     if (!xcp_funcs || !xcp_funcs->get_ip_details)
         return -EINVAL;
···
     amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);
 
     adev->xcp_mgr = xcp_mgr;
+    for (i = 0; i < MAX_XCP; ++i)
+        xcp_mgr->xcp[i].xcp_mgr = xcp_mgr;
 
     return amdgpu_xcp_dev_alloc(adev);
 }
···
     }
 }
 
+/*====================== xcp sysfs - configuration ======================*/
 #define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name)                         \
     static ssize_t amdgpu_xcp_res_sysfs_##_name##_show(        \
         struct amdgpu_xcp_res_details *xcp_res, char *buf) \
···
     NULL,
 };
 
-void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
+static void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
 {
     struct amdgpu_xcp_res_details *xcp_res;
     struct amdgpu_xcp_cfg *xcp_cfg;
···
     kobject_put(&xcp_cfg->kobj);
 }
 
-void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
+static void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
 {
     struct amdgpu_xcp_res_details *xcp_res;
     struct amdgpu_xcp_cfg *xcp_cfg;
···
     sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
     sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
     kobject_put(&xcp_cfg->kobj);
+}
+
+/*====================== xcp sysfs - data entries ======================*/
+
+#define to_xcp(x) container_of(x, struct amdgpu_xcp, kobj)
+
+static ssize_t xcp_metrics_show(struct kobject *kobj,
+                struct kobj_attribute *attr, char *buf)
+{
+    struct amdgpu_xcp *xcp = to_xcp(kobj);
+    struct amdgpu_xcp_mgr *xcp_mgr;
+    ssize_t size;
+
+    xcp_mgr = xcp->xcp_mgr;
+    size = amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, NULL);
+    if (size <= 0)
+        return size;
+
+    if (size > PAGE_SIZE)
+        return -ENOSPC;
+
+    return amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, buf);
+}
+
+static umode_t amdgpu_xcp_attrs_is_visible(struct kobject *kobj,
+                       struct attribute *attr, int n)
+{
+    struct amdgpu_xcp *xcp = to_xcp(kobj);
+
+    if (!xcp || !xcp->valid)
+        return 0;
+
+    return attr->mode;
+}
+
+static struct kobj_attribute xcp_sysfs_metrics = __ATTR_RO(xcp_metrics);
+
+static struct attribute *amdgpu_xcp_attrs[] = {
+    &xcp_sysfs_metrics.attr,
+    NULL,
+};
+
+static const struct attribute_group amdgpu_xcp_attrs_group = {
+    .attrs = amdgpu_xcp_attrs,
+    .is_visible = amdgpu_xcp_attrs_is_visible
+};
+
+static const struct kobj_type xcp_sysfs_ktype = {
+    .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static void amdgpu_xcp_sysfs_entries_fini(struct amdgpu_xcp_mgr *xcp_mgr, int n)
+{
+    struct amdgpu_xcp *xcp;
+
+    for (n--; n >= 0; n--) {
+        xcp = &xcp_mgr->xcp[n];
+        if (!xcp->ddev || !xcp->valid)
+            continue;
+        sysfs_remove_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
+        kobject_put(&xcp->kobj);
+    }
+}
+
+static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+    struct amdgpu_xcp *xcp;
+    int i, r;
+
+    for (i = 0; i < MAX_XCP; i++) {
+        /* Redirect all IOCTLs to the primary device */
+        xcp = &xcp_mgr->xcp[i];
+        if (!xcp->ddev)
+            break;
+        r = kobject_init_and_add(&xcp->kobj, &xcp_sysfs_ktype,
+                     &xcp->ddev->dev->kobj, "xcp");
+        if (r)
+            goto out;
+
+        r = sysfs_create_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
+        if (r)
+            goto out;
+    }
+
+    return;
+out:
+    kobject_put(&xcp->kobj);
+}
+
+static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+    struct amdgpu_xcp *xcp;
+    int i;
+
+    for (i = 0; i < MAX_XCP; i++) {
+        xcp = &xcp_mgr->xcp[i];
+        if (!xcp->ddev)
+            continue;
+        sysfs_update_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
+    }
+
+    return;
+}
+
+void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev)
+{
+    if (!adev->xcp_mgr)
+        return;
+
+    amdgpu_xcp_cfg_sysfs_init(adev);
+
+    return;
+}
+
+void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev)
+{
+    if (!adev->xcp_mgr)
+        return;
+    amdgpu_xcp_sysfs_entries_fini(adev->xcp_mgr, MAX_XCP);
+    amdgpu_xcp_cfg_sysfs_fini(adev);
 }
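xcp_metrics_show() follows a two-pass convention: a first amdgpu_dpm_get_xcp_metrics() call with a NULL buffer returns the payload size, and only then is the buffer filled. The surrounding plumbing is the standard kobject-plus-attribute-group pattern, where the group's .is_visible callback hides attributes of invalid partitions. A generic sketch with hypothetical demo_* names:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct kobject *kobj, struct kobj_attribute *attr,
             char *buf)
{
    return sysfs_emit(buf, "demo\n");
}

static struct kobj_attribute demo_attr = __ATTR_RO(demo);

static struct attribute *demo_attrs[] = {
    &demo_attr.attr,
    NULL,
};

static umode_t demo_is_visible(struct kobject *kobj, struct attribute *attr,
                   int n)
{
    return attr->mode; /* return 0 here to hide the attribute */
}

static const struct attribute_group demo_group = {
    .attrs = demo_attrs,
    .is_visible = demo_is_visible,
};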
+4 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
··· 108 108 struct drm_driver *driver; 109 109 struct drm_vma_offset_manager *vma_offset_manager; 110 110 struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX]; 111 + struct amdgpu_xcp_mgr *xcp_mgr; 112 + struct kobject kobj; 111 113 }; 112 114 113 115 struct amdgpu_xcp_mgr { ··· 177 175 void amdgpu_xcp_release_sched(struct amdgpu_device *adev, 178 176 struct amdgpu_ctx_entity *entity); 179 177 180 - void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev); 181 - void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev); 178 + void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev); 179 + void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev); 182 180 183 181 #define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \ 184 182 ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \
+17
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
··· 294 294 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)}, 295 295 }; 296 296 297 + int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num) 298 + { 299 + int link_map_6_4_x[8] = { 0, 3, 1, 2, 7, 6, 4, 5 }; 300 + 301 + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { 302 + case IP_VERSION(6, 4, 0): 303 + case IP_VERSION(6, 4, 1): 304 + if (link_num < ARRAY_SIZE(link_map_6_4_x)) 305 + return link_map_6_4_x[link_num]; 306 + break; 307 + default: 308 + return -EINVAL; 309 + } 310 + 311 + return -EINVAL; 312 + } 313 + 297 314 static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link_num) 298 315 { 299 316 const u32 smn_xgmi_6_4_pcs_state_hist1[2] = { 0x11a00070, 0x11b00070 };
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
··· 125 125 int req_nps_mode); 126 126 int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, 127 127 int global_link_num); 128 + int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num); 128 129 129 130 void amdgpu_xgmi_early_init(struct amdgpu_device *adev); 130 131 uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev);
+31
drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
···
  *
  */
 
+#include <linux/gpio/machine.h>
 #include "amdgpu.h"
 #include "isp_v4_1_1.h"
 
···
     ISP_4_1__SRCID__ISP_RINGBUFFER_WPT16
 };
 
+static struct gpiod_lookup_table isp_gpio_table = {
+    .dev_id = "amd_isp_capture",
+    .table = {
+        GPIO_LOOKUP("AMDI0030:00", 85, "enable_isp", GPIO_ACTIVE_HIGH),
+        { }
+    },
+};
+
+static struct gpiod_lookup_table isp_sensor_gpio_table = {
+    .dev_id = "i2c-ov05c10",
+    .table = {
+        GPIO_LOOKUP("amdisp-pinctrl", 0, "enable", GPIO_ACTIVE_HIGH),
+        { }
+    },
+};
+
 static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
 {
     struct amdgpu_device *adev = isp->adev;
     int idx, int_idx, num_res, r;
+    u8 isp_dev_hid[ACPI_ID_LEN];
     u64 isp_base;
 
     if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
         return -EINVAL;
+
+    r = amdgpu_acpi_get_isp4_dev_hid(&isp_dev_hid);
+    if (r) {
+        drm_dbg(&adev->ddev, "Invalid isp platform detected (%d)", r);
+        /* allow GPU init to progress */
+        return 0;
+    }
+
+    /* add GPIO resources required for OMNI5C10 sensor */
+    if (!strcmp("OMNI5C10", isp_dev_hid)) {
+        gpiod_add_lookup_table(&isp_gpio_table);
+        gpiod_add_lookup_table(&isp_sensor_gpio_table);
+    }
 
     isp_base = adev->rmmio_base;
 
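The gpiod_lookup_table entries map provider pins ("AMDI0030:00" pin 85, and pin 0 of "amdisp-pinctrl") to consumer function names for a given device name. A driver whose device matches .dev_id can then request the pin by that name; a hedged consumer-side sketch (hypothetical probe, standard gpiod consumer API):

#include <linux/gpio/consumer.h>

static int demo_probe(struct device *dev)
{
    struct gpio_desc *enable;

    /* resolves through the "enable_isp" lookup registered above */
    enable = devm_gpiod_get(dev, "enable_isp", GPIOD_OUT_LOW);
    if (IS_ERR(enable))
        return PTR_ERR(enable);

    gpiod_set_value_cansleep(enable, 1); /* power the sensor path up */

    return 0;
}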
+76
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
···
         return r;
     }
 
+    /* JPEG DJPEG POISON EVENT */
+    r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+        VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
+    if (r)
+        return r;
+
+    /* JPEG EJPEG POISON EVENT */
+    r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+        VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
+    if (r)
+        return r;
+
     r = amdgpu_jpeg_sw_init(adev);
     if (r)
         return r;
···
         if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
             ret = jpeg_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
     }
+
+    if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+        amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
 
     return ret;
 }
···
     return 0;
 }
 
+static int jpeg_v4_0_3_set_ras_interrupt_state(struct amdgpu_device *adev,
+                    struct amdgpu_irq_src *source,
+                    unsigned int type,
+                    enum amdgpu_interrupt_state state)
+{
+    return 0;
+}
+
 static int jpeg_v4_0_3_process_interrupt(struct amdgpu_device *adev,
                     struct amdgpu_irq_src *source,
                     struct amdgpu_iv_entry *entry)
···
     .process = jpeg_v4_0_3_process_interrupt,
 };
 
+static const struct amdgpu_irq_src_funcs jpeg_v4_0_3_ras_irq_funcs = {
+    .set = jpeg_v4_0_3_set_ras_interrupt_state,
+    .process = amdgpu_jpeg_process_poison_irq,
+};
+
 static void jpeg_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
 {
     int i;
···
         adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;
     }
     adev->jpeg.inst->irq.funcs = &jpeg_v4_0_3_irq_funcs;
+
+    adev->jpeg.inst->ras_poison_irq.num_types = 1;
+    adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_3_ras_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block = {
···
         jpeg_v4_0_3_inst_reset_ras_error_count(adev, i);
 }
 
+static uint32_t jpeg_v4_0_3_query_poison_by_instance(struct amdgpu_device *adev,
+        uint32_t instance, uint32_t sub_block)
+{
+    uint32_t poison_stat = 0, reg_value = 0;
+
+    switch (sub_block) {
+    case AMDGPU_JPEG_V4_0_3_JPEG0:
+        reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
+        poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
+        break;
+    case AMDGPU_JPEG_V4_0_3_JPEG1:
+        reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
+        poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
+        break;
+    default:
+        break;
+    }
+
+    if (poison_stat)
+        dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
+             instance, sub_block);
+
+    return poison_stat;
+}
+
+static bool jpeg_v4_0_3_query_ras_poison_status(struct amdgpu_device *adev)
+{
+    uint32_t inst = 0, sub = 0, poison_stat = 0;
+
+    for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
+        for (sub = 0; sub < AMDGPU_JPEG_V4_0_3_MAX_SUB_BLOCK; sub++)
+            poison_stat +=
+                jpeg_v4_0_3_query_poison_by_instance(adev, inst, sub);
+
+    return !!poison_stat;
+}
+
 static const struct amdgpu_ras_block_hw_ops jpeg_v4_0_3_ras_hw_ops = {
     .query_ras_error_count = jpeg_v4_0_3_query_ras_error_count,
     .reset_ras_error_count = jpeg_v4_0_3_reset_ras_error_count,
+    .query_poison_status = jpeg_v4_0_3_query_ras_poison_status,
 };
 
 static int jpeg_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
···
     r = amdgpu_ras_block_late_init(adev, ras_block);
     if (r)
         return r;
+
+    if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+        adev->jpeg.inst->ras_poison_irq.funcs) {
+        r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
+        if (r)
+            goto late_fini;
+    }
 
     r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
                 &jpeg_v4_0_3_aca_info, NULL);
+7
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h
···
 
 #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
 
+enum amdgpu_jpeg_v4_0_3_sub_block {
+    AMDGPU_JPEG_V4_0_3_JPEG0 = 0,
+    AMDGPU_JPEG_V4_0_3_JPEG1,
+
+    AMDGPU_JPEG_V4_0_3_MAX_SUB_BLOCK,
+};
+
 extern const struct amdgpu_ip_block_version jpeg_v4_0_3_ip_block;
 
 void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
+182
drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
···
 static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
 static int jpeg_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
                     enum amd_powergating_state state);
+static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev);
 static void jpeg_v5_0_1_dec_ring_set_wptr(struct amdgpu_ring *ring);
 
 static int amdgpu_ih_srcid_jpeg[] = {
···
     adev->jpeg.num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS;
     jpeg_v5_0_1_set_dec_ring_funcs(adev);
     jpeg_v5_0_1_set_irq_funcs(adev);
+    jpeg_v5_0_1_set_ras_funcs(adev);
 
     return 0;
 }
···
         if (r)
             return r;
     }
+    /* JPEG DJPEG POISON EVENT */
+    r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+        VCN_5_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
+    if (r)
+        return r;
+
+    /* JPEG EJPEG POISON EVENT */
+    r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+        VCN_5_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
+    if (r)
+        return r;
 
     r = amdgpu_jpeg_sw_init(adev);
     if (r)
···
         if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
             ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
     }
+
+    if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+        amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
 
     return ret;
 }
···
     return 0;
 }
 
+static int jpeg_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev,
+                    struct amdgpu_irq_src *source,
+                    unsigned int type,
+                    enum amdgpu_interrupt_state state)
+{
+    return 0;
+}
+
+
+
 static int jpeg_v5_0_1_process_interrupt(struct amdgpu_device *adev,
                     struct amdgpu_irq_src *source,
                     struct amdgpu_iv_entry *entry)
···
     .process = jpeg_v5_0_1_process_interrupt,
 };
 
+static const struct amdgpu_irq_src_funcs jpeg_v5_0_1_ras_irq_funcs = {
+    .set = jpeg_v5_0_1_set_ras_interrupt_state,
+    .process = amdgpu_jpeg_process_poison_irq,
+};
+
 static void jpeg_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
 {
     int i;
···
         adev->jpeg.inst->irq.num_types += adev->jpeg.num_jpeg_rings;
 
     adev->jpeg.inst->irq.funcs = &jpeg_v5_0_1_irq_funcs;
+
+    adev->jpeg.inst->ras_poison_irq.num_types = 1;
+    adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v5_0_1_ras_irq_funcs;
+
 }
 
 const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block = {
···
     .rev = 1,
     .funcs = &jpeg_v5_0_1_ip_funcs,
 };
+
+static uint32_t jpeg_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev,
+        uint32_t instance, uint32_t sub_block)
+{
+    uint32_t poison_stat = 0, reg_value = 0;
+
+    switch (sub_block) {
+    case AMDGPU_JPEG_V5_0_1_JPEG0:
+        reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
+        poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
+        break;
+    case AMDGPU_JPEG_V5_0_1_JPEG1:
+        reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
+        poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
+        break;
+    default:
+        break;
+    }
+
+    if (poison_stat)
+        dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
+             instance, sub_block);
+
+    return poison_stat;
+}
+
+static bool jpeg_v5_0_1_query_ras_poison_status(struct amdgpu_device *adev)
+{
+    uint32_t inst = 0, sub = 0, poison_stat = 0;
+
+    for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
+        for (sub = 0; sub < AMDGPU_JPEG_V5_0_1_MAX_SUB_BLOCK; sub++)
+            poison_stat +=
+                jpeg_v5_0_1_query_poison_by_instance(adev, inst, sub);
+
+    return !!poison_stat;
+}
+
+static const struct amdgpu_ras_block_hw_ops jpeg_v5_0_1_ras_hw_ops = {
+    .query_poison_status = jpeg_v5_0_1_query_ras_poison_status,
+};
+
+static int jpeg_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+                       enum aca_smu_type type, void *data)
+{
+    struct aca_bank_info info;
+    u64 misc0;
+    int ret;
+
+    ret = aca_bank_info_decode(bank, &info);
+    if (ret)
+        return ret;
+
+    misc0 = bank->regs[ACA_REG_IDX_MISC0];
+    switch (type) {
+    case ACA_SMU_TYPE_UE:
+        bank->aca_err_type = ACA_ERROR_TYPE_UE;
+        ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
+                             1ULL);
+        break;
+    case ACA_SMU_TYPE_CE:
+        bank->aca_err_type = ACA_ERROR_TYPE_CE;
+        ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
+                             ACA_REG__MISC0__ERRCNT(misc0));
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    return ret;
+}
+
+/* reference to smu driver if header file */
+static int jpeg_v5_0_1_err_codes[] = {
+    16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-7][S|D] */
+    24, 25, 26, 27, 28, 29, 30, 31
+};
+
+static bool jpeg_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+                      enum aca_smu_type type, void *data)
+{
+    u32 instlo;
+
+    instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+    instlo &= GENMASK(31, 1);
+
+    if (instlo != mmSMNAID_AID0_MCA_SMU)
+        return false;
+
+    if (aca_bank_check_error_codes(handle->adev, bank,
+                       jpeg_v5_0_1_err_codes,
+                       ARRAY_SIZE(jpeg_v5_0_1_err_codes)))
+        return false;
+
+    return true;
+}
+
+static const struct aca_bank_ops jpeg_v5_0_1_aca_bank_ops = {
+    .aca_bank_parser = jpeg_v5_0_1_aca_bank_parser,
+    .aca_bank_is_valid = jpeg_v5_0_1_aca_bank_is_valid,
+};
+
+static const struct aca_info jpeg_v5_0_1_aca_info = {
+    .hwip = ACA_HWIP_TYPE_SMU,
+    .mask = ACA_ERROR_UE_MASK,
+    .bank_ops = &jpeg_v5_0_1_aca_bank_ops,
+};
+
+static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+    int r;
+
+    r = amdgpu_ras_block_late_init(adev, ras_block);
+    if (r)
+        return r;
+
+    if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+        adev->jpeg.inst->ras_poison_irq.funcs) {
+        r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
+        if (r)
+            goto late_fini;
+    }
+
+    r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
+                &jpeg_v5_0_1_aca_info, NULL);
+    if (r)
+        goto late_fini;
+
+    return 0;
+
+late_fini:
+    amdgpu_ras_block_late_fini(adev, ras_block);
+
+    return r;
+}
+
+static struct amdgpu_jpeg_ras jpeg_v5_0_1_ras = {
+    .ras_block = {
+        .hw_ops = &jpeg_v5_0_1_ras_hw_ops,
+        .ras_late_init = jpeg_v5_0_1_ras_late_init,
+    },
+};
+
+static void jpeg_v5_0_1_set_ras_funcs(struct amdgpu_device *adev)
+{
+    adev->jpeg.ras = &jpeg_v5_0_1_ras;
+}
+10
drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h
···
 
 extern const struct amdgpu_ip_block_version jpeg_v5_0_1_ip_block;
 
+#define regUVD_JRBC0_UVD_JRBC_SCRATCH0_INTERNAL_OFFSET 0x4094
+#define regUVD_JRBC_EXTERNAL_MCM_ADDR_INTERNAL_OFFSET 0x1bffe
+
 #define regUVD_JRBC0_UVD_JRBC_RB_WPTR 0x0640
 #define regUVD_JRBC0_UVD_JRBC_RB_WPTR_BASE_IDX 1
 #define regUVD_JRBC0_UVD_JRBC_STATUS 0x0649
···
 
 #define regVCN_RRMT_CNTL 0x0940
 #define regVCN_RRMT_CNTL_BASE_IDX 1
+
+enum amdgpu_jpeg_v5_0_1_sub_block {
+    AMDGPU_JPEG_V5_0_1_JPEG0 = 0,
+    AMDGPU_JPEG_V5_0_1_JPEG1,
+
+    AMDGPU_JPEG_V5_0_1_MAX_SUB_BLOCK,
+};
 
 #endif /* __JPEG_V5_0_1_H__ */
+39 -16
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
···
 #include "sdma_v6_0.h"
 #include "v11_structs.h"
 #include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
 
 MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin");
 MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
···
     m->sdmax_rlcx_csa_addr_lo = lower_32_bits(prop->csa_addr);
     m->sdmax_rlcx_csa_addr_hi = upper_32_bits(prop->csa_addr);
 
+    m->sdmax_rlcx_f32_dbg0 = lower_32_bits(prop->fence_address);
+    m->sdmax_rlcx_f32_dbg1 = upper_32_bits(prop->fence_address);
+
     return 0;
 }
···
     if (r)
         return r;
 
+    /* SDMA user fence event */
+    r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
+                  GFX_11_0_0__SRCID__SDMA_FENCE,
+                  &adev->sdma.fence_irq);
+    if (r)
+        return r;
+
     for (i = 0; i < adev->sdma.num_instances; i++) {
         ring = &adev->sdma.instance[i].ring;
         ring->ring_obj = NULL;
···
                    struct amdgpu_iv_entry *entry)
 {
     int instances, queue;
-    uint32_t mes_queue_id = entry->src_data[0];
 
     DRM_DEBUG("IH: SDMA trap\n");
-
-    if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
-        struct amdgpu_mes_queue *queue;
-
-        mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
-
-        spin_lock(&adev->mes.queue_id_lock);
-        queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
-        if (queue) {
-            DRM_DEBUG("process smda queue id = %d\n", mes_queue_id);
-            amdgpu_fence_process(queue->ring);
-        }
-        spin_unlock(&adev->mes.queue_id_lock);
-        return 0;
-    }
 
     queue = entry->ring_id & 0xf;
     instances = (entry->ring_id & 0xf0) >> 4;
···
         }
         break;
     }
+    return 0;
+}
+
+static int sdma_v6_0_process_fence_irq(struct amdgpu_device *adev,
+                       struct amdgpu_irq_src *source,
+                       struct amdgpu_iv_entry *entry)
+{
+    u32 doorbell_offset = entry->src_data[0];
+
+    if (adev->enable_mes && doorbell_offset) {
+        struct amdgpu_userq_fence_driver *fence_drv = NULL;
+        struct xarray *xa = &adev->userq_xa;
+        unsigned long flags;
+
+        doorbell_offset >>= SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
+
+        xa_lock_irqsave(xa, flags);
+        fence_drv = xa_load(xa, doorbell_offset);
+        if (fence_drv)
+            amdgpu_userq_fence_driver_process(fence_drv);
+        xa_unlock_irqrestore(xa, flags);
+    }
+
     return 0;
 }
 
···
     .process = sdma_v6_0_process_trap_irq,
 };
 
+static const struct amdgpu_irq_src_funcs sdma_v6_0_fence_irq_funcs = {
+    .process = sdma_v6_0_process_fence_irq,
+};
+
 static const struct amdgpu_irq_src_funcs sdma_v6_0_illegal_inst_irq_funcs = {
     .process = sdma_v6_0_process_illegal_inst_irq,
 };
···
     adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
                     adev->sdma.num_instances;
     adev->sdma.trap_irq.funcs = &sdma_v6_0_trap_irq_funcs;
+    adev->sdma.fence_irq.funcs = &sdma_v6_0_fence_irq_funcs;
     adev->sdma.illegal_inst_irq.funcs = &sdma_v6_0_illegal_inst_irq_funcs;
 }
 
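The new fence handler resolves the doorbell offset to a fence driver through adev->userq_xa under the xarray lock. That lookup only succeeds if a driver was stored under the same index; a sketch of the store side (hypothetical helper — the real registration lives in the userq fence code):

#include <linux/xarray.h>

static int demo_register_fence_drv(struct xarray *xa, u32 doorbell_index,
                   struct amdgpu_userq_fence_driver *fence_drv)
{
    unsigned long flags;
    int ret;

    xa_lock_irqsave(xa, flags);
    /* __xa_store() requires the xa lock; GFP_ATOMIC since we hold it */
    ret = xa_err(__xa_store(xa, doorbell_index, fence_drv, GFP_ATOMIC));
    xa_unlock_irqrestore(xa, flags);

    return ret;
}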
+21
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
···
 
     jpeg_v1_0_start(adev, 0);
 
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
     return 0;
 }
···
 
     jpeg_v1_0_start(adev, 1);
 
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
     return 0;
 }
···
 
     vcn_v1_0_enable_clock_gating(vinst);
     vcn_1_0_enable_static_power_gating(vinst);
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
     return 0;
 }
···
     /* disable dynamic power gating mode */
     WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
          ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(UVD, 0, mmUVD_STATUS);
 
     return 0;
 }
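The same comment and read-back recur throughout the VCN and JPEG start/stop paths in the files below. The idea, reduced to a sketch: MMIO writes can be posted, and one read from the same register block forces them to complete before the function returns and another context can touch the hardware (hypothetical function name, real RREG32_SOC15 accessor):

static void demo_finish_mmio_sequence(struct amdgpu_device *adev)
{
    /* ... a series of WREG32_SOC15() programming writes ... */

    /* a single read-back from the block flushes the posted writes */
    RREG32_SOC15(VCN, 0, mmUVD_STATUS);
}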
+21
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
···
     /* Unstall DPG */
     WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
         0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
     return 0;
 }
···
     WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
     fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
 
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(UVD, 0, mmUVD_STATUS);
+
     return 0;
 }
···
     /* disable dynamic power gating mode */
     WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
          ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(UVD, 0, mmUVD_STATUS);
 
     return 0;
 }
···
 
     vcn_v2_0_enable_clock_gating(vinst);
     vcn_v2_0_enable_static_power_gating(vinst);
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, 0, mmUVD_STATUS);
 
 power_off:
     if (adev->pm.dpm_enabled)
+19
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
···
     WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
         0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
 
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
     return 0;
 }
···
     WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
     WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
     fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, i, mmUVD_STATUS);
 
     return 0;
 }
···
     WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
          ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
 
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
     return 0;
 }
···
          UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
          ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, i, mmUVD_STATUS);
 done:
     if (adev->pm.dpm_enabled)
         amdgpu_dpm_enable_vcn(adev, false, i);
+20
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
···
     WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
         0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
 
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
     return 0;
 }
···
         WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
         fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
     }
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, i, mmUVD_STATUS);
 
     return 0;
 }
···
     WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
          ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
 
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
+
     return 0;
 }
···
 
     /* enable VCN power gating */
     vcn_v3_0_enable_static_power_gating(vinst);
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, i, mmUVD_STATUS);
 
 done:
     if (adev->pm.dpm_enabled)
+20
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
···
         ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
         VCN_RB1_DB_CTRL__EN_MASK);
 
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
+
     return 0;
 }
···
     tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
     WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
     fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, i, regUVD_STATUS);
 
     return 0;
 }
···
     /* disable dynamic power gating mode */
     WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
          ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
 }
 
 /**
···
 
     /* enable VCN power gating */
     vcn_v4_0_enable_static_power_gating(vinst);
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, i, regUVD_STATUS);
 
 done:
     if (adev->pm.dpm_enabled)
+81
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
···
     if (r)
         return r;
 
+    /* VCN POISON TRAP */
+    r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
+        VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq);
+
     for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
 
         r = amdgpu_vcn_sw_init(adev, i);
···
         if (vinst->cur_state != AMD_PG_STATE_GATE)
             vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
     }
+
+    if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+        amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
 
     return 0;
 }
···
     /*resetting done, fw can check RB ring */
     fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
 
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+
     return 0;
 }
···
     /* disable dynamic power gating mode */
     WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
          ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
+
     return 0;
 }
···
 
     /* apply HW clock gating */
     vcn_v4_0_3_enable_clock_gating(vinst);
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
 
 Done:
     return 0;
···
     return 0;
 }
 
+static int vcn_v4_0_3_set_ras_interrupt_state(struct amdgpu_device *adev,
+                    struct amdgpu_irq_src *source,
+                    unsigned int type,
+                    enum amdgpu_interrupt_state state)
+{
+    return 0;
+}
+
 static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
     .set = vcn_v4_0_3_set_interrupt_state,
     .process = vcn_v4_0_3_process_interrupt,
+};
+
+static const struct amdgpu_irq_src_funcs vcn_v4_0_3_ras_irq_funcs = {
+    .set = vcn_v4_0_3_set_ras_interrupt_state,
+    .process = amdgpu_vcn_process_poison_irq,
 };
 
 /**
···
         adev->vcn.inst->irq.num_types++;
     }
     adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
+
+    adev->vcn.inst->ras_poison_irq.num_types = 1;
+    adev->vcn.inst->ras_poison_irq.funcs = &vcn_v4_0_3_ras_irq_funcs;
 }
 
 static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
···
         vcn_v4_0_3_inst_reset_ras_error_count(adev, i);
 }
 
+static uint32_t vcn_v4_0_3_query_poison_by_instance(struct amdgpu_device *adev,
+        uint32_t instance, uint32_t sub_block)
+{
+    uint32_t poison_stat = 0, reg_value = 0;
+
+    switch (sub_block) {
+    case AMDGPU_VCN_V4_0_3_VCPU_VCODEC:
+        reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
+        poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
+        break;
+    default:
+        break;
+    }
+
+    if (poison_stat)
+        dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
+             instance, sub_block);
+
+    return poison_stat;
+}
+
+static bool vcn_v4_0_3_query_poison_status(struct amdgpu_device *adev)
+{
+    uint32_t inst, sub;
+    uint32_t poison_stat = 0;
+
+    for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
+        for (sub = 0; sub < AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK; sub++)
+            poison_stat +=
+                vcn_v4_0_3_query_poison_by_instance(adev, inst, sub);
+
+    return !!poison_stat;
+}
+
 static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = {
     .query_ras_error_count = vcn_v4_0_3_query_ras_error_count,
     .reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count,
+    .query_poison_status = vcn_v4_0_3_query_poison_status,
 };
 
 static int vcn_v4_0_3_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
···
     r = amdgpu_ras_block_late_init(adev, ras_block);
     if (r)
         return r;
+
+    if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+        adev->vcn.inst->ras_poison_irq.funcs) {
+        r = amdgpu_irq_get(adev, &adev->vcn.inst->ras_poison_irq, 0);
+        if (r)
+            goto late_fini;
+    }
 
     r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN,
                 &vcn_v4_0_3_aca_info, NULL);
+6
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
···
 #ifndef __VCN_V4_0_3_H__
 #define __VCN_V4_0_3_H__
 
+enum amdgpu_vcn_v4_0_3_sub_block {
+    AMDGPU_VCN_V4_0_3_VCPU_VCODEC = 0,
+
+    AMDGPU_VCN_V4_0_3_MAX_SUB_BLOCK,
+};
+
 extern const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block;
 
 void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+10
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
···
     /* disable dynamic power gating mode */
     WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
          ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
 }
 
 /**
···
 
     /* enable VCN power gating */
     vcn_v4_0_5_enable_static_power_gating(vinst);
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, i, regUVD_STATUS);
 
 done:
     if (adev->pm.dpm_enabled)
+20
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
···
         ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
         VCN_RB1_DB_CTRL__EN_MASK);
 
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
+
     return 0;
 }
···
     WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
     fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
 
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, i, regUVD_STATUS);
+
     return 0;
 }
···
     /* disable dynamic power gating mode */
     WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
          ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, inst_idx, regUVD_STATUS);
 
     return;
 }
···
 
     /* enable VCN power gating */
     vcn_v5_0_0_enable_static_power_gating(vinst);
+
+    /* Keeping one read-back to ensure all register writes are done,
+     * otherwise it may introduce race conditions.
+     */
+    RREG32_SOC15(VCN, i, regUVD_STATUS);
 
 done:
     if (adev->pm.dpm_enabled)
+179 -1
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
··· 46 46 static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst, 47 47 enum amd_powergating_state state); 48 48 static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring); 49 - 49 + static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev); 50 50 /** 51 51 * vcn_v5_0_1_early_init - set function pointers and load microcode 52 52 * ··· 66 66 67 67 vcn_v5_0_1_set_unified_ring_funcs(adev); 68 68 vcn_v5_0_1_set_irq_funcs(adev); 69 + vcn_v5_0_1_set_ras_funcs(adev); 69 70 70 71 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 71 72 adev->vcn.inst[i].set_pg_state = vcn_v5_0_1_set_pg_state; ··· 113 112 VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq); 114 113 if (r) 115 114 return r; 115 + 116 + /* VCN POISON TRAP */ 117 + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 118 + VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq); 116 119 117 120 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 118 121 vcn_inst = GET_INST(VCN, i); ··· 283 278 if (vinst->cur_state != AMD_PG_STATE_GATE) 284 279 vinst->set_pg_state(vinst, AMD_PG_STATE_GATE); 285 280 } 281 + 282 + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) 283 + amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0); 286 284 287 285 return 0; 288 286 } ··· 1038 1030 WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); 1039 1031 fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); 1040 1032 1033 + /* Keeping one read-back to ensure all register writes are done, 1034 + * otherwise it may introduce race conditions. 1035 + */ 1036 + RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); 1037 + 1041 1038 return 0; 1042 1039 } 1043 1040 ··· 1077 1064 /* disable dynamic power gating mode */ 1078 1065 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0, 1079 1066 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); 1067 + 1068 + /* Keeping one read-back to ensure all register writes are done, 1069 + * otherwise it may introduce race conditions. 1070 + */ 1071 + RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); 1080 1072 } 1081 1073 1082 1074 /** ··· 1156 1138 1157 1139 /* clear status */ 1158 1140 WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0); 1141 + 1142 + /* Keeping one read-back to ensure all register writes are done, 1143 + * otherwise it may introduce race conditions. 
1144 + */ 1145 + RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); 1159 1146 1160 1147 return 0; 1161 1148 } ··· 1414 1391 return 0; 1415 1392 } 1416 1393 1394 + static int vcn_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev, 1395 + struct amdgpu_irq_src *source, 1396 + unsigned int type, 1397 + enum amdgpu_interrupt_state state) 1398 + { 1399 + return 0; 1400 + } 1401 + 1417 1402 static const struct amdgpu_irq_src_funcs vcn_v5_0_1_irq_funcs = { 1418 1403 .process = vcn_v5_0_1_process_interrupt, 1419 1404 }; 1405 + 1406 + static const struct amdgpu_irq_src_funcs vcn_v5_0_1_ras_irq_funcs = { 1407 + .set = vcn_v5_0_1_set_ras_interrupt_state, 1408 + .process = amdgpu_vcn_process_poison_irq, 1409 + }; 1410 + 1420 1411 1421 1412 /** 1422 1413 * vcn_v5_0_1_set_irq_funcs - set VCN block interrupt irq functions ··· 1445 1408 1446 1409 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) 1447 1410 adev->vcn.inst->irq.num_types++; 1411 + 1448 1412 adev->vcn.inst->irq.funcs = &vcn_v5_0_1_irq_funcs; 1413 + 1414 + adev->vcn.inst->ras_poison_irq.num_types = 1; 1415 + adev->vcn.inst->ras_poison_irq.funcs = &vcn_v5_0_1_ras_irq_funcs; 1416 + 1449 1417 } 1450 1418 1451 1419 static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = { ··· 1482 1440 .rev = 1, 1483 1441 .funcs = &vcn_v5_0_1_ip_funcs, 1484 1442 }; 1443 + 1444 + static uint32_t vcn_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev, 1445 + uint32_t instance, uint32_t sub_block) 1446 + { 1447 + uint32_t poison_stat = 0, reg_value = 0; 1448 + 1449 + switch (sub_block) { 1450 + case AMDGPU_VCN_V5_0_1_VCPU_VCODEC: 1451 + reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS); 1452 + poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF); 1453 + break; 1454 + default: 1455 + break; 1456 + } 1457 + 1458 + if (poison_stat) 1459 + dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n", 1460 + instance, sub_block); 1461 + 1462 + return poison_stat; 1463 + } 1464 + 1465 + static bool vcn_v5_0_1_query_poison_status(struct amdgpu_device *adev) 1466 + { 1467 + uint32_t inst, sub; 1468 + uint32_t poison_stat = 0; 1469 + 1470 + for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++) 1471 + for (sub = 0; sub < AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK; sub++) 1472 + poison_stat += 1473 + vcn_v5_0_1_query_poison_by_instance(adev, inst, sub); 1474 + 1475 + return !!poison_stat; 1476 + } 1477 + 1478 + static const struct amdgpu_ras_block_hw_ops vcn_v5_0_1_ras_hw_ops = { 1479 + .query_poison_status = vcn_v5_0_1_query_poison_status, 1480 + }; 1481 + 1482 + static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, 1483 + enum aca_smu_type type, void *data) 1484 + { 1485 + struct aca_bank_info info; 1486 + u64 misc0; 1487 + int ret; 1488 + 1489 + ret = aca_bank_info_decode(bank, &info); 1490 + if (ret) 1491 + return ret; 1492 + 1493 + misc0 = bank->regs[ACA_REG_IDX_MISC0]; 1494 + switch (type) { 1495 + case ACA_SMU_TYPE_UE: 1496 + bank->aca_err_type = ACA_ERROR_TYPE_UE; 1497 + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, 1498 + 1ULL); 1499 + break; 1500 + case ACA_SMU_TYPE_CE: 1501 + bank->aca_err_type = ACA_ERROR_TYPE_CE; 1502 + ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1503 + ACA_REG__MISC0__ERRCNT(misc0)); 1504 + break; 1505 + default: 1506 + return -EINVAL; 1507 + } 1508 + 1509 + return ret; 1510 + } 1511 + 1512 + /* reference to smu driver if header file */ 1513 + static int vcn_v5_0_1_err_codes[] = { 1514 + 14, 15, /* VCN */ 1515 + }; 1516 + 
1517 + static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, 1518 + enum aca_smu_type type, void *data) 1519 + { 1520 + u32 instlo; 1521 + 1522 + instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); 1523 + instlo &= GENMASK(31, 1); 1524 + 1525 + if (instlo != mmSMNAID_AID0_MCA_SMU) 1526 + return false; 1527 + 1528 + if (aca_bank_check_error_codes(handle->adev, bank, 1529 + vcn_v5_0_1_err_codes, 1530 + ARRAY_SIZE(vcn_v5_0_1_err_codes))) 1531 + return false; 1532 + 1533 + return true; 1534 + } 1535 + 1536 + static const struct aca_bank_ops vcn_v5_0_1_aca_bank_ops = { 1537 + .aca_bank_parser = vcn_v5_0_1_aca_bank_parser, 1538 + .aca_bank_is_valid = vcn_v5_0_1_aca_bank_is_valid, 1539 + }; 1540 + 1541 + static const struct aca_info vcn_v5_0_1_aca_info = { 1542 + .hwip = ACA_HWIP_TYPE_SMU, 1543 + .mask = ACA_ERROR_UE_MASK, 1544 + .bank_ops = &vcn_v5_0_1_aca_bank_ops, 1545 + }; 1546 + 1547 + static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) 1548 + { 1549 + int r; 1550 + 1551 + r = amdgpu_ras_block_late_init(adev, ras_block); 1552 + if (r) 1553 + return r; 1554 + 1555 + r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN, 1556 + &vcn_v5_0_1_aca_info, NULL); 1557 + if (r) 1558 + goto late_fini; 1559 + 1560 + return 0; 1561 + 1562 + late_fini: 1563 + amdgpu_ras_block_late_fini(adev, ras_block); 1564 + 1565 + return r; 1566 + } 1567 + 1568 + static struct amdgpu_vcn_ras vcn_v5_0_1_ras = { 1569 + .ras_block = { 1570 + .hw_ops = &vcn_v5_0_1_ras_hw_ops, 1571 + .ras_late_init = vcn_v5_0_1_ras_late_init, 1572 + }, 1573 + }; 1574 + 1575 + static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev) 1576 + { 1577 + adev->vcn.ras = &vcn_v5_0_1_ras; 1578 + }
+7
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h
··· 27 27 #define regVCN_RRMT_CNTL 0x0940 28 28 #define regVCN_RRMT_CNTL_BASE_IDX 1 29 29 30 + 31 + enum amdgpu_vcn_v5_0_1_sub_block { 32 + AMDGPU_VCN_V5_0_1_VCPU_VCODEC = 0, 33 + 34 + AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK, 35 + }; 36 + 30 37 extern const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block; 31 38 32 39 #endif /* __VCN_v5_0_1_H__ */
+1 -6
drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
··· 91 91 const struct cik_ih_ring_entry *ihre = 92 92 (const struct cik_ih_ring_entry *)ih_ring_entry; 93 93 uint32_t context_id = ihre->data & 0xfffffff; 94 - unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8; 95 94 u32 pasid = (ihre->ring_id & 0xffff0000) >> 16; 96 95 97 96 if (pasid == 0) ··· 124 125 return; 125 126 } 126 127 127 - if (info.vmid == vmid) 128 - kfd_signal_vm_fault_event(pdd, &info, NULL); 129 - else 130 - kfd_signal_vm_fault_event(pdd, &info, NULL); 131 - 128 + kfd_signal_vm_fault_event(pdd, &info, NULL); 132 129 kfd_unref_process(p); 133 130 } 134 131 }
+1 -3
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 2039 2039 2040 2040 num_events = kfd_get_num_events(p); 2041 2041 2042 - ret = svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size); 2043 - if (ret) 2044 - return ret; 2042 + svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size); 2045 2043 2046 2044 *num_objects = num_queues + num_events + num_svm_ranges; 2047 2045
+1
drivers/gpu/drm/amd/amdkfd/kfd_events.c
··· 1350 1350 user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id); 1351 1351 if (unlikely(user_gpu_id == -EINVAL)) { 1352 1352 WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id); 1353 + kfd_unref_process(p); 1353 1354 return; 1354 1355 } 1355 1356
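
Editor's note: the added kfd_unref_process() closes a reference leak; the lookup that produced p took a reference, and this early return skipped the drop. A simplified sketch of the rule that every exit after a successful lookup must release the reference, with hypothetical lookup()/unref() helpers in place of the kfd APIs:

    #include <stdlib.h>

    struct proc {
            int refcount;
    };

    static struct proc *lookup(void)
    {
            struct proc *p = malloc(sizeof(*p));

            if (p)
                    p->refcount = 1; /* a successful lookup hands out a reference */
            return p;
    }

    static void unref(struct proc *p)
    {
            if (p && --p->refcount == 0)
                    free(p);
    }

    static void handle_event(int gpu_id)
    {
            struct proc *p = lookup();

            if (!p)
                    return;

            if (gpu_id < 0) {
                    unref(p); /* the fix: this early-return path must drop the ref too */
                    return;
            }

            /* ... deliver the event ... */
            unref(p);
    }
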
+10 -13
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
··· 279 279 /* Starting with GFX11, wptr BOs must be mapped to GART for MES to determine work 280 280 * on unmapped queues for usermode queue oversubscription (no aggregated doorbell) 281 281 */ 282 - if (((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) 283 - >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) { 284 - if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) { 285 - pr_err("Queue memory allocated to wrong device\n"); 286 - retval = -EINVAL; 287 - goto free_gang_ctx_bo; 288 - } 282 + if (dev->adev != amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) { 283 + pr_err("Queue memory allocated to wrong device\n"); 284 + retval = -EINVAL; 285 + goto free_gang_ctx_bo; 286 + } 289 287 290 - retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo, 291 - &(*q)->wptr_bo_gart); 292 - if (retval) { 293 - pr_err("Failed to map wptr bo to GART\n"); 294 - goto free_gang_ctx_bo; 295 - } 288 + retval = amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo, 289 + &(*q)->wptr_bo_gart); 290 + if (retval) { 291 + pr_err("Failed to map wptr bo to GART\n"); 292 + goto free_gang_ctx_bo; 296 293 } 297 294 } 298 295
+2 -7
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
··· 4075 4075 return ret; 4076 4076 } 4077 4077 4078 - int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges, 4079 - uint64_t *svm_priv_data_size) 4078 + void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges, 4079 + uint64_t *svm_priv_data_size) 4080 4080 { 4081 4081 uint64_t total_size, accessibility_size, common_attr_size; 4082 4082 int nattr_common = 4, nattr_accessibility = 1; ··· 4088 4088 *svm_priv_data_size = 0; 4089 4089 4090 4090 svms = &p->svms; 4091 - if (!svms) 4092 - return -EINVAL; 4093 4091 4094 4092 mutex_lock(&svms->lock); 4095 4093 list_for_each_entry(prange, &svms->list, list) { ··· 4129 4131 4130 4132 pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges, 4131 4133 *svm_priv_data_size); 4132 - return 0; 4133 4134 } 4134 4135 4135 4136 int kfd_criu_checkpoint_svm(struct kfd_process *p, ··· 4145 4148 struct mm_struct *mm; 4146 4149 4147 4150 svms = &p->svms; 4148 - if (!svms) 4149 - return -EINVAL; 4150 4151 4151 4152 mm = get_task_mm(p->lead_thread); 4152 4153 if (!mm) {
+5 -6
drivers/gpu/drm/amd/amdkfd/kfd_svm.h
··· 184 184 void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr, 185 185 unsigned long offset, unsigned long npages); 186 186 void svm_range_dma_unmap(struct svm_range *prange); 187 - int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges, 188 - uint64_t *svm_priv_data_size); 187 + void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges, 188 + uint64_t *svm_priv_data_size); 189 189 int kfd_criu_checkpoint_svm(struct kfd_process *p, 190 190 uint8_t __user *user_priv_data, 191 191 uint64_t *priv_offset); ··· 237 237 return -EINVAL; 238 238 } 239 239 240 - static inline int svm_range_get_info(struct kfd_process *p, 241 - uint32_t *num_svm_ranges, 242 - uint64_t *svm_priv_data_size) 240 + static inline void svm_range_get_info(struct kfd_process *p, 241 + uint32_t *num_svm_ranges, 242 + uint64_t *svm_priv_data_size) 243 243 { 244 244 *num_svm_ranges = 0; 245 245 *svm_priv_data_size = 0; 246 - return 0; 247 246 } 248 247 249 248 static inline int kfd_criu_checkpoint_svm(struct kfd_process *p,
+11 -17
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 676 676 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 677 677 678 678 if (acrtc->dm_irq_params.stream && 679 - acrtc->dm_irq_params.vrr_params.supported) { 680 - bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled; 681 - bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled; 682 - bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE; 683 - 679 + acrtc->dm_irq_params.vrr_params.supported && 680 + acrtc->dm_irq_params.freesync_config.state == 681 + VRR_STATE_ACTIVE_VARIABLE) { 684 682 mod_freesync_handle_v_update(adev->dm.freesync_module, 685 683 acrtc->dm_irq_params.stream, 686 684 &acrtc->dm_irq_params.vrr_params); 687 685 688 - /* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */ 689 - if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) { 690 - dc_stream_adjust_vmin_vmax(adev->dm.dc, 691 - acrtc->dm_irq_params.stream, 692 - &acrtc->dm_irq_params.vrr_params.adjust); 693 - } 686 + dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream, 687 + &acrtc->dm_irq_params.vrr_params.adjust); 694 688 } 695 689 696 690 /* ··· 2000 2006 if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH) 2001 2007 adev->dm.dc->debug.force_subvp_mclk_switch = true; 2002 2008 2003 - if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP) 2009 + if (amdgpu_dc_debug_mask & DC_DISABLE_SUBVP_FAMS) { 2004 2010 adev->dm.dc->debug.force_disable_subvp = true; 2011 + adev->dm.dc->debug.fams2_config.bits.enable = false; 2012 + } 2005 2013 2006 2014 if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) { 2007 2015 adev->dm.dc->debug.using_dml2 = true; ··· 2015 2019 2016 2020 if (amdgpu_dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK) 2017 2021 adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true; 2022 + 2023 + if (amdgpu_dc_debug_mask & DC_SKIP_DETECTION_LT) 2024 + adev->dm.dc->debug.skip_detection_link_training = true; 2018 2025 2019 2026 adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; 2020 2027 ··· 3379 3380 3380 3381 return 0; 3381 3382 } 3382 - 3383 - /* leave display off for S4 sequence */ 3384 - if (adev->in_s4) 3385 - return 0; 3386 - 3387 3383 /* Recreate dc_state - DC invalidates it when setting power state to S3. */ 3388 3384 dc_state_release(dm_state->context); 3389 3385 dm_state->context = dc_state_create(dm->dc, NULL);
+1 -10
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
··· 246 246 struct vblank_control_work *vblank_work = 247 247 container_of(work, struct vblank_control_work, work); 248 248 struct amdgpu_display_manager *dm = vblank_work->dm; 249 - struct amdgpu_device *adev = drm_to_adev(dm->ddev); 250 - int r; 251 249 252 250 mutex_lock(&dm->dc_lock); 253 251 ··· 273 275 vblank_work->acrtc->dm_irq_params.allow_sr_entry); 274 276 } 275 277 276 - if (dm->active_vblank_irq_count == 0) { 277 - r = amdgpu_dpm_pause_power_profile(adev, true); 278 - if (r) 279 - dev_warn(adev->dev, "failed to set default power profile mode\n"); 278 + if (dm->active_vblank_irq_count == 0) 280 279 dc_allow_idle_optimizations(dm->dc, true); 281 - r = amdgpu_dpm_pause_power_profile(adev, false); 282 - if (r) 283 - dev_warn(adev->dev, "failed to restore the power profile mode\n"); 284 - } 285 280 286 281 mutex_unlock(&dm->dc_lock); 287 282
+2 -2
drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
··· 1393 1393 if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) { 1394 1394 /*determine the minimum dram clock change margin for each set of clock frequencies*/ 1395 1395 data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin); 1396 - /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/ 1396 + /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/ 1397 1397 data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k])))); 1398 1398 if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) { 1399 1399 data->display_pstate_change_enable[k] = 1; ··· 1407 1407 if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) { 1408 1408 /*determine the minimum dram clock change margin for each display pipe*/ 1409 1409 data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin); 1410 - /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/ 1410 + /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/ 1411 1411 data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k])))); 1412 1412 if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) { 1413 1413 data->display_pstate_change_enable[k] = 1;
+4 -4
drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
··· 62 62 *ptr = NULL; 63 63 } 64 64 65 - static enum gpio_result get_value( 65 + static enum gpio_result dal_hw_hpd_get_value( 66 66 const struct hw_gpio_pin *ptr, 67 67 uint32_t *value) 68 68 { ··· 85 85 return dal_hw_gpio_get_value(ptr, value); 86 86 } 87 87 88 - static enum gpio_result set_config( 88 + static enum gpio_result dal_hw_hpd_set_config( 89 89 struct hw_gpio_pin *ptr, 90 90 const struct gpio_config_data *config_data) 91 91 { ··· 104 104 static const struct hw_gpio_pin_funcs funcs = { 105 105 .destroy = dal_hw_hpd_destroy, 106 106 .open = dal_hw_gpio_open, 107 - .get_value = get_value, 107 + .get_value = dal_hw_hpd_get_value, 108 108 .set_value = dal_hw_gpio_set_value, 109 - .set_config = set_config, 109 + .set_config = dal_hw_hpd_set_config, 110 110 .change_mode = dal_hw_gpio_change_mode, 111 111 .close = dal_hw_gpio_close, 112 112 };
+18 -8
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
··· 76 76 { 77 77 struct dc_context *dc_ctx = dc->ctx; 78 78 struct resource_pool *pool = dc->res_pool; 79 + bool is_gamut_remap_available = false; 79 80 int i; 80 81 81 82 DTN_INFO("DPP: DGAM mode SHAPER mode 3DLUT mode 3DLUT bit depth" ··· 90 89 struct dcn_dpp_state s = {0}; 91 90 92 91 dpp->funcs->dpp_read_state(dpp, &s); 93 - dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); 92 + if (dpp->funcs->dpp_get_gamut_remap) { 93 + dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); 94 + is_gamut_remap_available = true; 95 + } 94 96 95 97 if (!s.is_enabled) 96 98 continue; 97 99 98 - DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s %12s " 99 - "%010lld %010lld %010lld %010lld " 100 - "%010lld %010lld %010lld %010lld " 101 - "%010lld %010lld %010lld %010lld", 100 + DTN_INFO("[%2d]: %8s %11s %10s %15s %10s %9s", 102 101 dpp->inst, 103 102 (s.dgam_lut_mode == 0) ? "Bypass" : 104 103 ((s.dgam_lut_mode == 1) ? "sRGB" : ··· 115 114 (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit", 116 115 (s.lut3d_size == 0) ? "17x17x17" : "9x9x9", 117 116 (s.rgam_lut_mode == 1) ? "RAM A" : 118 - ((s.rgam_lut_mode == 1) ? "RAM B" : "Bypass"), 117 + ((s.rgam_lut_mode == 1) ? "RAM B" : "Bypass")); 118 + 119 + if (is_gamut_remap_available) { 120 + DTN_INFO(" %12s " 121 + "%010lld %010lld %010lld %010lld " 122 + "%010lld %010lld %010lld %010lld " 123 + "%010lld %010lld %010lld %010lld", 124 + 119 125 (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : 120 - ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : 121 - "SW"), 126 + ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : 127 + "SW"), 122 128 s.gamut_remap.temperature_matrix[0].value, 123 129 s.gamut_remap.temperature_matrix[1].value, 124 130 s.gamut_remap.temperature_matrix[2].value, ··· 138 130 s.gamut_remap.temperature_matrix[9].value, 139 131 s.gamut_remap.temperature_matrix[10].value, 140 132 s.gamut_remap.temperature_matrix[11].value); 133 + } 134 + 141 135 DTN_INFO("\n"); 142 136 } 143 137 DTN_INFO("\n");
+17 -7
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
··· 74 74 { 75 75 struct dc_context *dc_ctx = dc->ctx; 76 76 struct resource_pool *pool = dc->res_pool; 77 + bool is_gamut_remap_available = false; 77 78 int i; 78 79 79 80 DTN_INFO("DPP: DGAM ROM DGAM ROM type DGAM LUT SHAPER mode" ··· 89 88 struct dcn_dpp_state s = {0}; 90 89 91 90 dpp->funcs->dpp_read_state(dpp, &s); 92 - dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); 91 + 92 + if (dpp->funcs->dpp_get_gamut_remap) { 93 + dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap); 94 + is_gamut_remap_available = true; 95 + } 93 96 94 97 if (!s.is_enabled) 95 98 continue; 96 99 97 - DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s" 98 - " %12s " 99 - "%010lld %010lld %010lld %010lld " 100 - "%010lld %010lld %010lld %010lld " 101 - "%010lld %010lld %010lld %010lld", 100 + DTN_INFO("[%2d]: %7x %13s %8s %11s %10s %15s %10s %9s", 102 101 dpp->inst, 103 102 s.pre_dgam_mode, 104 103 (s.pre_dgam_select == 0) ? "sRGB" : ··· 122 121 (s.lut3d_size == 0) ? "17x17x17" : "9x9x9", 123 122 (s.rgam_lut_mode == 0) ? "Bypass" : 124 123 ((s.rgam_lut_mode == 1) ? "RAM A" : 125 - "RAM B"), 124 + "RAM B")); 125 + 126 + if (is_gamut_remap_available) { 127 + DTN_INFO(" %12s " 128 + "%010lld %010lld %010lld %010lld " 129 + "%010lld %010lld %010lld %010lld " 130 + "%010lld %010lld %010lld %010lld", 131 + 126 132 (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" : 127 133 ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : 128 134 "SW"), ··· 145 137 s.gamut_remap.temperature_matrix[9].value, 146 138 s.gamut_remap.temperature_matrix[10].value, 147 139 s.gamut_remap.temperature_matrix[11].value); 140 + } 141 + 148 142 DTN_INFO("\n"); 149 143 } 150 144 DTN_INFO("\n");
+1 -1
drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
··· 502 502 REG_GET(OTG_DRR_CONTROL, OTG_V_TOTAL_LAST_USED_BY_DRR, refresh_rate); 503 503 } 504 504 505 - static struct timing_generator_funcs dcn20_tg_funcs = { 505 + static const struct timing_generator_funcs dcn20_tg_funcs = { 506 506 .validate_timing = optc1_validate_timing, 507 507 .program_timing = optc1_program_timing, 508 508 .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+1 -1
drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
··· 129 129 *num_of_src_opp = 1; 130 130 } 131 131 132 - static struct timing_generator_funcs dcn201_tg_funcs = { 132 + static const struct timing_generator_funcs dcn201_tg_funcs = { 133 133 .validate_timing = optc201_validate_timing, 134 134 .program_timing = optc1_program_timing, 135 135 .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+1 -1
drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
··· 357 357 optc1_clear_optc_underflow(optc); 358 358 } 359 359 360 - static struct timing_generator_funcs dcn30_tg_funcs = { 360 + static const struct timing_generator_funcs dcn30_tg_funcs = { 361 361 .validate_timing = optc1_validate_timing, 362 362 .program_timing = optc1_program_timing, 363 363 .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+1 -1
drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
··· 109 109 OTG_TRIGA_CLEAR, 1); 110 110 } 111 111 112 - static struct timing_generator_funcs dcn30_tg_funcs = { 112 + static const struct timing_generator_funcs dcn30_tg_funcs = { 113 113 .validate_timing = optc1_validate_timing, 114 114 .program_timing = optc1_program_timing, 115 115 .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+1 -1
drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
··· 315 315 s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL); 316 316 } 317 317 318 - static struct timing_generator_funcs dcn31_tg_funcs = { 318 + static const struct timing_generator_funcs dcn31_tg_funcs = { 319 319 .validate_timing = optc1_validate_timing, 320 320 .program_timing = optc1_program_timing, 321 321 .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+1 -1
drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
··· 192 192 } 193 193 194 194 195 - static struct timing_generator_funcs dcn314_tg_funcs = { 195 + static const struct timing_generator_funcs dcn314_tg_funcs = { 196 196 .validate_timing = optc1_validate_timing, 197 197 .program_timing = optc1_program_timing, 198 198 .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+1 -1
drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
··· 297 297 optc32_setup_manual_trigger(optc); 298 298 } 299 299 300 - static struct timing_generator_funcs dcn32_tg_funcs = { 300 + static const struct timing_generator_funcs dcn32_tg_funcs = { 301 301 .validate_timing = optc1_validate_timing, 302 302 .program_timing = optc1_program_timing, 303 303 .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+1 -1
drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
··· 428 428 } 429 429 } 430 430 431 - static struct timing_generator_funcs dcn35_tg_funcs = { 431 + static const struct timing_generator_funcs dcn35_tg_funcs = { 432 432 .validate_timing = optc1_validate_timing, 433 433 .program_timing = optc1_program_timing, 434 434 .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+1 -1
drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
··· 459 459 return true; 460 460 } 461 461 462 - static struct timing_generator_funcs dcn401_tg_funcs = { 462 + static const struct timing_generator_funcs dcn401_tg_funcs = { 463 463 .validate_timing = optc1_validate_timing, 464 464 .program_timing = optc1_program_timing, 465 465 .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+2 -2
drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
··· 1938 1938 dc->caps.color.dpp.gamma_corr = 1; 1939 1939 dc->caps.color.dpp.dgam_rom_for_yuv = 0; 1940 1940 1941 - dc->caps.color.dpp.hw_3d_lut = 1; 1942 - dc->caps.color.dpp.ogam_ram = 1; 1941 + dc->caps.color.dpp.hw_3d_lut = 0; 1942 + dc->caps.color.dpp.ogam_ram = 0; 1943 1943 // no OGAM ROM on DCN2 and later ASICs 1944 1944 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; 1945 1945 dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+2 -32
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
··· 2139 2139 } stream_v1; //v1 2140 2140 }; 2141 2141 2142 - struct dmub_fams2_config_v2 { 2143 - struct dmub_cmd_fams2_global_config global; 2144 - struct dmub_fams2_stream_static_state_v1 stream_v1[DMUB_MAX_STREAMS]; //v1 2145 - }; 2146 - 2147 2142 /** 2148 2143 * DMUB rb command definition for FAMS2 (merged SubVP, FPO, Legacy) 2149 2144 */ 2150 2145 struct dmub_rb_cmd_fams2 { 2151 2146 struct dmub_cmd_header header; 2152 2147 union dmub_cmd_fams2_config config; 2153 - }; 2154 - 2155 - /** 2156 - * Indirect buffer descriptor 2157 - */ 2158 - struct dmub_ib_data { 2159 - union dmub_addr src; // location of indirect buffer in memory 2160 - uint16_t size; // indirect buffer size in bytes 2161 - }; 2162 - 2163 - /** 2164 - * DMUB rb command definition for commands passed over indirect buffer 2165 - */ 2166 - struct dmub_rb_cmd_ib { 2167 - struct dmub_cmd_header header; 2168 - struct dmub_ib_data ib_data; 2169 2148 }; 2170 2149 2171 2150 /** ··· 2170 2191 * DCN hardware notify power state. 2171 2192 */ 2172 2193 DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3, 2173 - 2174 - /** 2175 - * DCN notify to release HW. 2176 - */ 2177 - DMUB_CMD__IDLE_OPT_RELEASE_HW = 4, 2178 2194 }; 2179 2195 2180 2196 /** ··· 2931 2957 */ 2932 2958 DMUB_CMD__FAMS_SET_MANUAL_TRIGGER = 3, 2933 2959 DMUB_CMD__FAMS2_CONFIG = 4, 2934 - DMUB_CMD__FAMS2_IB_CONFIG = 5, 2935 - DMUB_CMD__FAMS2_DRR_UPDATE = 6, 2936 - DMUB_CMD__FAMS2_FLIP = 7, 2960 + DMUB_CMD__FAMS2_DRR_UPDATE = 5, 2961 + DMUB_CMD__FAMS2_FLIP = 6, 2937 2962 }; 2938 2963 2939 2964 /** ··· 5926 5953 * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command. 5927 5954 */ 5928 5955 struct dmub_rb_cmd_assr_enable assr_enable; 5929 - 5930 5956 struct dmub_rb_cmd_fams2 fams2_config; 5931 - 5932 - struct dmub_rb_cmd_ib ib_fams2_config; 5933 5957 5934 5958 struct dmub_rb_cmd_fams2_drr_update fams2_drr_update; 5935 5959
+1 -1
drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
··· 424 424 /* 425 425 * DFS-bypass flag 426 426 */ 427 - /* Copy of SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS from atombios.h */ 427 + /* Copy of SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS from atombios.h */ 428 428 enum { 429 429 DFS_BYPASS_ENABLE = 0x10 430 430 };
+3
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
··· 368 368 struct mod_hdcp_display *display = get_first_active_display(hdcp); 369 369 enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; 370 370 371 + if (!display) 372 + return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; 373 + 371 374 mutex_lock(&psp->hdcp_context.mutex); 372 375 hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; 373 376 memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+8 -2
drivers/gpu/drm/amd/include/amd_shared.h
··· 351 351 DC_DISABLE_HDMI_CEC = 0x10000, 352 352 353 353 /** 354 - * @DC_DISABLE_SUBVP: If set, disable DCN Sub-Viewport feature in amdgpu driver. 354 + * @DC_DISABLE_SUBVP_FAMS: If set, disable DCN Sub-Viewport & Firmware Assisted 355 + * Memory Clock Switching (FAMS) feature in amdgpu driver. 355 356 */ 356 - DC_DISABLE_SUBVP = 0x20000, 357 + DC_DISABLE_SUBVP_FAMS = 0x20000, 357 358 /** 358 359 * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: If set, disable support for custom brightness curves 359 360 */ ··· 371 370 * path failure, retry using legacy SW path. 372 371 */ 373 372 DC_HDCP_LC_ENABLE_SW_FALLBACK = 0x100000, 373 + 374 + /** 375 + * @DC_SKIP_DETECTION_LT: If set, skip detection link training 376 + */ 377 + DC_SKIP_DETECTION_LT = 0x200000, 374 378 }; 375 379 376 380 enum amd_dpm_forced_level;
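
Editor's note: these enum values are user-selectable debug switches. If memory of the parameter name serves (an assumption; verify against amdgpu_drv.c), they are wired to amdgpu's dcdebugmask module parameter, so disabling SubVP/FAMS per the hunk above would look like:

    amdgpu.dcdebugmask=0x20000

on the kernel command line, with bits OR-ed together to combine switches.
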
+26
drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h
··· 9776 9776 #define regDIG0_DIG_BE_CNTL_BASE_IDX 2 9777 9777 #define regDIG0_DIG_BE_EN_CNTL 0x20bd 9778 9778 #define regDIG0_DIG_BE_EN_CNTL_BASE_IDX 2 9779 + #define regDIG0_HDCP_INT_CONTROL 0x20c0 9780 + #define regDIG0_HDCP_INT_CONTROL_BASE_IDX 2 9781 + #define regDIG0_HDCP_LINK0_STATUS 0x20c1 9782 + #define regDIG0_HDCP_LINK0_STATUS_BASE_IDX 2 9783 + #define regDIG0_HDCP_I2C_CONTROL_0 0x20c2 9784 + #define regDIG0_HDCP_I2C_CONTROL_0_BASE_IDX 2 9785 + #define regDIG0_HDCP_I2C_CONTROL_1 0x20c3 9786 + #define regDIG0_HDCP_I2C_CONTROL_1_BASE_IDX 2 9779 9787 #define regDIG0_TMDS_CNTL 0x20e4 9780 9788 #define regDIG0_TMDS_CNTL_BASE_IDX 2 9781 9789 #define regDIG0_TMDS_CONTROL_CHAR 0x20e5 ··· 10089 10081 #define regDIG1_DIG_BE_CNTL_BASE_IDX 2 10090 10082 #define regDIG1_DIG_BE_EN_CNTL 0x21e1 10091 10083 #define regDIG1_DIG_BE_EN_CNTL_BASE_IDX 2 10084 + #define regDIG1_HDCP_INT_CONTROL 0x21e4 10085 + #define regDIG1_HDCP_INT_CONTROL_BASE_IDX 2 10086 + #define regDIG1_HDCP_I2C_CONTROL_0 0x21e6 10087 + #define regDIG1_HDCP_I2C_CONTROL_0_BASE_IDX 2 10088 + #define regDIG1_HDCP_I2C_CONTROL_1 0x21e7 10089 + #define regDIG1_HDCP_I2C_CONTROL_1_BASE_IDX 2 10092 10090 #define regDIG1_TMDS_CNTL 0x2208 10093 10091 #define regDIG1_TMDS_CNTL_BASE_IDX 2 10094 10092 #define regDIG1_TMDS_CONTROL_CHAR 0x2209 ··· 10400 10386 #define regDIG2_DIG_BE_CNTL_BASE_IDX 2 10401 10387 #define regDIG2_DIG_BE_EN_CNTL 0x2305 10402 10388 #define regDIG2_DIG_BE_EN_CNTL_BASE_IDX 2 10389 + #define regDIG2_HDCP_INT_CONTROL 0x2308 10390 + #define regDIG2_HDCP_INT_CONTROL_BASE_IDX 2 10391 + #define regDIG2_HDCP_I2C_CONTROL_0 0x230a 10392 + #define regDIG2_HDCP_I2C_CONTROL_0_BASE_IDX 2 10393 + #define regDIG2_HDCP_I2C_CONTROL_1 0x230b 10394 + #define regDIG2_HDCP_I2C_CONTROL_1_BASE_IDX 2 10403 10395 #define regDIG2_TMDS_CNTL 0x232c 10404 10396 #define regDIG2_TMDS_CNTL_BASE_IDX 2 10405 10397 #define regDIG2_TMDS_CONTROL_CHAR 0x232d ··· 10711 10691 #define regDIG3_DIG_BE_CNTL_BASE_IDX 2 10712 10692 #define regDIG3_DIG_BE_EN_CNTL 0x2429 10713 10693 #define regDIG3_DIG_BE_EN_CNTL_BASE_IDX 2 10694 + #define regDIG3_HDCP_INT_CONTROL 0x242c 10695 + #define regDIG3_HDCP_INT_CONTROL_BASE_IDX 2 10696 + #define regDIG3_HDCP_I2C_CONTROL_0 0x242e 10697 + #define regDIG3_HDCP_I2C_CONTROL_0_BASE_IDX 2 10698 + #define regDIG3_HDCP_I2C_CONTROL_1 0x242f 10699 + #define regDIG3_HDCP_I2C_CONTROL_1_BASE_IDX 2 10714 10700 #define regDIG3_TMDS_CNTL 0x2450 10715 10701 #define regDIG3_TMDS_CNTL_BASE_IDX 2 10716 10702 #define regDIG3_TMDS_CONTROL_CHAR 0x2451
+16
drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h
··· 2847 2847 #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x1 2848 2848 #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0x2 2849 2849 #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0x3 2850 + #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0x4 2851 + #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x5 2852 + #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0x6 2853 + #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0x7 2854 + #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0x8 2855 + #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0x9 2856 + #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0xa 2857 + #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_DONE_INTERRUPT_DEST__SHIFT 0xb 2850 2858 #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_SUCCESS_INTERRUPT_DEST__SHIFT 0xc 2851 2859 #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_FAIL_INTERRUPT_DEST__SHIFT 0xd 2852 2860 #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_I2C_XFER_REQ_INTERRUPT_DEST__SHIFT 0xe ··· 2879 2871 #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000002L 2880 2872 #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000004L 2881 2873 #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP0_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000008L 2874 + #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00000010L 2875 + #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000020L 2876 + #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000040L 2877 + #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP1_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000080L 2878 + #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00000100L 2879 + #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00000200L 2880 + #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00000400L 2881 + #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP2_I2C_XFER_DONE_INTERRUPT_DEST_MASK 0x00000800L 2882 2882 #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_SUCCESS_INTERRUPT_DEST_MASK 0x00001000L 2883 2883 #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_AUTH_FAIL_INTERRUPT_DEST_MASK 0x00002000L 2884 2884 #define HDCP_INTERRUPT_DEST__DOUT_IHC_HDCP3_I2C_XFER_REQ_INTERRUPT_DEST_MASK 0x00004000L
+6
drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h
··· 1067 1067 #define regVCN_FEATURES_BASE_IDX 1 1068 1068 #define regUVD_GPUIOV_STATUS 0x0055 1069 1069 #define regUVD_GPUIOV_STATUS_BASE_IDX 1 1070 + #define regUVD_RAS_VCPU_VCODEC_STATUS 0x0057 1071 + #define regUVD_RAS_VCPU_VCODEC_STATUS_BASE_IDX 1 1070 1072 #define regUVD_SCRATCH15 0x005c 1073 + #define regUVD_RAS_JPEG0_STATUS 0x0059 1074 + #define regUVD_RAS_JPEG0_STATUS_BASE_IDX 1 1075 + #define regUVD_RAS_JPEG1_STATUS 0x005a 1076 + #define regUVD_RAS_JPEG1_STATUS_BASE_IDX 1 1071 1077 #define regUVD_SCRATCH15_BASE_IDX 1 1072 1078 #define regUVD_VERSION 0x005d 1073 1079 #define regUVD_VERSION_BASE_IDX 1
+16
drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h
··· 5714 5714 //UVD_GPUIOV_STATUS 5715 5715 #define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE__SHIFT 0x0 5716 5716 #define UVD_GPUIOV_STATUS__UVD_GPUIOV_STATUS_VF_ENABLE_MASK 0x00000001L 5717 + //UVD_RAS_VCPU_VCODEC_STATUS 5718 + #define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF__SHIFT 0x0 5719 + #define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF__SHIFT 0x1f 5720 + #define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF_MASK 0x7FFFFFFFL 5721 + #define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF_MASK 0x80000000L 5722 + 5723 + //UVD_RAS_JPEG0_STATUS 5724 + #define UVD_RAS_JPEG0_STATUS__POISONED_VF__SHIFT 0x0 5725 + #define UVD_RAS_JPEG0_STATUS__POISONED_PF__SHIFT 0x1f 5726 + #define UVD_RAS_JPEG0_STATUS__POISONED_VF_MASK 0x7FFFFFFFL 5727 + #define UVD_RAS_JPEG0_STATUS__POISONED_PF_MASK 0x80000000L 5728 + //UVD_RAS_JPEG1_STATUS 5729 + #define UVD_RAS_JPEG1_STATUS__POISONED_VF__SHIFT 0x0 5730 + #define UVD_RAS_JPEG1_STATUS__POISONED_PF__SHIFT 0x1f 5731 + #define UVD_RAS_JPEG1_STATUS__POISONED_VF_MASK 0x7FFFFFFFL 5732 + #define UVD_RAS_JPEG1_STATUS__POISONED_PF_MASK 0x80000000L 5717 5733 //UVD_SCRATCH15 5718 5734 #define UVD_SCRATCH15__SCRATCH15_DATA__SHIFT 0x0 5719 5735 #define UVD_SCRATCH15__SCRATCH15_DATA_MASK 0xFFFFFFFFL
+2 -2
drivers/gpu/drm/amd/include/atombios.h
··· 6017 6017 #define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01 6018 6018 #define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02 6019 6019 #define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08 6020 - #define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS 0x10 6020 + #define SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS 0x10 6021 6021 //ulGPUCapInfo[16]=1 indicate SMC firmware is able to support GNB fast resume function, so that driver can call SMC to program most of GNB register during resuming, from ML 6022 6022 #define SYS_INFO_GPUCAPS__GNB_FAST_RESUME_CAPABLE 0x00010000 6023 6023 ··· 6460 6460 6461 6461 // ulGPUCapInfo 6462 6462 #define SYS_INFO_V1_9_GPUCAPSINFO_DISABLE_AUX_MODE_DETECT 0x08 6463 - #define SYS_INFO_V1_9_GPUCAPSINFO_ENABEL_DFS_BYPASS 0x10 6463 + #define SYS_INFO_V1_9_GPUCAPSINFO_ENABLE_DFS_BYPASS 0x10 6464 6464 //ulGPUCapInfo[16]=1 indicate SMC firmware is able to support GNB fast resume function, so that driver can call SMC to program most of GNB register during resuming, from ML 6465 6465 #define SYS_INFO_V1_9_GPUCAPSINFO_GNB_FAST_RESUME_CAPABLE 0x00010000 6466 6466 //ulGPUCapInfo[18]=1 indicate the IOMMU is not available
+1 -1
drivers/gpu/drm/amd/include/atomfirmware.h
··· 1714 1714 1715 1715 // gpucapinfo 1716 1716 enum atom_system_gpucapinf_def{ 1717 - SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS = 0x10, 1717 + SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS = 0x10, 1718 1718 }; 1719 1719 1720 1720 //dpphy_override
+1
drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h
··· 48 48 #define GFX_11_0_0__SRCID__SDMA_SRAM_ECC 64 // 0x40 SRAM ECC Error 49 49 #define GFX_11_0_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 65 // 0x41 GPF(Sem incomplete timeout) 50 50 #define GFX_11_0_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 66 // 0x42 Semaphore wait fail timeout 51 + #define GFX_11_0_0__SRCID__SDMA_FENCE 67 // 0x43 User fence 51 52 52 53 #define GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT 128 // 0x80 FED Interrupt (for data poisoning) 53 54
+24
drivers/gpu/drm/amd/include/kgd_pp_interface.h
··· 494 494 int (*set_df_cstate)(void *handle, enum pp_df_cstate state); 495 495 int (*set_xgmi_pstate)(void *handle, uint32_t pstate); 496 496 ssize_t (*get_gpu_metrics)(void *handle, void **table); 497 + ssize_t (*get_xcp_metrics)(void *handle, int xcp_id, void *table); 497 498 ssize_t (*get_pm_metrics)(void *handle, void *pmmetrics, size_t size); 498 499 int (*set_watermarks_for_clock_ranges)(void *handle, 499 500 struct pp_smu_wm_range_sets *ranges); ··· 1591 1590 struct amdgpu_pmmetrics_header common_header; 1592 1591 1593 1592 uint8_t data[]; 1593 + }; 1594 + 1595 + struct amdgpu_partition_metrics_v1_0 { 1596 + struct metrics_table_header common_header; 1597 + /* Current clocks (Mhz) */ 1598 + uint16_t current_gfxclk[MAX_XCC]; 1599 + uint16_t current_socclk[MAX_CLKS]; 1600 + uint16_t current_vclk0[MAX_CLKS]; 1601 + uint16_t current_dclk0[MAX_CLKS]; 1602 + uint16_t current_uclk; 1603 + uint16_t padding; 1604 + 1605 + /* Utilization Instantaneous (%) */ 1606 + uint32_t gfx_busy_inst[MAX_XCC]; 1607 + uint16_t jpeg_busy[NUM_JPEG_ENG_V1]; 1608 + uint16_t vcn_busy[NUM_VCN]; 1609 + /* Utilization Accumulated (%) */ 1610 + uint64_t gfx_busy_acc[MAX_XCC]; 1611 + /* Total App Clock Counter Accumulated */ 1612 + uint64_t gfx_below_host_limit_ppt_acc[MAX_XCC]; 1613 + uint64_t gfx_below_host_limit_thm_acc[MAX_XCC]; 1614 + uint64_t gfx_low_utilization_acc[MAX_XCC]; 1615 + uint64_t gfx_below_host_limit_total_acc[MAX_XCC]; 1594 1616 }; 1595 1617 1596 1618 #endif
+32
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
··· 2019 2019 2020 2020 return ret; 2021 2021 } 2022 + 2023 + /** 2024 + * amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute 2025 + * partition 2026 + * @adev: Pointer to the device. 2027 + * @xcp_id: Identifier of the XCP for which metrics are to be retrieved. 2028 + * @table: Pointer to a buffer where the metrics will be stored. If NULL, the 2029 + * function returns the size of the metrics structure. 2030 + * 2031 + * This function retrieves metrics for a specific XCP, including details such as 2032 + * VCN/JPEG activity, clock frequencies, and other performance metrics. If the 2033 + * table parameter is NULL, the function returns the size of the metrics 2034 + * structure without populating it. 2035 + * 2036 + * Return: Size of the metrics structure on success, or a negative error code on failure. 2037 + */ 2038 + ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id, 2039 + void *table) 2040 + { 2041 + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 2042 + int ret = 0; 2043 + 2044 + if (!pp_funcs->get_xcp_metrics) 2045 + return 0; 2046 + 2047 + mutex_lock(&adev->pm.mutex); 2048 + ret = pp_funcs->get_xcp_metrics(adev->powerplay.pp_handle, xcp_id, 2049 + table); 2050 + mutex_unlock(&adev->pm.mutex); 2051 + 2052 + return ret; 2053 + }
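
Editor's note: the kernel-doc above encodes a call-twice convention, where a NULL table asks only for the size. A hedged consumer sketch; the wrapper name and error handling are illustrative, not driver code:

    static void *fetch_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
                                   ssize_t *out_size)
    {
            ssize_t size;
            void *buf;

            /* First call with a NULL table: returns the metrics structure size. */
            size = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, NULL);
            if (size <= 0)
                    return NULL;

            buf = kzalloc(size, GFP_KERNEL);
            if (!buf)
                    return NULL;

            /* Second call fills the buffer (or fails with a negative errno). */
            size = amdgpu_dpm_get_xcp_metrics(adev, xcp_id, buf);
            if (size < 0) {
                    kfree(buf);
                    return NULL;
            }

            *out_size = size;
            return buf;
    }
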
+2
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
··· 524 524 int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, 525 525 long *input, uint32_t size); 526 526 int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table); 527 + ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id, 528 + void *table); 527 529 528 530 /** 529 531 * @get_pm_metrics: Get one snapshot of power management metrics from PMFW. The
+1 -1
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
··· 2594 2594 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); 2595 2595 } 2596 2596 if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & 2597 - SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) 2597 + SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS) 2598 2598 pi->caps_enable_dfs_bypass = true; 2599 2599 2600 2600 sumo_construct_sclk_voltage_mapping_table(adev,
+1 -1
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
··· 394 394 } 395 395 396 396 if (le32_to_cpu(info->ulGPUCapInfo) & 397 - SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) { 397 + SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS) { 398 398 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 399 399 PHM_PlatformCaps_EnableDFSBypass); 400 400 }
+14
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 3758 3758 return ret; 3759 3759 } 3760 3760 3761 + static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table) 3762 + { 3763 + struct smu_context *smu = handle; 3764 + 3765 + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 3766 + return -EOPNOTSUPP; 3767 + 3768 + if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics) 3769 + return -EOPNOTSUPP; 3770 + 3771 + return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table); 3772 + } 3773 + 3761 3774 static const struct amd_pm_funcs swsmu_pm_funcs = { 3762 3775 /* export for sysfs */ 3763 3776 .set_fan_control_mode = smu_set_fan_control_mode, ··· 3829 3816 .get_uclk_dpm_states = smu_get_uclk_dpm_states, 3830 3817 .get_dpm_clock_table = smu_get_dpm_clock_table, 3831 3818 .get_smu_prv_buf_details = smu_get_prv_buffer_details, 3819 + .get_xcp_metrics = smu_sys_get_xcp_metrics, 3832 3820 }; 3833 3821 3834 3822 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+6
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
··· 1466 1466 */ 1467 1467 int (*set_wbrf_exclusion_ranges)(struct smu_context *smu, 1468 1468 struct freq_band_range *exclusion_ranges); 1469 + /** 1470 + * @get_xcp_metrics: Get a copy of the partition metrics table from SMU. 1471 + * Return: Size of table 1472 + */ 1473 + ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id, 1474 + void *table); 1469 1475 }; 1470 1476 1471 1477 typedef enum {
+3 -1
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
··· 127 127 VOLTAGE_GUARDBAND_COUNT 128 128 } GFX_GUARDBAND_e; 129 129 130 - #define SMU_METRICS_TABLE_VERSION 0x10 130 + #define SMU_METRICS_TABLE_VERSION 0x11 131 131 132 132 // Unified metrics table for smu_v13_0_6 133 133 typedef struct __attribute__((packed, aligned(4))) { ··· 463 463 typedef struct { 464 464 // Telemetry 465 465 uint32_t InputTelemetryVoltageInmV; 466 + // General info 467 + uint32_t pldmVersion[2]; 466 468 } StaticMetricsTable_t; 467 469 #pragma pack(pop) 468 470
+64 -7
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
··· 322 322 return ret; 323 323 } 324 324 325 - ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table) 325 + ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics) 326 + { 327 + const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW; 328 + struct amdgpu_partition_metrics_v1_0 *xcp_metrics; 329 + struct amdgpu_device *adev = smu->adev; 330 + MetricsTable_t *metrics; 331 + int inst, j, k, idx; 332 + u32 inst_mask; 333 + 334 + metrics = (MetricsTable_t *)smu_metrics; 335 + xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *) table; 336 + smu_cmn_init_partition_metrics(xcp_metrics, 1, 0); 337 + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); 338 + idx = 0; 339 + for_each_inst(k, inst_mask) { 340 + /* Both JPEG and VCN has same instance */ 341 + inst = GET_INST(VCN, k); 342 + for (j = 0; j < num_jpeg_rings; ++j) { 343 + xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] = 344 + SMUQ10_ROUND(metrics-> 345 + JpegBusy[(inst * num_jpeg_rings) + j]); 346 + } 347 + xcp_metrics->vcn_busy[idx] = 348 + SMUQ10_ROUND(metrics->VcnBusy[inst]); 349 + xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND( 350 + metrics->VclkFrequency[inst]); 351 + xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND( 352 + metrics->DclkFrequency[inst]); 353 + xcp_metrics->current_socclk[idx] = SMUQ10_ROUND( 354 + metrics->SocclkFrequency[inst]); 355 + 356 + idx++; 357 + } 358 + 359 + xcp_metrics->current_uclk = 360 + SMUQ10_ROUND(metrics->UclkFrequency); 361 + 362 + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); 363 + idx = 0; 364 + for_each_inst(k, inst_mask) { 365 + inst = GET_INST(GC, k); 366 + xcp_metrics->current_gfxclk[idx] = SMUQ10_ROUND(metrics->GfxclkFrequency[inst]); 367 + xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND(metrics->GfxBusy[inst]); 368 + xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND(metrics->GfxBusyAcc[inst]); 369 + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) { 370 + xcp_metrics->gfx_below_host_limit_ppt_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]); 371 + xcp_metrics->gfx_below_host_limit_thm_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]); 372 + xcp_metrics->gfx_low_utilization_acc[idx] = SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]); 373 + xcp_metrics->gfx_below_host_limit_total_acc[idx] = SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]); 374 + } 375 + idx++; 376 + } 377 + 378 + return sizeof(*xcp_metrics); 379 + } 380 + 381 + ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics) 326 382 { 327 383 struct smu_table_context *smu_table = &smu->smu_table; 328 384 struct gpu_metrics_v1_8 *gpu_metrics = ··· 390 334 struct amdgpu_xcp *xcp; 391 335 u32 inst_mask; 392 336 393 - metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL); 394 - memcpy(metrics, smu_table->metrics_table, sizeof(MetricsTable_t)); 337 + metrics = (MetricsTable_t *)smu_metrics; 395 338 396 339 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8); 397 340 ··· 471 416 gpu_metrics->mem_activity_acc = SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc); 472 417 473 418 for (i = 0; i < NUM_XGMI_LINKS; i++) { 474 - gpu_metrics->xgmi_read_data_acc[i] = 419 + j = amdgpu_xgmi_get_ext_link(adev, i); 420 + if (j < 0 || j >= NUM_XGMI_LINKS) 421 + continue; 422 + gpu_metrics->xgmi_read_data_acc[j] = 475 423 SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]); 476 - gpu_metrics->xgmi_write_data_acc[i] = 424 + gpu_metrics->xgmi_write_data_acc[j] = 477 425 
SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]); 478 426 ret = amdgpu_get_xgmi_link_status(adev, i); 479 427 if (ret >= 0) 480 - gpu_metrics->xgmi_link_status[i] = ret; 428 + gpu_metrics->xgmi_link_status[j] = ret; 481 429 } 482 430 483 431 gpu_metrics->num_partition = adev->xcp_mgr->num_xcps; ··· 532 474 gpu_metrics->firmware_timestamp = metrics->Timestamp; 533 475 534 476 *table = (void *)gpu_metrics; 535 - kfree(metrics); 536 477 537 478 return sizeof(*gpu_metrics); 538 479 }
+150 -8
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
··· 312 312 smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); 313 313 if (fw_ver >= 0x5551200) 314 314 smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); 315 + if (fw_ver >= 0x5551600) { 316 + smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS)); 317 + smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); 318 + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); 319 + } 315 320 } 316 321 317 322 static void smu_v13_0_12_init_caps(struct smu_context *smu)
··· 397 392 if ((pgm == 7 && fw_ver >= 0x7550E00) || 398 393 (pgm == 0 && fw_ver >= 0x00557E00)) 399 394 smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); 400 - if (fw_ver >= 0x00557F01) { 395 + if ((pgm == 0 && fw_ver >= 0x00557F01) || 396 + (pgm == 7 && fw_ver >= 0x7551000)) { 401 397 smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS)); 402 398 smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); 403 399 } 400 + if ((pgm == 0 && fw_ver >= 0x00558000) || 401 + (pgm == 7 && fw_ver >= 0x7551000)) 402 + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); 404 403 } 405 404 if (((pgm == 7) && (fw_ver >= 0x7550700)) || 406 405 ((pgm == 0) && (fw_ver >= 0x00557900)) ||
··· 761 752 } 762 753 763 754 dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV; 755 + 756 + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) && 757 + static_metrics->pldmVersion[0] != 0xFFFFFFFF) 758 + smu->adev->firmware.pldm_version = 759 + static_metrics->pldmVersion[0]; 764 760 } 765 761 766 762 int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu)
··· 2543 2529 return pcie_gen_to_speed(speed_level + 1); 2544 2530 } 2531 + 2532 + static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id, 2533 + void *table) 2534 + { 2535 + const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3; 2536 + int version = smu_v13_0_6_get_metrics_version(smu); 2537 + struct amdgpu_partition_metrics_v1_0 *xcp_metrics; 2538 + struct amdgpu_device *adev = smu->adev; 2539 + int ret, inst, i, j, k, idx; 2540 + MetricsTableV0_t *metrics_v0; 2541 + MetricsTableV1_t *metrics_v1; 2542 + MetricsTableV2_t *metrics_v2; 2543 + struct amdgpu_xcp *xcp; 2544 + u32 inst_mask; 2545 + bool per_inst; 2546 + 2547 + if (!table) 2548 + return sizeof(*xcp_metrics); 2549 + 2550 + for_each_xcp(adev->xcp_mgr, xcp, i) { 2551 + if (xcp->id == xcp_id) 2552 + break; 2553 + } 2554 + if (i == adev->xcp_mgr->num_xcps) 2555 + return -EINVAL; 2556 + 2557 + xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *)table; 2558 + smu_cmn_init_partition_metrics(xcp_metrics, 1, 0); 2559 + 2560 + metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); 2561 + if (!metrics_v0) 2562 + return -ENOMEM; 2563 + 2564 + ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false); 2565 + if (ret) { 2566 + kfree(metrics_v0); 2567 + return ret; 2568 + } 2569 + 2570 + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == 2571 + IP_VERSION(13, 0, 12) && 2572 + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { 2573 + ret = smu_v13_0_12_get_xcp_metrics(smu, xcp, table, metrics_v0); 2574 + goto out; 2575 + } 2576 + 2577 + metrics_v1 = (MetricsTableV1_t *)metrics_v0; 2578 + metrics_v2 = (MetricsTableV2_t *)metrics_v0; 2579 + 2580 + per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS)); 2581 + 2582 + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask); 2583 + idx = 0; 2584 + for_each_inst(k, inst_mask) { 2585 + /* Both JPEG and VCN has same instances */ 2586 + inst = GET_INST(VCN, k); 2587 + 2588 + for (j = 0; j < num_jpeg_rings; ++j) { 2589 + xcp_metrics->jpeg_busy[(idx * num_jpeg_rings) + j] = 2590 + SMUQ10_ROUND(GET_METRIC_FIELD( 2591 + JpegBusy, 2592 + version)[(inst * num_jpeg_rings) + j]); 2593 + } 2594 + xcp_metrics->vcn_busy[idx] = 2595 + SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]); 2596 + 2597 + xcp_metrics->current_vclk0[idx] = SMUQ10_ROUND( 2598 + GET_METRIC_FIELD(VclkFrequency, version)[inst]); 2599 + xcp_metrics->current_dclk0[idx] = SMUQ10_ROUND( 2600 + GET_METRIC_FIELD(DclkFrequency, version)[inst]); 2601 + xcp_metrics->current_socclk[idx] = SMUQ10_ROUND( 2602 + GET_METRIC_FIELD(SocclkFrequency, version)[inst]); 2603 + 2604 + idx++; 2605 + } 2606 + 2607 + xcp_metrics->current_uclk = 2608 + SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency, version)); 2609 + 2610 + if (per_inst) { 2611 + amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask); 2612 + idx = 0; 2613 + for_each_inst(k, inst_mask) { 2614 + inst = GET_INST(GC, k); 2615 + xcp_metrics->current_gfxclk[idx] = 2616 + SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, 2617 + version)[inst]); 2618 + 2619 + xcp_metrics->gfx_busy_inst[idx] = SMUQ10_ROUND( 2620 + GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]); 2621 + xcp_metrics->gfx_busy_acc[idx] = SMUQ10_ROUND( 2622 + GET_GPU_METRIC_FIELD(GfxBusyAcc, 2623 + version)[inst]); 2624 + if (smu_v13_0_6_cap_supported( 2625 + smu, SMU_CAP(HST_LIMIT_METRICS))) { 2626 + xcp_metrics->gfx_below_host_limit_ppt_acc 2627 + [idx] = SMUQ10_ROUND( 2628 + metrics_v0->GfxclkBelowHostLimitPptAcc 2629 + [inst]); 2630 + xcp_metrics->gfx_below_host_limit_thm_acc 2631 + [idx] = SMUQ10_ROUND( 2632 + metrics_v0->GfxclkBelowHostLimitThmAcc 2633 + [inst]); 2634 + xcp_metrics->gfx_low_utilization_acc 2635 + [idx] = SMUQ10_ROUND( 2636 + metrics_v0 2637 + ->GfxclkLowUtilizationAcc[inst]); 2638 + xcp_metrics->gfx_below_host_limit_total_acc 2639 + [idx] = SMUQ10_ROUND( 2640 + metrics_v0->GfxclkBelowHostLimitTotalAcc 2641 + [inst]); 2642 + } 2643 + idx++; 2644 + } 2645 + } 2646 + out: 2647 + kfree(metrics_v0); 2648 + 2649 + return sizeof(*xcp_metrics); 2650 + } 2651 + 2546 2652 static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table) 2547 2653 { 2548 2654 struct smu_table_context *smu_table = &smu->smu_table;
··· 2676 2542 MetricsTableV2_t *metrics_v2; 2677 2543 struct amdgpu_xcp *xcp; 2678 2544 u16 link_width_level; 2545 + ssize_t num_bytes; 2679 2546 u8 num_jpeg_rings; 2680 2547 u32 inst_mask; 2681 2548 bool per_inst;
··· 2689 2554 } 2690 2555 2691 2556 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && 2692 - smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) 2693 - return smu_v13_0_12_get_gpu_metrics(smu, table); 2557 + smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) { 2558 + num_bytes = smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0); 2559 + kfree(metrics_v0); 2560 + return num_bytes; 2561 + } 2694 2562 2695 2563 metrics_v1 = (MetricsTableV1_t *)metrics_v0; 2696 2564 metrics_v2 = (MetricsTableV2_t *)metrics_v0;
··· 2808 2670 SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc, version)); 2809 2671 2810 2672 for (i = 0; i < NUM_XGMI_LINKS; i++) { 2811 - gpu_metrics->xgmi_read_data_acc[i] = 2812 - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]); 2813 - gpu_metrics->xgmi_write_data_acc[i] = 2814 - SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]); 2673 + j = amdgpu_xgmi_get_ext_link(adev, i); 2674 + if (j < 0 || j >= NUM_XGMI_LINKS) 2675 + continue; 2676 + gpu_metrics->xgmi_read_data_acc[j] = SMUQ10_ROUND( 2677 + GET_METRIC_FIELD(XgmiReadDataSizeAcc, version)[i]); 2678 + gpu_metrics->xgmi_write_data_acc[j] = SMUQ10_ROUND( 2679 + GET_METRIC_FIELD(XgmiWriteDataSizeAcc, version)[i]); 2815 2680 ret = amdgpu_get_xgmi_link_status(adev, i); 2816 2681 if (ret >= 0) 2817 - gpu_metrics->xgmi_link_status[i] = ret; 2682 + gpu_metrics->xgmi_link_status[j] = ret; 2818 2683 } 2819 2684 2820 2685 gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
··· 3814 3673 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 3815 3674 .get_gpu_metrics = smu_v13_0_6_get_gpu_metrics, 3816 3675 .get_pm_metrics = smu_v13_0_6_get_pm_metrics, 3676 + .get_xcp_metrics = smu_v13_0_6_get_xcp_metrics, 3817 3677 .get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range, 3818 3678 .mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported, 3819 3679 .link_reset_is_support = smu_v13_0_6_is_link_reset_supported,
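The new smu_v13_0_6_get_xcp_metrics() above follows the common swsmu sizing convention visible in the hunk itself: a NULL table pointer returns the buffer size the caller must allocate. A minimal caller sketch under that assumption; the function is static, so real consumers reach it through the .get_xcp_metrics hook registered at the end of this file's diff, and example_read_partition_metrics() is a hypothetical name used only for illustration:

/* Hypothetical caller illustrating the NULL-probe sizing convention. */
static int example_read_partition_metrics(struct smu_context *smu, int xcp_id)
{
	ssize_t size = smu_v13_0_6_get_xcp_metrics(smu, xcp_id, NULL);
	void *buf = kzalloc(size, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* The second call fills an amdgpu_partition_metrics_v1_0 header
	 * plus per-instance VCN/JPEG busy and clock samples for the
	 * selected XCP. */
	size = smu_v13_0_6_get_xcp_metrics(smu, xcp_id, buf);

	kfree(buf);
	return size < 0 ? size : 0;
}

Also worth noting in the XGMI hunk: the accumulators are now written at the remapped index from amdgpu_xgmi_get_ext_link() rather than the raw link index, and out-of-range translations are skipped instead of landing in the wrong slot.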
+5 -1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
··· 67 67 SMU_CAP(STATIC_METRICS), 68 68 SMU_CAP(HST_LIMIT_METRICS), 69 69 SMU_CAP(BOARD_VOLTAGE), 70 + SMU_CAP(PLDM_VERSION), 70 71 SMU_CAP(ALL), 71 72 }; 72 73
··· 80 79 int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu); 81 80 int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, 82 81 MetricsMember_t member, uint32_t *value); 83 - ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table); 82 + ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics); 83 + ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, 84 + struct amdgpu_xcp *xcp, void *table, 85 + void *smu_metrics); 84 86 extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[]; 85 87 extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[]; 86 88 #endif
-67
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
··· 1051 1051 false); 1052 1052 } 1053 1053 1054 - void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) 1055 - { 1056 - struct metrics_table_header *header = (struct metrics_table_header *)table; 1057 - uint16_t structure_size; 1058 - 1059 - #define METRICS_VERSION(a, b) ((a << 16) | b) 1060 - 1061 - switch (METRICS_VERSION(frev, crev)) { 1062 - case METRICS_VERSION(1, 0): 1063 - structure_size = sizeof(struct gpu_metrics_v1_0); 1064 - break; 1065 - case METRICS_VERSION(1, 1): 1066 - structure_size = sizeof(struct gpu_metrics_v1_1); 1067 - break; 1068 - case METRICS_VERSION(1, 2): 1069 - structure_size = sizeof(struct gpu_metrics_v1_2); 1070 - break; 1071 - case METRICS_VERSION(1, 3): 1072 - structure_size = sizeof(struct gpu_metrics_v1_3); 1073 - break; 1074 - case METRICS_VERSION(1, 4): 1075 - structure_size = sizeof(struct gpu_metrics_v1_4); 1076 - break; 1077 - case METRICS_VERSION(1, 5): 1078 - structure_size = sizeof(struct gpu_metrics_v1_5); 1079 - break; 1080 - case METRICS_VERSION(1, 6): 1081 - structure_size = sizeof(struct gpu_metrics_v1_6); 1082 - break; 1083 - case METRICS_VERSION(1, 7): 1084 - structure_size = sizeof(struct gpu_metrics_v1_7); 1085 - break; 1086 - case METRICS_VERSION(1, 8): 1087 - structure_size = sizeof(struct gpu_metrics_v1_8); 1088 - break; 1089 - case METRICS_VERSION(2, 0): 1090 - structure_size = sizeof(struct gpu_metrics_v2_0); 1091 - break; 1092 - case METRICS_VERSION(2, 1): 1093 - structure_size = sizeof(struct gpu_metrics_v2_1); 1094 - break; 1095 - case METRICS_VERSION(2, 2): 1096 - structure_size = sizeof(struct gpu_metrics_v2_2); 1097 - break; 1098 - case METRICS_VERSION(2, 3): 1099 - structure_size = sizeof(struct gpu_metrics_v2_3); 1100 - break; 1101 - case METRICS_VERSION(2, 4): 1102 - structure_size = sizeof(struct gpu_metrics_v2_4); 1103 - break; 1104 - case METRICS_VERSION(3, 0): 1105 - structure_size = sizeof(struct gpu_metrics_v3_0); 1106 - break; 1107 - default: 1108 - return; 1109 - } 1110 - 1111 - #undef METRICS_VERSION 1112 - 1113 - memset(header, 0xFF, structure_size); 1114 - 1115 - header->format_revision = frev; 1116 - header->content_revision = crev; 1117 - header->structure_size = structure_size; 1118 - 1119 - } 1120 - 1121 1054 int smu_cmn_set_mp1_state(struct smu_context *smu, 1122 1055 enum pp_mp1_state mp1_state) 1123 1056 {
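For context on the deletion above: the removed helper resolved (frev, crev) to a structure size at run time, and an unknown pair fell through to the default branch and returned with the header untouched, so a version mismatch compiled cleanly and failed silently. The macro replacements added to smu_cmn.h in the next hunk move that mapping to compile time. A small sketch of the failure mode, with version numbers chosen purely for illustration:

/* Old helper: an unsupported (frev, crev) pair compiled but did nothing,
 * leaving format_revision and structure_size uninitialized. */
smu_cmn_init_soft_gpu_metrics(table, 9, 9);

/* After this change, the same call token-pastes the arguments into
 * struct gpu_metrics_v9_9, which does not exist, so the build fails
 * instead. */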
+24 -2
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
··· 40 40 #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8 41 41 #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9 42 42 43 + #define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \ 44 + do { \ 45 + typecheck(struct gpu_metrics_v##frev##_##crev, \ 46 + typeof(*(ptr))); \ 47 + struct metrics_table_header *header = \ 48 + (struct metrics_table_header *)(ptr); \ 49 + memset(header, 0xFF, sizeof(*(ptr))); \ 50 + header->format_revision = frev; \ 51 + header->content_revision = crev; \ 52 + header->structure_size = sizeof(*(ptr)); \ 53 + } while (0) 54 + 55 + #define smu_cmn_init_partition_metrics(ptr, frev, crev) \ 56 + do { \ 57 + typecheck(struct amdgpu_partition_metrics_v##frev##_##crev, \ 58 + typeof(*(ptr))); \ 59 + struct metrics_table_header *header = \ 60 + (struct metrics_table_header *)(ptr); \ 61 + memset(header, 0xFF, sizeof(*(ptr))); \ 62 + header->format_revision = frev; \ 63 + header->content_revision = crev; \ 64 + header->structure_size = sizeof(*(ptr)); \ 65 + } while (0) 66 + 43 67 extern const int link_speed[]; 44 68 45 69 /* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
··· 148 124 bool bypass_cache); 149 125 150 126 int smu_cmn_get_combo_pptable(struct smu_context *smu); 151 - 152 - void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev); 153 127 154 128 int smu_cmn_set_mp1_state(struct smu_context *smu, 155 129 enum pp_mp1_state mp1_state);
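A minimal usage sketch for the new macros, assuming a backend whose table is struct gpu_metrics_v1_8 (one of the versions the deleted smu_cmn.c switch handled) and the usual struct smu_table_context pointer:

struct gpu_metrics_v1_8 *gpu_metrics =
	(struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;

/* Expands to a typecheck() against struct gpu_metrics_v1_8, then fills
 * the whole table with 0xFF and stamps the header from
 * sizeof(*gpu_metrics). Passing, say, a gpu_metrics_v1_7 pointer with
 * (1, 8) now draws a compiler diagnostic instead of stamping the wrong
 * structure_size at run time. */
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);

smu_cmn_init_partition_metrics() works the same way; the new smu_v13_0_6_get_xcp_metrics() above calls it with (1, 0).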
+1 -1
drivers/gpu/drm/radeon/atombios.h
··· 5071 5071 #define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01 5072 5072 #define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02 5073 5073 #define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08 5074 - #define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS 0x10 5074 + #define SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS 0x10 5075 5075 5076 5076 /********************************************************************************************************************** 5077 5077 ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
+1 -1
drivers/gpu/drm/radeon/kv_dpm.c
··· 2329 2329 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); 2330 2330 } 2331 2331 if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & 2332 - SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) 2332 + SYS_INFO_GPUCAPS__ENABLE_DFS_BYPASS) 2333 2333 pi->caps_enable_dfs_bypass = true; 2334 2334 2335 2335 sumo_construct_sclk_voltage_mapping_table(rdev,