Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'amd-drm-next-6.10-2024-04-19' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.10-2024-04-19:

amdgpu:
- DC resource allocation logic updates
- DC IPS fixes
- DC YUV fixes
- DMCUB fixes
- DML2 fixes
- Devcoredump updates
- USB-C DSC fix
- Misc display code cleanups
- PSR fixes
- MES timeout fix
- RAS updates
- UAF fix in VA IOCTL
- Fix visible VRAM handling during faults
- Fix IP discovery handling during PCI rescans
- Misc code cleanups
- PSP 14 updates
- More runtime PM code rework
- SMU 14.0.2 support
- GPUVM page fault redirection to secondary IH rings for IH 6.x
- Suspend/resume fixes
- SR-IOV fixes

amdkfd:
- Fix eviction fence handling
- Fix leak in GPU memory allocation failure case
- DMABuf import handling fix

radeon:
- Silence UBSAN warnings related to flexible arrays

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240419224332.2938259-1-alexander.deucher@amd.com

+4594 -349
+1
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 1409 1409 bool amdgpu_device_supports_boco(struct drm_device *dev); 1410 1410 bool amdgpu_device_supports_smart_shift(struct drm_device *dev); 1411 1411 int amdgpu_device_supports_baco(struct drm_device *dev); 1412 + void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev); 1412 1413 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, 1413 1414 struct amdgpu_device *peer_adev); 1414 1415 int amdgpu_device_baco_enter(struct drm_device *dev);
+8 -15
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
··· 753 753 754 754 static int aca_bank_get_error_code(struct amdgpu_device *adev, struct aca_bank *bank) 755 755 { 756 - int error_code; 756 + struct amdgpu_aca *aca = &adev->aca; 757 + const struct aca_smu_funcs *smu_funcs = aca->smu_funcs; 757 758 758 - switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 759 - case IP_VERSION(13, 0, 6): 760 - if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600) { 761 - error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]); 762 - return error_code & 0xff; 763 - } 764 - break; 765 - default: 766 - break; 767 - } 759 + if (!smu_funcs || !smu_funcs->parse_error_code) 760 + return -EOPNOTSUPP; 768 761 769 - /* NOTE: the true error code is encoded in status.errorcode[0:7] */ 770 - error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]); 771 - 772 - return error_code & 0xff; 762 + return smu_funcs->parse_error_code(adev, bank); 773 763 } 774 764 775 765 int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size) ··· 770 780 return -EINVAL; 771 781 772 782 error_code = aca_bank_get_error_code(adev, bank); 783 + if (error_code < 0) 784 + return error_code; 785 + 773 786 for (i = 0; i < size; i++) { 774 787 if (err_codes[i] == error_code) 775 788 return 0;
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
··· 173 173 int (*set_debug_mode)(struct amdgpu_device *adev, bool enable); 174 174 int (*get_valid_aca_count)(struct amdgpu_device *adev, enum aca_smu_type type, u32 *count); 175 175 int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_smu_type type, int idx, struct aca_bank *bank); 176 + int (*parse_error_code)(struct amdgpu_device *adev, struct aca_bank *bank); 176 177 }; 177 178 178 179 struct amdgpu_aca {
+21 -14
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 1854 1854 err_bo_create: 1855 1855 amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id); 1856 1856 err_reserve_limit: 1857 + amdgpu_sync_free(&(*mem)->sync); 1857 1858 mutex_destroy(&(*mem)->lock); 1858 1859 if (gobj) 1859 1860 drm_gem_object_put(gobj); ··· 2901 2900 2902 2901 amdgpu_sync_create(&sync_obj); 2903 2902 2904 - /* Validate BOs and map them to GPUVM (update VM page tables). */ 2903 + /* Validate BOs managed by KFD */ 2905 2904 list_for_each_entry(mem, &process_info->kfd_bo_list, 2906 2905 validate_list) { 2907 2906 2908 2907 struct amdgpu_bo *bo = mem->bo; 2909 2908 uint32_t domain = mem->domain; 2910 - struct kfd_mem_attachment *attachment; 2911 2909 struct dma_resv_iter cursor; 2912 2910 struct dma_fence *fence; 2913 2911 ··· 2931 2931 goto validate_map_fail; 2932 2932 } 2933 2933 } 2934 + } 2935 + 2936 + if (failed_size) 2937 + pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size); 2938 + 2939 + /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO 2940 + * validations above would invalidate DMABuf imports again. 2941 + */ 2942 + ret = process_validate_vms(process_info, &exec.ticket); 2943 + if (ret) { 2944 + pr_debug("Validating VMs failed, ret: %d\n", ret); 2945 + goto validate_map_fail; 2946 + } 2947 + 2948 + /* Update mappings managed by KFD. */ 2949 + list_for_each_entry(mem, &process_info->kfd_bo_list, 2950 + validate_list) { 2951 + struct kfd_mem_attachment *attachment; 2952 + 2934 2953 list_for_each_entry(attachment, &mem->attachments, list) { 2935 2954 if (!attachment->is_mapped) 2936 2955 continue; ··· 2964 2945 goto validate_map_fail; 2965 2946 } 2966 2947 } 2967 - } 2968 - 2969 - if (failed_size) 2970 - pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size); 2971 - 2972 - /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO 2973 - * validations above would invalidate DMABuf imports again. 
2974 - */ 2975 - ret = process_validate_vms(process_info, &exec.ticket); 2976 - if (ret) { 2977 - pr_debug("Validating VMs failed, ret: %d\n", ret); 2978 - goto validate_map_fail; 2979 2948 } 2980 2949 2981 2950 /* Update mappings not managed by KFD */
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 819 819 820 820 p->bytes_moved += ctx.bytes_moved; 821 821 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && 822 - amdgpu_bo_in_cpu_visible_vram(bo)) 822 + amdgpu_res_cpu_visible(adev, bo->tbo.resource)) 823 823 p->bytes_moved_vis += ctx.bytes_moved; 824 824 825 825 if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
+5 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
··· 188 188 adev->vpe.feature_version, adev->vpe.fw_version); 189 189 190 190 drm_printf(p, "\nVBIOS Information\n"); 191 - drm_printf(p, "name: %s\n", ctx->name); 192 - drm_printf(p, "pn %s\n", ctx->vbios_pn); 193 - drm_printf(p, "version: %s\n", ctx->vbios_ver_str); 194 - drm_printf(p, "date: %s\n", ctx->date); 191 + drm_printf(p, "vbios name : %s\n", ctx->name); 192 + drm_printf(p, "vbios pn : %s\n", ctx->vbios_pn); 193 + drm_printf(p, "vbios version : %d\n", ctx->version); 194 + drm_printf(p, "vbios ver_str : %s\n", ctx->vbios_ver_str); 195 + drm_printf(p, "vbios date : %s\n", ctx->date); 195 196 } 196 197 197 198 static ssize_t
+81 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 350 350 return amdgpu_asic_supports_baco(adev); 351 351 } 352 352 353 + void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev) 354 + { 355 + struct drm_device *dev; 356 + int bamaco_support; 357 + 358 + dev = adev_to_drm(adev); 359 + 360 + adev->pm.rpm_mode = AMDGPU_RUNPM_NONE; 361 + bamaco_support = amdgpu_device_supports_baco(dev); 362 + 363 + switch (amdgpu_runtime_pm) { 364 + case 2: 365 + if (bamaco_support & MACO_SUPPORT) { 366 + adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; 367 + dev_info(adev->dev, "Forcing BAMACO for runtime pm\n"); 368 + } else if (bamaco_support == BACO_SUPPORT) { 369 + adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; 370 + dev_info(adev->dev, "Requested mode BAMACO not available,fallback to use BACO\n"); 371 + } 372 + break; 373 + case 1: 374 + if (bamaco_support & BACO_SUPPORT) { 375 + adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; 376 + dev_info(adev->dev, "Forcing BACO for runtime pm\n"); 377 + } 378 + break; 379 + case -1: 380 + case -2: 381 + if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */ 382 + adev->pm.rpm_mode = AMDGPU_RUNPM_PX; 383 + dev_info(adev->dev, "Using ATPX for runtime pm\n"); 384 + } else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */ 385 + adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO; 386 + dev_info(adev->dev, "Using BOCO for runtime pm\n"); 387 + } else { 388 + if (!bamaco_support) 389 + goto no_runtime_pm; 390 + 391 + switch (adev->asic_type) { 392 + case CHIP_VEGA20: 393 + case CHIP_ARCTURUS: 394 + /* BACO are not supported on vega20 and arctrus */ 395 + break; 396 + case CHIP_VEGA10: 397 + /* enable BACO as runpm mode if noretry=0 */ 398 + if (!adev->gmc.noretry) 399 + adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; 400 + break; 401 + default: 402 + /* enable BACO as runpm mode on CI+ */ 403 + adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; 404 + break; 405 + } 406 + 407 + if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) { 408 + if (bamaco_support & MACO_SUPPORT) { 409 + adev->pm.rpm_mode = 
AMDGPU_RUNPM_BAMACO; 410 + dev_info(adev->dev, "Using BAMACO for runtime pm\n"); 411 + } else { 412 + dev_info(adev->dev, "Using BACO for runtime pm\n"); 413 + } 414 + } 415 + } 416 + break; 417 + case 0: 418 + dev_info(adev->dev, "runtime pm is manually disabled\n"); 419 + break; 420 + default: 421 + break; 422 + } 423 + 424 + no_runtime_pm: 425 + if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) 426 + dev_info(adev->dev, "Runtime PM not available\n"); 427 + } 353 428 /** 354 429 * amdgpu_device_supports_smart_shift - Is the device dGPU with 355 430 * smart shift support ··· 1535 1460 1536 1461 /* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */ 1537 1462 if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR)) 1538 - DRM_WARN("System can't access extended configuration space,please check!!\n"); 1463 + DRM_WARN("System can't access extended configuration space, please check!!\n"); 1539 1464 1540 1465 /* skip if the bios has already enabled large BAR */ 1541 1466 if (adev->gmc.real_vram_size && ··· 5357 5282 /* Try reset handler method first */ 5358 5283 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, 5359 5284 reset_list); 5360 - amdgpu_reset_reg_dumps(tmp_adev); 5285 + 5286 + if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) 5287 + amdgpu_reset_reg_dumps(tmp_adev); 5361 5288 5362 5289 reset_context->reset_device_list = device_list_handle; 5363 5290 r = amdgpu_reset_perform_reset(tmp_adev, reset_context); ··· 5432 5355 5433 5356 vram_lost = amdgpu_device_check_vram_lost(tmp_adev); 5434 5357 5435 - amdgpu_coredump(tmp_adev, vram_lost, reset_context); 5358 + if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) 5359 + amdgpu_coredump(tmp_adev, vram_lost, reset_context); 5436 5360 5437 5361 if (vram_lost) { 5438 5362 DRM_INFO("VRAM is lost due to GPU reset!\n");
+8 -11
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
··· 255 255 uint64_t vram_size; 256 256 u32 msg; 257 257 int i, ret = 0; 258 - int ip_discovery_ver = 0; 259 258 260 259 /* It can take up to a second for IFWI init to complete on some dGPUs, 261 260 * but generally it should be in the 60-100ms range. Normally this starts ··· 264 265 * continue. 265 266 */ 266 267 267 - ip_discovery_ver = RREG32(mmIP_DISCOVERY_VERSION); 268 - if ((dev_is_removable(&adev->pdev->dev)) || 269 - (ip_discovery_ver == IP_DISCOVERY_V2) || 270 - (ip_discovery_ver == IP_DISCOVERY_V4)) { 271 - for (i = 0; i < 1000; i++) { 272 - msg = RREG32(mmMP0_SMN_C2PMSG_33); 273 - if (msg & 0x80000000) 274 - break; 275 - msleep(1); 276 - } 268 + for (i = 0; i < 1000; i++) { 269 + msg = RREG32(mmMP0_SMN_C2PMSG_33); 270 + if (msg & 0x80000000) 271 + break; 272 + usleep_range(1000, 1100); 277 273 } 274 + 278 275 vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; 279 276 280 277 if (vram_size) { ··· 1901 1906 break; 1902 1907 case IP_VERSION(14, 0, 0): 1903 1908 case IP_VERSION(14, 0, 1): 1909 + case IP_VERSION(14, 0, 2): 1910 + case IP_VERSION(14, 0, 3): 1904 1911 amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block); 1905 1912 break; 1906 1913 default:
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 2481 2481 2482 2482 /* Use a common context, just need to make sure full reset is done */ 2483 2483 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); 2484 + set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); 2484 2485 r = amdgpu_do_asic_reset(&device_list, &reset_context); 2485 2486 2486 2487 if (r) {
+1 -47
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 133 133 int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) 134 134 { 135 135 struct drm_device *dev; 136 - int bamaco_support = 0; 137 136 int r, acpi_status; 138 137 139 138 dev = adev_to_drm(adev); ··· 149 150 goto out; 150 151 } 151 152 152 - adev->pm.rpm_mode = AMDGPU_RUNPM_NONE; 153 - if (amdgpu_device_supports_px(dev) && 154 - (amdgpu_runtime_pm != 0)) { /* enable PX as runtime mode */ 155 - adev->pm.rpm_mode = AMDGPU_RUNPM_PX; 156 - dev_info(adev->dev, "Using ATPX for runtime pm\n"); 157 - } else if (amdgpu_device_supports_boco(dev) && 158 - (amdgpu_runtime_pm != 0)) { /* enable boco as runtime mode */ 159 - adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO; 160 - dev_info(adev->dev, "Using BOCO for runtime pm\n"); 161 - } else if (amdgpu_runtime_pm != 0) { 162 - bamaco_support = amdgpu_device_supports_baco(dev); 163 - 164 - if (!bamaco_support) 165 - goto no_runtime_pm; 166 - 167 - switch (adev->asic_type) { 168 - case CHIP_VEGA20: 169 - case CHIP_ARCTURUS: 170 - /* enable BACO as runpm mode if runpm=1 */ 171 - if (amdgpu_runtime_pm > 0) 172 - adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; 173 - break; 174 - case CHIP_VEGA10: 175 - /* enable BACO as runpm mode if noretry=0 */ 176 - if (!adev->gmc.noretry) 177 - adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; 178 - break; 179 - default: 180 - /* enable BACO as runpm mode on CI+ */ 181 - adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; 182 - break; 183 - } 184 - 185 - if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) { 186 - if (bamaco_support & MACO_SUPPORT) { 187 - adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; 188 - dev_info(adev->dev, "Using BAMACO for runtime pm\n"); 189 - } else { 190 - dev_info(adev->dev, "Using BACO for runtime pm\n"); 191 - } 192 - } 193 - } 194 - 195 - no_runtime_pm: 196 - if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) 197 - dev_info(adev->dev, "NO pm mode for runtime pm\n"); 153 + amdgpu_device_detect_runtime_pm_mode(adev); 198 154 199 155 /* Call ACPI methods: require modeset init 200 156 * but failure is 
not fatal
+11 -11
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 623 623 return r; 624 624 625 625 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && 626 - bo->tbo.resource->mem_type == TTM_PL_VRAM && 627 - amdgpu_bo_in_cpu_visible_vram(bo)) 626 + amdgpu_res_cpu_visible(adev, bo->tbo.resource)) 628 627 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 629 628 ctx.bytes_moved); 630 629 else ··· 1277 1278 void amdgpu_bo_get_memory(struct amdgpu_bo *bo, 1278 1279 struct amdgpu_mem_stats *stats) 1279 1280 { 1281 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 1282 + struct ttm_resource *res = bo->tbo.resource; 1280 1283 uint64_t size = amdgpu_bo_size(bo); 1281 1284 struct drm_gem_object *obj; 1282 1285 unsigned int domain; 1283 1286 bool shared; 1284 1287 1285 1288 /* Abort if the BO doesn't currently have a backing store */ 1286 - if (!bo->tbo.resource) 1289 + if (!res) 1287 1290 return; 1288 1291 1289 1292 obj = &bo->tbo.base; 1290 1293 shared = drm_gem_object_is_shared_for_memory_stats(obj); 1291 1294 1292 - domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); 1295 + domain = amdgpu_mem_type_to_domain(res->mem_type); 1293 1296 switch (domain) { 1294 1297 case AMDGPU_GEM_DOMAIN_VRAM: 1295 1298 stats->vram += size; 1296 - if (amdgpu_bo_in_cpu_visible_vram(bo)) 1299 + if (amdgpu_res_cpu_visible(adev, bo->tbo.resource)) 1297 1300 stats->visible_vram += size; 1298 1301 if (shared) 1299 1302 stats->vram_shared += size; ··· 1396 1395 /* Remember that this BO was accessed by the CPU */ 1397 1396 abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 1398 1397 1399 - if (bo->resource->mem_type != TTM_PL_VRAM) 1400 - return 0; 1401 - 1402 - if (amdgpu_bo_in_cpu_visible_vram(abo)) 1398 + if (amdgpu_res_cpu_visible(adev, bo->resource)) 1403 1399 return 0; 1404 1400 1405 1401 /* Can't move a pinned BO to visible VRAM */ ··· 1419 1421 1420 1422 /* this should never happen */ 1421 1423 if (bo->resource->mem_type == TTM_PL_VRAM && 1422 - !amdgpu_bo_in_cpu_visible_vram(abo)) 1424 + !amdgpu_res_cpu_visible(adev, 
bo->resource)) 1423 1425 return VM_FAULT_SIGBUS; 1424 1426 1425 1427 ttm_bo_move_to_lru_tail_unlocked(bo); ··· 1583 1585 */ 1584 1586 u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) 1585 1587 { 1588 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 1586 1589 struct dma_buf_attachment *attachment; 1587 1590 struct dma_buf *dma_buf; 1588 1591 const char *placement; ··· 1592 1593 1593 1594 if (dma_resv_trylock(bo->tbo.base.resv)) { 1594 1595 unsigned int domain; 1596 + 1595 1597 domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); 1596 1598 switch (domain) { 1597 1599 case AMDGPU_GEM_DOMAIN_VRAM: 1598 - if (amdgpu_bo_in_cpu_visible_vram(bo)) 1600 + if (amdgpu_res_cpu_visible(adev, bo->tbo.resource)) 1599 1601 placement = "VRAM VISIBLE"; 1600 1602 else 1601 1603 placement = "VRAM";
-22
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
··· 251 251 } 252 252 253 253 /** 254 - * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM 255 - */ 256 - static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo) 257 - { 258 - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 259 - struct amdgpu_res_cursor cursor; 260 - 261 - if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM) 262 - return false; 263 - 264 - amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor); 265 - while (cursor.remaining) { 266 - if (cursor.start < adev->gmc.visible_vram_size) 267 - return true; 268 - 269 - amdgpu_res_next(&cursor, cursor.size); 270 - } 271 - 272 - return false; 273 - } 274 - 275 - /** 276 254 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced 277 255 */ 278 256 static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
+15
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 2265 2265 } 2266 2266 } 2267 2267 2268 + if ((is_psp_fw_valid(psp->ipkeymgr_drv)) && 2269 + (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) { 2270 + ret = psp_bootloader_load_ipkeymgr_drv(psp); 2271 + if (ret) { 2272 + dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n"); 2273 + return ret; 2274 + } 2275 + } 2276 + 2268 2277 if ((is_psp_fw_valid(psp->sos)) && 2269 2278 (psp->funcs->bootloader_load_sos != NULL)) { 2270 2279 ret = psp_bootloader_load_sos(psp); ··· 3288 3279 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version); 3289 3280 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3290 3281 psp->ras_drv.start_addr = ucode_start_addr; 3282 + break; 3283 + case PSP_FW_TYPE_PSP_IPKEYMGR_DRV: 3284 + psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version); 3285 + psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version); 3286 + psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes); 3287 + psp->ipkeymgr_drv.start_addr = ucode_start_addr; 3291 3288 break; 3292 3289 default: 3293 3290 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
+8 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
··· 73 73 PSP_BL__LOAD_KEY_DATABASE = 0x80000, 74 74 PSP_BL__LOAD_SOCDRV = 0xB0000, 75 75 PSP_BL__LOAD_DBGDRV = 0xC0000, 76 + PSP_BL__LOAD_HADDRV = PSP_BL__LOAD_DBGDRV, 76 77 PSP_BL__LOAD_INTFDRV = 0xD0000, 77 - PSP_BL__LOAD_RASDRV = 0xE0000, 78 + PSP_BL__LOAD_RASDRV = 0xE0000, 79 + PSP_BL__LOAD_IPKEYMGRDRV = 0xF0000, 78 80 PSP_BL__DRAM_LONG_TRAIN = 0x100000, 79 81 PSP_BL__DRAM_SHORT_TRAIN = 0x200000, 80 82 PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000, ··· 119 117 int (*bootloader_load_intf_drv)(struct psp_context *psp); 120 118 int (*bootloader_load_dbg_drv)(struct psp_context *psp); 121 119 int (*bootloader_load_ras_drv)(struct psp_context *psp); 120 + int (*bootloader_load_ipkeymgr_drv)(struct psp_context *psp); 122 121 int (*bootloader_load_sos)(struct psp_context *psp); 123 122 int (*ring_create)(struct psp_context *psp, 124 123 enum psp_ring_type ring_type); ··· 339 336 struct psp_bin_desc intf_drv; 340 337 struct psp_bin_desc dbg_drv; 341 338 struct psp_bin_desc ras_drv; 339 + struct psp_bin_desc ipkeymgr_drv; 342 340 343 341 /* tmr buffer */ 344 342 struct amdgpu_bo *tmr_bo; ··· 428 424 #define psp_bootloader_load_ras_drv(psp) \ 429 425 ((psp)->funcs->bootloader_load_ras_drv ? \ 430 426 (psp)->funcs->bootloader_load_ras_drv((psp)) : 0) 427 + #define psp_bootloader_load_ipkeymgr_drv(psp) \ 428 + ((psp)->funcs->bootloader_load_ipkeymgr_drv ? \ 429 + (psp)->funcs->bootloader_load_ipkeymgr_drv((psp)) : 0) 431 430 #define psp_bootloader_load_sos(psp) \ 432 431 ((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0) 433 432 #define psp_smu_reload_quirk(psp) \
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
··· 32 32 33 33 AMDGPU_NEED_FULL_RESET = 0, 34 34 AMDGPU_SKIP_HW_RESET = 1, 35 + AMDGPU_SKIP_COREDUMP = 2, 35 36 }; 36 37 37 38 struct amdgpu_reset_context {
+44 -29
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 133 133 134 134 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && 135 135 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && 136 - amdgpu_bo_in_cpu_visible_vram(abo)) { 136 + amdgpu_res_cpu_visible(adev, bo->resource)) { 137 137 138 138 /* Try evicting to the CPU inaccessible part of VRAM 139 139 * first, but only set GTT as busy placement, so this ··· 403 403 return r; 404 404 } 405 405 406 + /** 407 + * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU 408 + * @adev: amdgpu device 409 + * @res: the resource to check 410 + * 411 + * Returns: true if the full resource is CPU visible, false otherwise. 412 + */ 413 + bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, 414 + struct ttm_resource *res) 415 + { 416 + struct amdgpu_res_cursor cursor; 417 + 418 + if (!res) 419 + return false; 420 + 421 + if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT || 422 + res->mem_type == AMDGPU_PL_PREEMPT) 423 + return true; 424 + 425 + if (res->mem_type != TTM_PL_VRAM) 426 + return false; 427 + 428 + amdgpu_res_first(res, 0, res->size, &cursor); 429 + while (cursor.remaining) { 430 + if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size) 431 + return false; 432 + amdgpu_res_next(&cursor, cursor.size); 433 + } 434 + 435 + return true; 436 + } 437 + 406 438 /* 407 - * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy 439 + * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy 408 440 * 409 441 * Called by amdgpu_bo_move() 410 442 */ 411 - static bool amdgpu_mem_visible(struct amdgpu_device *adev, 412 - struct ttm_resource *mem) 443 + static bool amdgpu_res_copyable(struct amdgpu_device *adev, 444 + struct ttm_resource *mem) 413 445 { 414 - u64 mem_size = (u64)mem->size; 415 - struct amdgpu_res_cursor cursor; 416 - u64 end; 417 - 418 - if (mem->mem_type == TTM_PL_SYSTEM || 419 - mem->mem_type == TTM_PL_TT) 420 - return true; 421 - if (mem->mem_type != TTM_PL_VRAM) 446 + 
if (!amdgpu_res_cpu_visible(adev, mem)) 422 447 return false; 423 448 424 - amdgpu_res_first(mem, 0, mem_size, &cursor); 425 - end = cursor.start + cursor.size; 426 - while (cursor.remaining) { 427 - amdgpu_res_next(&cursor, cursor.size); 449 + /* ttm_resource_ioremap only supports contiguous memory */ 450 + if (mem->mem_type == TTM_PL_VRAM && 451 + !(mem->placement & TTM_PL_FLAG_CONTIGUOUS)) 452 + return false; 428 453 429 - if (!cursor.remaining) 430 - break; 431 - 432 - /* ttm_resource_ioremap only supports contiguous memory */ 433 - if (end != cursor.start) 434 - return false; 435 - 436 - end = cursor.start + cursor.size; 437 - } 438 - 439 - return end <= adev->gmc.visible_vram_size; 454 + return true; 440 455 } 441 456 442 457 /* ··· 544 529 545 530 if (r) { 546 531 /* Check that all memory is CPU accessible */ 547 - if (!amdgpu_mem_visible(adev, old_mem) || 548 - !amdgpu_mem_visible(adev, new_mem)) { 532 + if (!amdgpu_res_copyable(adev, old_mem) || 533 + !amdgpu_res_copyable(adev, new_mem)) { 549 534 pr_err("Move buffer fallback to memcpy unavailable\n"); 550 535 return r; 551 536 }
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
··· 139 139 int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, 140 140 uint64_t start); 141 141 142 + bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, 143 + struct ttm_resource *res); 144 + 142 145 int amdgpu_ttm_init(struct amdgpu_device *adev); 143 146 void amdgpu_ttm_fini(struct amdgpu_device *adev); 144 147 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
··· 125 125 PSP_FW_TYPE_PSP_INTF_DRV, 126 126 PSP_FW_TYPE_PSP_DBG_DRV, 127 127 PSP_FW_TYPE_PSP_RAS_DRV, 128 + PSP_FW_TYPE_PSP_IPKEYMGR_DRV, 128 129 PSP_FW_TYPE_MAX_INDEX, 129 130 }; 130 131
+46 -26
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 1647 1647 trace_amdgpu_vm_bo_map(bo_va, mapping); 1648 1648 } 1649 1649 1650 + /* Validate operation parameters to prevent potential abuse */ 1651 + static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev, 1652 + struct amdgpu_bo *bo, 1653 + uint64_t saddr, 1654 + uint64_t offset, 1655 + uint64_t size) 1656 + { 1657 + uint64_t tmp, lpfn; 1658 + 1659 + if (saddr & AMDGPU_GPU_PAGE_MASK 1660 + || offset & AMDGPU_GPU_PAGE_MASK 1661 + || size & AMDGPU_GPU_PAGE_MASK) 1662 + return -EINVAL; 1663 + 1664 + if (check_add_overflow(saddr, size, &tmp) 1665 + || check_add_overflow(offset, size, &tmp) 1666 + || size == 0 /* which also leads to end < begin */) 1667 + return -EINVAL; 1668 + 1669 + /* make sure object fit at this offset */ 1670 + if (bo && offset + size > amdgpu_bo_size(bo)) 1671 + return -EINVAL; 1672 + 1673 + /* Ensure last pfn not exceed max_pfn */ 1674 + lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT; 1675 + if (lpfn >= adev->vm_manager.max_pfn) 1676 + return -EINVAL; 1677 + 1678 + return 0; 1679 + } 1680 + 1650 1681 /** 1651 1682 * amdgpu_vm_bo_map - map bo inside a vm 1652 1683 * ··· 1704 1673 struct amdgpu_bo *bo = bo_va->base.bo; 1705 1674 struct amdgpu_vm *vm = bo_va->base.vm; 1706 1675 uint64_t eaddr; 1676 + int r; 1707 1677 1708 - /* validate the parameters */ 1709 - if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) 1710 - return -EINVAL; 1711 - if (saddr + size <= saddr || offset + size <= offset) 1712 - return -EINVAL; 1713 - 1714 - /* make sure object fit at this offset */ 1715 - eaddr = saddr + size - 1; 1716 - if ((bo && offset + size > amdgpu_bo_size(bo)) || 1717 - (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) 1718 - return -EINVAL; 1678 + r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size); 1679 + if (r) 1680 + return r; 1719 1681 1720 1682 saddr /= AMDGPU_GPU_PAGE_SIZE; 1721 - eaddr /= AMDGPU_GPU_PAGE_SIZE; 1683 + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; 1722 1684 1723 1685 
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); 1724 1686 if (tmp) { ··· 1764 1740 uint64_t eaddr; 1765 1741 int r; 1766 1742 1767 - /* validate the parameters */ 1768 - if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) 1769 - return -EINVAL; 1770 - if (saddr + size <= saddr || offset + size <= offset) 1771 - return -EINVAL; 1772 - 1773 - /* make sure object fit at this offset */ 1774 - eaddr = saddr + size - 1; 1775 - if ((bo && offset + size > amdgpu_bo_size(bo)) || 1776 - (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) 1777 - return -EINVAL; 1743 + r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size); 1744 + if (r) 1745 + return r; 1778 1746 1779 1747 /* Allocate all the needed memory */ 1780 1748 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); ··· 1780 1764 } 1781 1765 1782 1766 saddr /= AMDGPU_GPU_PAGE_SIZE; 1783 - eaddr /= AMDGPU_GPU_PAGE_SIZE; 1767 + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; 1784 1768 1785 1769 mapping->start = saddr; 1786 1770 mapping->last = eaddr; ··· 1867 1851 struct amdgpu_bo_va_mapping *before, *after, *tmp, *next; 1868 1852 LIST_HEAD(removed); 1869 1853 uint64_t eaddr; 1854 + int r; 1870 1855 1871 - eaddr = saddr + size - 1; 1856 + r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size); 1857 + if (r) 1858 + return r; 1859 + 1872 1860 saddr /= AMDGPU_GPU_PAGE_SIZE; 1873 - eaddr /= AMDGPU_GPU_PAGE_SIZE; 1861 + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; 1874 1862 1875 1863 /* Allocate all the needed memory */ 1876 1864 before = kzalloc(sizeof(*before), GFP_KERNEL);
+4 -2
drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
··· 630 630 631 631 int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) 632 632 { 633 - u32 mask, inst_mask = adev->sdma.sdma_mask; 633 + u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask; 634 634 int ret, i; 635 635 636 636 /* generally 1 AID supports 4 instances */ ··· 642 642 643 643 for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask; 644 644 inst_mask >>= adev->sdma.num_inst_per_aid, ++i) { 645 - if ((inst_mask & mask) == mask) 645 + avail_inst = inst_mask & mask; 646 + if (avail_inst == mask || avail_inst == 0x3 || 647 + avail_inst == 0xc) 646 648 adev->aid_mask |= (1 << i); 647 649 } 648 650
+5 -10
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
··· 4506 4506 4507 4507 gfx_v11_0_set_safe_mode(adev, 0); 4508 4508 4509 + mutex_lock(&adev->srbm_mutex); 4509 4510 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 4510 4511 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 4511 4512 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 4512 - tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); 4513 - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i); 4514 - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j); 4515 - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k); 4516 - WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); 4513 + soc21_grbm_select(adev, i, k, j, 0); 4517 4514 4518 4515 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2); 4519 4516 WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1); ··· 4520 4523 for (i = 0; i < adev->gfx.me.num_me; ++i) { 4521 4524 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { 4522 4525 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { 4523 - tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); 4524 - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i); 4525 - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j); 4526 - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k); 4527 - WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); 4526 + soc21_grbm_select(adev, i, k, j, 0); 4528 4527 4529 4528 WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1); 4530 4529 } 4531 4530 } 4532 4531 } 4532 + soc21_grbm_select(adev, 0, 0, 0, 0); 4533 + mutex_unlock(&adev->srbm_mutex); 4533 4534 4534 4535 /* Try to acquire the gfx mutex before access to CP_VMID_RESET */ 4535 4536 r = gfx_v11_0_request_gfx_index_mutex(adev, 1);
+24 -2
drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
··· 346 346 DELAY, 3); 347 347 WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp); 348 348 349 + /* Redirect the interrupts to IH RB1 for dGPU */ 350 + if (adev->irq.ih1.ring_size) { 351 + tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX); 352 + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0); 353 + WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp); 354 + 355 + tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA); 356 + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa); 357 + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0); 358 + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, 359 + SOURCE_ID_MATCH_ENABLE, 0x1); 360 + 361 + WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp); 362 + } 363 + 349 364 pci_set_master(adev->pdev); 350 365 351 366 /* enable interrupts */ ··· 564 549 adev->irq.ih.use_doorbell = true; 565 550 adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; 566 551 567 - adev->irq.ih1.ring_size = 0; 568 - adev->irq.ih2.ring_size = 0; 552 + if (!(adev->flags & AMD_IS_APU)) { 553 + r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE, 554 + use_bus_addr); 555 + if (r) 556 + return r; 557 + 558 + adev->irq.ih1.use_doorbell = true; 559 + adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; 560 + } 569 561 570 562 /* initialize ih control register offset */ 571 563 ih_v6_0_init_register_offset(adev);
+24 -2
drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
··· 346 346 DELAY, 3); 347 347 WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp); 348 348 349 + /* Redirect the interrupts to IH RB1 for dGPU */ 350 + if (adev->irq.ih1.ring_size) { 351 + tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX); 352 + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0); 353 + WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp); 354 + 355 + tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA); 356 + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa); 357 + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0); 358 + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, 359 + SOURCE_ID_MATCH_ENABLE, 0x1); 360 + 361 + WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp); 362 + } 363 + 349 364 pci_set_master(adev->pdev); 350 365 351 366 /* enable interrupts */ ··· 565 550 adev->irq.ih.use_doorbell = true; 566 551 adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; 567 552 568 - adev->irq.ih1.ring_size = 0; 569 - adev->irq.ih2.ring_size = 0; 553 + if (!(adev->flags & AMD_IS_APU)) { 554 + r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE, 555 + use_bus_addr); 556 + if (r) 557 + return r; 558 + 559 + adev->irq.ih1.use_doorbell = true; 560 + adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; 561 + } 570 562 571 563 /* initialize ih control register offset */ 572 564 ih_v6_1_init_register_offset(adev);
+1 -1
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
··· 111 111 struct amdgpu_device *adev = mes->adev; 112 112 struct amdgpu_ring *ring = &mes->ring; 113 113 unsigned long flags; 114 - signed long timeout = adev->usec_timeout; 114 + signed long timeout = 3000000; /* 3000 ms */ 115 115 116 116 if (amdgpu_emu_mode) { 117 117 timeout *= 100;
-2
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
··· 446 446 amdgpu_virt_fini_data_exchange(adev); 447 447 xgpu_nv_send_access_requests_with_param(adev, 448 448 IDH_RAS_POISON, block, 0, 0); 449 - if (block != AMDGPU_RAS_BLOCK__SDMA) 450 - amdgpu_virt_init_data_exchange(adev); 451 449 } 452 450 } 453 451
+7 -1
drivers/gpu/drm/amd/amdgpu/psp_v14_0.c
··· 169 169 170 170 static int psp_v14_0_bootloader_load_dbg_drv(struct psp_context *psp) 171 171 { 172 - return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV); 172 + /* dbg_drv was renamed to had_drv in psp v14 */ 173 + return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_HADDRV); 173 174 } 174 175 175 176 static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp) ··· 178 177 return psp_v14_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV); 179 178 } 180 179 180 + static int psp_v14_0_bootloader_load_ipkeymgr_drv(struct psp_context *psp) 181 + { 182 + return psp_v14_0_bootloader_load_component(psp, &psp->ipkeymgr_drv, PSP_BL__LOAD_IPKEYMGRDRV); 183 + } 181 184 182 185 static int psp_v14_0_bootloader_load_sos(struct psp_context *psp) 183 186 { ··· 658 653 .bootloader_load_intf_drv = psp_v14_0_bootloader_load_intf_drv, 659 654 .bootloader_load_dbg_drv = psp_v14_0_bootloader_load_dbg_drv, 660 655 .bootloader_load_ras_drv = psp_v14_0_bootloader_load_ras_drv, 656 + .bootloader_load_ipkeymgr_drv = psp_v14_0_bootloader_load_ipkeymgr_drv, 661 657 .bootloader_load_sos = psp_v14_0_bootloader_load_sos, 662 658 .ring_create = psp_v14_0_ring_create, 663 659 .ring_stop = psp_v14_0_ring_stop,
+8 -19
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
··· 144 144 uint16_t pasid, uint16_t client_id) 145 145 { 146 146 enum amdgpu_ras_block block = 0; 147 - int old_poison, ret = -EINVAL; 147 + int old_poison; 148 148 uint32_t reset = 0; 149 149 struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); 150 150 ··· 163 163 case SOC15_IH_CLIENTID_SE2SH: 164 164 case SOC15_IH_CLIENTID_SE3SH: 165 165 case SOC15_IH_CLIENTID_UTCL2: 166 - ret = kfd_dqm_evict_pasid(dev->dqm, pasid); 167 166 block = AMDGPU_RAS_BLOCK__GFX; 168 - if (ret) 169 - reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; 167 + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; 170 168 break; 171 169 case SOC15_IH_CLIENTID_VMC: 172 170 case SOC15_IH_CLIENTID_VMC1: 173 - ret = kfd_dqm_evict_pasid(dev->dqm, pasid); 174 171 block = AMDGPU_RAS_BLOCK__MMHUB; 175 - if (ret) 176 - reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; 172 + reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; 177 173 break; 178 174 case SOC15_IH_CLIENTID_SDMA0: 179 175 case SOC15_IH_CLIENTID_SDMA1: ··· 180 184 reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; 181 185 break; 182 186 default: 183 - break; 187 + dev_warn(dev->adev->dev, 188 + "client %d does not support poison consumption\n", client_id); 189 + return; 184 190 } 185 191 186 192 kfd_signal_poison_consumed_event(dev, pasid); 187 193 188 - /* resetting queue passes, do page retirement without gpu reset 189 - * resetting queue fails, fallback to gpu reset solution 190 - */ 191 - if (!ret) 192 - dev_warn(dev->adev->dev, 193 - "RAS poison consumption, unmap queue flow succeeded: client id %d\n", 194 - client_id); 195 - else 196 - dev_warn(dev->adev->dev, 197 - "RAS poison consumption, fall back to gpu reset flow: client id %d\n", 198 - client_id); 194 + dev_warn(dev->adev->dev, 195 + "poison is consumed by client %d, kick off gpu reset flow\n", client_id); 199 196 200 197 amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset); 201 198 }
+5 -4
drivers/gpu/drm/amd/amdkfd/kfd_process.c
··· 1922 1922 rcu_read_lock(); 1923 1923 ef = dma_fence_get_rcu_safe(&p->ef); 1924 1924 rcu_read_unlock(); 1925 + if (!ef) 1926 + return -EINVAL; 1925 1927 1926 1928 ret = dma_fence_signal(ef); 1927 1929 dma_fence_put(ef); ··· 1951 1949 * they are responsible stopping the queues and scheduling 1952 1950 * the restore work. 1953 1951 */ 1954 - if (!signal_eviction_fence(p)) 1955 - queue_delayed_work(kfd_restore_wq, &p->restore_work, 1956 - msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)); 1957 - else 1952 + if (signal_eviction_fence(p) || 1953 + mod_delayed_work(kfd_restore_wq, &p->restore_work, 1954 + msecs_to_jiffies(PROCESS_RESTORE_TIME_MS))) 1958 1955 kfd_process_restore_queues(p); 1959 1956 1960 1957 pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
+10
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 1230 1230 break; 1231 1231 } 1232 1232 1233 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1234 + case IP_VERSION(3, 5, 0): 1235 + case IP_VERSION(3, 5, 1): 1236 + hw_params.ips_sequential_ono = adev->external_rev_id > 0x10; 1237 + break; 1238 + default: 1239 + break; 1240 + } 1241 + 1233 1242 status = dmub_srv_hw_init(dmub_srv, &hw_params); 1234 1243 if (status != DMUB_STATUS_OK) { 1235 1244 DRM_ERROR("Error initializing DMUB HW: %d\n", status); ··· 3046 3037 dc_stream_release(dm_new_crtc_state->stream); 3047 3038 dm_new_crtc_state->stream = NULL; 3048 3039 } 3040 + dm_new_crtc_state->base.color_mgmt_changed = true; 3049 3041 } 3050 3042 3051 3043 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
+1 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
··· 272 272 dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); 273 273 return &clk_mgr->base; 274 274 } 275 - if (asic_id.chip_id == DEVICE_ID_NV_13FE) { 275 + if (ctx->dce_version == DCN_VERSION_2_01) { 276 276 dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); 277 277 return &clk_mgr->base; 278 278 }
+2 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
··· 29 29 #include "dcn20/dcn20_clk_mgr.h" 30 30 #include "dce100/dce_clk_mgr.h" 31 31 #include "dcn31/dcn31_clk_mgr.h" 32 + #include "dcn32/dcn32_clk_mgr.h" 32 33 #include "reg_helper.h" 33 34 #include "core_types.h" 34 35 #include "dm_helpers.h" ··· 830 829 dmcu->funcs->set_psr_wait_loop(dmcu, 831 830 clk_mgr_base->clks.dispclk_khz / 1000 / 7); 832 831 833 - if (dc->config.enable_auto_dpm_test_logs && safe_to_lower) { 832 + if (dc->config.enable_auto_dpm_test_logs) { 834 833 dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context); 835 834 } 836 835 }
+7 -1
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 3446 3446 if (srf_updates[i].surface->flip_immediate) 3447 3447 continue; 3448 3448 3449 + update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; 3449 3450 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; 3450 3451 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, 3451 3452 sizeof(flip_addr->dirty_rects)); ··· 5043 5042 void dc_power_down_on_boot(struct dc *dc) 5044 5043 { 5045 5044 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && 5046 - dc->hwss.power_down_on_boot) 5045 + dc->hwss.power_down_on_boot) { 5046 + 5047 + if (dc->caps.ips_support) 5048 + dc_exit_ips_for_hw_access(dc); 5049 + 5047 5050 dc->hwss.power_down_on_boot(dc); 5051 + } 5048 5052 } 5049 5053 5050 5054 void dc_set_power_state(
+2 -3
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 1500 1500 return false; 1501 1501 } 1502 1502 1503 - pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( 1504 - pipe_ctx->plane_state->format); 1505 - 1506 1503 /* Timing borders are part of vactive that we are also supposed to skip in addition 1507 1504 * to any stream dst offset. Since dm logic assumes dst is in addressable 1508 1505 * space we need to add the left and top borders to dst offsets temporarily. ··· 1511 1514 /* Calculate H and V active size */ 1512 1515 pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width; 1513 1516 pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height; 1517 + pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( 1518 + pipe_ctx->plane_state->format); 1514 1519 1515 1520 /* depends on h_active */ 1516 1521 calculate_recout(pipe_ctx);
+5 -5
drivers/gpu/drm/amd/display/dc/core/dc_state.c
··· 191 191 struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params) 192 192 { 193 193 #ifdef CONFIG_DRM_AMD_DC_FP 194 - struct dml2_configuration_options dml2_opt = dc->dml2_options; 194 + struct dml2_configuration_options *dml2_opt = &dc->dml2_options; 195 195 #endif 196 196 struct dc_state *state = kvzalloc(sizeof(struct dc_state), 197 197 GFP_KERNEL); ··· 205 205 206 206 #ifdef CONFIG_DRM_AMD_DC_FP 207 207 if (dc->debug.using_dml2) { 208 - dml2_opt.use_clock_dc_limits = false; 209 - dml2_create(dc, &dml2_opt, &state->bw_ctx.dml2); 208 + dml2_opt->use_clock_dc_limits = false; 209 + dml2_create(dc, dml2_opt, &state->bw_ctx.dml2); 210 210 211 - dml2_opt.use_clock_dc_limits = true; 212 - dml2_create(dc, &dml2_opt, &state->bw_ctx.dml2_dc_power_source); 211 + dml2_opt->use_clock_dc_limits = true; 212 + dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source); 213 213 } 214 214 #endif 215 215
+1 -1
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 495 495 struct dc_stream_state *stream, 496 496 uint32_t dwb_pipe_inst) 497 497 { 498 - int i = 0, j = 0; 498 + unsigned int i, j; 499 499 if (stream == NULL) { 500 500 dm_error("DC: dc_stream is NULL!\n"); 501 501 return false;
+9 -7
drivers/gpu/drm/amd/display/dc/dc.h
··· 53 53 struct set_config_cmd_payload; 54 54 struct dmub_notification; 55 55 56 - #define DC_VER "3.2.280" 56 + #define DC_VER "3.2.281" 57 57 58 58 #define MAX_SURFACES 3 59 59 #define MAX_PLANES 6 ··· 309 309 unsigned int max_compressed_blk_size; 310 310 unsigned int max_uncompressed_blk_size; 311 311 bool independent_64b_blks; 312 - //These bitfields to be used starting with DCN 312 + //These bitfields to be used starting with DCN 3.0 313 313 struct { 314 - uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN (the worst compression case) 315 - uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 316 - uint32_t dcc_256_128_128 : 1; //available starting with DCN 317 - uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN (the best compression case) 314 + uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case) 315 + uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0 316 + uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0 317 + uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case) 318 318 } dcc_controls; 319 319 }; 320 320 ··· 1003 1003 unsigned int static_screen_wait_frames; 1004 1004 bool force_chroma_subsampling_1tap; 1005 1005 bool disable_422_left_edge_pixel; 1006 + unsigned int force_cositing; 1006 1007 }; 1007 1008 1008 - struct gpu_info_soc_bounding_box_v1_0; 1009 1009 1010 1010 /* Generic structure that can be used to query properties of DC. More fields 1011 1011 * can be added as required. ··· 1285 1285 struct tg_color visual_confirm_color; 1286 1286 1287 1287 bool is_statically_allocated; 1288 + enum chroma_cositing cositing; 1288 1289 }; 1289 1290 1290 1291 struct dc_plane_info { ··· 1304 1303 int global_alpha_value; 1305 1304 bool input_csc_enabled; 1306 1305 int layer_index; 1306 + enum chroma_cositing cositing; 1307 1307 }; 1308 1308 1309 1309 #include "dc_stream.h"
+7
drivers/gpu/drm/amd/display/dc/dc_hw_types.h
··· 738 738 SCANNING_TYPE_UNDEFINED 739 739 }; 740 740 741 + enum chroma_cositing { 742 + CHROMA_COSITING_NONE, 743 + CHROMA_COSITING_LEFT, 744 + CHROMA_COSITING_TOPLEFT, 745 + CHROMA_COSITING_COUNT 746 + }; 747 + 741 748 struct dc_crtc_timing_flags { 742 749 uint32_t INTERLACE :1; 743 750 uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1,
+2
drivers/gpu/drm/amd/display/dc/dc_types.h
··· 1050 1050 struct replay_config { 1051 1051 /* Replay feature is supported */ 1052 1052 bool replay_supported; 1053 + /* Replay caps support DPCD & EDID caps*/ 1054 + bool replay_cap_support; 1053 1055 /* Power opt flags that are supported */ 1054 1056 unsigned int replay_power_opt_supported; 1055 1057 /* SMU optimization is supported */
-6
drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
··· 22 22 * Authors: AMD 23 23 * 24 24 */ 25 - 26 - #include <linux/delay.h> 27 - 28 25 #include "resource.h" 29 26 #include "dce_i2c.h" 30 27 #include "dce_i2c_hw.h" ··· 308 311 if (dce_i2c_hw->masks->DC_I2C_DDC1_CLK_EN) 309 312 REG_UPDATE_N(SETUP, 1, 310 313 FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_EN), 1); 311 - 312 - /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ 313 - REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); 314 314 315 315 /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ 316 316 REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
+8 -1
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
··· 168 168 169 169 case PIXEL_ENCODING_RGB: 170 170 case PIXEL_ENCODING_YCBCR444: 171 + REG_UPDATE_3(FMT_CONTROL, 172 + FMT_PIXEL_ENCODING, 0, 173 + FMT_SUBSAMPLING_MODE, 0, 174 + FMT_CBCR_BIT_REDUCTION_BYPASS, 0); 171 175 REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0); 172 176 break; 173 177 case PIXEL_ENCODING_YCBCR422: ··· 181 177 FMT_CBCR_BIT_REDUCTION_BYPASS, 0); 182 178 break; 183 179 case PIXEL_ENCODING_YCBCR420: 184 - REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2); 180 + REG_UPDATE_3(FMT_CONTROL, 181 + FMT_PIXEL_ENCODING, 2, 182 + FMT_SUBSAMPLING_MODE, 2, 183 + FMT_CBCR_BIT_REDUCTION_BYPASS, 1); 185 184 break; 186 185 default: 187 186 break;
+2
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
··· 79 79 OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh), \ 80 80 OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh), \ 81 81 OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh), \ 82 + OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh), \ 83 + OPP_SF(FMT0_FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh), \ 82 84 OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh), \ 83 85 OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh), \ 84 86 OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh), \
+3 -7
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
··· 127 127 uint32_t AFMT_60958_1; 128 128 uint32_t AFMT_60958_2; 129 129 uint32_t DIG_FE_CNTL; 130 - uint32_t DIG_FE_CNTL2; 131 130 uint32_t DIG_FIFO_STATUS; 132 131 uint32_t DP_MSE_RATE_CNTL; 133 132 uint32_t DP_MSE_RATE_UPDATE; ··· 569 570 type DP_SEC_GSP11_ENABLE;\ 570 571 type DP_SEC_GSP11_LINE_NUM 571 572 572 - #define SE_REG_FIELD_LIST_DCN3_2(type) \ 573 + #define SE_REG_FIELD_LIST_DCN3_1_COMMON(type) \ 573 574 type DIG_FIFO_OUTPUT_PIXEL_MODE;\ 574 575 type DP_PIXEL_PER_CYCLE_PROCESSING_MODE;\ 575 576 type DIG_SYMCLK_FE_ON;\ ··· 598 599 uint8_t HDMI_ACP_SEND; 599 600 SE_REG_FIELD_LIST_DCN2_0(uint8_t); 600 601 SE_REG_FIELD_LIST_DCN3_0(uint8_t); 601 - SE_REG_FIELD_LIST_DCN3_2(uint8_t); 602 + SE_REG_FIELD_LIST_DCN3_1_COMMON(uint8_t); 602 603 SE_REG_FIELD_LIST_DCN3_5_COMMON(uint8_t); 603 604 }; 604 605 ··· 607 608 uint32_t HDMI_ACP_SEND; 608 609 SE_REG_FIELD_LIST_DCN2_0(uint32_t); 609 610 SE_REG_FIELD_LIST_DCN3_0(uint32_t); 610 - SE_REG_FIELD_LIST_DCN3_2(uint32_t); 611 + SE_REG_FIELD_LIST_DCN3_1_COMMON(uint32_t); 611 612 SE_REG_FIELD_LIST_DCN3_5_COMMON(uint32_t); 612 613 }; 613 614 ··· 664 665 unsigned int sdp_message_size); 665 666 666 667 void enc1_stream_encoder_stop_dp_info_packets( 667 - struct stream_encoder *enc); 668 - 669 - void enc1_stream_encoder_reset_fifo( 670 668 struct stream_encoder *enc); 671 669 672 670 void enc1_stream_encoder_dp_blank(
+1 -1
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h
··· 147 147 uint32_t DCN_CUR1_TTU_CNTL1;\ 148 148 uint32_t VMID_SETTINGS_0 149 149 150 - 150 + /*shared with dcn3.x*/ 151 151 #define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \ 152 152 DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \ 153 153 uint32_t FLIP_PARAMETERS_3;\
+4 -6
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
··· 395 395 MPCC_OGAM_LUT_DATA, rgb[i].delta_green_reg); 396 396 REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, 397 397 MPCC_OGAM_LUT_DATA, rgb[i].delta_blue_reg); 398 - 399 398 } 400 399 400 + REG_SEQ_SUBMIT(); 401 + PERF_TRACE(); 402 + REG_SEQ_WAIT_DONE(); 403 + PERF_TRACE(); 401 404 } 402 405 403 406 static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id, ··· 504 501 ASSERT(!mpc_disabled); 505 502 ASSERT(!mpc_idle); 506 503 } 507 - 508 - REG_SEQ_SUBMIT(); 509 - PERF_TRACE(); 510 - REG_SEQ_WAIT_DONE(); 511 - PERF_TRACE(); 512 504 } 513 505 514 506 static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
+5
drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
··· 77 77 MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode, 78 78 CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode); 79 79 80 + /* no need to program PTE */ 80 81 REG_SET_5(DCHUBP_REQ_SIZE_CONFIG, 0, 81 82 CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size, 82 83 MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size, ··· 100 99 struct _vcs_dpi_display_rq_regs_st *rq_regs, 101 100 struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest) 102 101 { 102 + /* 103 + * otg is locked when this func is called. Register are double buffered. 104 + * disable the requestors is not needed 105 + */ 103 106 hubp2_vready_at_or_After_vsync(hubp, pipe_dest); 104 107 hubp201_program_requestor(hubp, rq_regs); 105 108 hubp201_program_deadline(hubp, dlg_attr, ttu_attr);
-18
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h
··· 29 29 #include "dcn20/dcn20_dccg.h" 30 30 31 31 32 - #define DCCG_REG_LIST_DCN3AG() \ 33 - DCCG_COMMON_REG_LIST_DCN_BASE(),\ 34 - SR(PHYASYMCLK_CLOCK_CNTL),\ 35 - SR(PHYBSYMCLK_CLOCK_CNTL),\ 36 - SR(PHYCSYMCLK_CLOCK_CNTL) 37 - 38 - 39 32 #define DCCG_REG_LIST_DCN30() \ 40 33 DCCG_REG_LIST_DCN2(),\ 41 34 DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2),\ ··· 38 45 SR(PHYASYMCLK_CLOCK_CNTL),\ 39 46 SR(PHYBSYMCLK_CLOCK_CNTL),\ 40 47 SR(PHYCSYMCLK_CLOCK_CNTL) 41 - 42 - #define DCCG_MASK_SH_LIST_DCN3AG(mask_sh) \ 43 - DCCG_MASK_SH_LIST_DCN2_1(mask_sh),\ 44 - DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\ 45 - DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\ 46 - DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\ 47 - DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\ 48 - DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\ 49 - DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\ 50 - DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\ 51 - DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh) 52 48 53 49 #define DCCG_MASK_SH_LIST_DCN3(mask_sh) \ 54 50 DCCG_MASK_SH_LIST_DCN2(mask_sh),\
-1
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
··· 29 29 #include "reg_helper.h" 30 30 #include "hw_shared.h" 31 31 #include "dc.h" 32 - #include "core_types.h" 33 32 34 33 #define DC_LOGGER \ 35 34 enc1->base.ctx->logger
-2
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
··· 251 251 .set_fc_enable = dwb3_set_fc_enable, 252 252 .set_stereo = dwb3_set_stereo, 253 253 .set_new_content = dwb3_set_new_content, 254 - .dwb_program_output_csc = NULL, 255 254 .dwb_ogam_set_input_transfer_func = dwb3_ogam_set_input_transfer_func, //TODO: rename 256 - .dwb_set_scaler = NULL, 257 255 }; 258 256 259 257 void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30,
+1
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c
··· 63 63 .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, 64 64 .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, 65 65 .force_pstate_change_control = hubbub3_force_pstate_change_control, 66 + .init_watermarks = hubbub3_init_watermarks, 66 67 .hubbub_read_state = hubbub2_read_state, 67 68 }; 68 69
+3 -1
drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
··· 395 395 396 396 if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || 397 397 color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { 398 - cur_rom_en = 1; 398 + if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { 399 + cur_rom_en = 1; 400 + } 399 401 } 400 402 401 403 REG_UPDATE_3(CURSOR0_CONTROL,
+4 -4
drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
··· 116 116 .update_visual_confirm_color = dcn10_update_visual_confirm_color, 117 117 .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations, 118 118 .update_dsc_pg = dcn32_update_dsc_pg, 119 - .calc_blocks_to_gate = dcn351_calc_blocks_to_gate, 120 - .calc_blocks_to_ungate = dcn351_calc_blocks_to_ungate, 121 - .hw_block_power_up = dcn351_hw_block_power_up, 122 - .hw_block_power_down = dcn351_hw_block_power_down, 119 + .calc_blocks_to_gate = dcn35_calc_blocks_to_gate, 120 + .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate, 121 + .hw_block_power_up = dcn35_hw_block_power_up, 122 + .hw_block_power_down = dcn35_hw_block_power_down, 123 123 .root_clock_control = dcn35_root_clock_control, 124 124 }; 125 125
+2 -2
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
··· 2547 2547 * full update which delays the flip for 1 frame. If we use the original pipe 2548 2548 * we don't have to toggle its power. So we can flip faster. 2549 2549 */ 2550 - static int find_optimal_free_pipe_as_secondary_dpp_pipe( 2550 + int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( 2551 2551 const struct resource_context *cur_res_ctx, 2552 2552 struct resource_context *new_res_ctx, 2553 2553 const struct resource_pool *pool, ··· 2730 2730 return dcn32_acquire_idle_pipe_for_head_pipe_in_layer( 2731 2731 new_ctx, pool, opp_head_pipe->stream, opp_head_pipe); 2732 2732 2733 - free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe( 2733 + free_pipe_idx = dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( 2734 2734 &cur_ctx->res_ctx, &new_ctx->res_ctx, 2735 2735 pool, opp_head_pipe); 2736 2736 if (free_pipe_idx >= 0) {
+6
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
··· 137 137 bool dcn32_is_center_timing(struct pipe_ctx *pipe); 138 138 bool dcn32_is_psr_capable(struct pipe_ctx *pipe); 139 139 140 + int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( 141 + const struct resource_context *cur_res_ctx, 142 + struct resource_context *new_res_ctx, 143 + const struct resource_pool *pool, 144 + const struct pipe_ctx *new_opp_head); 145 + 140 146 struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( 141 147 const struct dc_state *cur_ctx, 142 148 struct dc_state *new_ctx,
+2 -3
drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
··· 758 758 //must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions 759 759 .enable_double_buffered_dsc_pg_support = true, 760 760 .enable_dp_dig_pixel_rate_div_policy = 1, 761 - .disable_z10 = true, 761 + .disable_z10 = false, 762 762 .ignore_pg = true, 763 763 .psp_disabled_wa = true, 764 764 .ips2_eval_delay_us = 2000, ··· 1722 1722 return out; 1723 1723 1724 1724 DC_FP_START(); 1725 - dcn351_decide_zstate_support(dc, context); 1725 + dcn35_decide_zstate_support(dc, context); 1726 1726 DC_FP_END(); 1727 1727 1728 1728 return out; 1729 1729 } 1730 - 1731 1730 1732 1731 static struct resource_funcs dcn351_res_pool_funcs = { 1733 1732 .destroy = dcn351_destroy_resource_pool,
+1
drivers/gpu/drm/amd/display/dmub/dmub_srv.h
··· 297 297 bool dpia_hpd_int_enable_supported; 298 298 bool disable_clock_gate; 299 299 bool disallow_dispclk_dppclk_ds; 300 + bool ips_sequential_ono; 300 301 enum dmub_memory_access_type mem_access_type; 301 302 enum dmub_ips_disable_type disable_ips; 302 303 };
+52 -1
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
··· 1614 1614 */ 1615 1615 struct dmub_dcn_notify_idle_cntl_data { 1616 1616 uint8_t driver_idle; 1617 - uint8_t pad[1]; 1617 + uint8_t reserved[59]; 1618 1618 }; 1619 1619 1620 1620 /** ··· 2335 2335 * UHBR10 - 20.0 Gbps/Lane 2336 2336 */ 2337 2337 PHY_RATE_2000 = 11, 2338 + 2339 + PHY_RATE_675 = 12, 2340 + /** 2341 + * Rate 12 - 6.75 Gbps/Lane 2342 + */ 2338 2343 }; 2339 2344 2340 2345 /** ··· 3067 3062 * Set pseudo vtotal 3068 3063 */ 3069 3064 DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL = 7, 3065 + /** 3066 + * Set adaptive sync sdp enabled 3067 + */ 3068 + DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP = 8, 3069 + 3070 3070 }; 3071 3071 3072 3072 /** ··· 3273 3263 */ 3274 3264 uint8_t pad; 3275 3265 }; 3266 + struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data { 3267 + /** 3268 + * Panel Instance. 3269 + * Panel isntance to identify which replay_state to use 3270 + * Currently the support is only for 0 or 1 3271 + */ 3272 + uint8_t panel_inst; 3273 + /** 3274 + * enabled: set adaptive sync sdp enabled 3275 + */ 3276 + uint8_t force_disabled; 3277 + 3278 + uint8_t pad[2]; 3279 + }; 3276 3280 3277 3281 /** 3278 3282 * Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command. ··· 3391 3367 }; 3392 3368 3393 3369 /** 3370 + * Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command. 3371 + */ 3372 + struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp { 3373 + /** 3374 + * Command header. 3375 + */ 3376 + struct dmub_cmd_header header; 3377 + /** 3378 + * Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command. 3379 + */ 3380 + struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data data; 3381 + }; 3382 + 3383 + /** 3394 3384 * Data passed from driver to FW in DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command. 3395 3385 */ 3396 3386 struct dmub_cmd_replay_frameupdate_timer_data { ··· 3459 3421 * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data. 
3460 3422 */ 3461 3423 struct dmub_cmd_replay_set_pseudo_vtotal pseudo_vtotal_data; 3424 + /** 3425 + * Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command data. 3426 + */ 3427 + struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data disabled_adaptive_sync_sdp_data; 3428 + 3462 3429 }; 3463 3430 3464 3431 /** ··· 4139 4096 * Queries backlight info for the embedded panel. 4140 4097 */ 4141 4098 DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO = 1, 4099 + /** 4100 + * Sets the PWM Freq as per user's requirement. 4101 + */ 4102 + DMUB_CMD__PANEL_DEBUG_PWM_FREQ = 2, 4142 4103 }; 4143 4104 4144 4105 /** ··· 4714 4667 * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command. 4715 4668 */ 4716 4669 struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal; 4670 + /** 4671 + * Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command. 4672 + */ 4673 + struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp replay_disabled_adaptive_sync_sdp; 4717 4674 /** 4718 4675 * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command. 4719 4676 */
+1
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
··· 420 420 boot_options.bits.disable_clk_ds = params->disallow_dispclk_dppclk_ds; 421 421 boot_options.bits.disable_clk_gate = params->disable_clock_gate; 422 422 boot_options.bits.ips_disable = params->disable_ips; 423 + boot_options.bits.ips_sequential_ono = params->ips_sequential_ono; 423 424 424 425 REG_WRITE(DMCUB_SCRATCH14, boot_options.all); 425 426 }
+4
drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h
··· 237 237 #define regSEM_REGISTER_LAST_PART2_BASE_IDX 0 238 238 #define regIH_CLIENT_CFG 0x0184 239 239 #define regIH_CLIENT_CFG_BASE_IDX 0 240 + #define regIH_RING1_CLIENT_CFG_INDEX 0x0185 241 + #define regIH_RING1_CLIENT_CFG_INDEX_BASE_IDX 0 242 + #define regIH_RING1_CLIENT_CFG_DATA 0x0186 243 + #define regIH_RING1_CLIENT_CFG_DATA_BASE_IDX 0 240 244 #define regIH_CLIENT_CFG_INDEX 0x0188 241 245 #define regIH_CLIENT_CFG_INDEX_BASE_IDX 0 242 246 #define regIH_CLIENT_CFG_DATA 0x0189
+10
drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h
··· 888 888 //IH_CLIENT_CFG 889 889 #define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0 890 890 #define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000003FL 891 + //IH_RING1_CLIENT_CFG_INDEX 892 + #define IH_RING1_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0 893 + #define IH_RING1_CLIENT_CFG_INDEX__INDEX_MASK 0x00000007L 894 + //IH_RING1_CLIENT_CFG_DATA 895 + #define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID__SHIFT 0x0 896 + #define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID__SHIFT 0x8 897 + #define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10 898 + #define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID_MASK 0x000000FFL 899 + #define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MASK 0x0000FF00L 900 + #define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L 891 901 //IH_CLIENT_CFG_INDEX 892 902 #define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0 893 903 #define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL
+5
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 45 45 #include "smu_v13_0_6_ppt.h" 46 46 #include "smu_v13_0_7_ppt.h" 47 47 #include "smu_v14_0_0_ppt.h" 48 + #include "smu_v14_0_2_ppt.h" 48 49 #include "amd_pcie.h" 49 50 50 51 /* ··· 715 714 case IP_VERSION(14, 0, 0): 716 715 case IP_VERSION(14, 0, 1): 717 716 smu_v14_0_0_set_ppt_funcs(smu); 717 + break; 718 + case IP_VERSION(14, 0, 2): 719 + case IP_VERSION(14, 0, 3): 720 + smu_v14_0_2_set_ppt_funcs(smu); 718 721 break; 719 722 default: 720 723 return -EINVAL;
+1836
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h
··· 1 + /* 2 + * Copyright 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + 24 + #ifndef SMU14_DRIVER_IF_V14_0_H 25 + #define SMU14_DRIVER_IF_V14_0_H 26 + 27 + //Increment this version if SkuTable_t or BoardTable_t change 28 + #define PPTABLE_VERSION 0x18 29 + 30 + #define NUM_GFXCLK_DPM_LEVELS 16 31 + #define NUM_SOCCLK_DPM_LEVELS 8 32 + #define NUM_MP0CLK_DPM_LEVELS 2 33 + #define NUM_DCLK_DPM_LEVELS 8 34 + #define NUM_VCLK_DPM_LEVELS 8 35 + #define NUM_DISPCLK_DPM_LEVELS 8 36 + #define NUM_DPPCLK_DPM_LEVELS 8 37 + #define NUM_DPREFCLK_DPM_LEVELS 8 38 + #define NUM_DCFCLK_DPM_LEVELS 8 39 + #define NUM_DTBCLK_DPM_LEVELS 8 40 + #define NUM_UCLK_DPM_LEVELS 6 41 + #define NUM_LINK_LEVELS 3 42 + #define NUM_FCLK_DPM_LEVELS 8 43 + #define NUM_OD_FAN_MAX_POINTS 6 44 + 45 + // Feature Control Defines 46 + #define FEATURE_FW_DATA_READ_BIT 0 47 + #define FEATURE_DPM_GFXCLK_BIT 1 48 + #define FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT 2 49 + #define FEATURE_DPM_UCLK_BIT 3 50 + #define FEATURE_DPM_FCLK_BIT 4 51 + #define FEATURE_DPM_SOCCLK_BIT 5 52 + #define FEATURE_DPM_LINK_BIT 6 53 + #define FEATURE_DPM_DCN_BIT 7 54 + #define FEATURE_VMEMP_SCALING_BIT 8 55 + #define FEATURE_VDDIO_MEM_SCALING_BIT 9 56 + #define FEATURE_DS_GFXCLK_BIT 10 57 + #define FEATURE_DS_SOCCLK_BIT 11 58 + #define FEATURE_DS_FCLK_BIT 12 59 + #define FEATURE_DS_LCLK_BIT 13 60 + #define FEATURE_DS_DCFCLK_BIT 14 61 + #define FEATURE_DS_UCLK_BIT 15 62 + #define FEATURE_GFX_ULV_BIT 16 63 + #define FEATURE_FW_DSTATE_BIT 17 64 + #define FEATURE_GFXOFF_BIT 18 65 + #define FEATURE_BACO_BIT 19 66 + #define FEATURE_MM_DPM_BIT 20 67 + #define FEATURE_SOC_MPCLK_DS_BIT 21 68 + #define FEATURE_BACO_MPCLK_DS_BIT 22 69 + #define FEATURE_THROTTLERS_BIT 23 70 + #define FEATURE_SMARTSHIFT_BIT 24 71 + #define FEATURE_GTHR_BIT 25 72 + #define FEATURE_ACDC_BIT 26 73 + #define FEATURE_VR0HOT_BIT 27 74 + #define FEATURE_FW_CTF_BIT 28 75 + #define FEATURE_FAN_CONTROL_BIT 29 76 + #define FEATURE_GFX_DCS_BIT 30 77 + #define FEATURE_GFX_READ_MARGIN_BIT 31 78 + #define FEATURE_LED_DISPLAY_BIT 
32 79 + #define FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT 33 80 + #define FEATURE_OUT_OF_BAND_MONITOR_BIT 34 81 + #define FEATURE_OPTIMIZED_VMIN_BIT 35 82 + #define FEATURE_GFX_IMU_BIT 36 83 + #define FEATURE_BOOT_TIME_CAL_BIT 37 84 + #define FEATURE_GFX_PCC_DFLL_BIT 38 85 + #define FEATURE_SOC_CG_BIT 39 86 + #define FEATURE_DF_CSTATE_BIT 40 87 + #define FEATURE_GFX_EDC_BIT 41 88 + #define FEATURE_BOOT_POWER_OPT_BIT 42 89 + #define FEATURE_CLOCK_POWER_DOWN_BYPASS_BIT 43 90 + #define FEATURE_DS_VCN_BIT 44 91 + #define FEATURE_BACO_CG_BIT 45 92 + #define FEATURE_MEM_TEMP_READ_BIT 46 93 + #define FEATURE_ATHUB_MMHUB_PG_BIT 47 94 + #define FEATURE_SOC_PCC_BIT 48 95 + #define FEATURE_EDC_PWRBRK_BIT 49 96 + #define FEATURE_SOC_EDC_XVMIN_BIT 50 97 + #define FEATURE_GFX_PSM_DIDT_BIT 51 98 + #define FEATURE_APT_ALL_ENABLE_BIT 52 99 + #define FEATURE_APT_SQ_THROTTLE_BIT 53 100 + #define FEATURE_APT_PF_DCS_BIT 54 101 + #define FEATURE_GFX_EDC_XVMIN_BIT 55 102 + #define FEATURE_GFX_DIDT_XVMIN_BIT 56 103 + #define FEATURE_FAN_ABNORMAL_BIT 57 104 + #define FEATURE_CLOCK_STRETCH_COMPENSATOR 58 105 + #define FEATURE_SPARE_59_BIT 59 106 + #define FEATURE_SPARE_60_BIT 60 107 + #define FEATURE_SPARE_61_BIT 61 108 + #define FEATURE_SPARE_62_BIT 62 109 + #define FEATURE_SPARE_63_BIT 63 110 + #define NUM_FEATURES 64 111 + 112 + #define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL 113 + #define ALLOWED_FEATURE_CTRL_SCPM (1 << FEATURE_DPM_GFXCLK_BIT) | \ 114 + (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \ 115 + (1 << FEATURE_DPM_UCLK_BIT) | \ 116 + (1 << FEATURE_DPM_FCLK_BIT) | \ 117 + (1 << FEATURE_DPM_SOCCLK_BIT) | \ 118 + (1 << FEATURE_DPM_LINK_BIT) | \ 119 + (1 << FEATURE_DPM_DCN_BIT) | \ 120 + (1 << FEATURE_DS_GFXCLK_BIT) | \ 121 + (1 << FEATURE_DS_SOCCLK_BIT) | \ 122 + (1 << FEATURE_DS_FCLK_BIT) | \ 123 + (1 << FEATURE_DS_LCLK_BIT) | \ 124 + (1 << FEATURE_DS_DCFCLK_BIT) | \ 125 + (1 << FEATURE_DS_UCLK_BIT) | \ 126 + (1ULL << FEATURE_DS_VCN_BIT) 127 + 128 + 129 + //For use with 
feature control messages 130 + typedef enum { 131 + FEATURE_PWR_ALL, 132 + FEATURE_PWR_S5, 133 + FEATURE_PWR_BACO, 134 + FEATURE_PWR_SOC, 135 + FEATURE_PWR_GFX, 136 + FEATURE_PWR_DOMAIN_COUNT, 137 + } FEATURE_PWR_DOMAIN_e; 138 + 139 + //For use with feature control + BTC save restore 140 + typedef enum { 141 + FEATURE_BTC_NOP, 142 + FEATURE_BTC_SAVE, 143 + FEATURE_BTC_RESTORE, 144 + FEATURE_BTC_COUNT, 145 + } FEATURE_BTC_e; 146 + 147 + // Debug Overrides Bitmask 148 + #define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000001 149 + #define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002 150 + #define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004 151 + #define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008 152 + #define DEBUG_OVERRIDE_DISABLE_FAST_FCLK_TIMER 0x00000010 153 + #define DEBUG_OVERRIDE_DISABLE_VCN_PG 0x00000020 154 + #define DEBUG_OVERRIDE_DISABLE_FMAX_VMAX 0x00000040 155 + #define DEBUG_OVERRIDE_DISABLE_IMU_FW_CHECKS 0x00000080 156 + #define DEBUG_OVERRIDE_DISABLE_D0i2_REENTRY_HSR_TIMER_CHECK 0x00000100 157 + #define DEBUG_OVERRIDE_DISABLE_DFLL 0x00000200 158 + #define DEBUG_OVERRIDE_ENABLE_RLC_VF_BRINGUP_MODE 0x00000400 159 + #define DEBUG_OVERRIDE_DFLL_MASTER_MODE 0x00000800 160 + #define DEBUG_OVERRIDE_ENABLE_PROFILING_MODE 0x00001000 161 + #define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000 162 + #define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000 163 + #define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000 164 + 165 + // VR Mapping Bit Defines 166 + #define VR_MAPPING_VR_SELECT_MASK 0x01 167 + #define VR_MAPPING_VR_SELECT_SHIFT 0x00 168 + 169 + #define VR_MAPPING_PLANE_SELECT_MASK 0x02 170 + #define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 171 + 172 + // PSI Bit Defines 173 + #define PSI_SEL_VR0_PLANE0_PSI0 0x01 174 + #define PSI_SEL_VR0_PLANE0_PSI1 0x02 175 + #define PSI_SEL_VR0_PLANE1_PSI0 0x04 176 + #define PSI_SEL_VR0_PLANE1_PSI1 0x08 177 + #define PSI_SEL_VR1_PLANE0_PSI0 0x10 178 + #define 
PSI_SEL_VR1_PLANE0_PSI1 0x20 179 + #define PSI_SEL_VR1_PLANE1_PSI0 0x40 180 + #define PSI_SEL_VR1_PLANE1_PSI1 0x80 181 + 182 + typedef enum { 183 + SVI_PSI_0, // Full phase count (default) 184 + SVI_PSI_1, // Phase count 1st level 185 + SVI_PSI_2, // Phase count 2nd level 186 + SVI_PSI_3, // Single phase operation + active diode emulation 187 + SVI_PSI_4, // Single phase operation + passive diode emulation *optional* 188 + SVI_PSI_5, // Reserved 189 + SVI_PSI_6, // Power down to 0V (voltage regulation disabled) 190 + SVI_PSI_7, // Automated phase shedding and diode emulation 191 + } SVI_PSI_e; 192 + 193 + // Throttler Control/Status Bits 194 + #define THROTTLER_TEMP_EDGE_BIT 0 195 + #define THROTTLER_TEMP_HOTSPOT_BIT 1 196 + #define THROTTLER_TEMP_HOTSPOT_GFX_BIT 2 197 + #define THROTTLER_TEMP_HOTSPOT_SOC_BIT 3 198 + #define THROTTLER_TEMP_MEM_BIT 4 199 + #define THROTTLER_TEMP_VR_GFX_BIT 5 200 + #define THROTTLER_TEMP_VR_SOC_BIT 6 201 + #define THROTTLER_TEMP_VR_MEM0_BIT 7 202 + #define THROTTLER_TEMP_VR_MEM1_BIT 8 203 + #define THROTTLER_TEMP_LIQUID0_BIT 9 204 + #define THROTTLER_TEMP_LIQUID1_BIT 10 205 + #define THROTTLER_TEMP_PLX_BIT 11 206 + #define THROTTLER_TDC_GFX_BIT 12 207 + #define THROTTLER_TDC_SOC_BIT 13 208 + #define THROTTLER_PPT0_BIT 14 209 + #define THROTTLER_PPT1_BIT 15 210 + #define THROTTLER_PPT2_BIT 16 211 + #define THROTTLER_PPT3_BIT 17 212 + #define THROTTLER_FIT_BIT 18 213 + #define THROTTLER_GFX_APCC_PLUS_BIT 19 214 + #define THROTTLER_GFX_DVO_BIT 20 215 + #define THROTTLER_COUNT 21 216 + 217 + // FW DState Features Control Bits 218 + #define FW_DSTATE_SOC_ULV_BIT 0 219 + #define FW_DSTATE_G6_HSR_BIT 1 220 + #define FW_DSTATE_G6_PHY_VMEMP_OFF_BIT 2 221 + #define FW_DSTATE_SMN_DS_BIT 3 222 + #define FW_DSTATE_MP1_WHISPER_MODE_BIT 4 223 + #define FW_DSTATE_SOC_LIV_MIN_BIT 5 224 + #define FW_DSTATE_SOC_PLL_PWRDN_BIT 6 225 + #define FW_DSTATE_MEM_PLL_PWRDN_BIT 7 226 + #define FW_DSTATE_MALL_ALLOC_BIT 8 227 + #define FW_DSTATE_MEM_PSI_BIT 9 228 
+ #define FW_DSTATE_HSR_NON_STROBE_BIT 10 229 + #define FW_DSTATE_MP0_ENTER_WFI_BIT 11 230 + #define FW_DSTATE_MALL_FLUSH_BIT 12 231 + #define FW_DSTATE_SOC_PSI_BIT 13 232 + #define FW_DSTATE_MMHUB_INTERLOCK_BIT 14 233 + #define FW_DSTATE_D0i3_2_QUIET_FW_BIT 15 234 + #define FW_DSTATE_CLDO_PRG_BIT 16 235 + #define FW_DSTATE_DF_PLL_PWRDN_BIT 17 236 + 237 + //LED Display Mask & Control Bits 238 + #define LED_DISPLAY_GFX_DPM_BIT 0 239 + #define LED_DISPLAY_PCIE_BIT 1 240 + #define LED_DISPLAY_ERROR_BIT 2 241 + 242 + 243 + #define MEM_TEMP_READ_OUT_OF_BAND_BIT 0 244 + #define MEM_TEMP_READ_IN_BAND_REFRESH_BIT 1 245 + #define MEM_TEMP_READ_IN_BAND_DUMMY_PSTATE_BIT 2 246 + 247 + typedef enum { 248 + SMARTSHIFT_VERSION_1, 249 + SMARTSHIFT_VERSION_2, 250 + SMARTSHIFT_VERSION_3, 251 + } SMARTSHIFT_VERSION_e; 252 + 253 + typedef enum { 254 + FOPT_CALC_AC_CALC_DC, 255 + FOPT_PPTABLE_AC_CALC_DC, 256 + FOPT_CALC_AC_PPTABLE_DC, 257 + FOPT_PPTABLE_AC_PPTABLE_DC, 258 + } FOPT_CALC_e; 259 + 260 + typedef enum { 261 + DRAM_BIT_WIDTH_DISABLED = 0, 262 + DRAM_BIT_WIDTH_X_8 = 8, 263 + DRAM_BIT_WIDTH_X_16 = 16, 264 + DRAM_BIT_WIDTH_X_32 = 32, 265 + DRAM_BIT_WIDTH_X_64 = 64, 266 + DRAM_BIT_WIDTH_X_128 = 128, 267 + DRAM_BIT_WIDTH_COUNT, 268 + } DRAM_BIT_WIDTH_TYPE_e; 269 + 270 + //I2C Interface 271 + #define NUM_I2C_CONTROLLERS 8 272 + 273 + #define I2C_CONTROLLER_ENABLED 1 274 + #define I2C_CONTROLLER_DISABLED 0 275 + 276 + #define MAX_SW_I2C_COMMANDS 24 277 + 278 + typedef enum { 279 + I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0 280 + I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1 281 + I2C_CONTROLLER_PORT_COUNT, 282 + } I2cControllerPort_e; 283 + 284 + typedef enum { 285 + I2C_CONTROLLER_NAME_VR_GFX = 0, 286 + I2C_CONTROLLER_NAME_VR_SOC, 287 + I2C_CONTROLLER_NAME_VR_VMEMP, 288 + I2C_CONTROLLER_NAME_VR_VDDIO, 289 + I2C_CONTROLLER_NAME_LIQUID0, 290 + I2C_CONTROLLER_NAME_LIQUID1, 291 + I2C_CONTROLLER_NAME_PLX, 292 + I2C_CONTROLLER_NAME_FAN_INTAKE, 293 + I2C_CONTROLLER_NAME_COUNT, 294 + } 
I2cControllerName_e; 295 + 296 + typedef enum { 297 + I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0, 298 + I2C_CONTROLLER_THROTTLER_VR_GFX, 299 + I2C_CONTROLLER_THROTTLER_VR_SOC, 300 + I2C_CONTROLLER_THROTTLER_VR_VMEMP, 301 + I2C_CONTROLLER_THROTTLER_VR_VDDIO, 302 + I2C_CONTROLLER_THROTTLER_LIQUID0, 303 + I2C_CONTROLLER_THROTTLER_LIQUID1, 304 + I2C_CONTROLLER_THROTTLER_PLX, 305 + I2C_CONTROLLER_THROTTLER_FAN_INTAKE, 306 + I2C_CONTROLLER_THROTTLER_INA3221, 307 + I2C_CONTROLLER_THROTTLER_COUNT, 308 + } I2cControllerThrottler_e; 309 + 310 + typedef enum { 311 + I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5, 312 + I2C_CONTROLLER_PROTOCOL_VR_IR35217, 313 + I2C_CONTROLLER_PROTOCOL_TMP_MAX31875, 314 + I2C_CONTROLLER_PROTOCOL_INA3221, 315 + I2C_CONTROLLER_PROTOCOL_TMP_MAX6604, 316 + I2C_CONTROLLER_PROTOCOL_COUNT, 317 + } I2cControllerProtocol_e; 318 + 319 + typedef struct { 320 + uint8_t Enabled; 321 + uint8_t Speed; 322 + uint8_t SlaveAddress; 323 + uint8_t ControllerPort; 324 + uint8_t ControllerName; 325 + uint8_t ThermalThrotter; 326 + uint8_t I2cProtocol; 327 + uint8_t PaddingConfig; 328 + } I2cControllerConfig_t; 329 + 330 + typedef enum { 331 + I2C_PORT_SVD_SCL = 0, 332 + I2C_PORT_GPIO, 333 + } I2cPort_e; 334 + 335 + typedef enum { 336 + I2C_SPEED_FAST_50K = 0, //50 Kbits/s 337 + I2C_SPEED_FAST_100K, //100 Kbits/s 338 + I2C_SPEED_FAST_400K, //400 Kbits/s 339 + I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) 340 + I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode) 341 + I2C_SPEED_HIGH_2M, //2.3 Mbits/s 342 + I2C_SPEED_COUNT, 343 + } I2cSpeed_e; 344 + 345 + typedef enum { 346 + I2C_CMD_READ = 0, 347 + I2C_CMD_WRITE, 348 + I2C_CMD_COUNT, 349 + } I2cCmdType_e; 350 + 351 + #define CMDCONFIG_STOP_BIT 0 352 + #define CMDCONFIG_RESTART_BIT 1 353 + #define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write 354 + 355 + #define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) 356 + #define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) 357 + #define 
CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT) 358 + 359 + typedef struct { 360 + uint8_t ReadWriteData; //Return data for read. Data to send for write 361 + uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command, and is a read or write 362 + } SwI2cCmd_t; //SW I2C Command Table 363 + 364 + typedef struct { 365 + uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) 366 + uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select 367 + uint8_t SlaveAddress; //Slave address of device 368 + uint8_t NumCmds; //Number of commands 369 + 370 + SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; 371 + } SwI2cRequest_t; // SW I2C Request Table 372 + 373 + typedef struct { 374 + SwI2cRequest_t SwI2cRequest; 375 + 376 + uint32_t Spare[8]; 377 + uint32_t MmHubPadding[8]; // SMU internal use 378 + } SwI2cRequestExternal_t; 379 + 380 + typedef struct { 381 + uint64_t mca_umc_status; 382 + uint64_t mca_umc_addr; 383 + 384 + uint16_t ce_count_lo_chip; 385 + uint16_t ce_count_hi_chip; 386 + 387 + uint32_t eccPadding; 388 + } EccInfo_t; 389 + 390 + typedef struct { 391 + EccInfo_t EccInfo[24]; 392 + } EccInfoTable_t; 393 + 394 + //D3HOT sequences 395 + typedef enum { 396 + BACO_SEQUENCE, 397 + MSR_SEQUENCE, 398 + BAMACO_SEQUENCE, 399 + ULPS_SEQUENCE, 400 + D3HOT_SEQUENCE_COUNT, 401 + } D3HOTSequence_e; 402 + 403 + //This is aligned with RSMU PGFSM Register Mapping 404 + typedef enum { 405 + PG_DYNAMIC_MODE = 0, 406 + PG_STATIC_MODE, 407 + } PowerGatingMode_e; 408 + 409 + //This is aligned with RSMU PGFSM Register Mapping 410 + typedef enum { 411 + PG_POWER_DOWN = 0, 412 + PG_POWER_UP, 413 + } PowerGatingSettings_e; 414 + 415 + typedef struct { 416 + uint32_t a; // store in IEEE float format in this variable 417 + uint32_t b; // store in IEEE float format in this variable 418 + uint32_t c; // store in IEEE float format in this variable 419 + } QuadraticInt_t; 420 + 421 + typedef struct { 422 + uint32_t m; // store in IEEE float 
format in this variable 423 + uint32_t b; // store in IEEE float format in this variable 424 + } LinearInt_t; 425 + 426 + typedef struct { 427 + uint32_t a; // store in IEEE float format in this variable 428 + uint32_t b; // store in IEEE float format in this variable 429 + uint32_t c; // store in IEEE float format in this variable 430 + } DroopInt_t; 431 + 432 + typedef enum { 433 + DCS_ARCH_DISABLED, 434 + DCS_ARCH_FADCS, 435 + DCS_ARCH_ASYNC, 436 + } DCS_ARCH_e; 437 + 438 + //Only Clks that have DPM descriptors are listed here 439 + typedef enum { 440 + PPCLK_GFXCLK = 0, 441 + PPCLK_SOCCLK, 442 + PPCLK_UCLK, 443 + PPCLK_FCLK, 444 + PPCLK_DCLK_0, 445 + PPCLK_VCLK_0, 446 + PPCLK_DISPCLK, 447 + PPCLK_DPPCLK, 448 + PPCLK_DPREFCLK, 449 + PPCLK_DCFCLK, 450 + PPCLK_DTBCLK, 451 + PPCLK_COUNT, 452 + } PPCLK_e; 453 + 454 + typedef enum { 455 + VOLTAGE_MODE_PPTABLE = 0, 456 + VOLTAGE_MODE_FUSES, 457 + VOLTAGE_MODE_COUNT, 458 + } VOLTAGE_MODE_e; 459 + 460 + typedef enum { 461 + AVFS_VOLTAGE_GFX = 0, 462 + AVFS_VOLTAGE_SOC, 463 + AVFS_VOLTAGE_COUNT, 464 + } AVFS_VOLTAGE_TYPE_e; 465 + 466 + typedef enum { 467 + AVFS_TEMP_COLD = 0, 468 + AVFS_TEMP_HOT, 469 + AVFS_TEMP_COUNT, 470 + } AVFS_TEMP_e; 471 + 472 + typedef enum { 473 + AVFS_D_G, 474 + AVFS_D_COUNT, 475 + } AVFS_D_e; 476 + 477 + 478 + typedef enum { 479 + UCLK_DIV_BY_1 = 0, 480 + UCLK_DIV_BY_2, 481 + UCLK_DIV_BY_4, 482 + UCLK_DIV_BY_8, 483 + } UCLK_DIV_e; 484 + 485 + typedef enum { 486 + GPIO_INT_POLARITY_ACTIVE_LOW = 0, 487 + GPIO_INT_POLARITY_ACTIVE_HIGH, 488 + } GpioIntPolarity_e; 489 + 490 + typedef enum { 491 + PWR_CONFIG_TDP = 0, 492 + PWR_CONFIG_TGP, 493 + PWR_CONFIG_TCP_ESTIMATED, 494 + PWR_CONFIG_TCP_MEASURED, 495 + PWR_CONFIG_TBP_DESKTOP, 496 + PWR_CONFIG_TBP_MOBILE, 497 + } PwrConfig_e; 498 + 499 + typedef struct { 500 + uint8_t Padding; 501 + uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM 502 + uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set 
to # discrete levels used 503 + uint8_t CalculateFopt; // Indication whether FW should calculate Fopt or use values below. Reference FOPT_CALC_e 504 + LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz) 505 + uint32_t Padding3[3]; 506 + uint16_t Padding4; 507 + uint16_t FoptimalDc; //Foptimal frequency in DC power mode. 508 + uint16_t FoptimalAc; //Foptimal frequency in AC power mode. 509 + uint16_t Padding2; 510 + } DpmDescriptor_t; 511 + 512 + typedef enum { 513 + PPT_THROTTLER_PPT0, 514 + PPT_THROTTLER_PPT1, 515 + PPT_THROTTLER_PPT2, 516 + PPT_THROTTLER_PPT3, 517 + PPT_THROTTLER_COUNT 518 + } PPT_THROTTLER_e; 519 + 520 + typedef enum { 521 + TEMP_EDGE, 522 + TEMP_HOTSPOT, 523 + TEMP_HOTSPOT_GFX, 524 + TEMP_HOTSPOT_SOC, 525 + TEMP_MEM, 526 + TEMP_VR_GFX, 527 + TEMP_VR_SOC, 528 + TEMP_VR_MEM0, 529 + TEMP_VR_MEM1, 530 + TEMP_LIQUID0, 531 + TEMP_LIQUID1, 532 + TEMP_PLX, 533 + TEMP_COUNT, 534 + } TEMP_e; 535 + 536 + typedef enum { 537 + TDC_THROTTLER_GFX, 538 + TDC_THROTTLER_SOC, 539 + TDC_THROTTLER_COUNT 540 + } TDC_THROTTLER_e; 541 + 542 + typedef enum { 543 + SVI_PLANE_VDD_GFX, 544 + SVI_PLANE_VDD_SOC, 545 + SVI_PLANE_VDDCI_MEM, 546 + SVI_PLANE_VDDIO_MEM, 547 + SVI_PLANE_COUNT, 548 + } SVI_PLANE_e; 549 + 550 + typedef enum { 551 + PMFW_VOLT_PLANE_GFX, 552 + PMFW_VOLT_PLANE_SOC, 553 + PMFW_VOLT_PLANE_COUNT 554 + } PMFW_VOLT_PLANE_e; 555 + 556 + typedef enum { 557 + CUSTOMER_VARIANT_ROW, 558 + CUSTOMER_VARIANT_FALCON, 559 + CUSTOMER_VARIANT_COUNT, 560 + } CUSTOMER_VARIANT_e; 561 + 562 + typedef enum { 563 + POWER_SOURCE_AC, 564 + POWER_SOURCE_DC, 565 + POWER_SOURCE_COUNT, 566 + } POWER_SOURCE_e; 567 + 568 + typedef enum { 569 + MEM_VENDOR_PLACEHOLDER0, // 0 570 + MEM_VENDOR_SAMSUNG, // 1 571 + MEM_VENDOR_INFINEON, // 2 572 + MEM_VENDOR_ELPIDA, // 3 573 + MEM_VENDOR_ETRON, // 4 574 + MEM_VENDOR_NANYA, // 5 575 + MEM_VENDOR_HYNIX, // 6 576 + MEM_VENDOR_MOSEL, // 7 577 + MEM_VENDOR_WINBOND, // 8 578 + MEM_VENDOR_ESMT, // 9 579 + 
MEM_VENDOR_PLACEHOLDER1, // 10 580 + MEM_VENDOR_PLACEHOLDER2, // 11 581 + MEM_VENDOR_PLACEHOLDER3, // 12 582 + MEM_VENDOR_PLACEHOLDER4, // 13 583 + MEM_VENDOR_PLACEHOLDER5, // 14 584 + MEM_VENDOR_MICRON, // 15 585 + MEM_VENDOR_COUNT, 586 + } MEM_VENDOR_e; 587 + 588 + typedef enum { 589 + PP_GRTAVFS_HW_CPO_CTL_ZONE0, 590 + PP_GRTAVFS_HW_CPO_CTL_ZONE1, 591 + PP_GRTAVFS_HW_CPO_CTL_ZONE2, 592 + PP_GRTAVFS_HW_CPO_CTL_ZONE3, 593 + PP_GRTAVFS_HW_CPO_CTL_ZONE4, 594 + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE0, 595 + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE0, 596 + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE1, 597 + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE1, 598 + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE2, 599 + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE2, 600 + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE3, 601 + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE3, 602 + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE4, 603 + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE4, 604 + PP_GRTAVFS_HW_ZONE0_VF, 605 + PP_GRTAVFS_HW_ZONE1_VF1, 606 + PP_GRTAVFS_HW_ZONE2_VF2, 607 + PP_GRTAVFS_HW_ZONE3_VF3, 608 + PP_GRTAVFS_HW_VOLTAGE_GB, 609 + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE0, 610 + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE1, 611 + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE2, 612 + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE3, 613 + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE4, 614 + PP_GRTAVFS_HW_RESERVED_0, 615 + PP_GRTAVFS_HW_RESERVED_1, 616 + PP_GRTAVFS_HW_RESERVED_2, 617 + PP_GRTAVFS_HW_RESERVED_3, 618 + PP_GRTAVFS_HW_RESERVED_4, 619 + PP_GRTAVFS_HW_RESERVED_5, 620 + PP_GRTAVFS_HW_RESERVED_6, 621 + PP_GRTAVFS_HW_FUSE_COUNT, 622 + } PP_GRTAVFS_HW_FUSE_e; 623 + 624 + typedef enum { 625 + PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_HOT_T0, 626 + PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_COLD_T0, 627 + PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_HOT_T0, 628 + PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_COLD_T0, 629 + PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_HOT_T0, 630 + PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_COLD_T0, 631 + PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_HOT_T0, 632 + PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_COLD_T0, 633 + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z0, 634 + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z1, 635 + 
PP_GRTAVFS_FW_COMMON_SRAM_RM_Z2, 636 + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z3, 637 + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z4, 638 + PP_GRTAVFS_FW_COMMON_FUSE_COUNT, 639 + } PP_GRTAVFS_FW_COMMON_FUSE_e; 640 + 641 + typedef enum { 642 + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_NEG_1, 643 + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_0, 644 + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_1, 645 + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_2, 646 + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_3, 647 + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_4, 648 + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_NEG_1, 649 + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_0, 650 + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_1, 651 + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_2, 652 + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_3, 653 + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_4, 654 + PP_GRTAVFS_FW_SEP_FUSE_VF_NEG_1_FREQUENCY, 655 + PP_GRTAVFS_FW_SEP_FUSE_VF4_FREQUENCY, 656 + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_0, 657 + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_1, 658 + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_2, 659 + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_3, 660 + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_4, 661 + PP_GRTAVFS_FW_SEP_FUSE_COUNT, 662 + } PP_GRTAVFS_FW_SEP_FUSE_e; 663 + 664 + #define PP_NUM_RTAVFS_PWL_ZONES 5 665 + 666 + 667 + // VBIOS or PPLIB configures telemetry slope and offset. 
Only slope expected to be set for SVI3 668 + // Slope Q1.7, Offset Q1.2 669 + typedef struct { 670 + int8_t Offset; // in Amps 671 + uint8_t Padding; 672 + uint16_t MaxCurrent; // in Amps 673 + } SviTelemetryScale_t; 674 + 675 + #define PP_NUM_OD_VF_CURVE_POINTS PP_NUM_RTAVFS_PWL_ZONES + 1 676 + 677 + #define PP_OD_FEATURE_GFX_VF_CURVE_BIT 0 678 + #define PP_OD_FEATURE_GFX_VMAX_BIT 1 679 + #define PP_OD_FEATURE_SOC_VMAX_BIT 2 680 + #define PP_OD_FEATURE_PPT_BIT 3 681 + #define PP_OD_FEATURE_FAN_CURVE_BIT 4 682 + #define PP_OD_FEATURE_FAN_LEGACY_BIT 5 683 + #define PP_OD_FEATURE_FULL_CTRL_BIT 6 684 + #define PP_OD_FEATURE_TDC_BIT 7 685 + #define PP_OD_FEATURE_GFXCLK_BIT 8 686 + #define PP_OD_FEATURE_UCLK_BIT 9 687 + #define PP_OD_FEATURE_FCLK_BIT 10 688 + #define PP_OD_FEATURE_ZERO_FAN_BIT 11 689 + #define PP_OD_FEATURE_TEMPERATURE_BIT 12 690 + #define PP_OD_FEATURE_EDC_BIT 13 691 + #define PP_OD_FEATURE_COUNT 14 692 + 693 + typedef enum { 694 + PP_OD_POWER_FEATURE_ALWAYS_ENABLED, 695 + PP_OD_POWER_FEATURE_DISABLED_WHILE_GAMING, 696 + PP_OD_POWER_FEATURE_ALWAYS_DISABLED, 697 + } PP_OD_POWER_FEATURE_e; 698 + 699 + typedef enum { 700 + FAN_MODE_AUTO = 0, 701 + FAN_MODE_MANUAL_LINEAR, 702 + } FanMode_e; 703 + 704 + typedef enum { 705 + OD_NO_ERROR, 706 + OD_REQUEST_ADVANCED_NOT_SUPPORTED, 707 + OD_UNSUPPORTED_FEATURE, 708 + OD_INVALID_FEATURE_COMBO_ERROR, 709 + OD_GFXCLK_VF_CURVE_OFFSET_ERROR, 710 + OD_VDD_GFX_VMAX_ERROR, 711 + OD_VDD_SOC_VMAX_ERROR, 712 + OD_PPT_ERROR, 713 + OD_FAN_MIN_PWM_ERROR, 714 + OD_FAN_ACOUSTIC_TARGET_ERROR, 715 + OD_FAN_ACOUSTIC_LIMIT_ERROR, 716 + OD_FAN_TARGET_TEMP_ERROR, 717 + OD_FAN_ZERO_RPM_STOP_TEMP_ERROR, 718 + OD_FAN_CURVE_PWM_ERROR, 719 + OD_FAN_CURVE_TEMP_ERROR, 720 + OD_FULL_CTRL_GFXCLK_ERROR, 721 + OD_FULL_CTRL_UCLK_ERROR, 722 + OD_FULL_CTRL_FCLK_ERROR, 723 + OD_FULL_CTRL_VDD_GFX_ERROR, 724 + OD_FULL_CTRL_VDD_SOC_ERROR, 725 + OD_TDC_ERROR, 726 + OD_GFXCLK_ERROR, 727 + OD_UCLK_ERROR, 728 + OD_FCLK_ERROR, 729 + OD_OP_TEMP_ERROR, 730 + 
OD_OP_GFX_EDC_ERROR, 731 + OD_OP_GFX_PCC_ERROR, 732 + OD_POWER_FEATURE_CTRL_ERROR, 733 + } OD_FAIL_e; 734 + 735 + typedef struct { 736 + uint32_t FeatureCtrlMask; 737 + 738 + //Voltage control 739 + int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS]; 740 + 741 + uint16_t VddGfxVmax; // in mV 742 + uint16_t VddSocVmax; 743 + 744 + uint8_t IdlePwrSavingFeaturesCtrl; 745 + uint8_t RuntimePwrSavingFeaturesCtrl; 746 + uint16_t Padding; 747 + 748 + //Frequency changes 749 + int16_t GfxclkFmin; // MHz 750 + int16_t GfxclkFmax; // MHz 751 + uint16_t UclkFmin; // MHz 752 + uint16_t UclkFmax; // MHz 753 + uint16_t FclkFmin; 754 + uint16_t FclkFmax; 755 + 756 + //PPT 757 + int16_t Ppt; // % 758 + int16_t Tdc; 759 + 760 + //Fan control 761 + uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS]; 762 + uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS]; 763 + uint16_t FanMinimumPwm; 764 + uint16_t AcousticTargetRpmThreshold; 765 + uint16_t AcousticLimitRpmThreshold; 766 + uint16_t FanTargetTemperature; // Degree Celcius 767 + uint8_t FanZeroRpmEnable; 768 + uint8_t FanZeroRpmStopTemp; 769 + uint8_t FanMode; 770 + uint8_t MaxOpTemp; 771 + 772 + uint8_t AdvancedOdModeEnabled; 773 + uint8_t Padding1[3]; 774 + 775 + uint16_t GfxVoltageFullCtrlMode; 776 + uint16_t SocVoltageFullCtrlMode; 777 + uint16_t GfxclkFullCtrlMode; 778 + uint16_t UclkFullCtrlMode; 779 + uint16_t FclkFullCtrlMode; 780 + uint16_t Padding2; 781 + 782 + int16_t GfxEdc; 783 + int16_t GfxPccLimitControl; 784 + 785 + uint32_t Spare[10]; 786 + uint32_t MmHubPadding[8]; // SMU internal use. 
Adding here instead of external as a workaround 787 + } OverDriveTable_t; 788 + 789 + typedef struct { 790 + OverDriveTable_t OverDriveTable; 791 + 792 + } OverDriveTableExternal_t; 793 + 794 + typedef struct { 795 + uint32_t FeatureCtrlMask; 796 + 797 + //Gfx Vf Curve 798 + int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS]; 799 + //gfx Vmax 800 + uint16_t VddGfxVmax; // in mV 801 + //soc Vmax 802 + uint16_t VddSocVmax; 803 + 804 + //gfxclk 805 + int16_t GfxclkFmin; // MHz 806 + int16_t GfxclkFmax; // MHz 807 + //uclk 808 + uint16_t UclkFmin; // MHz 809 + uint16_t UclkFmax; // MHz 810 + //fclk 811 + uint16_t FclkFmin; 812 + uint16_t FclkFmax; 813 + 814 + //PPT 815 + int16_t Ppt; // % 816 + //TDC 817 + int16_t Tdc; 818 + 819 + //Fan Curve 820 + uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS]; 821 + uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS]; 822 + //Fan Legacy 823 + uint16_t FanMinimumPwm; 824 + uint16_t AcousticTargetRpmThreshold; 825 + uint16_t AcousticLimitRpmThreshold; 826 + uint16_t FanTargetTemperature; // Degree Celcius 827 + //zero fan 828 + uint8_t FanZeroRpmEnable; 829 + //temperature 830 + uint8_t MaxOpTemp; 831 + uint8_t Padding[2]; 832 + 833 + //Full Ctrl 834 + uint16_t GfxVoltageFullCtrlMode; 835 + uint16_t SocVoltageFullCtrlMode; 836 + uint16_t GfxclkFullCtrlMode; 837 + uint16_t UclkFullCtrlMode; 838 + uint16_t FclkFullCtrlMode; 839 + //EDC 840 + int16_t GfxEdc; 841 + int16_t GfxPccLimitControl; 842 + int16_t Padding1; 843 + 844 + uint32_t Spare[5]; 845 + } OverDriveLimits_t; 846 + 847 + typedef enum { 848 + BOARD_GPIO_SMUIO_0, 849 + BOARD_GPIO_SMUIO_1, 850 + BOARD_GPIO_SMUIO_2, 851 + BOARD_GPIO_SMUIO_3, 852 + BOARD_GPIO_SMUIO_4, 853 + BOARD_GPIO_SMUIO_5, 854 + BOARD_GPIO_SMUIO_6, 855 + BOARD_GPIO_SMUIO_7, 856 + BOARD_GPIO_SMUIO_8, 857 + BOARD_GPIO_SMUIO_9, 858 + BOARD_GPIO_SMUIO_10, 859 + BOARD_GPIO_SMUIO_11, 860 + BOARD_GPIO_SMUIO_12, 861 + BOARD_GPIO_SMUIO_13, 862 + BOARD_GPIO_SMUIO_14, 863 + BOARD_GPIO_SMUIO_15, 864 + 
BOARD_GPIO_SMUIO_16, 865 + BOARD_GPIO_SMUIO_17, 866 + BOARD_GPIO_SMUIO_18, 867 + BOARD_GPIO_SMUIO_19, 868 + BOARD_GPIO_SMUIO_20, 869 + BOARD_GPIO_SMUIO_21, 870 + BOARD_GPIO_SMUIO_22, 871 + BOARD_GPIO_SMUIO_23, 872 + BOARD_GPIO_SMUIO_24, 873 + BOARD_GPIO_SMUIO_25, 874 + BOARD_GPIO_SMUIO_26, 875 + BOARD_GPIO_SMUIO_27, 876 + BOARD_GPIO_SMUIO_28, 877 + BOARD_GPIO_SMUIO_29, 878 + BOARD_GPIO_SMUIO_30, 879 + BOARD_GPIO_SMUIO_31, 880 + MAX_BOARD_GPIO_SMUIO_NUM, 881 + BOARD_GPIO_DC_GEN_A, 882 + BOARD_GPIO_DC_GEN_B, 883 + BOARD_GPIO_DC_GEN_C, 884 + BOARD_GPIO_DC_GEN_D, 885 + BOARD_GPIO_DC_GEN_E, 886 + BOARD_GPIO_DC_GEN_F, 887 + BOARD_GPIO_DC_GEN_G, 888 + BOARD_GPIO_DC_GENLK_CLK, 889 + BOARD_GPIO_DC_GENLK_VSYNC, 890 + BOARD_GPIO_DC_SWAPLOCK_A, 891 + BOARD_GPIO_DC_SWAPLOCK_B, 892 + MAX_BOARD_DC_GPIO_NUM, 893 + BOARD_GPIO_LV_EN, 894 + } BOARD_GPIO_TYPE_e; 895 + 896 + #define INVALID_BOARD_GPIO 0xFF 897 + 898 + 899 + typedef struct { 900 + //PLL 0 901 + uint16_t InitImuClk; 902 + uint16_t InitSocclk; 903 + uint16_t InitMpioclk; 904 + uint16_t InitSmnclk; 905 + //PLL 1 906 + uint16_t InitDispClk; 907 + uint16_t InitDppClk; 908 + uint16_t InitDprefclk; 909 + uint16_t InitDcfclk; 910 + uint16_t InitDtbclk; 911 + uint16_t InitDbguSocClk; 912 + //PLL 2 913 + uint16_t InitGfxclk_bypass; 914 + uint16_t InitMp1clk; 915 + uint16_t InitLclk; 916 + uint16_t InitDbguBacoClk; 917 + uint16_t InitBaco400clk; 918 + uint16_t InitBaco1200clk_bypass; 919 + uint16_t InitBaco700clk_bypass; 920 + uint16_t InitBaco500clk; 921 + // PLL 3 922 + uint16_t InitDclk0; 923 + uint16_t InitVclk0; 924 + // PLL 4 925 + uint16_t InitFclk; 926 + uint16_t Padding1; 927 + // PLL 5 928 + //UCLK clocks, assumed all UCLK instances will be the same. 
929 + uint8_t InitUclkLevel; // =0,1,2,3,4,5 frequency from FreqTableUclk 930 + 931 + uint8_t Padding[3]; 932 + 933 + uint32_t InitVcoFreqPll0; //smu_socclk_t 934 + uint32_t InitVcoFreqPll1; //smu_displayclk_t 935 + uint32_t InitVcoFreqPll2; //smu_nbioclk_t 936 + uint32_t InitVcoFreqPll3; //smu_vcnclk_t 937 + uint32_t InitVcoFreqPll4; //smu_fclk_t 938 + uint32_t InitVcoFreqPll5; //smu_uclk_01_t 939 + uint32_t InitVcoFreqPll6; //smu_uclk_23_t 940 + uint32_t InitVcoFreqPll7; //smu_uclk_45_t 941 + uint32_t InitVcoFreqPll8; //smu_uclk_67_t 942 + 943 + //encoding will be SVI3 944 + uint16_t InitGfx; // In mV(Q2) , should be 0? 945 + uint16_t InitSoc; // In mV(Q2) 946 + uint16_t InitVddIoMem; // In mV(Q2) MemVdd 947 + uint16_t InitVddCiMem; // In mV(Q2) VMemP 948 + 949 + //uint16_t Padding2; 950 + 951 + uint32_t Spare[8]; 952 + } BootValues_t; 953 + 954 + typedef struct { 955 + uint16_t Power[PPT_THROTTLER_COUNT][POWER_SOURCE_COUNT]; // Watts 956 + uint16_t Tdc[TDC_THROTTLER_COUNT]; // Amps 957 + 958 + uint16_t Temperature[TEMP_COUNT]; // Celsius 959 + 960 + uint8_t PwmLimitMin; 961 + uint8_t PwmLimitMax; 962 + uint8_t FanTargetTemperature; 963 + uint8_t Spare1[1]; 964 + 965 + uint16_t AcousticTargetRpmThresholdMin; 966 + uint16_t AcousticTargetRpmThresholdMax; 967 + 968 + uint16_t AcousticLimitRpmThresholdMin; 969 + uint16_t AcousticLimitRpmThresholdMax; 970 + 971 + uint16_t PccLimitMin; 972 + uint16_t PccLimitMax; 973 + 974 + uint16_t FanStopTempMin; 975 + uint16_t FanStopTempMax; 976 + uint16_t FanStartTempMin; 977 + uint16_t FanStartTempMax; 978 + 979 + uint16_t PowerMinPpt0[POWER_SOURCE_COUNT]; 980 + uint32_t Spare[11]; 981 + } MsgLimits_t; 982 + 983 + typedef struct { 984 + uint16_t BaseClockAc; 985 + uint16_t GameClockAc; 986 + uint16_t BoostClockAc; 987 + uint16_t BaseClockDc; 988 + uint16_t GameClockDc; 989 + uint16_t BoostClockDc; 990 + 991 + uint32_t Reserved[4]; 992 + } DriverReportedClocks_t; 993 + 994 + typedef struct { 995 + uint8_t DcBtcEnabled; 996 + 
	uint8_t  Padding[3];

	uint16_t DcTol;    // mV Q2
	uint16_t DcBtcGb;  // mV Q2

	uint16_t DcBtcMin; // mV Q2
	uint16_t DcBtcMax; // mV Q2

	LinearInt_t DcBtcGbScalar;
} AvfsDcBtcParams_t;

// Per-temperature AVFS fuse override values (V/F curve fit terms).
typedef struct {
	uint16_t AvfsTemp[AVFS_TEMP_COUNT]; // in degrees C
	uint16_t VftFMin;                   // in MHz
	uint16_t VInversion;                // in mV Q2
	QuadraticInt_t qVft[AVFS_TEMP_COUNT];
	QuadraticInt_t qAvfsGb;
	QuadraticInt_t qAvfsGb2;
} AvfsFuseOverride_t;

// all settings maintained by PFE team
typedef struct {
	uint8_t Version;
	uint8_t Spare8[3];
	// SECTION: Feature Control
	uint32_t FeaturesToRun[NUM_FEATURES / 32]; // Features that PMFW will attempt to enable. Use FEATURE_*_BIT as mapping
	// SECTION: FW DSTATE Settings
	uint32_t FwDStateMask; // See FW_DSTATE_*_BIT for mapping
	// SECTION: Advanced Options
	uint32_t DebugOverrides;

	uint32_t Spare[2];
} PFE_Settings_t;

// Per-SKU power-play table shared with PMFW. Field order, widths and
// padding form the firmware ABI — do not reorder or resize fields.
typedef struct {
	// SECTION: Version
	uint32_t Version; // should be unique to each SKU (i.e. if any value changes in below structure then this value must be different)

	// SECTION: Miscellaneous Configuration
	uint8_t TotalPowerConfig;          // Determines how PMFW calculates the power. Use defines from PwrConfig_e
	uint8_t CustomerVariant;           // To specify if this PPTable is intended for a particular customer. Use defines from CUSTOMER_VARIANT_e
	uint8_t MemoryTemperatureTypeMask; // Bit mapping indicating which methods of memory temperature reading are enabled. Use defines from MEM_TEMP_*BIT
	uint8_t SmartShiftVersion;         // Determine what SmartShift feature version is supported. Use defines from SMARTSHIFT_VERSION_e

	// SECTION: Infrastructure Limits
	uint8_t SocketPowerLimitSpare[10];

	// if set to 1, SocketPowerLimitAc and SocketPowerLimitDc will be interpreted as legacy programs (i.e. absolute power).
	// If 0, all except index 0 will be scalars relative to index 0
	uint8_t EnableLegacyPptLimit;
	uint8_t UseInputTelemetry; // applicable to SVI3 only and only to be set if VRs support

	uint8_t SmartShiftMinReportedPptinDcs; // minimum possible active power consumption for this SKU. Used for SmartShift power reporting

	uint8_t PaddingPpt[7];

	uint16_t HwCtfTempLimit; // In degrees Celsius. Temperature above which HW will trigger CTF. Consumed by VBIOS only

	uint16_t PaddingInfra;

	// Per year normalized Vmax state failure rates (sum of the two domains divided by life time in years)
	uint32_t FitControllerFailureRateLimit; // in IEEE float
	// Expected GFX Duty Cycle at Vmax.
	uint32_t FitControllerGfxDutyCycle; // in IEEE float
	// Expected SOC Duty Cycle at Vmax.
	uint32_t FitControllerSocDutyCycle; // in IEEE float

	// This offset will be deducted from the controller output before it goes through the SOC Vset limiter block.
	uint32_t FitControllerSocOffset; // in IEEE float

	uint32_t GfxApccPlusResidencyLimit; // Percentage value. Used by APCC+ controller to control PCC residency to some value

	// SECTION: Throttler settings
	uint32_t ThrottlerControlMask; // See THROTTLER_*_BIT for mapping


	// SECTION: Voltage Control Parameters
	uint16_t UlvVoltageOffset[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2). ULV offset used in either GFX_ULV or SOC_ULV (part of FW_DSTATE)

	uint8_t  Padding[2];
	uint16_t DeepUlvVoltageOffsetSoc; // In mV(Q2). Long Idle Vmin (deep ULV), for VDD_SOC as part of FW_DSTATE

	// Voltage Limits
	uint16_t DefaultMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2). Maximum voltage without FIT controller enabled
	uint16_t BoostMaxVoltage[PMFW_VOLT_PLANE_COUNT];   // In mV(Q2). Maximum voltage with FIT controller enabled

	// Vmin Optimizations
	int16_t  VminTempHystersis[PMFW_VOLT_PLANE_COUNT]; // Celsius. Temperature hysteresis for switching between low/high temperature values for Vmin
	int16_t  VminTempThreshold[PMFW_VOLT_PLANE_COUNT]; // Celsius. Temperature threshold for switching between low/high temperature values for Vmin
	uint16_t Vmin_Hot_T0[PMFW_VOLT_PLANE_COUNT];       // In mV(Q2). Initial (pre-aging) Vset to be used at hot.
	uint16_t Vmin_Cold_T0[PMFW_VOLT_PLANE_COUNT];      // In mV(Q2). Initial (pre-aging) Vset to be used at cold.
	uint16_t Vmin_Hot_Eol[PMFW_VOLT_PLANE_COUNT];      // In mV(Q2). End-of-life Vset to be used at hot.
	uint16_t Vmin_Cold_Eol[PMFW_VOLT_PLANE_COUNT];     // In mV(Q2). End-of-life Vset to be used at cold.
	uint16_t Vmin_Aging_Offset[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2). Worst-case aging margin
	uint16_t Spare_Vmin_Plat_Offset_Hot[PMFW_VOLT_PLANE_COUNT];  // In mV(Q2). Platform offset applied to T0 Hot
	uint16_t Spare_Vmin_Plat_Offset_Cold[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2). Platform offset applied to T0 Cold

	// This is a fixed/minimum VMIN aging degradation offset which is applied at T0. This reflects the minimum amount of aging already accounted for.
	uint16_t VcBtcFixedVminAgingOffset[PMFW_VOLT_PLANE_COUNT];
	// Linear offset or GB term to account for mis-correlation between PSM and Vmin shift trends across parts.
	uint16_t VcBtcVmin2PsmDegrationGb[PMFW_VOLT_PLANE_COUNT];
	// Scalar coefficient of the PSM aging degradation function
	uint32_t VcBtcPsmA[PMFW_VOLT_PLANE_COUNT]; // A_PSM
	// Exponential coefficient of the PSM aging degradation function
	uint32_t VcBtcPsmB[PMFW_VOLT_PLANE_COUNT]; // B_PSM
	// Scalar coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold.
	uint32_t VcBtcVminA[PMFW_VOLT_PLANE_COUNT]; // A_VMIN
	// Exponential coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold.
	uint32_t VcBtcVminB[PMFW_VOLT_PLANE_COUNT]; // B_VMIN

	uint8_t PerPartVminEnabled[PMFW_VOLT_PLANE_COUNT];
	uint8_t VcBtcEnabled[PMFW_VOLT_PLANE_COUNT];

	uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
	uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms

	QuadraticInt_t Gfx_Vmin_droop;
	QuadraticInt_t Soc_Vmin_droop;
	uint32_t SpareVmin[6];

	// SECTION: DPM Configuration 1
	DpmDescriptor_t DpmDescriptor[PPCLK_COUNT];

	uint16_t FreqTableGfx        [NUM_GFXCLK_DPM_LEVELS  ]; // In MHz
	uint16_t FreqTableVclk       [NUM_VCLK_DPM_LEVELS    ]; // In MHz
	uint16_t FreqTableDclk       [NUM_DCLK_DPM_LEVELS    ]; // In MHz
	uint16_t FreqTableSocclk     [NUM_SOCCLK_DPM_LEVELS  ]; // In MHz
	uint16_t FreqTableUclk       [NUM_UCLK_DPM_LEVELS    ]; // In MHz
	uint16_t FreqTableShadowUclk [NUM_UCLK_DPM_LEVELS    ]; // In MHz
	uint16_t FreqTableDispclk    [NUM_DISPCLK_DPM_LEVELS ]; // In MHz
	uint16_t FreqTableDppClk     [NUM_DPPCLK_DPM_LEVELS  ]; // In MHz
	uint16_t FreqTableDprefclk   [NUM_DPREFCLK_DPM_LEVELS]; // In MHz
	uint16_t FreqTableDcfclk     [NUM_DCFCLK_DPM_LEVELS  ]; // In MHz
	uint16_t FreqTableDtbclk     [NUM_DTBCLK_DPM_LEVELS  ]; // In MHz
	uint16_t FreqTableFclk       [NUM_FCLK_DPM_LEVELS    ]; // In MHz

	uint32_t DcModeMaxFreq       [PPCLK_COUNT            ]; // In MHz

	uint16_t GfxclkAibFmax;
	uint16_t GfxclkFreqCap;

	// GFX Idle Power Settings
	uint16_t GfxclkFgfxoffEntry;       // Entry in RLC stage (PLL), in MHz
	uint16_t GfxclkFgfxoffExitImu;     // Exit/Entry in IMU stage (BYPASS), in MHz
	uint16_t GfxclkFgfxoffExitRlc;     // Exit in RLC stage (PLL), in MHz
	uint16_t GfxclkThrottleClock;      // Used primarily in DCS
	uint8_t  EnableGfxPowerStagesGpio; // Genlk_vsync GPIO flag used to control gfx power stages
	uint8_t  GfxIdlePadding;

	uint8_t  SmsRepairWRCKClkDivEn;
	uint8_t  SmsRepairWRCKClkDivVal;
	uint8_t  GfxOffEntryEarlyMGCGEn;
	uint8_t  GfxOffEntryForceCGCGEn;
	uint8_t  GfxOffEntryForceCGCGDelayEn;
	uint8_t  GfxOffEntryForceCGCGDelayVal; // in microseconds

	uint16_t GfxclkFreqGfxUlv; // in MHz
	uint8_t  GfxIdlePadding2[2];
	uint32_t GfxOffEntryHysteresis; // For RLC to count after it enters CGCG, and before triggers GFXOFF entry
	uint32_t GfxoffSpare[15];

	// DFLL
	uint16_t DfllMstrOscConfigA; // Used for voltage sensitivity slope tuning: 0 = (en_leaker << 9) | (en_vint1_reduce << 8) | (gain_code << 6) | (bias_code << 3) | (vint1_code << 1) | en_bias
	uint16_t DfllSlvOscConfigA;  // Used for voltage sensitivity slope tuning: 0 = (en_leaker << 9) | (en_vint1_reduce << 8) | (gain_code << 6) | (bias_code << 3) | (vint1_code << 1) | en_bias
	uint32_t DfllBtcMasterScalerM;
	int32_t  DfllBtcMasterScalerB;
	uint32_t DfllBtcSlaveScalerM;
	int32_t  DfllBtcSlaveScalerB;

	uint32_t DfllPccAsWaitCtrl; // GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg
	uint32_t DfllPccAsStepCtrl; // GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg
	uint32_t GfxDfllSpare[9];

	// DVO
	uint32_t DvoPsmDownThresholdVoltage; // Voltage float
	uint32_t DvoPsmUpThresholdVoltage;   // Voltage float
	uint32_t DvoFmaxLowScaler;           // Unitless float

	// GFX DCS
	uint16_t DcsGfxOffVoltage; // Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase
	uint16_t PaddingDcs;

	uint16_t DcsMinGfxOffTime; // Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase
	uint16_t DcsMaxGfxOffTime; // Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch.

	uint32_t DcsMinCreditAccum; // Min amount of positive credit accumulation before waking GFX up as part of DCS.

	uint16_t DcsExitHysteresis; // The min amount of time power credit accumulator should have a value > 0 before SMU exits the DCS throttling phase.
	uint16_t DcsTimeout;        // This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin.

	uint32_t DcsPfGfxFopt;  // Default to GFX FMIN
	uint32_t DcsPfUclkFopt; // Default to UCLK FMIN

	uint8_t  FoptEnabled;
	uint8_t  DcsSpare2[3];
	uint32_t DcsFoptM; // Tuning parameters to shift Fopt calculation, IEEE754 float
	uint32_t DcsFoptB; // Tuning parameters to shift Fopt calculation, IEEE754 float
	uint32_t DcsSpare[9];

	// UCLK section
	uint8_t  UseStrobeModeOptimizations; // Set to indicate that FW should use strobe mode optimizations
	uint8_t  PaddingMem[3];

	uint8_t  UclkDpmPstates        [NUM_UCLK_DPM_LEVELS]; // 6 Primary SW DPM states (6 + 6 Shadow)
	uint8_t  UclkDpmShadowPstates  [NUM_UCLK_DPM_LEVELS]; // 6 Shadow SW DPM states (6 + 6 Shadow)
	uint8_t  FreqTableUclkDiv      [NUM_UCLK_DPM_LEVELS]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8
	uint8_t  FreqTableShadowUclkDiv[NUM_UCLK_DPM_LEVELS]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8
	uint16_t MemVmempVoltage       [NUM_UCLK_DPM_LEVELS]; // mV(Q2)
	uint16_t MemVddioVoltage       [NUM_UCLK_DPM_LEVELS]; // mV(Q2)
	uint16_t DalDcModeMaxUclkFreq;
	uint8_t  PaddingsMem[2];

	// FCLK Section
	uint16_t FclkDpmDisallowPstateFreq; // Frequency which FW will target when indicated that display config cannot support P-state. Set to 0 to use FW calculated value
	uint16_t PaddingFclk;

	// Link DPM Settings
	uint8_t  PcieGenSpeed[NUM_LINK_LEVELS];  ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5
	uint8_t  PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16
	uint16_t LclkFreq[NUM_LINK_LEVELS];

	// SECTION: VDD_GFX AVFS
	uint8_t  OverrideGfxAvfsFuses;
	uint8_t  GfxAvfsPadding[3];

	uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];   // new added for Soc domain
	uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; // see fusedoc for encoding
	//uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];
	uint32_t spare_HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT];

	uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];
	uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT];

	uint32_t SocFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
	uint32_t GfxL2FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
	//uint32_t GfxSeFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];
	uint32_t spare_FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT];

	uint32_t Soc_Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES];
	uint32_t Soc_Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES];
	uint32_t Soc_Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES];
	uint32_t Soc_Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES];

	uint32_t Gfx_Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES];
	uint32_t Gfx_Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES];
	uint32_t Gfx_Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES];
	uint32_t Gfx_Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES];

	uint32_t Gfx_Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES];
	uint32_t Soc_Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES];

	uint32_t dGbV_dT_vmin;
	uint32_t dGbV_dT_vmax;

	// Unused: PMFW-9370
	uint32_t V2F_vmin_range_low;
	uint32_t V2F_vmin_range_high;
	uint32_t V2F_vmax_range_low;
	uint32_t V2F_vmax_range_high;

	AvfsDcBtcParams_t DcBtcGfxParams;
	QuadraticInt_t    SSCurve_GFX;
	uint32_t GfxAvfsSpare[29];

	// SECTION: VDD_SOC AVFS
	uint8_t  OverrideSocAvfsFuses;
	uint8_t  MinSocAvfsRevision;
	uint8_t  SocAvfsPadding[2];

	AvfsFuseOverride_t SocAvfsFuseOverride[AVFS_D_COUNT];

	DroopInt_t dBtcGbSoc[AVFS_D_COUNT]; // GHz->V BtcGb

	LinearInt_t qAgingGb[AVFS_D_COUNT]; // GHz->V

	QuadraticInt_t qStaticVoltageOffset[AVFS_D_COUNT]; // GHz->V

	AvfsDcBtcParams_t DcBtcSocParams[AVFS_D_COUNT];

	QuadraticInt_t SSCurve_SOC;
	uint32_t SocAvfsSpare[29];

	// SECTION: Boot clock and voltage values
	BootValues_t BootValues;

	// SECTION: Driver Reported Clocks
	DriverReportedClocks_t DriverReportedClocks;

	// SECTION: Message Limits
	MsgLimits_t MsgLimits;

	// SECTION: OverDrive Limits
	OverDriveLimits_t OverDriveLimitsBasicMin;
	OverDriveLimits_t OverDriveLimitsBasicMax;
	OverDriveLimits_t OverDriveLimitsAdvancedMin;
	OverDriveLimits_t OverDriveLimitsAdvancedMax;

	// Section: Total Board Power idle vs active coefficients
	uint8_t  TotalBoardPowerSupport;
	uint8_t  TotalBoardPowerPadding[1];
	uint16_t TotalBoardPowerRoc;

	// PMFW-11158
	QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT];
	QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT];
	QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT];

	// APT GFX to UCLK mapping
	int32_t  AptUclkGfxclkLookup[POWER_SOURCE_COUNT][6];
	uint32_t AptUclkGfxclkLookupHyst[POWER_SOURCE_COUNT][6];
	uint32_t AptPadding;

	// Xvmin didt
	QuadraticInt_t GfxXvminDidtDroopThresh;
	uint32_t GfxXvminDidtResetDDWait;
	uint32_t GfxXvminDidtClkStopWait;
	uint32_t GfxXvminDidtFcsStepCtrl;
	uint32_t GfxXvminDidtFcsWaitCtrl;

	// PSM based didt controller
	uint32_t PsmModeEnabled; // 0: all disabled 1: static mode only 2: dynamic mode only 3: static + dynamic mode
	uint32_t P2v_a;          // floating point in U32 format
	uint32_t P2v_b;
	uint32_t P2v_c;
	uint32_t T2p_a;
	uint32_t T2p_b;
	uint32_t T2p_c;
	uint32_t P2vTemp;
	QuadraticInt_t PsmDidtStaticSettings;
	QuadraticInt_t PsmDidtDynamicSettings;
	uint8_t  PsmDidtAvgDiv;
	uint8_t  PsmDidtForceStall;
	uint16_t PsmDidtReleaseTimer;
	uint32_t PsmDidtStallPattern; // Will be written to both pattern 1 and didt_static_level_prog

	// CAC EDC
	uint32_t Leakage_C0;        // in IEEE float
	uint32_t Leakage_C1;        // in IEEE float
	uint32_t Leakage_C2;        // in IEEE float
	uint32_t Leakage_C3;        // in IEEE float
	uint32_t Leakage_C4;        // in IEEE float
	uint32_t Leakage_C5;        // in IEEE float
	uint32_t GFX_CLK_SCALAR;    // in IEEE float
	uint32_t GFX_CLK_INTERCEPT; // in IEEE float
	uint32_t GFX_CAC_M;         // in IEEE float
	uint32_t GFX_CAC_B;         // in IEEE float
	uint32_t VDD_GFX_CurrentLimitGuardband; // in IEEE float
	uint32_t DynToTotalCacScalar;           // in IEEE

	// GFX EDC XVMIN
	uint32_t XVmin_Gfx_EdcThreshScalar;
	uint32_t XVmin_Gfx_EdcEnableFreq;
	uint32_t XVmin_Gfx_EdcPccAsStepCtrl;
	uint32_t XVmin_Gfx_EdcPccAsWaitCtrl;
	uint16_t XVmin_Gfx_EdcThreshold;
	uint16_t XVmin_Gfx_EdcFiltHysWaitCtrl;

	// SOC EDC XVMIN
	uint32_t XVmin_Soc_EdcThreshScalar;
	uint32_t XVmin_Soc_EdcEnableFreq;
	uint32_t XVmin_Soc_EdcThreshold;    // LPF: number of cycles Xvmin_trig_filt will react.
	uint16_t XVmin_Soc_EdcStepUpTime;   // 10 bit, refclk count to step up throttle when PCC remains asserted.
	uint16_t XVmin_Soc_EdcStepDownTime; // 10 bit, refclk count to step down throttle when PCC remains asserted.
	uint8_t  XVmin_Soc_EdcInitPccStep;  // 3 bit, First Pcc Step number that will applied when PCC asserts.
	uint8_t  PaddingSocEdc[3];

	// Fuse Override for SOC and GFX XVMIN
	uint8_t  GfxXvminFuseOverride;
	uint8_t  SocXvminFuseOverride;
	uint8_t  PaddingXvminFuseOverride[2];
	uint8_t  GfxXvminFddTempLow;  // bit 7: sign, bit 0-6: ABS value
	uint8_t  GfxXvminFddTempHigh; // bit 7: sign, bit 0-6: ABS value
	uint8_t  SocXvminFddTempLow;  // bit 7: sign, bit 0-6: ABS value
	uint8_t  SocXvminFddTempHigh; // bit 7: sign, bit 0-6: ABS value


	uint16_t GfxXvminFddVolt0; // low voltage, in VID
	uint16_t GfxXvminFddVolt1; // mid voltage, in VID
	uint16_t GfxXvminFddVolt2; // high voltage, in VID
	uint16_t SocXvminFddVolt0; // low voltage, in VID
	uint16_t SocXvminFddVolt1; // mid voltage, in VID
	uint16_t SocXvminFddVolt2; // high voltage, in VID
	uint16_t GfxXvminDsFddDsm[6];  // XVMIN DS, same organization with fuse
	uint16_t GfxXvminEdcFddDsm[6]; // XVMIN GFX EDC, same organization with fuse
	uint16_t SocXvminEdcFddDsm[6]; // XVMIN SOC EDC, same organization with fuse

	// SECTION: Sku Reserved
	uint32_t Spare;

	// Padding for MMHUB - do not modify this
	uint32_t MmHubPadding[8];
} SkuTable_t;

// Per-plane SVI3 voltage-regulator programming values.
typedef struct {
	uint8_t  SlewRateConditions;
	uint8_t  LoadLineAdjust;
	uint8_t  VoutOffset;
	uint8_t  VidMax;
	uint8_t  VidMin;
	uint8_t  TenBitTelEn;
	uint8_t  SixteenBitTelEn;
	uint8_t  OcpThresh;
	uint8_t  OcpWarnThresh;
	uint8_t  OcpSettings;
	uint8_t  VrhotThresh;
	uint8_t  OtpThresh;
	uint8_t  UvpOvpDeltaRef;
	uint8_t  PhaseShed;
	uint8_t  Padding[10];
	uint32_t SettingOverrideMask;
} Svi3RegulatorSettings_t;

// Board-specific configuration (I2C, VRs, GPIOs, spread spectrum, memory).
typedef struct {
	// SECTION: Version
	uint32_t Version; // should be unique to each board type

	// SECTION: I2C Control
	I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS];

	// SECTION: SVI3 Board Parameters
	uint8_t SlaveAddrMapping[SVI_PLANE_COUNT];
	uint8_t VrPsiSupport[SVI_PLANE_COUNT];

	uint32_t Svi3SvcSpeed;
	uint8_t EnablePsi6[SVI_PLANE_COUNT]; // only applicable in SVI3

	// SECTION: Voltage Regulator Settings
	Svi3RegulatorSettings_t Svi3RegSettings[SVI_PLANE_COUNT];

	// SECTION: GPIO Settings
	uint8_t LedOffGpio;
	uint8_t FanOffGpio;
	uint8_t GfxVrPowerStageOffGpio;

	uint8_t AcDcGpio;       // GPIO pin configured for AC/DC switching
	uint8_t AcDcPolarity;   // GPIO polarity for AC/DC switching
	uint8_t VR0HotGpio;     // GPIO pin configured for VR0 HOT event
	uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event

	uint8_t GthrGpio;       // GPIO pin configured for GTHR Event
	uint8_t GthrPolarity;   // replace GPIO polarity for GTHR

	// LED Display Settings
	uint8_t LedPin0;        // GPIO number for LedPin[0]
	uint8_t LedPin1;        // GPIO number for LedPin[1]
	uint8_t LedPin2;        // GPIO number for LedPin[2]
	uint8_t LedEnableMask;

	uint8_t LedPcie;        // GPIO number for PCIE results
	uint8_t LedError;       // GPIO number for Error Cases
	uint8_t PaddingLed;

	// SECTION: Clock Spread Spectrum

	// UCLK Spread Spectrum
	uint8_t  UclkTrainingModeSpreadPercent; // Q4.4
	uint8_t  UclkSpreadPadding;
	uint16_t UclkSpreadFreq; // kHz

	// UCLK Spread Spectrum
	uint8_t UclkSpreadPercent[MEM_VENDOR_COUNT];

	// DFLL Spread Spectrum
	uint8_t GfxclkSpreadEnable;

	// FCLK Spread Spectrum
	uint8_t  FclkSpreadPercent; // Q4.4
	uint16_t FclkSpreadFreq;    // kHz

	// SECTION: Memory Config
	uint8_t DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
	uint8_t PaddingMem1[7];

	// SECTION: UMC feature flags
	uint8_t HsrEnabled;
	uint8_t VddqOffEnabled;
	uint8_t PaddingUmcFlags[2];

	uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
	uint32_t BacoEntryDelay;          // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS

	uint8_t FuseWritePowerMuxPresent;
	uint8_t FuseWritePadding[3];

	// SECTION: EDC Params
	uint32_t LoadlineGfx;
	uint32_t LoadlineSoc;
	uint32_t GfxEdcLimit;
	uint32_t SocEdcLimit;

	uint32_t RestBoardPower;      // power consumed by board that is not captured by the SVI3 input telemetry
	uint32_t ConnectorsImpedance; // impedance of the input ATX power connectors

	uint8_t EpcsSens0; // GPIO number for External Power Connector Support Sense0
	uint8_t EpcsSens1; // GPIO number for External Power Connector Support Sense1
	uint8_t PaddingEpcs[2];

	// SECTION: Board Reserved
	uint32_t BoardSpare[52];

	// SECTION: Structure Padding

	// Padding for MMHUB - do not modify this
	uint32_t MmHubPadding[8];
} BoardTable_t;

// Customer/SKU-tunable limits (power, thermal, fan) applied on top of SkuTable_t.
typedef struct {
	// SECTION: Infrastructure Limits
	uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in AC mode. Multiple limits supported

	uint16_t VrTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. Current limit associated with VR regulator maximum temperature

	int16_t  TotalIdleBoardPowerM;
	int16_t  TotalIdleBoardPowerB;
	int16_t  TotalBoardPowerM;
	int16_t  TotalBoardPowerB;

	uint16_t TemperatureLimit[TEMP_COUNT]; // In degrees Celsius. Temperature limit associated with each input

	// SECTION: Fan Control
	uint16_t FanStopTemp[TEMP_COUNT];  // Celsius
	uint16_t FanStartTemp[TEMP_COUNT]; // Celsius

	uint16_t FanGain[TEMP_COUNT];

	uint16_t FanPwmMin;
	uint16_t AcousticTargetRpmThreshold;
	uint16_t AcousticLimitRpmThreshold;
	uint16_t FanMaximumRpm;
	uint16_t MGpuAcousticLimitRpmThreshold;
	uint16_t FanTargetGfxclk;
	uint32_t TempInputSelectMask;
	uint8_t  FanZeroRpmEnable;
	uint8_t  FanTachEdgePerRev;
	uint16_t FanPadding;
	uint16_t FanTargetTemperature[TEMP_COUNT];

	// The following are AFC override parameters. Leave at 0 to use FW defaults.
	int16_t  FuzzyFan_ErrorSetDelta;
	int16_t  FuzzyFan_ErrorRateSetDelta;
	int16_t  FuzzyFan_PwmSetDelta;
	uint16_t FuzzyFan_Reserved;

	uint16_t FwCtfLimit[TEMP_COUNT];

	uint16_t IntakeTempEnableRPM;
	int16_t  IntakeTempOffsetTemp;
	uint16_t IntakeTempReleaseTemp;
	uint16_t IntakeTempHighIntakeAcousticLimit;

	uint16_t IntakeTempAcouticLimitReleaseRate;
	int16_t  FanAbnormalTempLimitOffset; // FanStalledTempLimitOffset
	uint16_t FanStalledTriggerRpm;       //
	uint16_t FanAbnormalTriggerRpmCoeff; // FanAbnormalTriggerRpm

	uint16_t FanSpare[1];
	uint8_t  FanIntakeSensorSupport;
	uint8_t  FanIntakePadding;
	uint32_t FanAmbientPerfBoostThreshold;
	uint32_t FanSpare2[12];

	uint16_t TemperatureLimit_Hynix;  // In degrees Celsius. Memory temperature limit associated with Hynix
	uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron
	uint16_t TemperatureFwCtfLimit_Hynix;
	uint16_t TemperatureFwCtfLimit_Micron;

	// SECTION: Board Reserved
	uint16_t PlatformTdcLimit[TDC_THROTTLER_COUNT];   // In Amperes. Current limit associated with platform maximum temperature per VR current rail
	uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in DC mode. Multiple limits supported
	uint16_t SocketPowerLimitSmartShift2;             // In Watts. Power limit used SmartShift
	uint16_t CustomSkuSpare16b;
	uint32_t CustomSkuSpare32b[10];

	// SECTION: Structure Padding

	// Padding for MMHUB - do not modify this
	uint32_t MmHubPadding[8];
} CustomSkuTable_t;

// Top-level power-play table: concatenation of the four sub-tables above.
typedef struct {
	PFE_Settings_t   PFE_Settings;
	SkuTable_t       SkuTable;
	CustomSkuTable_t CustomSkuTable;
	BoardTable_t     BoardTable;
} PPTable_t;

typedef struct {
	// Time constant parameters for clock averages in ms
	uint16_t GfxclkAverageLpfTau;
	uint16_t FclkAverageLpfTau;
	uint16_t UclkAverageLpfTau;
	uint16_t GfxActivityLpfTau;
	uint16_t UclkActivityLpfTau;
	uint16_t UclkMaxActivityLpfTau;
	uint16_t SocketPowerLpfTau;
	uint16_t VcnClkAverageLpfTau;
	uint16_t VcnUsageAverageLpfTau;
	uint16_t PcieActivityLpTau;
} DriverSmuConfig_t;

typedef struct {
	DriverSmuConfig_t DriverSmuConfig;

	uint32_t Spare[8];
	// Padding - ignore
	uint32_t MmHubPadding[8]; // SMU internal use
} DriverSmuConfigExternal_t;


// Frequency tables reported back to the driver.
typedef struct {

	uint16_t FreqTableGfx      [NUM_GFXCLK_DPM_LEVELS  ]; // In MHz
	uint16_t FreqTableVclk     [NUM_VCLK_DPM_LEVELS    ]; // In MHz
	uint16_t FreqTableDclk     [NUM_DCLK_DPM_LEVELS    ]; // In MHz
	uint16_t FreqTableSocclk   [NUM_SOCCLK_DPM_LEVELS  ]; // In MHz
	uint16_t FreqTableUclk     [NUM_UCLK_DPM_LEVELS    ]; // In MHz
	uint16_t FreqTableDispclk  [NUM_DISPCLK_DPM_LEVELS ]; // In MHz
	uint16_t FreqTableDppClk   [NUM_DPPCLK_DPM_LEVELS  ]; // In MHz
	uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz
	uint16_t FreqTableDcfclk   [NUM_DCFCLK_DPM_LEVELS  ]; // In MHz
	uint16_t FreqTableDtbclk   [NUM_DTBCLK_DPM_LEVELS  ]; // In MHz
	uint16_t FreqTableFclk     [NUM_FCLK_DPM_LEVELS    ]; // In MHz

	uint16_t DcModeMaxFreq     [PPCLK_COUNT            ]; // In MHz

	uint16_t Padding;

	uint32_t Spare[32];

	// Padding - ignore
	uint32_t MmHubPadding[8]; // SMU internal use

} DriverInfoTable_t;

// Telemetry snapshot periodically exported by PMFW to the driver.
typedef struct {
	uint32_t CurrClock[PPCLK_COUNT];

	uint16_t AverageGfxclkFrequencyTarget;
	uint16_t AverageGfxclkFrequencyPreDs;
	uint16_t AverageGfxclkFrequencyPostDs;
	uint16_t AverageFclkFrequencyPreDs;
	uint16_t AverageFclkFrequencyPostDs;
	uint16_t AverageMemclkFrequencyPreDs;  // this is scaled to actual memory clock
	uint16_t AverageMemclkFrequencyPostDs; // this is scaled to actual memory clock
	uint16_t AverageVclk0Frequency;
	uint16_t AverageDclk0Frequency;
	uint16_t AverageVclk1Frequency;
	uint16_t AverageDclk1Frequency;
	uint16_t PCIeBusy;
	uint16_t dGPU_W_MAX;
	uint16_t padding;

	uint32_t MetricsCounter;

	uint16_t AvgVoltage[SVI_PLANE_COUNT];
	uint16_t AvgCurrent[SVI_PLANE_COUNT];

	uint16_t AverageGfxActivity;
	uint16_t AverageUclkActivity;
	uint16_t Vcn0ActivityPercentage;
	uint16_t Vcn1ActivityPercentage;

	uint32_t EnergyAccumulator;
	uint16_t AverageSocketPower;
	uint16_t AverageTotalBoardPower;

	uint16_t AvgTemperature[TEMP_COUNT];
	uint16_t AvgTemperatureFanIntake;

	uint8_t  PcieRate;
	uint8_t  PcieWidth;

	uint8_t  AvgFanPwm;
	uint8_t  Padding[1];
	uint16_t AvgFanRpm;


	uint8_t ThrottlingPercentage[THROTTLER_COUNT];
	uint8_t padding1[3];

	// metrics for D3hot entry/exit and driver ARM msgs
	uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
	uint32_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT];
	uint32_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT];

	uint16_t ApuSTAPMSmartShiftLimit;
	uint16_t ApuSTAPMLimit;
	uint16_t AvgApuSocketPower;

	uint16_t AverageUclkActivity_MAX;

	uint32_t PublicSerialNumberLower;
	uint32_t PublicSerialNumberUpper;

} SmuMetrics_t;

typedef struct {
	SmuMetrics_t SmuMetrics;
	uint32_t Spare[30];

	// Padding - ignore
	uint32_t MmHubPadding[8]; // SMU internal use
} SmuMetricsExternal_t;

typedef struct {
	uint8_t WmSetting;
	uint8_t Flags;
	uint8_t Padding[2];

} WatermarkRowGeneric_t;

#define NUM_WM_RANGES 4

typedef enum {
	WATERMARKS_CLOCK_RANGE = 0,
	WATERMARKS_DUMMY_PSTATE,
	WATERMARKS_MALL,
	WATERMARKS_COUNT,
} WATERMARKS_FLAGS_e;

typedef struct {
	// Watermarks
	WatermarkRowGeneric_t WatermarkRow[NUM_WM_RANGES];
} Watermarks_t;

typedef struct {
	Watermarks_t Watermarks;
	uint32_t Spare[16];

	uint32_t MmHubPadding[8]; // SMU internal use
} WatermarksExternal_t;

// AVFS PSM debug counters/voltages per sensor (76 sensors).
typedef struct {
	uint16_t avgPsmCount[76];
	uint16_t minPsmCount[76];
	uint16_t maxPsmCount[76];
	float    avgPsmVoltage[76];
	float    minPsmVoltage[76];
	float    maxPsmVoltage[76];
} AvfsDebugTable_t;

typedef struct {
	AvfsDebugTable_t AvfsDebugTable;

	uint32_t MmHubPadding[8]; // SMU internal use
} AvfsDebugTableExternal_t;


// Per-workload activity-monitor tuning coefficients for the DPM governor.
typedef struct {
	uint8_t  Gfx_ActiveHystLimit;
	uint8_t  Gfx_IdleHystLimit;
	uint8_t  Gfx_FPS;
	uint8_t  Gfx_MinActiveFreqType;
	uint8_t  Gfx_BoosterFreqType;
	uint8_t  PaddingGfx;
	uint16_t Gfx_MinActiveFreq;            // MHz
	uint16_t Gfx_BoosterFreq;              // MHz
	uint16_t Gfx_PD_Data_time_constant;    // Time constant of PD controller in ms
	uint32_t Gfx_PD_Data_limit_a;          // Q16
	uint32_t Gfx_PD_Data_limit_b;          // Q16
	uint32_t Gfx_PD_Data_limit_c;          // Q16
	uint32_t Gfx_PD_Data_error_coeff;      // Q16
	uint32_t Gfx_PD_Data_error_rate_coeff; // Q16

	uint8_t  Fclk_ActiveHystLimit;
	uint8_t  Fclk_IdleHystLimit;
	uint8_t  Fclk_FPS;
	uint8_t  Fclk_MinActiveFreqType;
	uint8_t  Fclk_BoosterFreqType;
	uint8_t  PaddingFclk;
	uint16_t Fclk_MinActiveFreq;            // MHz
	uint16_t Fclk_BoosterFreq;              // MHz
	uint16_t Fclk_PD_Data_time_constant;    // Time constant of PD controller in ms
	uint32_t Fclk_PD_Data_limit_a;          // Q16
	uint32_t Fclk_PD_Data_limit_b;          // Q16
	uint32_t Fclk_PD_Data_limit_c;          // Q16
	uint32_t Fclk_PD_Data_error_coeff;      // Q16
	uint32_t Fclk_PD_Data_error_rate_coeff; // Q16

	uint32_t Mem_UpThreshold_Limit[NUM_UCLK_DPM_LEVELS]; // Q16
	uint8_t  Mem_UpHystLimit[NUM_UCLK_DPM_LEVELS];
	uint16_t Mem_DownHystLimit[NUM_UCLK_DPM_LEVELS];
	uint16_t Mem_Fps;

} DpmActivityMonitorCoeffInt_t;


typedef struct {
	DpmActivityMonitorCoeffInt_t DpmActivityMonitorCoeffInt;
	uint32_t MmHubPadding[8]; // SMU internal use
} DpmActivityMonitorCoeffIntExternal_t;



// Workload bits
#define WORKLOAD_PPLIB_DEFAULT_BIT        0
#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1
#define WORKLOAD_PPLIB_POWER_SAVING_BIT   2
#define WORKLOAD_PPLIB_VIDEO_BIT          3
#define WORKLOAD_PPLIB_VR_BIT             4
#define WORKLOAD_PPLIB_COMPUTE_BIT        5
#define WORKLOAD_PPLIB_CUSTOM_BIT         6
#define WORKLOAD_PPLIB_WINDOW_3D_BIT      7
#define WORKLOAD_PPLIB_DIRECT_ML_BIT      8
#define WORKLOAD_PPLIB_CGVDI_BIT          9
#define WORKLOAD_PPLIB_COUNT              10


// These defines are used with the following messages:
// SMC_MSG_TransferTableDram2Smu
// SMC_MSG_TransferTableSmu2Dram

// Table transfer status
#define TABLE_TRANSFER_OK      0x0
#define TABLE_TRANSFER_FAILED  0xFF
#define TABLE_TRANSFER_PENDING 0xAB

// Table types
#define TABLE_PPTABLE                0
#define TABLE_COMBO_PPTABLE          1
#define TABLE_WATERMARKS             2
#define TABLE_AVFS_PSM_DEBUG         3
#define TABLE_PMSTATUSLOG            4
#define TABLE_SMU_METRICS            5
#define TABLE_DRIVER_SMU_CONFIG      6
#define TABLE_ACTIVITY_MONITOR_COEFF 7
#define TABLE_OVERDRIVE              8
#define TABLE_I2C_COMMANDS           9
#define TABLE_DRIVER_INFO            10
#define TABLE_ECCINFO                11
#define TABLE_CUSTOM_SKUTABLE        12
#define TABLE_COUNT                  13

// IH Interrupt ID
#define IH_INTERRUPT_ID_TO_DRIVER                  0xFE
#define IH_INTERRUPT_CONTEXT_ID_BACO               0x2
#define IH_INTERRUPT_CONTEXT_ID_AC                 0x3
#define IH_INTERRUPT_CONTEXT_ID_DC                 0x4
#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0           0x5
#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3           0x6
#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL       0x8
#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY       0x9

#endif
+140
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h
··· 1 + /* 2 + * Copyright 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + 24 + #ifndef SMU_V14_0_2_PPSMC_H 25 + #define SMU_V14_0_2_PPSMC_H 26 + 27 + #define PPSMC_VERSION 0x1 28 + 29 + // SMU Response Codes: 30 + #define PPSMC_Result_OK 0x1 31 + #define PPSMC_Result_Failed 0xFF 32 + #define PPSMC_Result_UnknownCmd 0xFE 33 + #define PPSMC_Result_CmdRejectedPrereq 0xFD 34 + #define PPSMC_Result_CmdRejectedBusy 0xFC 35 + 36 + // Message Definitions: 37 + // BASIC 38 + #define PPSMC_MSG_TestMessage 0x1 39 + #define PPSMC_MSG_GetSmuVersion 0x2 40 + #define PPSMC_MSG_GetDriverIfVersion 0x3 41 + #define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4 42 + #define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5 43 + #define PPSMC_MSG_EnableAllSmuFeatures 0x6 44 + #define PPSMC_MSG_DisableAllSmuFeatures 0x7 45 + #define PPSMC_MSG_EnableSmuFeaturesLow 0x8 46 + #define PPSMC_MSG_EnableSmuFeaturesHigh 0x9 47 + #define PPSMC_MSG_DisableSmuFeaturesLow 0xA 48 + #define PPSMC_MSG_DisableSmuFeaturesHigh 0xB 49 + #define PPSMC_MSG_GetRunningSmuFeaturesLow 0xC 50 + #define PPSMC_MSG_GetRunningSmuFeaturesHigh 0xD 51 + #define PPSMC_MSG_SetDriverDramAddrHigh 0xE 52 + #define PPSMC_MSG_SetDriverDramAddrLow 0xF 53 + #define PPSMC_MSG_SetToolsDramAddrHigh 0x10 54 + #define PPSMC_MSG_SetToolsDramAddrLow 0x11 55 + #define PPSMC_MSG_TransferTableSmu2Dram 0x12 56 + #define PPSMC_MSG_TransferTableDram2Smu 0x13 57 + #define PPSMC_MSG_UseDefaultPPTable 0x14 58 + 59 + //BACO/BAMACO/BOMACO 60 + #define PPSMC_MSG_EnterBaco 0x15 61 + #define PPSMC_MSG_ExitBaco 0x16 62 + #define PPSMC_MSG_ArmD3 0x17 63 + #define PPSMC_MSG_BacoAudioD3PME 0x18 64 + 65 + //DPM 66 + #define PPSMC_MSG_SetSoftMinByFreq 0x19 67 + #define PPSMC_MSG_SetSoftMaxByFreq 0x1A 68 + #define PPSMC_MSG_SetHardMinByFreq 0x1B 69 + #define PPSMC_MSG_SetHardMaxByFreq 0x1C 70 + #define PPSMC_MSG_GetMinDpmFreq 0x1D 71 + #define PPSMC_MSG_GetMaxDpmFreq 0x1E 72 + #define PPSMC_MSG_GetDpmFreqByIndex 0x1F 73 + #define PPSMC_MSG_OverridePcieParameters 0x20 74 + 75 + //DramLog Set DramAddr 76 + #define 
PPSMC_MSG_DramLogSetDramAddrHigh 0x21 77 + #define PPSMC_MSG_DramLogSetDramAddrLow 0x22 78 + #define PPSMC_MSG_DramLogSetDramSize 0x23 79 + #define PPSMC_MSG_SetWorkloadMask 0x24 80 + 81 + #define PPSMC_MSG_GetVoltageByDpm 0x25 // Can be removed 82 + #define PPSMC_MSG_SetVideoFps 0x26 // Can be removed 83 + #define PPSMC_MSG_GetDcModeMaxDpmFreq 0x27 84 + 85 + //Power Gating 86 + #define PPSMC_MSG_AllowGfxOff 0x28 87 + #define PPSMC_MSG_DisallowGfxOff 0x29 88 + #define PPSMC_MSG_PowerUpVcn 0x2A 89 + #define PPSMC_MSG_PowerDownVcn 0x2B 90 + #define PPSMC_MSG_PowerUpJpeg 0x2C 91 + #define PPSMC_MSG_PowerDownJpeg 0x2D 92 + 93 + //Resets 94 + #define PPSMC_MSG_PrepareMp1ForUnload 0x2E 95 + #define PPSMC_MSG_Mode1Reset 0x2F 96 + 97 + //Set SystemVirtual DramAddrHigh 98 + #define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x30 99 + #define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x31 100 + //ACDC Power Source 101 + #define PPSMC_MSG_SetPptLimit 0x32 102 + #define PPSMC_MSG_GetPptLimit 0x33 103 + #define PPSMC_MSG_ReenableAcDcInterrupt 0x34 104 + #define PPSMC_MSG_NotifyPowerSource 0x35 105 + 106 + //BTC 107 + #define PPSMC_MSG_RunDcBtc 0x36 108 + 109 + // 0x37 110 + 111 + //Others 112 + #define PPSMC_MSG_SetTemperatureInputSelect 0x38 // Can be removed 113 + #define PPSMC_MSG_SetFwDstatesMask 0x39 114 + #define PPSMC_MSG_SetThrottlerMask 0x3A 115 + 116 + #define PPSMC_MSG_SetExternalClientDfCstateAllow 0x3B 117 + 118 + #define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x3C 119 + 120 + //STB to dram log 121 + #define PPSMC_MSG_DumpSTBtoDram 0x3D 122 + #define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x3E 123 + #define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x3F 124 + #define PPSMC_MSG_STBtoDramLogSetDramSize 0x40 125 + #define PPSMC_MSG_SetOBMTraceBufferLogging 0x41 126 + 127 + #define PPSMC_MSG_AllowGfxDcs 0x43 128 + #define PPSMC_MSG_DisallowGfxDcs 0x44 129 + #define PPSMC_MSG_EnableAudioStutterWA 0x45 130 + #define PPSMC_MSG_PowerUpUmsch 0x46 131 + #define PPSMC_MSG_PowerDownUmsch 0x47 
132 + #define PPSMC_MSG_SetDcsArch 0x48 133 + #define PPSMC_MSG_TriggerVFFLR 0x49 134 + #define PPSMC_MSG_SetNumBadMemoryPagesRetired 0x4A 135 + #define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4B 136 + #define PPSMC_MSG_SetPriorityDeltaGain 0x4C 137 + #define PPSMC_MSG_AllowIHHostInterrupt 0x4D 138 + #define PPSMC_MSG_Mode3Reset 0x4F 139 + #define PPSMC_Message_Count 0x50 140 + #endif
+3 -2
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
··· 28 28 #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF 29 29 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7 30 30 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6 31 - #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1 31 + #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x25 32 32 33 33 #define FEATURE_MASK(feature) (1ULL << feature) 34 34 ··· 39 39 #define MP1_SRAM 0x03c00004 40 40 41 41 /* address block */ 42 - #define smnMP1_FIRMWARE_FLAGS 0x3010028 42 + #define smnMP1_FIRMWARE_FLAGS_14_0_0 0x3010028 43 + #define smnMP1_FIRMWARE_FLAGS 0x3010024 43 44 #define smnMP1_PUB_CTRL 0x3010d10 44 45 45 46 #define MAX_DPM_LEVELS 16
+164
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h
··· 1 + /* 2 + * Copyright 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + */ 23 + 24 + #ifndef SMU_14_0_2_PPTABLE_H 25 + #define SMU_14_0_2_PPTABLE_H 26 + 27 + 28 + #pragma pack(push, 1) 29 + 30 + #define SMU_14_0_2_TABLE_FORMAT_REVISION 3 31 + 32 + // POWERPLAYTABLE::ulPlatformCaps 33 + #define SMU_14_0_2_PP_PLATFORM_CAP_POWERPLAY 0x1 // This cap indicates whether CCC need to show Powerplay page. 34 + #define SMU_14_0_2_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2 // This cap indicates whether power source notificaiton is done by SBIOS instead of OS. 35 + #define SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC 0x4 // This cap indicates whether DC mode notificaiton is done by GPIO pin directly. 36 + #define SMU_14_0_2_PP_PLATFORM_CAP_BACO 0x8 // This cap indicates whether board supports the BACO circuitry. 
37 + #define SMU_14_0_2_PP_PLATFORM_CAP_MACO 0x10 // This cap indicates whether board supports the MACO circuitry. 38 + #define SMU_14_0_2_PP_PLATFORM_CAP_SHADOWPSTATE 0x20 // This cap indicates whether board supports the Shadow Pstate. 39 + #define SMU_14_0_2_PP_PLATFORM_CAP_LEDSUPPORTED 0x40 // This cap indicates whether board supports the LED. 40 + #define SMU_14_0_2_PP_PLATFORM_CAP_MOBILEOVERDRIVE 0x80 // This cap indicates whether board supports the Mobile Overdrive. 41 + 42 + // SMU_14_0_2_PP_THERMALCONTROLLER - Thermal Controller Type 43 + #define SMU_14_0_2_PP_THERMALCONTROLLER_NONE 0 44 + 45 + #define SMU_14_0_2_PP_OVERDRIVE_VERSION 0x1 // TODO: FIX OverDrive Version TBD 46 + #define SMU_14_0_2_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00 47 + 48 + enum SMU_14_0_2_OD_SW_FEATURE_CAP 49 + { 50 + SMU_14_0_2_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT = 0, 51 + SMU_14_0_2_ODCAP_POWER_MODE = 1, 52 + SMU_14_0_2_ODCAP_AUTO_UV_ENGINE = 2, 53 + SMU_14_0_2_ODCAP_AUTO_OC_ENGINE = 3, 54 + SMU_14_0_2_ODCAP_AUTO_OC_MEMORY = 4, 55 + SMU_14_0_2_ODCAP_MEMORY_TIMING_TUNE = 5, 56 + SMU_14_0_2_ODCAP_MANUAL_AC_TIMING = 6, 57 + SMU_14_0_2_ODCAP_AUTO_VF_CURVE_OPTIMIZER = 7, 58 + SMU_14_0_2_ODCAP_AUTO_SOC_UV = 8, 59 + SMU_14_0_2_ODCAP_COUNT = 9, 60 + }; 61 + 62 + enum SMU_14_0_2_OD_SW_FEATURE_ID 63 + { 64 + SMU_14_0_2_ODFEATURE_AUTO_FAN_ACOUSTIC_LIMIT = 1 << SMU_14_0_2_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT, // Auto Fan Acoustic RPM 65 + SMU_14_0_2_ODFEATURE_POWER_MODE = 1 << SMU_14_0_2_ODCAP_POWER_MODE, // Optimized GPU Power Mode 66 + SMU_14_0_2_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_14_0_2_ODCAP_AUTO_UV_ENGINE, // Auto Under Volt GFXCLK 67 + SMU_14_0_2_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_14_0_2_ODCAP_AUTO_OC_ENGINE, // Auto Over Clock GFXCLK 68 + SMU_14_0_2_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_14_0_2_ODCAP_AUTO_OC_MEMORY, // Auto Over Clock MCLK 69 + SMU_14_0_2_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_14_0_2_ODCAP_MEMORY_TIMING_TUNE, // Auto AC Timing Tuning 70 + 
SMU_14_0_2_ODFEATURE_MANUAL_AC_TIMING = 1 << SMU_14_0_2_ODCAP_MANUAL_AC_TIMING, // Manual fine grain AC Timing tuning 71 + SMU_14_0_2_ODFEATURE_AUTO_VF_CURVE_OPTIMIZER = 1 << SMU_14_0_2_ODCAP_AUTO_VF_CURVE_OPTIMIZER, // Fine grain auto VF curve tuning 72 + SMU_14_0_2_ODFEATURE_AUTO_SOC_UV = 1 << SMU_14_0_2_ODCAP_AUTO_SOC_UV, // Auto Unver Volt VDDSOC 73 + }; 74 + 75 + #define SMU_14_0_2_MAX_ODFEATURE 32 // Maximum Number of OD Features 76 + 77 + enum SMU_14_0_2_OD_SW_FEATURE_SETTING_ID 78 + { 79 + SMU_14_0_2_ODSETTING_AUTO_FAN_ACOUSTIC_LIMIT = 0, 80 + SMU_14_0_2_ODSETTING_POWER_MODE = 1, 81 + SMU_14_0_2_ODSETTING_AUTOUVENGINE = 2, 82 + SMU_14_0_2_ODSETTING_AUTOOCENGINE = 3, 83 + SMU_14_0_2_ODSETTING_AUTOOCMEMORY = 4, 84 + SMU_14_0_2_ODSETTING_ACTIMING = 5, 85 + SMU_14_0_2_ODSETTING_MANUAL_AC_TIMING = 6, 86 + SMU_14_0_2_ODSETTING_AUTO_VF_CURVE_OPTIMIZER = 7, 87 + SMU_14_0_2_ODSETTING_AUTO_SOC_UV = 8, 88 + SMU_14_0_2_ODSETTING_COUNT = 9, 89 + }; 90 + #define SMU_14_0_2_MAX_ODSETTING 64 // Maximum Number of ODSettings 91 + 92 + enum SMU_14_0_2_PWRMODE_SETTING 93 + { 94 + SMU_14_0_2_PMSETTING_POWER_LIMIT_QUIET = 0, 95 + SMU_14_0_2_PMSETTING_POWER_LIMIT_BALANCE, 96 + SMU_14_0_2_PMSETTING_POWER_LIMIT_TURBO, 97 + SMU_14_0_2_PMSETTING_POWER_LIMIT_RAGE, 98 + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_QUIET, 99 + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_BALANCE, 100 + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_TURBO, 101 + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_RAGE, 102 + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_QUIET, 103 + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_BALANCE, 104 + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_TURBO, 105 + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_RAGE, 106 + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_QUIET, 107 + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_BALANCE, 108 + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_TURBO, 109 + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_RAGE, 110 + }; 111 + #define SMU_14_0_2_MAX_PMSETTING 32 // Maximum Number of PowerMode Settings 112 + 113 + enum 
SMU_14_0_2_overdrive_table_id 114 + { 115 + SMU_14_0_2_OVERDRIVE_TABLE_BASIC = 0, 116 + SMU_14_0_2_OVERDRIVE_TABLE_ADVANCED = 1, 117 + SMU_14_0_2_OVERDRIVE_TABLE_COUNT = 2, 118 + }; 119 + 120 + struct smu_14_0_2_overdrive_table 121 + { 122 + uint8_t revision; // Revision = SMU_14_0_2_PP_OVERDRIVE_VERSION 123 + uint8_t reserve[3]; // Zero filled field reserved for future use 124 + uint8_t cap[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODFEATURE]; // OD feature support flags 125 + int32_t max[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODSETTING]; // maximum settings 126 + int32_t min[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODSETTING]; // minimum settings 127 + int16_t pm_setting[SMU_14_0_2_MAX_PMSETTING]; // Optimized power mode feature settings 128 + }; 129 + 130 + struct smu_14_0_2_powerplay_table 131 + { 132 + struct atom_common_table_header header; // header.format_revision = 3 (HAS TO MATCH SMU_14_0_2_TABLE_FORMAT_REVISION), header.content_revision = ? structuresize is calculated by PPGen. 133 + uint8_t table_revision; // PPGen use only: table_revision = 3 134 + uint8_t padding; // Padding 1 byte to align table_size offset to 6 bytes (pmfw_start_offset, for PMFW to know the starting offset of PPTable_t). 135 + uint16_t pmfw_pptable_start_offset; // The start offset of the pmfw portion. i.e. start of PPTable_t (start of SkuTable_t) 136 + uint16_t pmfw_pptable_size; // The total size of pmfw_pptable, i.e PPTable_t. 137 + uint16_t pmfw_pfe_table_start_offset; // The start offset of the PFE_Settings_t within pmfw_pptable. 138 + uint16_t pmfw_pfe_table_size; // The size of PFE_Settings_t. 139 + uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t within pmfw_pptable. 140 + uint16_t pmfw_board_table_size; // The size of BoardTable_t. 141 + uint16_t pmfw_custom_sku_table_start_offset; // The start offset of the CustomSkuTable_t within pmfw_pptable. 
142 + uint16_t pmfw_custom_sku_table_size; // The size of the CustomSkuTable_t. 143 + uint32_t golden_pp_id; // PPGen use only: PP Table ID on the Golden Data Base 144 + uint32_t golden_revision; // PPGen use only: PP Table Revision on the Golden Data Base 145 + uint16_t format_id; // PPGen use only: PPTable for different ASICs. 146 + uint32_t platform_caps; // POWERPLAYTABLE::ulPlatformCaps 147 + 148 + uint8_t thermal_controller_type; // one of smu_14_0_2_PP_THERMALCONTROLLER 149 + 150 + uint16_t small_power_limit1; 151 + uint16_t small_power_limit2; 152 + uint16_t boost_power_limit; // For Gemini Board, when the slave adapter is in BACO mode, the master adapter will use this boost power limit instead of the default power limit to boost the power limit. 153 + uint16_t software_shutdown_temp; 154 + 155 + uint8_t reserve[143]; // Zero filled field reserved for future use 156 + 157 + struct smu_14_0_2_overdrive_table overdrive_table; 158 + 159 + PPTable_t smc_pptable; // PPTable_t in driver_if.h -- as requested by PMFW, this offset should start at a 32-byte boundary, and the table_size above should remain at offset=6 bytes 160 + }; 161 + 162 + #pragma pack(pop) 163 + 164 + #endif
+13
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
··· 3118 3118 return 0; 3119 3119 } 3120 3120 3121 + static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank) 3122 + { 3123 + int error_code; 3124 + 3125 + if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600) 3126 + error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]); 3127 + else 3128 + error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]); 3129 + 3130 + return error_code & 0xff; 3131 + } 3132 + 3121 3133 static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = { 3122 3134 .max_ue_bank_count = 12, 3123 3135 .max_ce_bank_count = 12, 3124 3136 .set_debug_mode = aca_smu_set_debug_mode, 3125 3137 .get_valid_aca_count = aca_smu_get_valid_aca_count, 3126 3138 .get_valid_aca_bank = aca_smu_get_valid_aca_bank, 3139 + .parse_error_code = aca_smu_parse_error_code, 3127 3140 }; 3128 3141 3129 3142 static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile
··· 23 23 # Makefile for the 'smu manager' sub-component of powerplay. 24 24 # It provides the smu management services for the driver. 25 25 26 - SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o 26 + SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o smu_v14_0_2_ppt.o 27 27 28 28 AMD_SWSMU_SMU14MGR = $(addprefix $(AMD_SWSMU_PATH)/smu14/,$(SMU14_MGR)) 29 29
+89 -26
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
··· 38 38 #include "amdgpu_ras.h" 39 39 #include "smu_cmn.h" 40 40 41 - #include "asic_reg/mp/mp_14_0_0_offset.h" 42 - #include "asic_reg/mp/mp_14_0_0_sh_mask.h" 41 + #include "asic_reg/mp/mp_14_0_2_offset.h" 42 + #include "asic_reg/mp/mp_14_0_2_sh_mask.h" 43 + 44 + #define regMP1_SMN_IH_SW_INT_mp1_14_0_0 0x0341 45 + #define regMP1_SMN_IH_SW_INT_mp1_14_0_0_BASE_IDX 0 46 + #define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0 0x0342 47 + #define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0 43 48 44 49 /* 45 50 * DO NOT use these for err/warn/info/debug messages. ··· 57 52 #undef pr_debug 58 53 59 54 MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin"); 55 + MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin"); 60 56 61 57 #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 62 58 ··· 65 59 { 66 60 struct amdgpu_device *adev = smu->adev; 67 61 char fw_name[30]; 68 - char ucode_prefix[15]; 62 + char ucode_prefix[30]; 69 63 int err = 0; 70 64 const struct smc_firmware_header_v1_0 *hdr; 71 65 const struct common_firmware_header *header; ··· 112 106 113 107 int smu_v14_0_load_microcode(struct smu_context *smu) 114 108 { 115 - #if 0 116 109 struct amdgpu_device *adev = smu->adev; 117 110 const uint32_t *src; 118 111 const struct smc_firmware_header_v1_0 *hdr; ··· 136 131 1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK); 137 132 138 133 for (i = 0; i < adev->usec_timeout; i++) { 139 - mp1_fw_flags = RREG32_PCIE(MP1_Public | 140 - (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 134 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) 135 + mp1_fw_flags = RREG32_PCIE(MP1_Public | 136 + (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff)); 137 + else 138 + mp1_fw_flags = RREG32_PCIE(MP1_Public | 139 + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 141 140 if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> 142 141 MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) 143 142 break; ··· 151 142 if (i == adev->usec_timeout) 152 143 return -ETIME; 153 144 154 - #endif 155 145 return 0; 156 - 157 146 } 158 147 159 
148 int smu_v14_0_init_pptable_microcode(struct smu_context *smu) ··· 170 163 return 0; 171 164 172 165 if (!adev->scpm_enabled) 166 + return 0; 167 + 168 + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2)) || 169 + (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 3))) 173 170 return 0; 174 171 175 172 /* override pptable_id from driver parameter */ ··· 209 198 struct amdgpu_device *adev = smu->adev; 210 199 uint32_t mp1_fw_flags; 211 200 212 - mp1_fw_flags = RREG32_PCIE(MP1_Public | 201 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) 202 + mp1_fw_flags = RREG32_PCIE(MP1_Public | 203 + (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff)); 204 + else 205 + mp1_fw_flags = RREG32_PCIE(MP1_Public | 213 206 (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 214 207 215 208 if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> ··· 242 227 adev->pm.fw_version = smu_version; 243 228 244 229 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 245 - case IP_VERSION(14, 0, 2): 246 - smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; 247 - break; 248 230 case IP_VERSION(14, 0, 0): 249 231 smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; 250 232 break; 251 233 case IP_VERSION(14, 0, 1): 252 234 smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1; 253 235 break; 254 - 236 + case IP_VERSION(14, 0, 2): 237 + case IP_VERSION(14, 0, 3): 238 + smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; 239 + break; 255 240 default: 256 241 dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n", 257 242 amdgpu_ip_version(adev, MP1_HWIP, 0)); ··· 753 738 struct amdgpu_device *adev = smu->adev; 754 739 755 740 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 756 - case IP_VERSION(14, 0, 2): 757 741 case IP_VERSION(14, 0, 0): 758 742 case IP_VERSION(14, 0, 1): 743 + case IP_VERSION(14, 0, 2): 759 744 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 760 745 return 0; 761 746 if (enable) ··· 856 
841 // TODO 857 842 858 843 /* For MP1 SW irqs */ 859 - val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); 860 - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); 861 - WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); 844 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) { 845 + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0); 846 + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); 847 + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val); 848 + } else { 849 + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); 850 + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); 851 + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); 852 + } 862 853 863 854 break; 864 855 case AMDGPU_IRQ_STATE_ENABLE: ··· 872 851 // TODO 873 852 874 853 /* For MP1 SW irqs */ 875 - val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); 876 - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); 877 - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); 878 - WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); 854 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) { 855 + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0); 856 + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); 857 + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); 858 + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0, val); 879 859 880 - val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); 881 - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); 882 - WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); 860 + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0); 861 + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); 862 + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val); 863 + } else { 864 + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); 865 + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); 866 + val = REG_SET_FIELD(val, 
MP1_SMN_IH_SW_INT, VALID, 0); 867 + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); 868 + 869 + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); 870 + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); 871 + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); 872 + } 883 873 884 874 break; 885 875 default: ··· 900 868 return 0; 901 869 } 902 870 871 + #define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ 872 + #define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */ 873 + 903 874 static int smu_v14_0_irq_process(struct amdgpu_device *adev, 904 875 struct amdgpu_irq_src *source, 905 876 struct amdgpu_iv_entry *entry) 906 877 { 907 - // TODO 878 + struct smu_context *smu = adev->powerplay.pp_handle; 879 + uint32_t client_id = entry->client_id; 880 + uint32_t src_id = entry->src_id; 881 + 882 + if (client_id == SOC15_IH_CLIENTID_THM) { 883 + switch (src_id) { 884 + case THM_11_0__SRCID__THM_DIG_THERM_L2H: 885 + schedule_delayed_work(&smu->swctf_delayed_work, 886 + msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY)); 887 + break; 888 + case THM_11_0__SRCID__THM_DIG_THERM_H2L: 889 + dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n"); 890 + break; 891 + default: 892 + dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n", 893 + src_id); 894 + break; 895 + } 896 + } 908 897 909 898 return 0; 910 899 } ··· 947 894 irq_src->num_types = 1; 948 895 irq_src->funcs = &smu_v14_0_irq_funcs; 949 896 950 - // TODO: THM related 897 + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, 898 + THM_11_0__SRCID__THM_DIG_THERM_L2H, 899 + irq_src); 900 + if (ret) 901 + return ret; 902 + 903 + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, 904 + THM_11_0__SRCID__THM_DIG_THERM_H2L, 905 + irq_src); 906 + if (ret) 907 + return ret; 951 908 952 909 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, 953 910 SMU_IH_INTERRUPT_ID_TO_DRIVER,
+1796
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
··· 1 + /* 2 + * Copyright 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + */ 23 + 24 + #define SWSMU_CODE_LAYER_L2 25 + 26 + #include <linux/firmware.h> 27 + #include <linux/pci.h> 28 + #include <linux/i2c.h> 29 + #include "amdgpu.h" 30 + #include "amdgpu_smu.h" 31 + #include "atomfirmware.h" 32 + #include "amdgpu_atomfirmware.h" 33 + #include "amdgpu_atombios.h" 34 + #include "smu_v14_0.h" 35 + #include "smu14_driver_if_v14_0.h" 36 + #include "soc15_common.h" 37 + #include "atom.h" 38 + #include "smu_v14_0_2_ppt.h" 39 + #include "smu_v14_0_2_pptable.h" 40 + #include "smu_v14_0_2_ppsmc.h" 41 + #include "mp/mp_14_0_2_offset.h" 42 + #include "mp/mp_14_0_2_sh_mask.h" 43 + 44 + #include "smu_cmn.h" 45 + #include "amdgpu_ras.h" 46 + 47 + /* 48 + * DO NOT use these for err/warn/info/debug messages. 
49 + * Use dev_err, dev_warn, dev_info and dev_dbg instead. 50 + * They are more MGPU friendly. 51 + */ 52 + #undef pr_err 53 + #undef pr_warn 54 + #undef pr_info 55 + #undef pr_debug 56 + 57 + #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) 58 + 59 + #define FEATURE_MASK(feature) (1ULL << feature) 60 + #define SMC_DPM_FEATURE ( \ 61 + FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ 62 + FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ 63 + FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ 64 + FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ 65 + FEATURE_MASK(FEATURE_DPM_FCLK_BIT)) 66 + 67 + #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 68 + 69 + static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = { 70 + MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), 71 + MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), 72 + MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), 73 + MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0), 74 + MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0), 75 + MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0), 76 + MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), 77 + MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1), 78 + MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1), 79 + MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1), 80 + MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1), 81 + MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1), 82 + MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1), 83 + MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), 84 + MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), 85 + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), 86 + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), 87 + MSG_MAP(SetToolsDramAddrHigh, 
PPSMC_MSG_SetToolsDramAddrHigh, 0), 88 + MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), 89 + MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), 90 + MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), 91 + MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), 92 + MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0), 93 + MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), 94 + MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0), 95 + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), 96 + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), 97 + MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), 98 + MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), 99 + MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), 100 + MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), 101 + MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), 102 + MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), 103 + MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), 104 + MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), 105 + MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), 106 + MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1), 107 + MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0), 108 + MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), 109 + MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), 110 + MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), 111 + MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), 112 + MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0), 113 + MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), 114 + MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), 115 + MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), 116 + MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), 117 + MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), 118 + MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0), 119 + 
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), 120 + MSG_MAP(SetNumBadMemoryPagesRetired, PPSMC_MSG_SetNumBadMemoryPagesRetired, 0), 121 + MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel, 122 + PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0), 123 + MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0), 124 + MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0), 125 + }; 126 + 127 + static struct cmn2asic_mapping smu_v14_0_2_clk_map[SMU_CLK_COUNT] = { 128 + CLK_MAP(GFXCLK, PPCLK_GFXCLK), 129 + CLK_MAP(SCLK, PPCLK_GFXCLK), 130 + CLK_MAP(SOCCLK, PPCLK_SOCCLK), 131 + CLK_MAP(FCLK, PPCLK_FCLK), 132 + CLK_MAP(UCLK, PPCLK_UCLK), 133 + CLK_MAP(MCLK, PPCLK_UCLK), 134 + CLK_MAP(VCLK, PPCLK_VCLK_0), 135 + CLK_MAP(DCLK, PPCLK_DCLK_0), 136 + }; 137 + 138 + static struct cmn2asic_mapping smu_v14_0_2_feature_mask_map[SMU_FEATURE_COUNT] = { 139 + FEA_MAP(FW_DATA_READ), 140 + FEA_MAP(DPM_GFXCLK), 141 + FEA_MAP(DPM_GFX_POWER_OPTIMIZER), 142 + FEA_MAP(DPM_UCLK), 143 + FEA_MAP(DPM_FCLK), 144 + FEA_MAP(DPM_SOCCLK), 145 + FEA_MAP(DPM_LINK), 146 + FEA_MAP(DPM_DCN), 147 + FEA_MAP(VMEMP_SCALING), 148 + FEA_MAP(VDDIO_MEM_SCALING), 149 + FEA_MAP(DS_GFXCLK), 150 + FEA_MAP(DS_SOCCLK), 151 + FEA_MAP(DS_FCLK), 152 + FEA_MAP(DS_LCLK), 153 + FEA_MAP(DS_DCFCLK), 154 + FEA_MAP(DS_UCLK), 155 + FEA_MAP(GFX_ULV), 156 + FEA_MAP(FW_DSTATE), 157 + FEA_MAP(GFXOFF), 158 + FEA_MAP(BACO), 159 + FEA_MAP(MM_DPM), 160 + FEA_MAP(SOC_MPCLK_DS), 161 + FEA_MAP(BACO_MPCLK_DS), 162 + FEA_MAP(THROTTLERS), 163 + FEA_MAP(SMARTSHIFT), 164 + FEA_MAP(GTHR), 165 + FEA_MAP(ACDC), 166 + FEA_MAP(VR0HOT), 167 + FEA_MAP(FW_CTF), 168 + FEA_MAP(FAN_CONTROL), 169 + FEA_MAP(GFX_DCS), 170 + FEA_MAP(GFX_READ_MARGIN), 171 + FEA_MAP(LED_DISPLAY), 172 + FEA_MAP(GFXCLK_SPREAD_SPECTRUM), 173 + FEA_MAP(OUT_OF_BAND_MONITOR), 174 + FEA_MAP(OPTIMIZED_VMIN), 175 + FEA_MAP(GFX_IMU), 176 + FEA_MAP(BOOT_TIME_CAL), 177 + FEA_MAP(GFX_PCC_DFLL), 178 + FEA_MAP(SOC_CG), 179 + FEA_MAP(DF_CSTATE), 180 + FEA_MAP(GFX_EDC), 181 + 
FEA_MAP(BOOT_POWER_OPT), 182 + FEA_MAP(CLOCK_POWER_DOWN_BYPASS), 183 + FEA_MAP(DS_VCN), 184 + FEA_MAP(BACO_CG), 185 + FEA_MAP(MEM_TEMP_READ), 186 + FEA_MAP(ATHUB_MMHUB_PG), 187 + FEA_MAP(SOC_PCC), 188 + [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, 189 + [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, 190 + [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT}, 191 + }; 192 + 193 + static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = { 194 + TAB_MAP(PPTABLE), 195 + TAB_MAP(WATERMARKS), 196 + TAB_MAP(AVFS_PSM_DEBUG), 197 + TAB_MAP(PMSTATUSLOG), 198 + TAB_MAP(SMU_METRICS), 199 + TAB_MAP(DRIVER_SMU_CONFIG), 200 + TAB_MAP(ACTIVITY_MONITOR_COEFF), 201 + [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE}, 202 + TAB_MAP(I2C_COMMANDS), 203 + TAB_MAP(ECCINFO), 204 + }; 205 + 206 + static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { 207 + PWR_MAP(AC), 208 + PWR_MAP(DC), 209 + }; 210 + 211 + static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { 212 + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT), 213 + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), 214 + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), 215 + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), 216 + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), 217 + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), 218 + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 219 + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT), 220 + }; 221 + 222 + #if 0 223 + static const uint8_t smu_v14_0_2_throttler_map[] = { 224 + [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT), 225 + [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT), 226 + [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT), 227 + [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT), 228 + 
[THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT), 229 + [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT), 230 + [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT), 231 + [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT), 232 + [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT), 233 + [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT), 234 + [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT), 235 + [THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT), 236 + [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT), 237 + [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT), 238 + [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT), 239 + [THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT), 240 + [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT), 241 + }; 242 + #endif 243 + 244 + static int 245 + smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu, 246 + uint32_t *feature_mask, uint32_t num) 247 + { 248 + struct amdgpu_device *adev = smu->adev; 249 + /*u32 smu_version;*/ 250 + 251 + if (num > 2) 252 + return -EINVAL; 253 + 254 + memset(feature_mask, 0xff, sizeof(uint32_t) * num); 255 + 256 + if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) { 257 + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); 258 + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT); 259 + } 260 + #if 0 261 + if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) || 262 + !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB)) 263 + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT); 264 + 265 + if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)) 266 + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT); 267 + 268 + /* PMFW 78.58 contains a critical fix for gfxoff feature */ 269 + smu_cmn_get_smc_version(smu, NULL, &smu_version); 270 + if ((smu_version < 0x004e3a00) || 271 + !(adev->pm.pp_feature & PP_GFXOFF_MASK)) 272 + *(uint64_t *)feature_mask &= 
~FEATURE_MASK(FEATURE_GFXOFF_BIT); 273 + 274 + if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) { 275 + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT); 276 + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT); 277 + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT); 278 + } 279 + 280 + if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)) 281 + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT); 282 + 283 + if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { 284 + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT); 285 + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT); 286 + } 287 + 288 + if (!(adev->pm.pp_feature & PP_ULV_MASK)) 289 + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT); 290 + #endif 291 + 292 + return 0; 293 + } 294 + 295 + static int smu_v14_0_2_check_powerplay_table(struct smu_context *smu) 296 + { 297 + struct smu_table_context *table_context = &smu->smu_table; 298 + struct smu_14_0_2_powerplay_table *powerplay_table = 299 + table_context->power_play_table; 300 + struct smu_baco_context *smu_baco = &smu->smu_baco; 301 + PPTable_t *pptable = smu->smu_table.driver_pptable; 302 + const OverDriveLimits_t * const overdrive_upperlimits = 303 + &pptable->SkuTable.OverDriveLimitsBasicMax; 304 + const OverDriveLimits_t * const overdrive_lowerlimits = 305 + &pptable->SkuTable.OverDriveLimitsBasicMin; 306 + 307 + if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC) 308 + smu->dc_controlled_by_gpio = true; 309 + 310 + if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_BACO) { 311 + smu_baco->platform_support = true; 312 + 313 + if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_MACO) 314 + smu_baco->maco_support = true; 315 + } 316 + 317 + if (!overdrive_lowerlimits->FeatureCtrlMask || 318 + !overdrive_upperlimits->FeatureCtrlMask) 319 + smu->od_enabled = false; 320 + 321 + 
table_context->thermal_controller_type = 322 + powerplay_table->thermal_controller_type; 323 + 324 + /* 325 + * Instead of having its own buffer space and get overdrive_table copied, 326 + * smu->od_settings just points to the actual overdrive_table 327 + */ 328 + smu->od_settings = &powerplay_table->overdrive_table; 329 + 330 + smu->adev->pm.no_fan = 331 + !(pptable->PFE_Settings.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT)); 332 + 333 + return 0; 334 + } 335 + 336 + static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu) 337 + { 338 + struct smu_table_context *table_context = &smu->smu_table; 339 + struct smu_14_0_2_powerplay_table *powerplay_table = 340 + table_context->power_play_table; 341 + 342 + memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable, 343 + sizeof(PPTable_t)); 344 + 345 + return 0; 346 + } 347 + 348 + #ifndef atom_smc_dpm_info_table_14_0_0 349 + struct atom_smc_dpm_info_table_14_0_0 { 350 + struct atom_common_table_header table_header; 351 + BoardTable_t BoardTable; 352 + }; 353 + #endif 354 + 355 + static int smu_v14_0_2_append_powerplay_table(struct smu_context *smu) 356 + { 357 + struct smu_table_context *table_context = &smu->smu_table; 358 + PPTable_t *smc_pptable = table_context->driver_pptable; 359 + struct atom_smc_dpm_info_table_14_0_0 *smc_dpm_table; 360 + BoardTable_t *BoardTable = &smc_pptable->BoardTable; 361 + int index, ret; 362 + 363 + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 364 + smc_dpm_info); 365 + 366 + ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL, 367 + (uint8_t **)&smc_dpm_table); 368 + if (ret) 369 + return ret; 370 + 371 + memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t)); 372 + 373 + return 0; 374 + } 375 + 376 + #if 0 377 + static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, 378 + void **table, 379 + uint32_t *size) 380 + { 381 + struct smu_table_context *smu_table = &smu->smu_table; 382 + 
void *combo_pptable = smu_table->combo_pptable; 383 + int ret = 0; 384 + 385 + ret = smu_cmn_get_combo_pptable(smu); 386 + if (ret) 387 + return ret; 388 + 389 + *table = combo_pptable; 390 + *size = sizeof(struct smu_14_0_powerplay_table); 391 + 392 + return 0; 393 + } 394 + #endif 395 + 396 + static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, 397 + void **table, 398 + uint32_t *size) 399 + { 400 + struct smu_table_context *smu_table = &smu->smu_table; 401 + void *combo_pptable = smu_table->combo_pptable; 402 + int ret = 0; 403 + 404 + ret = smu_cmn_get_combo_pptable(smu); 405 + if (ret) 406 + return ret; 407 + 408 + *table = combo_pptable; 409 + *size = sizeof(struct smu_14_0_2_powerplay_table); 410 + 411 + return 0; 412 + } 413 + 414 + static int smu_v14_0_2_setup_pptable(struct smu_context *smu) 415 + { 416 + struct smu_table_context *smu_table = &smu->smu_table; 417 + struct amdgpu_device *adev = smu->adev; 418 + int ret = 0; 419 + 420 + if (amdgpu_sriov_vf(smu->adev)) 421 + return 0; 422 + 423 + if (!adev->scpm_enabled) 424 + ret = smu_v14_0_setup_pptable(smu); 425 + else 426 + ret = smu_v14_0_2_get_pptable_from_pmfw(smu, 427 + &smu_table->power_play_table, 428 + &smu_table->power_play_table_size); 429 + if (ret) 430 + return ret; 431 + 432 + ret = smu_v14_0_2_store_powerplay_table(smu); 433 + if (ret) 434 + return ret; 435 + 436 + /* 437 + * With SCPM enabled, the operation below will be handled 438 + * by PSP. Driver involvment is unnecessary and useless. 
439 + */ 440 + if (!adev->scpm_enabled) { 441 + ret = smu_v14_0_2_append_powerplay_table(smu); 442 + if (ret) 443 + return ret; 444 + } 445 + 446 + ret = smu_v14_0_2_check_powerplay_table(smu); 447 + if (ret) 448 + return ret; 449 + 450 + return ret; 451 + } 452 + 453 + static int smu_v14_0_2_tables_init(struct smu_context *smu) 454 + { 455 + struct smu_table_context *smu_table = &smu->smu_table; 456 + struct smu_table *tables = smu_table->tables; 457 + 458 + SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), 459 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 460 + SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 461 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 462 + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t), 463 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 464 + SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), 465 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 466 + SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t), 467 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 468 + SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, 469 + sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE, 470 + AMDGPU_GEM_DOMAIN_VRAM); 471 + SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE, 472 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 473 + SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t), 474 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 475 + 476 + smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL); 477 + if (!smu_table->metrics_table) 478 + goto err0_out; 479 + smu_table->metrics_time = 0; 480 + 481 + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3); 482 + smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 483 + if (!smu_table->gpu_metrics_table) 484 + goto err1_out; 485 + 486 + smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 487 + if (!smu_table->watermarks_table) 488 + 
goto err2_out; 489 + 490 + smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL); 491 + if (!smu_table->ecc_table) 492 + goto err3_out; 493 + 494 + return 0; 495 + 496 + err3_out: 497 + kfree(smu_table->watermarks_table); 498 + err2_out: 499 + kfree(smu_table->gpu_metrics_table); 500 + err1_out: 501 + kfree(smu_table->metrics_table); 502 + err0_out: 503 + return -ENOMEM; 504 + } 505 + 506 + static int smu_v14_0_2_allocate_dpm_context(struct smu_context *smu) 507 + { 508 + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 509 + 510 + smu_dpm->dpm_context = kzalloc(sizeof(struct smu_14_0_dpm_context), 511 + GFP_KERNEL); 512 + if (!smu_dpm->dpm_context) 513 + return -ENOMEM; 514 + 515 + smu_dpm->dpm_context_size = sizeof(struct smu_14_0_dpm_context); 516 + 517 + return 0; 518 + } 519 + 520 + static int smu_v14_0_2_init_smc_tables(struct smu_context *smu) 521 + { 522 + int ret = 0; 523 + 524 + ret = smu_v14_0_2_tables_init(smu); 525 + if (ret) 526 + return ret; 527 + 528 + ret = smu_v14_0_2_allocate_dpm_context(smu); 529 + if (ret) 530 + return ret; 531 + 532 + return smu_v14_0_init_smc_tables(smu); 533 + } 534 + 535 + static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu) 536 + { 537 + struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; 538 + struct smu_table_context *table_context = &smu->smu_table; 539 + PPTable_t *pptable = table_context->driver_pptable; 540 + SkuTable_t *skutable = &pptable->SkuTable; 541 + struct smu_14_0_dpm_table *dpm_table; 542 + struct smu_14_0_pcie_table *pcie_table; 543 + uint32_t link_level; 544 + int ret = 0; 545 + 546 + /* socclk dpm table setup */ 547 + dpm_table = &dpm_context->dpm_tables.soc_table; 548 + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { 549 + ret = smu_v14_0_set_single_dpm_table(smu, 550 + SMU_SOCCLK, 551 + dpm_table); 552 + if (ret) 553 + return ret; 554 + } else { 555 + dpm_table->count = 1; 556 + dpm_table->dpm_levels[0].value = 
smu->smu_table.boot_values.socclk / 100; 557 + dpm_table->dpm_levels[0].enabled = true; 558 + dpm_table->min = dpm_table->dpm_levels[0].value; 559 + dpm_table->max = dpm_table->dpm_levels[0].value; 560 + } 561 + 562 + /* gfxclk dpm table setup */ 563 + dpm_table = &dpm_context->dpm_tables.gfx_table; 564 + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { 565 + ret = smu_v14_0_set_single_dpm_table(smu, 566 + SMU_GFXCLK, 567 + dpm_table); 568 + if (ret) 569 + return ret; 570 + 571 + /* 572 + * Update the reported maximum shader clock to the value 573 + * which can be guarded to be achieved on all cards. This 574 + * is aligned with Window setting. And considering that value 575 + * might be not the peak frequency the card can achieve, it 576 + * is normal some real-time clock frequency can overtake this 577 + * labelled maximum clock frequency(for example in pp_dpm_sclk 578 + * sysfs output). 579 + */ 580 + if (skutable->DriverReportedClocks.GameClockAc && 581 + (dpm_table->dpm_levels[dpm_table->count - 1].value > 582 + skutable->DriverReportedClocks.GameClockAc)) { 583 + dpm_table->dpm_levels[dpm_table->count - 1].value = 584 + skutable->DriverReportedClocks.GameClockAc; 585 + dpm_table->max = skutable->DriverReportedClocks.GameClockAc; 586 + } 587 + } else { 588 + dpm_table->count = 1; 589 + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; 590 + dpm_table->dpm_levels[0].enabled = true; 591 + dpm_table->min = dpm_table->dpm_levels[0].value; 592 + dpm_table->max = dpm_table->dpm_levels[0].value; 593 + } 594 + 595 + /* uclk dpm table setup */ 596 + dpm_table = &dpm_context->dpm_tables.uclk_table; 597 + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { 598 + ret = smu_v14_0_set_single_dpm_table(smu, 599 + SMU_UCLK, 600 + dpm_table); 601 + if (ret) 602 + return ret; 603 + } else { 604 + dpm_table->count = 1; 605 + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100; 606 + 
dpm_table->dpm_levels[0].enabled = true; 607 + dpm_table->min = dpm_table->dpm_levels[0].value; 608 + dpm_table->max = dpm_table->dpm_levels[0].value; 609 + } 610 + 611 + /* fclk dpm table setup */ 612 + dpm_table = &dpm_context->dpm_tables.fclk_table; 613 + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) { 614 + ret = smu_v14_0_set_single_dpm_table(smu, 615 + SMU_FCLK, 616 + dpm_table); 617 + if (ret) 618 + return ret; 619 + } else { 620 + dpm_table->count = 1; 621 + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100; 622 + dpm_table->dpm_levels[0].enabled = true; 623 + dpm_table->min = dpm_table->dpm_levels[0].value; 624 + dpm_table->max = dpm_table->dpm_levels[0].value; 625 + } 626 + 627 + /* vclk dpm table setup */ 628 + dpm_table = &dpm_context->dpm_tables.vclk_table; 629 + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) { 630 + ret = smu_v14_0_set_single_dpm_table(smu, 631 + SMU_VCLK, 632 + dpm_table); 633 + if (ret) 634 + return ret; 635 + } else { 636 + dpm_table->count = 1; 637 + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; 638 + dpm_table->dpm_levels[0].enabled = true; 639 + dpm_table->min = dpm_table->dpm_levels[0].value; 640 + dpm_table->max = dpm_table->dpm_levels[0].value; 641 + } 642 + 643 + /* dclk dpm table setup */ 644 + dpm_table = &dpm_context->dpm_tables.dclk_table; 645 + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) { 646 + ret = smu_v14_0_set_single_dpm_table(smu, 647 + SMU_DCLK, 648 + dpm_table); 649 + if (ret) 650 + return ret; 651 + } else { 652 + dpm_table->count = 1; 653 + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; 654 + dpm_table->dpm_levels[0].enabled = true; 655 + dpm_table->min = dpm_table->dpm_levels[0].value; 656 + dpm_table->max = dpm_table->dpm_levels[0].value; 657 + } 658 + 659 + /* lclk dpm table setup */ 660 + pcie_table = &dpm_context->dpm_tables.pcie_table; 661 + pcie_table->num_of_link_levels = 0; 662 + 
for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { 663 + if (!skutable->PcieGenSpeed[link_level] && 664 + !skutable->PcieLaneCount[link_level] && 665 + !skutable->LclkFreq[link_level]) 666 + continue; 667 + 668 + pcie_table->pcie_gen[pcie_table->num_of_link_levels] = 669 + skutable->PcieGenSpeed[link_level]; 670 + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = 671 + skutable->PcieLaneCount[link_level]; 672 + pcie_table->clk_freq[pcie_table->num_of_link_levels] = 673 + skutable->LclkFreq[link_level]; 674 + pcie_table->num_of_link_levels++; 675 + } 676 + 677 + return 0; 678 + } 679 + 680 + static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu) 681 + { 682 + int ret = 0; 683 + uint64_t feature_enabled; 684 + 685 + ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 686 + if (ret) 687 + return false; 688 + 689 + return !!(feature_enabled & SMC_DPM_FEATURE); 690 + } 691 + 692 + static void smu_v14_0_2_dump_pptable(struct smu_context *smu) 693 + { 694 + struct smu_table_context *table_context = &smu->smu_table; 695 + PPTable_t *pptable = table_context->driver_pptable; 696 + PFE_Settings_t *PFEsettings = &pptable->PFE_Settings; 697 + 698 + dev_info(smu->adev->dev, "Dumped PPTable:\n"); 699 + 700 + dev_info(smu->adev->dev, "Version = 0x%08x\n", PFEsettings->Version); 701 + dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", PFEsettings->FeaturesToRun[0]); 702 + dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", PFEsettings->FeaturesToRun[1]); 703 + } 704 + 705 + static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics) 706 + { 707 + uint32_t throttler_status = 0; 708 + int i; 709 + 710 + for (i = 0; i < THROTTLER_COUNT; i++) 711 + throttler_status |= 712 + (metrics->ThrottlingPercentage[i] ? 
1U << i : 0); 713 + 714 + return throttler_status; 715 + } 716 + 717 + #define SMU_14_0_2_BUSY_THRESHOLD 5 718 + static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu, 719 + MetricsMember_t member, 720 + uint32_t *value) 721 + { 722 + struct smu_table_context *smu_table = &smu->smu_table; 723 + SmuMetrics_t *metrics = 724 + &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); 725 + int ret = 0; 726 + 727 + ret = smu_cmn_get_metrics_table(smu, 728 + NULL, 729 + false); 730 + if (ret) 731 + return ret; 732 + 733 + switch (member) { 734 + case METRICS_CURR_GFXCLK: 735 + *value = metrics->CurrClock[PPCLK_GFXCLK]; 736 + break; 737 + case METRICS_CURR_SOCCLK: 738 + *value = metrics->CurrClock[PPCLK_SOCCLK]; 739 + break; 740 + case METRICS_CURR_UCLK: 741 + *value = metrics->CurrClock[PPCLK_UCLK]; 742 + break; 743 + case METRICS_CURR_VCLK: 744 + *value = metrics->CurrClock[PPCLK_VCLK_0]; 745 + break; 746 + case METRICS_CURR_DCLK: 747 + *value = metrics->CurrClock[PPCLK_DCLK_0]; 748 + break; 749 + case METRICS_CURR_FCLK: 750 + *value = metrics->CurrClock[PPCLK_FCLK]; 751 + break; 752 + case METRICS_CURR_DCEFCLK: 753 + *value = metrics->CurrClock[PPCLK_DCFCLK]; 754 + break; 755 + case METRICS_AVERAGE_GFXCLK: 756 + if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD) 757 + *value = metrics->AverageGfxclkFrequencyPostDs; 758 + else 759 + *value = metrics->AverageGfxclkFrequencyPreDs; 760 + break; 761 + case METRICS_AVERAGE_FCLK: 762 + if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD) 763 + *value = metrics->AverageFclkFrequencyPostDs; 764 + else 765 + *value = metrics->AverageFclkFrequencyPreDs; 766 + break; 767 + case METRICS_AVERAGE_UCLK: 768 + if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD) 769 + *value = metrics->AverageMemclkFrequencyPostDs; 770 + else 771 + *value = metrics->AverageMemclkFrequencyPreDs; 772 + break; 773 + case METRICS_AVERAGE_VCLK: 774 + *value = metrics->AverageVclk0Frequency; 775 + 
break; 776 + case METRICS_AVERAGE_DCLK: 777 + *value = metrics->AverageDclk0Frequency; 778 + break; 779 + case METRICS_AVERAGE_VCLK1: 780 + *value = metrics->AverageVclk1Frequency; 781 + break; 782 + case METRICS_AVERAGE_DCLK1: 783 + *value = metrics->AverageDclk1Frequency; 784 + break; 785 + case METRICS_AVERAGE_GFXACTIVITY: 786 + *value = metrics->AverageGfxActivity; 787 + break; 788 + case METRICS_AVERAGE_MEMACTIVITY: 789 + *value = metrics->AverageUclkActivity; 790 + break; 791 + case METRICS_AVERAGE_SOCKETPOWER: 792 + *value = metrics->AverageSocketPower << 8; 793 + break; 794 + case METRICS_TEMPERATURE_EDGE: 795 + *value = metrics->AvgTemperature[TEMP_EDGE] * 796 + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 797 + break; 798 + case METRICS_TEMPERATURE_HOTSPOT: 799 + *value = metrics->AvgTemperature[TEMP_HOTSPOT] * 800 + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 801 + break; 802 + case METRICS_TEMPERATURE_MEM: 803 + *value = metrics->AvgTemperature[TEMP_MEM] * 804 + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 805 + break; 806 + case METRICS_TEMPERATURE_VRGFX: 807 + *value = metrics->AvgTemperature[TEMP_VR_GFX] * 808 + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 809 + break; 810 + case METRICS_TEMPERATURE_VRSOC: 811 + *value = metrics->AvgTemperature[TEMP_VR_SOC] * 812 + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 813 + break; 814 + case METRICS_THROTTLER_STATUS: 815 + *value = smu_v14_0_2_get_throttler_status(metrics); 816 + break; 817 + case METRICS_CURR_FANSPEED: 818 + *value = metrics->AvgFanRpm; 819 + break; 820 + case METRICS_CURR_FANPWM: 821 + *value = metrics->AvgFanPwm; 822 + break; 823 + case METRICS_VOLTAGE_VDDGFX: 824 + *value = metrics->AvgVoltage[SVI_PLANE_VDD_GFX]; 825 + break; 826 + case METRICS_PCIE_RATE: 827 + *value = metrics->PcieRate; 828 + break; 829 + case METRICS_PCIE_WIDTH: 830 + *value = metrics->PcieWidth; 831 + break; 832 + default: 833 + *value = UINT_MAX; 834 + break; 835 + } 836 + 837 + return ret; 838 + } 839 + 840 + static int 
smu_v14_0_2_get_dpm_ultimate_freq(struct smu_context *smu, 841 + enum smu_clk_type clk_type, 842 + uint32_t *min, 843 + uint32_t *max) 844 + { 845 + struct smu_14_0_dpm_context *dpm_context = 846 + smu->smu_dpm.dpm_context; 847 + struct smu_14_0_dpm_table *dpm_table; 848 + 849 + switch (clk_type) { 850 + case SMU_MCLK: 851 + case SMU_UCLK: 852 + /* uclk dpm table */ 853 + dpm_table = &dpm_context->dpm_tables.uclk_table; 854 + break; 855 + case SMU_GFXCLK: 856 + case SMU_SCLK: 857 + /* gfxclk dpm table */ 858 + dpm_table = &dpm_context->dpm_tables.gfx_table; 859 + break; 860 + case SMU_SOCCLK: 861 + /* socclk dpm table */ 862 + dpm_table = &dpm_context->dpm_tables.soc_table; 863 + break; 864 + case SMU_FCLK: 865 + /* fclk dpm table */ 866 + dpm_table = &dpm_context->dpm_tables.fclk_table; 867 + break; 868 + case SMU_VCLK: 869 + case SMU_VCLK1: 870 + /* vclk dpm table */ 871 + dpm_table = &dpm_context->dpm_tables.vclk_table; 872 + break; 873 + case SMU_DCLK: 874 + case SMU_DCLK1: 875 + /* dclk dpm table */ 876 + dpm_table = &dpm_context->dpm_tables.dclk_table; 877 + break; 878 + default: 879 + dev_err(smu->adev->dev, "Unsupported clock type!\n"); 880 + return -EINVAL; 881 + } 882 + 883 + if (min) 884 + *min = dpm_table->min; 885 + if (max) 886 + *max = dpm_table->max; 887 + 888 + return 0; 889 + } 890 + 891 + static int smu_v14_0_2_read_sensor(struct smu_context *smu, 892 + enum amd_pp_sensors sensor, 893 + void *data, 894 + uint32_t *size) 895 + { 896 + struct smu_table_context *table_context = &smu->smu_table; 897 + PPTable_t *smc_pptable = table_context->driver_pptable; 898 + int ret = 0; 899 + 900 + switch (sensor) { 901 + case AMDGPU_PP_SENSOR_MAX_FAN_RPM: 902 + *(uint16_t *)data = smc_pptable->CustomSkuTable.FanMaximumRpm; 903 + *size = 4; 904 + break; 905 + case AMDGPU_PP_SENSOR_MEM_LOAD: 906 + ret = smu_v14_0_2_get_smu_metrics_data(smu, 907 + METRICS_AVERAGE_MEMACTIVITY, 908 + (uint32_t *)data); 909 + *size = 4; 910 + break; 911 + case 
AMDGPU_PP_SENSOR_GPU_LOAD: 912 + ret = smu_v14_0_2_get_smu_metrics_data(smu, 913 + METRICS_AVERAGE_GFXACTIVITY, 914 + (uint32_t *)data); 915 + *size = 4; 916 + break; 917 + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: 918 + ret = smu_v14_0_2_get_smu_metrics_data(smu, 919 + METRICS_AVERAGE_SOCKETPOWER, 920 + (uint32_t *)data); 921 + *size = 4; 922 + break; 923 + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 924 + ret = smu_v14_0_2_get_smu_metrics_data(smu, 925 + METRICS_TEMPERATURE_HOTSPOT, 926 + (uint32_t *)data); 927 + *size = 4; 928 + break; 929 + case AMDGPU_PP_SENSOR_EDGE_TEMP: 930 + ret = smu_v14_0_2_get_smu_metrics_data(smu, 931 + METRICS_TEMPERATURE_EDGE, 932 + (uint32_t *)data); 933 + *size = 4; 934 + break; 935 + case AMDGPU_PP_SENSOR_MEM_TEMP: 936 + ret = smu_v14_0_2_get_smu_metrics_data(smu, 937 + METRICS_TEMPERATURE_MEM, 938 + (uint32_t *)data); 939 + *size = 4; 940 + break; 941 + case AMDGPU_PP_SENSOR_GFX_MCLK: 942 + ret = smu_v14_0_2_get_smu_metrics_data(smu, 943 + METRICS_CURR_UCLK, 944 + (uint32_t *)data); 945 + *(uint32_t *)data *= 100; 946 + *size = 4; 947 + break; 948 + case AMDGPU_PP_SENSOR_GFX_SCLK: 949 + ret = smu_v14_0_2_get_smu_metrics_data(smu, 950 + METRICS_AVERAGE_GFXCLK, 951 + (uint32_t *)data); 952 + *(uint32_t *)data *= 100; 953 + *size = 4; 954 + break; 955 + case AMDGPU_PP_SENSOR_VDDGFX: 956 + ret = smu_v14_0_2_get_smu_metrics_data(smu, 957 + METRICS_VOLTAGE_VDDGFX, 958 + (uint32_t *)data); 959 + *size = 4; 960 + break; 961 + default: 962 + ret = -EOPNOTSUPP; 963 + break; 964 + } 965 + 966 + return ret; 967 + } 968 + 969 + static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu, 970 + enum smu_clk_type clk_type, 971 + uint32_t *value) 972 + { 973 + MetricsMember_t member_type; 974 + int clk_id = 0; 975 + 976 + clk_id = smu_cmn_to_asic_specific_index(smu, 977 + CMN2ASIC_MAPPING_CLK, 978 + clk_type); 979 + if (clk_id < 0) 980 + return -EINVAL; 981 + 982 + switch (clk_id) { 983 + case PPCLK_GFXCLK: 984 + member_type = 
METRICS_AVERAGE_GFXCLK; 985 + break; 986 + case PPCLK_UCLK: 987 + member_type = METRICS_CURR_UCLK; 988 + break; 989 + case PPCLK_FCLK: 990 + member_type = METRICS_CURR_FCLK; 991 + break; 992 + case PPCLK_SOCCLK: 993 + member_type = METRICS_CURR_SOCCLK; 994 + break; 995 + case PPCLK_VCLK_0: 996 + member_type = METRICS_AVERAGE_VCLK; 997 + break; 998 + case PPCLK_DCLK_0: 999 + member_type = METRICS_AVERAGE_DCLK; 1000 + break; 1001 + default: 1002 + return -EINVAL; 1003 + } 1004 + 1005 + return smu_v14_0_2_get_smu_metrics_data(smu, 1006 + member_type, 1007 + value); 1008 + } 1009 + 1010 + static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, 1011 + enum smu_clk_type clk_type, 1012 + char *buf) 1013 + { 1014 + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 1015 + struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context; 1016 + struct smu_14_0_dpm_table *single_dpm_table; 1017 + int i, curr_freq, size = 0; 1018 + int ret = 0; 1019 + 1020 + smu_cmn_get_sysfs_buf(&buf, &size); 1021 + 1022 + if (amdgpu_ras_intr_triggered()) { 1023 + size += sysfs_emit_at(buf, size, "unavailable\n"); 1024 + return size; 1025 + } 1026 + 1027 + switch (clk_type) { 1028 + case SMU_SCLK: 1029 + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); 1030 + break; 1031 + case SMU_MCLK: 1032 + single_dpm_table = &(dpm_context->dpm_tables.uclk_table); 1033 + break; 1034 + case SMU_SOCCLK: 1035 + single_dpm_table = &(dpm_context->dpm_tables.soc_table); 1036 + break; 1037 + case SMU_FCLK: 1038 + single_dpm_table = &(dpm_context->dpm_tables.fclk_table); 1039 + break; 1040 + case SMU_VCLK: 1041 + case SMU_VCLK1: 1042 + single_dpm_table = &(dpm_context->dpm_tables.vclk_table); 1043 + break; 1044 + case SMU_DCLK: 1045 + case SMU_DCLK1: 1046 + single_dpm_table = &(dpm_context->dpm_tables.dclk_table); 1047 + break; 1048 + default: 1049 + break; 1050 + } 1051 + 1052 + switch (clk_type) { 1053 + case SMU_SCLK: 1054 + case SMU_MCLK: 1055 + case SMU_SOCCLK: 1056 + case SMU_FCLK: 1057 + 
case SMU_VCLK: 1058 + case SMU_VCLK1: 1059 + case SMU_DCLK: 1060 + case SMU_DCLK1: 1061 + ret = smu_v14_0_2_get_current_clk_freq_by_table(smu, clk_type, &curr_freq); 1062 + if (ret) { 1063 + dev_err(smu->adev->dev, "Failed to get current clock freq!"); 1064 + return ret; 1065 + } 1066 + 1067 + if (single_dpm_table->is_fine_grained) { 1068 + /* 1069 + * For fine grained dpms, there are only two dpm levels: 1070 + * - level 0 -> min clock freq 1071 + * - level 1 -> max clock freq 1072 + * And the current clock frequency can be any value between them. 1073 + * So, if the current clock frequency is not at level 0 or level 1, 1074 + * we will fake it as three dpm levels: 1075 + * - level 0 -> min clock freq 1076 + * - level 1 -> current actual clock freq 1077 + * - level 2 -> max clock freq 1078 + */ 1079 + if ((single_dpm_table->dpm_levels[0].value != curr_freq) && 1080 + (single_dpm_table->dpm_levels[1].value != curr_freq)) { 1081 + size += sysfs_emit_at(buf, size, "0: %uMhz\n", 1082 + single_dpm_table->dpm_levels[0].value); 1083 + size += sysfs_emit_at(buf, size, "1: %uMhz *\n", 1084 + curr_freq); 1085 + size += sysfs_emit_at(buf, size, "2: %uMhz\n", 1086 + single_dpm_table->dpm_levels[1].value); 1087 + } else { 1088 + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", 1089 + single_dpm_table->dpm_levels[0].value, 1090 + single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : ""); 1091 + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", 1092 + single_dpm_table->dpm_levels[1].value, 1093 + single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : ""); 1094 + } 1095 + } else { 1096 + for (i = 0; i < single_dpm_table->count; i++) 1097 + size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", 1098 + i, single_dpm_table->dpm_levels[i].value, 1099 + single_dpm_table->dpm_levels[i].value == curr_freq ? 
"*" : ""); 1100 + } 1101 + break; 1102 + case SMU_PCIE: 1103 + // TODO 1104 + break; 1105 + 1106 + default: 1107 + break; 1108 + } 1109 + 1110 + return size; 1111 + } 1112 + 1113 + static int smu_v14_0_2_force_clk_levels(struct smu_context *smu, 1114 + enum smu_clk_type clk_type, 1115 + uint32_t mask) 1116 + { 1117 + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 1118 + struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context; 1119 + struct smu_14_0_dpm_table *single_dpm_table; 1120 + uint32_t soft_min_level, soft_max_level; 1121 + uint32_t min_freq, max_freq; 1122 + int ret = 0; 1123 + 1124 + soft_min_level = mask ? (ffs(mask) - 1) : 0; 1125 + soft_max_level = mask ? (fls(mask) - 1) : 0; 1126 + 1127 + switch (clk_type) { 1128 + case SMU_GFXCLK: 1129 + case SMU_SCLK: 1130 + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); 1131 + break; 1132 + case SMU_MCLK: 1133 + case SMU_UCLK: 1134 + single_dpm_table = &(dpm_context->dpm_tables.uclk_table); 1135 + break; 1136 + case SMU_SOCCLK: 1137 + single_dpm_table = &(dpm_context->dpm_tables.soc_table); 1138 + break; 1139 + case SMU_FCLK: 1140 + single_dpm_table = &(dpm_context->dpm_tables.fclk_table); 1141 + break; 1142 + case SMU_VCLK: 1143 + case SMU_VCLK1: 1144 + single_dpm_table = &(dpm_context->dpm_tables.vclk_table); 1145 + break; 1146 + case SMU_DCLK: 1147 + case SMU_DCLK1: 1148 + single_dpm_table = &(dpm_context->dpm_tables.dclk_table); 1149 + break; 1150 + default: 1151 + break; 1152 + } 1153 + 1154 + switch (clk_type) { 1155 + case SMU_GFXCLK: 1156 + case SMU_SCLK: 1157 + case SMU_MCLK: 1158 + case SMU_UCLK: 1159 + case SMU_SOCCLK: 1160 + case SMU_FCLK: 1161 + case SMU_VCLK: 1162 + case SMU_VCLK1: 1163 + case SMU_DCLK: 1164 + case SMU_DCLK1: 1165 + if (single_dpm_table->is_fine_grained) { 1166 + /* There is only 2 levels for fine grained DPM */ 1167 + soft_max_level = (soft_max_level >= 1 ? 1 : 0); 1168 + soft_min_level = (soft_min_level >= 1 ? 
1 : 0); 1169 + } else { 1170 + if ((soft_max_level >= single_dpm_table->count) || 1171 + (soft_min_level >= single_dpm_table->count)) 1172 + return -EINVAL; 1173 + } 1174 + 1175 + min_freq = single_dpm_table->dpm_levels[soft_min_level].value; 1176 + max_freq = single_dpm_table->dpm_levels[soft_max_level].value; 1177 + 1178 + ret = smu_v14_0_set_soft_freq_limited_range(smu, 1179 + clk_type, 1180 + min_freq, 1181 + max_freq); 1182 + break; 1183 + case SMU_DCEFCLK: 1184 + case SMU_PCIE: 1185 + default: 1186 + break; 1187 + } 1188 + 1189 + return ret; 1190 + } 1191 + 1192 + static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu, 1193 + uint8_t pcie_gen_cap, 1194 + uint8_t pcie_width_cap) 1195 + { 1196 + struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; 1197 + struct smu_14_0_pcie_table *pcie_table = 1198 + &dpm_context->dpm_tables.pcie_table; 1199 + uint32_t smu_pcie_arg; 1200 + int ret, i; 1201 + 1202 + for (i = 0; i < pcie_table->num_of_link_levels; i++) { 1203 + if (pcie_table->pcie_gen[i] > pcie_gen_cap) 1204 + pcie_table->pcie_gen[i] = pcie_gen_cap; 1205 + if (pcie_table->pcie_lane[i] > pcie_width_cap) 1206 + pcie_table->pcie_lane[i] = pcie_width_cap; 1207 + 1208 + smu_pcie_arg = i << 16; 1209 + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; 1210 + smu_pcie_arg |= pcie_table->pcie_lane[i]; 1211 + 1212 + ret = smu_cmn_send_smc_msg_with_param(smu, 1213 + SMU_MSG_OverridePcieParameters, 1214 + smu_pcie_arg, 1215 + NULL); 1216 + if (ret) 1217 + return ret; 1218 + } 1219 + 1220 + return 0; 1221 + } 1222 + 1223 + static int smu_v14_0_2_get_thermal_temperature_range(struct smu_context *smu, 1224 + struct smu_temperature_range *range) 1225 + { 1226 + // TODO 1227 + 1228 + return 0; 1229 + } 1230 + 1231 + static int smu_v14_0_2_populate_umd_state_clk(struct smu_context *smu) 1232 + { 1233 + // TODO 1234 + 1235 + return 0; 1236 + } 1237 + 1238 + static void smu_v14_0_2_get_unique_id(struct smu_context *smu) 1239 + { 1240 + struct 
smu_table_context *smu_table = &smu->smu_table; 1241 + SmuMetrics_t *metrics = 1242 + &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); 1243 + struct amdgpu_device *adev = smu->adev; 1244 + uint32_t upper32 = 0, lower32 = 0; 1245 + int ret; 1246 + 1247 + ret = smu_cmn_get_metrics_table(smu, NULL, false); 1248 + if (ret) 1249 + goto out; 1250 + 1251 + upper32 = metrics->PublicSerialNumberUpper; 1252 + lower32 = metrics->PublicSerialNumberLower; 1253 + 1254 + out: 1255 + adev->unique_id = ((uint64_t)upper32 << 32) | lower32; 1256 + } 1257 + 1258 + static int smu_v14_0_2_get_power_limit(struct smu_context *smu, 1259 + uint32_t *current_power_limit, 1260 + uint32_t *default_power_limit, 1261 + uint32_t *max_power_limit, 1262 + uint32_t *min_power_limit) 1263 + { 1264 + // TODO 1265 + 1266 + return 0; 1267 + } 1268 + 1269 + static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu, 1270 + char *buf) 1271 + { 1272 + DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; 1273 + DpmActivityMonitorCoeffInt_t *activity_monitor = 1274 + &(activity_monitor_external.DpmActivityMonitorCoeffInt); 1275 + static const char *title[] = { 1276 + "PROFILE_INDEX(NAME)", 1277 + "CLOCK_TYPE(NAME)", 1278 + "FPS", 1279 + "MinActiveFreqType", 1280 + "MinActiveFreq", 1281 + "BoosterFreqType", 1282 + "BoosterFreq", 1283 + "PD_Data_limit_c", 1284 + "PD_Data_error_coeff", 1285 + "PD_Data_error_rate_coeff"}; 1286 + int16_t workload_type = 0; 1287 + uint32_t i, size = 0; 1288 + int result = 0; 1289 + 1290 + if (!buf) 1291 + return -EINVAL; 1292 + 1293 + size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n", 1294 + title[0], title[1], title[2], title[3], title[4], title[5], 1295 + title[6], title[7], title[8], title[9]); 1296 + 1297 + for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { 1298 + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1299 + workload_type = smu_cmn_to_asic_specific_index(smu, 1300 + CMN2ASIC_MAPPING_WORKLOAD, 1301 
+ i); 1302 + if (workload_type == -ENOTSUPP) 1303 + continue; 1304 + else if (workload_type < 0) 1305 + return -EINVAL; 1306 + 1307 + result = smu_cmn_update_table(smu, 1308 + SMU_TABLE_ACTIVITY_MONITOR_COEFF, 1309 + workload_type, 1310 + (void *)(&activity_monitor_external), 1311 + false); 1312 + if (result) { 1313 + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 1314 + return result; 1315 + } 1316 + 1317 + size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", 1318 + i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); 1319 + 1320 + size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", 1321 + " ", 1322 + 0, 1323 + "GFXCLK", 1324 + activity_monitor->Gfx_FPS, 1325 + activity_monitor->Gfx_MinActiveFreqType, 1326 + activity_monitor->Gfx_MinActiveFreq, 1327 + activity_monitor->Gfx_BoosterFreqType, 1328 + activity_monitor->Gfx_BoosterFreq, 1329 + activity_monitor->Gfx_PD_Data_limit_c, 1330 + activity_monitor->Gfx_PD_Data_error_coeff, 1331 + activity_monitor->Gfx_PD_Data_error_rate_coeff); 1332 + 1333 + size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", 1334 + " ", 1335 + 1, 1336 + "FCLK", 1337 + activity_monitor->Fclk_FPS, 1338 + activity_monitor->Fclk_MinActiveFreqType, 1339 + activity_monitor->Fclk_MinActiveFreq, 1340 + activity_monitor->Fclk_BoosterFreqType, 1341 + activity_monitor->Fclk_BoosterFreq, 1342 + activity_monitor->Fclk_PD_Data_limit_c, 1343 + activity_monitor->Fclk_PD_Data_error_coeff, 1344 + activity_monitor->Fclk_PD_Data_error_rate_coeff); 1345 + } 1346 + 1347 + return size; 1348 + } 1349 + 1350 + static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, 1351 + long *input, 1352 + uint32_t size) 1353 + { 1354 + DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; 1355 + DpmActivityMonitorCoeffInt_t *activity_monitor = 1356 + &(activity_monitor_external.DpmActivityMonitorCoeffInt); 1357 + int workload_type, ret = 0; 1358 + 
1359 + smu->power_profile_mode = input[size]; 1360 + 1361 + if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { 1362 + dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); 1363 + return -EINVAL; 1364 + } 1365 + 1366 + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 1367 + ret = smu_cmn_update_table(smu, 1368 + SMU_TABLE_ACTIVITY_MONITOR_COEFF, 1369 + WORKLOAD_PPLIB_CUSTOM_BIT, 1370 + (void *)(&activity_monitor_external), 1371 + false); 1372 + if (ret) { 1373 + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 1374 + return ret; 1375 + } 1376 + 1377 + switch (input[0]) { 1378 + case 0: /* Gfxclk */ 1379 + activity_monitor->Gfx_FPS = input[1]; 1380 + activity_monitor->Gfx_MinActiveFreqType = input[2]; 1381 + activity_monitor->Gfx_MinActiveFreq = input[3]; 1382 + activity_monitor->Gfx_BoosterFreqType = input[4]; 1383 + activity_monitor->Gfx_BoosterFreq = input[5]; 1384 + activity_monitor->Gfx_PD_Data_limit_c = input[6]; 1385 + activity_monitor->Gfx_PD_Data_error_coeff = input[7]; 1386 + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; 1387 + break; 1388 + case 1: /* Fclk */ 1389 + activity_monitor->Fclk_FPS = input[1]; 1390 + activity_monitor->Fclk_MinActiveFreqType = input[2]; 1391 + activity_monitor->Fclk_MinActiveFreq = input[3]; 1392 + activity_monitor->Fclk_BoosterFreqType = input[4]; 1393 + activity_monitor->Fclk_BoosterFreq = input[5]; 1394 + activity_monitor->Fclk_PD_Data_limit_c = input[6]; 1395 + activity_monitor->Fclk_PD_Data_error_coeff = input[7]; 1396 + activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; 1397 + break; 1398 + } 1399 + 1400 + ret = smu_cmn_update_table(smu, 1401 + SMU_TABLE_ACTIVITY_MONITOR_COEFF, 1402 + WORKLOAD_PPLIB_CUSTOM_BIT, 1403 + (void *)(&activity_monitor_external), 1404 + true); 1405 + if (ret) { 1406 + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); 1407 + return ret; 1408 + } 1409 + } 1410 + 1411 + /* 
conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1412 + workload_type = smu_cmn_to_asic_specific_index(smu, 1413 + CMN2ASIC_MAPPING_WORKLOAD, 1414 + smu->power_profile_mode); 1415 + if (workload_type < 0) 1416 + return -EINVAL; 1417 + 1418 + return smu_cmn_send_smc_msg_with_param(smu, 1419 + SMU_MSG_SetWorkloadMask, 1420 + 1 << workload_type, 1421 + NULL); 1422 + } 1423 + 1424 + static int smu_v14_0_2_baco_enter(struct smu_context *smu) 1425 + { 1426 + struct smu_baco_context *smu_baco = &smu->smu_baco; 1427 + struct amdgpu_device *adev = smu->adev; 1428 + 1429 + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) 1430 + return smu_v14_0_baco_set_armd3_sequence(smu, 1431 + smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); 1432 + else 1433 + return smu_v14_0_baco_enter(smu); 1434 + } 1435 + 1436 + static int smu_v14_0_2_baco_exit(struct smu_context *smu) 1437 + { 1438 + struct amdgpu_device *adev = smu->adev; 1439 + 1440 + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { 1441 + /* Wait for PMFW handling for the Dstate change */ 1442 + usleep_range(10000, 11000); 1443 + return smu_v14_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); 1444 + } else { 1445 + return smu_v14_0_baco_exit(smu); 1446 + } 1447 + } 1448 + 1449 + static bool smu_v14_0_2_is_mode1_reset_supported(struct smu_context *smu) 1450 + { 1451 + // TODO 1452 + 1453 + return true; 1454 + } 1455 + 1456 + static int smu_v14_0_2_i2c_xfer(struct i2c_adapter *i2c_adap, 1457 + struct i2c_msg *msg, int num_msgs) 1458 + { 1459 + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap); 1460 + struct amdgpu_device *adev = smu_i2c->adev; 1461 + struct smu_context *smu = adev->powerplay.pp_handle; 1462 + struct smu_table_context *smu_table = &smu->smu_table; 1463 + struct smu_table *table = &smu_table->driver_table; 1464 + SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; 1465 + int i, j, r, c; 1466 + u16 dir; 1467 + 1468 + if (!adev->pm.dpm_enabled) 1469 + return 
-EBUSY; 1470 + 1471 + req = kzalloc(sizeof(*req), GFP_KERNEL); 1472 + if (!req) 1473 + return -ENOMEM; 1474 + 1475 + req->I2CcontrollerPort = smu_i2c->port; 1476 + req->I2CSpeed = I2C_SPEED_FAST_400K; 1477 + req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */ 1478 + dir = msg[0].flags & I2C_M_RD; 1479 + 1480 + for (c = i = 0; i < num_msgs; i++) { 1481 + for (j = 0; j < msg[i].len; j++, c++) { 1482 + SwI2cCmd_t *cmd = &req->SwI2cCmds[c]; 1483 + 1484 + if (!(msg[i].flags & I2C_M_RD)) { 1485 + /* write */ 1486 + cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK; 1487 + cmd->ReadWriteData = msg[i].buf[j]; 1488 + } 1489 + 1490 + if ((dir ^ msg[i].flags) & I2C_M_RD) { 1491 + /* The direction changes. 1492 + */ 1493 + dir = msg[i].flags & I2C_M_RD; 1494 + cmd->CmdConfig |= CMDCONFIG_RESTART_MASK; 1495 + } 1496 + 1497 + req->NumCmds++; 1498 + 1499 + /* 1500 + * Insert STOP if we are at the last byte of either last 1501 + * message for the transaction or the client explicitly 1502 + * requires a STOP at this particular message. 
1503 + */ 1504 + if ((j == msg[i].len - 1) && 1505 + ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) { 1506 + cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK; 1507 + cmd->CmdConfig |= CMDCONFIG_STOP_MASK; 1508 + } 1509 + } 1510 + } 1511 + mutex_lock(&adev->pm.mutex); 1512 + r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); 1513 + mutex_unlock(&adev->pm.mutex); 1514 + if (r) 1515 + goto fail; 1516 + 1517 + for (c = i = 0; i < num_msgs; i++) { 1518 + if (!(msg[i].flags & I2C_M_RD)) { 1519 + c += msg[i].len; 1520 + continue; 1521 + } 1522 + for (j = 0; j < msg[i].len; j++, c++) { 1523 + SwI2cCmd_t *cmd = &res->SwI2cCmds[c]; 1524 + 1525 + msg[i].buf[j] = cmd->ReadWriteData; 1526 + } 1527 + } 1528 + r = num_msgs; 1529 + fail: 1530 + kfree(req); 1531 + return r; 1532 + } 1533 + 1534 + static u32 smu_v14_0_2_i2c_func(struct i2c_adapter *adap) 1535 + { 1536 + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 1537 + } 1538 + 1539 + static const struct i2c_algorithm smu_v14_0_2_i2c_algo = { 1540 + .master_xfer = smu_v14_0_2_i2c_xfer, 1541 + .functionality = smu_v14_0_2_i2c_func, 1542 + }; 1543 + 1544 + static const struct i2c_adapter_quirks smu_v14_0_2_i2c_control_quirks = { 1545 + .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN, 1546 + .max_read_len = MAX_SW_I2C_COMMANDS, 1547 + .max_write_len = MAX_SW_I2C_COMMANDS, 1548 + .max_comb_1st_msg_len = 2, 1549 + .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2, 1550 + }; 1551 + 1552 + static int smu_v14_0_2_i2c_control_init(struct smu_context *smu) 1553 + { 1554 + struct amdgpu_device *adev = smu->adev; 1555 + int res, i; 1556 + 1557 + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { 1558 + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 1559 + struct i2c_adapter *control = &smu_i2c->adapter; 1560 + 1561 + smu_i2c->adev = adev; 1562 + smu_i2c->port = i; 1563 + mutex_init(&smu_i2c->mutex); 1564 + control->owner = THIS_MODULE; 1565 + control->class = I2C_CLASS_SPD; 1566 + control->dev.parent = 
&adev->pdev->dev; 1567 + control->algo = &smu_v14_0_2_i2c_algo; 1568 + snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); 1569 + control->quirks = &smu_v14_0_2_i2c_control_quirks; 1570 + i2c_set_adapdata(control, smu_i2c); 1571 + 1572 + res = i2c_add_adapter(control); 1573 + if (res) { 1574 + DRM_ERROR("Failed to register hw i2c, err: %d\n", res); 1575 + goto Out_err; 1576 + } 1577 + } 1578 + 1579 + /* assign the buses used for the FRU EEPROM and RAS EEPROM */ 1580 + /* XXX ideally this would be something in a vbios data table */ 1581 + adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; 1582 + adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; 1583 + 1584 + return 0; 1585 + Out_err: 1586 + for ( ; i >= 0; i--) { 1587 + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 1588 + struct i2c_adapter *control = &smu_i2c->adapter; 1589 + 1590 + i2c_del_adapter(control); 1591 + } 1592 + return res; 1593 + } 1594 + 1595 + static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu) 1596 + { 1597 + struct amdgpu_device *adev = smu->adev; 1598 + int i; 1599 + 1600 + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { 1601 + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 1602 + struct i2c_adapter *control = &smu_i2c->adapter; 1603 + 1604 + i2c_del_adapter(control); 1605 + } 1606 + adev->pm.ras_eeprom_i2c_bus = NULL; 1607 + adev->pm.fru_eeprom_i2c_bus = NULL; 1608 + } 1609 + 1610 + static int smu_v14_0_2_set_mp1_state(struct smu_context *smu, 1611 + enum pp_mp1_state mp1_state) 1612 + { 1613 + int ret; 1614 + 1615 + switch (mp1_state) { 1616 + case PP_MP1_STATE_UNLOAD: 1617 + ret = smu_cmn_set_mp1_state(smu, mp1_state); 1618 + break; 1619 + default: 1620 + /* Ignore others */ 1621 + ret = 0; 1622 + } 1623 + 1624 + return ret; 1625 + } 1626 + 1627 + static int smu_v14_0_2_set_df_cstate(struct smu_context *smu, 1628 + enum pp_df_cstate state) 1629 + { 1630 + return smu_cmn_send_smc_msg_with_param(smu, 1631 + 
SMU_MSG_DFCstateControl, 1632 + state, 1633 + NULL); 1634 + } 1635 + 1636 + static int smu_v14_0_2_mode1_reset(struct smu_context *smu) 1637 + { 1638 + int ret = 0; 1639 + 1640 + // TODO 1641 + 1642 + return ret; 1643 + } 1644 + 1645 + static int smu_v14_0_2_mode2_reset(struct smu_context *smu) 1646 + { 1647 + int ret = 0; 1648 + 1649 + // TODO 1650 + 1651 + return ret; 1652 + } 1653 + 1654 + static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu) 1655 + { 1656 + struct amdgpu_device *adev = smu->adev; 1657 + 1658 + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(14, 0, 2)) 1659 + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures, 1660 + FEATURE_PWR_GFX, NULL); 1661 + else 1662 + return -EOPNOTSUPP; 1663 + } 1664 + 1665 + static void smu_v14_0_2_set_smu_mailbox_registers(struct smu_context *smu) 1666 + { 1667 + struct amdgpu_device *adev = smu->adev; 1668 + 1669 + smu->param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_82); 1670 + smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_66); 1671 + smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_90); 1672 + } 1673 + 1674 + static int smu_v14_0_2_smu_send_bad_mem_page_num(struct smu_context *smu, 1675 + uint32_t size) 1676 + { 1677 + int ret = 0; 1678 + 1679 + /* message SMU to update the bad page number on SMUBUS */ 1680 + ret = smu_cmn_send_smc_msg_with_param(smu, 1681 + SMU_MSG_SetNumBadMemoryPagesRetired, 1682 + size, NULL); 1683 + if (ret) 1684 + dev_err(smu->adev->dev, 1685 + "[%s] failed to message SMU to update bad memory pages number\n", 1686 + __func__); 1687 + 1688 + return ret; 1689 + } 1690 + 1691 + static int smu_v14_0_2_send_bad_mem_channel_flag(struct smu_context *smu, 1692 + uint32_t size) 1693 + { 1694 + int ret = 0; 1695 + 1696 + /* message SMU to update the bad channel info on SMUBUS */ 1697 + ret = smu_cmn_send_smc_msg_with_param(smu, 1698 + SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 1699 + size, NULL); 1700 + if (ret) 1701 + 
dev_err(smu->adev->dev, 1702 + "[%s] failed to message SMU to update bad memory pages channel info\n", 1703 + __func__); 1704 + 1705 + return ret; 1706 + } 1707 + 1708 + static ssize_t smu_v14_0_2_get_ecc_info(struct smu_context *smu, 1709 + void *table) 1710 + { 1711 + int ret = 0; 1712 + 1713 + // TODO 1714 + 1715 + return ret; 1716 + } 1717 + 1718 + static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { 1719 + .get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask, 1720 + .set_default_dpm_table = smu_v14_0_2_set_default_dpm_table, 1721 + .i2c_init = smu_v14_0_2_i2c_control_init, 1722 + .i2c_fini = smu_v14_0_2_i2c_control_fini, 1723 + .is_dpm_running = smu_v14_0_2_is_dpm_running, 1724 + .dump_pptable = smu_v14_0_2_dump_pptable, 1725 + .init_microcode = smu_v14_0_init_microcode, 1726 + .load_microcode = smu_v14_0_load_microcode, 1727 + .fini_microcode = smu_v14_0_fini_microcode, 1728 + .init_smc_tables = smu_v14_0_2_init_smc_tables, 1729 + .fini_smc_tables = smu_v14_0_fini_smc_tables, 1730 + .init_power = smu_v14_0_init_power, 1731 + .fini_power = smu_v14_0_fini_power, 1732 + .check_fw_status = smu_v14_0_check_fw_status, 1733 + .setup_pptable = smu_v14_0_2_setup_pptable, 1734 + .check_fw_version = smu_v14_0_check_fw_version, 1735 + .write_pptable = smu_cmn_write_pptable, 1736 + .set_driver_table_location = smu_v14_0_set_driver_table_location, 1737 + .system_features_control = smu_v14_0_system_features_control, 1738 + .set_allowed_mask = smu_v14_0_set_allowed_mask, 1739 + .get_enabled_mask = smu_cmn_get_enabled_mask, 1740 + .dpm_set_vcn_enable = smu_v14_0_set_vcn_enable, 1741 + .dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable, 1742 + .get_dpm_ultimate_freq = smu_v14_0_2_get_dpm_ultimate_freq, 1743 + .get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values, 1744 + .read_sensor = smu_v14_0_2_read_sensor, 1745 + .feature_is_enabled = smu_cmn_feature_is_enabled, 1746 + .print_clk_levels = smu_v14_0_2_print_clk_levels, 1747 + .force_clk_levels = 
smu_v14_0_2_force_clk_levels, 1748 + .update_pcie_parameters = smu_v14_0_2_update_pcie_parameters, 1749 + .get_thermal_temperature_range = smu_v14_0_2_get_thermal_temperature_range, 1750 + .register_irq_handler = smu_v14_0_register_irq_handler, 1751 + .notify_memory_pool_location = smu_v14_0_notify_memory_pool_location, 1752 + .set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range, 1753 + .init_pptable_microcode = smu_v14_0_init_pptable_microcode, 1754 + .populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk, 1755 + .set_performance_level = smu_v14_0_set_performance_level, 1756 + .gfx_off_control = smu_v14_0_gfx_off_control, 1757 + .get_unique_id = smu_v14_0_2_get_unique_id, 1758 + .get_power_limit = smu_v14_0_2_get_power_limit, 1759 + .set_power_limit = smu_v14_0_set_power_limit, 1760 + .set_power_source = smu_v14_0_set_power_source, 1761 + .get_power_profile_mode = smu_v14_0_2_get_power_profile_mode, 1762 + .set_power_profile_mode = smu_v14_0_2_set_power_profile_mode, 1763 + .run_btc = smu_v14_0_run_btc, 1764 + .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 1765 + .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, 1766 + .set_tool_table_location = smu_v14_0_set_tool_table_location, 1767 + .deep_sleep_control = smu_v14_0_deep_sleep_control, 1768 + .gfx_ulv_control = smu_v14_0_gfx_ulv_control, 1769 + .get_bamaco_support = smu_v14_0_get_bamaco_support, 1770 + .baco_get_state = smu_v14_0_baco_get_state, 1771 + .baco_set_state = smu_v14_0_baco_set_state, 1772 + .baco_enter = smu_v14_0_2_baco_enter, 1773 + .baco_exit = smu_v14_0_2_baco_exit, 1774 + .mode1_reset_is_support = smu_v14_0_2_is_mode1_reset_supported, 1775 + .mode1_reset = smu_v14_0_2_mode1_reset, 1776 + .mode2_reset = smu_v14_0_2_mode2_reset, 1777 + .enable_gfx_features = smu_v14_0_2_enable_gfx_features, 1778 + .set_mp1_state = smu_v14_0_2_set_mp1_state, 1779 + .set_df_cstate = smu_v14_0_2_set_df_cstate, 1780 + .send_hbm_bad_pages_num = smu_v14_0_2_smu_send_bad_mem_page_num, 1781 + 
.send_hbm_bad_channel_flag = smu_v14_0_2_send_bad_mem_channel_flag, 1782 + .gpo_control = smu_v14_0_gpo_control, 1783 + .get_ecc_info = smu_v14_0_2_get_ecc_info, 1784 + }; 1785 + 1786 + void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu) 1787 + { 1788 + smu->ppt_funcs = &smu_v14_0_2_ppt_funcs; 1789 + smu->message_map = smu_v14_0_2_message_map; 1790 + smu->clock_map = smu_v14_0_2_clk_map; 1791 + smu->feature_map = smu_v14_0_2_feature_mask_map; 1792 + smu->table_map = smu_v14_0_2_table_map; 1793 + smu->pwr_src_map = smu_v14_0_2_pwr_src_map; 1794 + smu->workload_map = smu_v14_0_2_workload_map; 1795 + smu_v14_0_2_set_smu_mailbox_registers(smu); 1796 + }
+28
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h
··· 1 + /* 2 + * Copyright 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + */ 23 + #ifndef __SMU_V14_0_2_PPT_H__ 24 + #define __SMU_V14_0_2_PPT_H__ 25 + 26 + extern void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu); 27 + 28 + #endif
+5 -5
drivers/gpu/drm/radeon/pptable.h
··· 424 424 typedef struct _ATOM_PPLIB_STATE_V2 425 425 { 426 426 //number of valid dpm levels in this state; Driver uses it to calculate the whole 427 - //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR) 427 + //size of the state: struct_size(ATOM_PPLIB_STATE_V2, clockInfoIndex, ucNumDPMLevels) 428 428 UCHAR ucNumDPMLevels; 429 429 430 430 //a index to the array of nonClockInfos ··· 432 432 /** 433 433 * Driver will read the first ucNumDPMLevels in this array 434 434 */ 435 - UCHAR clockInfoIndex[1]; 435 + UCHAR clockInfoIndex[] __counted_by(ucNumDPMLevels); 436 436 } ATOM_PPLIB_STATE_V2; 437 437 438 438 typedef struct _StateArray{ 439 439 //how many states we have 440 440 UCHAR ucNumEntries; 441 441 442 - ATOM_PPLIB_STATE_V2 states[1]; 442 + ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries); 443 443 }StateArray; 444 444 445 445 ··· 450 450 //sizeof(ATOM_PPLIB_CLOCK_INFO) 451 451 UCHAR ucEntrySize; 452 452 453 - UCHAR clockInfo[1]; 453 + UCHAR clockInfo[] __counted_by(ucNumEntries); 454 454 }ClockInfoArray; 455 455 456 456 typedef struct _NonClockInfoArray{ ··· 460 460 //sizeof(ATOM_PPLIB_NONCLOCK_INFO) 461 461 UCHAR ucEntrySize; 462 462 463 - ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1]; 463 + ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries); 464 464 }NonClockInfoArray; 465 465 466 466 typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+6 -2
drivers/gpu/drm/radeon/radeon_atombios.c
··· 923 923 max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO; 924 924 925 925 for (i = 0; i < max_device; i++) { 926 - ATOM_CONNECTOR_INFO_I2C ci = 927 - supported_devices->info.asConnInfo[i]; 926 + ATOM_CONNECTOR_INFO_I2C ci; 927 + 928 + if (frev > 1) 929 + ci = supported_devices->info_2d1.asConnInfo[i]; 930 + else 931 + ci = supported_devices->info.asConnInfo[i]; 928 932 929 933 bios_connectors[i].valid = false; 930 934