Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'amd-drm-next-6.3-2023-01-06' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.3-2023-01-06:

amdgpu:
- secure display support for multiple displays
- DML optimizations
- DCN 3.2 updates
- PSR updates
- DP 2.1 updates
- SR-IOV RAS updates
- VCN RAS support
- SMU 13.x updates
- Switch 1 element arrays to flexible arrays
- Add RAS support for DF 4.3
- Stack size improvements
- S0ix rework
- Soft reset fix
- Allow 0 as a vram limit on APUs
- Display fixes
- Misc code cleanups
- Documentation fixes
- Handle profiling modes for SMU13.x

amdkfd:
- Error handling fixes
- PASID fixes

radeon:
- Switch 1 element arrays to flexible arrays

drm:
- Add DP adaptive sync DPCD definitions

UAPI:
- Add new INFO queries for peak and min sclk/mclk for profile modes on newer chips
Proposed mesa patch: https://gitlab.freedesktop.org/mesa/drm/-/merge_requests/278

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230106222037.7870-1-alexander.deucher@amd.com

+1980 -1000
+2 -1
drivers/gpu/drm/amd/amdgpu/Makefile
··· 81 81 # add DF block 82 82 amdgpu-y += \ 83 83 df_v1_7.o \ 84 - df_v3_6.o 84 + df_v3_6.o \ 85 + df_v4_3.o 85 86 86 87 # add GMC block 87 88 amdgpu-y += \
+10 -4
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 149 149 * Modules parameters. 150 150 */ 151 151 extern int amdgpu_modeset; 152 - extern int amdgpu_vram_limit; 152 + extern unsigned int amdgpu_vram_limit; 153 153 extern int amdgpu_vis_vram_limit; 154 154 extern int amdgpu_gart_size; 155 155 extern int amdgpu_gtt_size; ··· 194 194 extern uint amdgpu_smu_memory_pool_size; 195 195 extern int amdgpu_smu_pptable_id; 196 196 extern uint amdgpu_dc_feature_mask; 197 + extern uint amdgpu_freesync_vid_mode; 197 198 extern uint amdgpu_dc_debug_mask; 198 199 extern uint amdgpu_dc_visual_confirm; 199 200 extern uint amdgpu_dm_abm_level; ··· 608 607 struct drm_file *filp); 609 608 610 609 /* VRAM scratch page for HDP bug, default vram page */ 611 - struct amdgpu_vram_scratch { 610 + struct amdgpu_mem_scratch { 612 611 struct amdgpu_bo *robj; 613 612 volatile uint32_t *ptr; 614 613 u64 gpu_addr; ··· 755 754 #define AMDGPU_PRODUCT_NAME_LEN 64 756 755 struct amdgpu_reset_domain; 757 756 757 + /* 758 + * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise. 759 + */ 760 + #define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size) 761 + 758 762 struct amdgpu_device { 759 763 struct device *dev; 760 764 struct pci_dev *pdev; ··· 853 847 854 848 /* memory management */ 855 849 struct amdgpu_mman mman; 856 - struct amdgpu_vram_scratch vram_scratch; 850 + struct amdgpu_mem_scratch mem_scratch; 857 851 struct amdgpu_wb wb; 858 852 atomic64_t num_bytes_moved; 859 853 atomic64_t num_evictions; ··· 875 869 struct amdgpu_vkms_output *amdgpu_vkms_output; 876 870 struct amdgpu_mode_info mode_info; 877 871 /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */ 878 - struct work_struct hotplug_work; 872 + struct delayed_work hotplug_work; 879 873 struct amdgpu_irq_src crtc_irq; 880 874 struct amdgpu_irq_src vline0_irq; 881 875 struct amdgpu_irq_src vupdate_irq;
+21 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
··· 996 996 } 997 997 } 998 998 999 + if (amdgpu_connector->detected_hpd_without_ddc) { 1000 + force = true; 1001 + amdgpu_connector->detected_hpd_without_ddc = false; 1002 + } 1003 + 999 1004 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { 1000 1005 ret = connector->status; 1001 1006 goto exit; 1002 1007 } 1003 1008 1004 - if (amdgpu_connector->ddc_bus) 1009 + if (amdgpu_connector->ddc_bus) { 1005 1010 dret = amdgpu_display_ddc_probe(amdgpu_connector, false); 1011 + 1012 + /* Sometimes the pins required for the DDC probe on DVI 1013 + * connectors don't make contact at the same time that the ones 1014 + * for HPD do. If the DDC probe fails even though we had an HPD 1015 + * signal, try again later 1016 + */ 1017 + if (!dret && !force && 1018 + amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { 1019 + DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n"); 1020 + amdgpu_connector->detected_hpd_without_ddc = true; 1021 + schedule_delayed_work(&adev->hotplug_work, 1022 + msecs_to_jiffies(1000)); 1023 + goto exit; 1024 + } 1025 + } 1006 1026 if (dret) { 1007 1027 amdgpu_connector->detected_by_load = false; 1008 1028 amdgpu_connector_free_edid(connector);
+31 -41
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 924 924 } 925 925 926 926 /** 927 - * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page 927 + * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page 928 928 * 929 929 * @adev: amdgpu_device pointer 930 930 * 931 931 * Allocates a scratch page of VRAM for use by various things in the 932 932 * driver. 933 933 */ 934 - static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev) 934 + static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev) 935 935 { 936 - return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, 937 - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 938 - &adev->vram_scratch.robj, 939 - &adev->vram_scratch.gpu_addr, 940 - (void **)&adev->vram_scratch.ptr); 936 + return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE, 937 + AMDGPU_GEM_DOMAIN_VRAM | 938 + AMDGPU_GEM_DOMAIN_GTT, 939 + &adev->mem_scratch.robj, 940 + &adev->mem_scratch.gpu_addr, 941 + (void **)&adev->mem_scratch.ptr); 941 942 } 942 943 943 944 /** 944 - * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page 945 + * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page 945 946 * 946 947 * @adev: amdgpu_device pointer 947 948 * 948 949 * Frees the VRAM scratch page. 
949 950 */ 950 - static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev) 951 + static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev) 951 952 { 952 - amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL); 953 + amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL); 953 954 } 954 955 955 956 /** ··· 2391 2390 if (amdgpu_sriov_vf(adev)) 2392 2391 amdgpu_virt_exchange_data(adev); 2393 2392 2394 - r = amdgpu_device_vram_scratch_init(adev); 2393 + r = amdgpu_device_mem_scratch_init(adev); 2395 2394 if (r) { 2396 - DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); 2395 + DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r); 2397 2396 goto init_failed; 2398 2397 } 2399 2398 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); ··· 2411 2410 /* right after GMC hw init, we create CSA */ 2412 2411 if (amdgpu_mcbp) { 2413 2412 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj, 2414 - AMDGPU_GEM_DOMAIN_VRAM, 2415 - AMDGPU_CSA_SIZE); 2413 + AMDGPU_GEM_DOMAIN_VRAM | 2414 + AMDGPU_GEM_DOMAIN_GTT, 2415 + AMDGPU_CSA_SIZE); 2416 2416 if (r) { 2417 2417 DRM_ERROR("allocate CSA failed %d\n", r); 2418 2418 goto init_failed; ··· 2583 2581 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 2584 2582 if (!adev->ip_blocks[i].status.late_initialized) 2585 2583 continue; 2586 - /* skip CG for GFX on S0ix */ 2584 + /* skip CG for GFX, SDMA on S0ix */ 2587 2585 if (adev->in_s0ix && 2588 - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) 2586 + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || 2587 + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) 2589 2588 continue; 2590 2589 /* skip CG for VCE/UVD, it's handled specially */ 2591 2590 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && ··· 2620 2617 i = state == AMD_PG_STATE_GATE ? 
j : adev->num_ip_blocks - j - 1; 2621 2618 if (!adev->ip_blocks[i].status.late_initialized) 2622 2619 continue; 2623 - /* skip PG for GFX on S0ix */ 2620 + /* skip PG for GFX, SDMA on S0ix */ 2624 2621 if (adev->in_s0ix && 2625 - adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX) 2622 + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || 2623 + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) 2626 2624 continue; 2627 2625 /* skip CG for VCE/UVD, it's handled specially */ 2628 2626 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && ··· 2875 2871 amdgpu_ucode_free_bo(adev); 2876 2872 amdgpu_free_static_csa(&adev->virt.csa_obj); 2877 2873 amdgpu_device_wb_fini(adev); 2878 - amdgpu_device_vram_scratch_fini(adev); 2874 + amdgpu_device_mem_scratch_fini(adev); 2879 2875 amdgpu_ib_pool_fini(adev); 2880 2876 } 2881 2877 ··· 3029 3025 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || 3030 3026 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX || 3031 3027 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES)) 3028 + continue; 3029 + 3030 + /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */ 3031 + if (adev->in_s0ix && 3032 + (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) && 3033 + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) 3032 3034 continue; 3033 3035 3034 3036 /* XXX handle errors */ ··· 3237 3227 return r; 3238 3228 } 3239 3229 adev->ip_blocks[i].status.hw = true; 3240 - 3241 - if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 3242 - /* disable gfxoff for IP resume. The gfxoff will be re-enabled in 3243 - * amdgpu_device_resume() after IP resume. 
3244 - */ 3245 - amdgpu_gfx_off_ctrl(adev, false); 3246 - DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n"); 3247 - } 3248 - 3249 3230 } 3250 3231 3251 3232 return 0; ··· 4222 4221 /* Make sure IB tests flushed */ 4223 4222 flush_delayed_work(&adev->delayed_init_work); 4224 4223 4225 - if (adev->in_s0ix) { 4226 - /* re-enable gfxoff after IP resume. This re-enables gfxoff after 4227 - * it was disabled for IP resume in amdgpu_device_ip_resume_phase2(). 4228 - */ 4229 - amdgpu_gfx_off_ctrl(adev, true); 4230 - DRM_DEBUG("will enable gfxoff for the mission mode\n"); 4231 - } 4232 4224 if (fbcon) 4233 4225 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false); 4234 4226 ··· 4602 4608 if (!amdgpu_ras_is_poison_mode_supported(adev)) 4603 4609 return true; 4604 4610 4605 - if (!amdgpu_device_ip_check_soft_reset(adev)) { 4606 - dev_info(adev->dev,"Timeout, but no hardware hang detected.\n"); 4607 - return false; 4608 - } 4609 - 4610 4611 if (amdgpu_sriov_vf(adev)) 4611 4612 return true; 4612 4613 ··· 4726 4737 if (!need_full_reset) 4727 4738 need_full_reset = amdgpu_device_ip_need_full_reset(adev); 4728 4739 4729 - if (!need_full_reset && amdgpu_gpu_recovery) { 4740 + if (!need_full_reset && amdgpu_gpu_recovery && 4741 + amdgpu_device_ip_check_soft_reset(adev)) { 4730 4742 amdgpu_device_ip_pre_soft_reset(adev); 4731 4743 r = amdgpu_device_ip_soft_reset(adev); 4732 4744 amdgpu_device_ip_post_soft_reset(adev);
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
··· 33 33 #include "gmc_v9_0.h" 34 34 #include "df_v1_7.h" 35 35 #include "df_v3_6.h" 36 + #include "df_v4_3.h" 36 37 #include "nbio_v6_1.h" 37 38 #include "nbio_v7_0.h" 38 39 #include "nbio_v7_4.h" ··· 2329 2328 case IP_VERSION(3, 5, 1): 2330 2329 case IP_VERSION(3, 5, 2): 2331 2330 adev->df.funcs = &df_v1_7_funcs; 2331 + break; 2332 + case IP_VERSION(4, 3, 0): 2333 + adev->df.funcs = &df_v4_3_funcs; 2332 2334 break; 2333 2335 default: 2334 2336 break;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
··· 63 63 void amdgpu_display_hotplug_work_func(struct work_struct *work) 64 64 { 65 65 struct amdgpu_device *adev = container_of(work, struct amdgpu_device, 66 - hotplug_work); 66 + hotplug_work.work); 67 67 struct drm_device *dev = adev_to_drm(adev); 68 68 struct drm_mode_config *mode_config = &dev->mode_config; 69 69 struct drm_connector *connector;
+32 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 105 105 * - 3.46.0 - To enable hot plug amdgpu tests in libdrm 106 106 * - 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags 107 107 * - 3.48.0 - Add IP discovery version info to HW INFO 108 - * 3.49.0 - Add gang submit into CS IOCTL 108 + * - 3.49.0 - Add gang submit into CS IOCTL 109 + * - 3.50.0 - Update AMDGPU_INFO_DEV_INFO IOCTL for minimum engine and memory clock 110 + * Update AMDGPU_INFO_SENSOR IOCTL for PEAK_PSTATE engine and memory clock 109 111 */ 110 112 #define KMS_DRIVER_MAJOR 3 111 - #define KMS_DRIVER_MINOR 49 113 + #define KMS_DRIVER_MINOR 50 112 114 #define KMS_DRIVER_PATCHLEVEL 0 113 115 114 - int amdgpu_vram_limit; 116 + unsigned int amdgpu_vram_limit = UINT_MAX; 115 117 int amdgpu_vis_vram_limit; 116 118 int amdgpu_gart_size = -1; /* auto */ 117 119 int amdgpu_gtt_size = -1; /* auto */ ··· 183 181 int amdgpu_noretry = -1; 184 182 int amdgpu_force_asic_type = -1; 185 183 int amdgpu_tmz = -1; /* auto */ 184 + uint amdgpu_freesync_vid_mode; 186 185 int amdgpu_reset_method = -1; /* auto */ 187 186 int amdgpu_num_kcq = -1; 188 187 int amdgpu_smartshift_bias; ··· 881 878 */ 882 879 MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)"); 883 880 module_param_named(tmz, amdgpu_tmz, int, 0444); 881 + 882 + /** 883 + * DOC: freesync_video (uint) 884 + * Enable the optimization to adjust front porch timing to achieve seamless 885 + * mode change experience when setting a freesync supported mode for which full 886 + * modeset is not needed. 887 + * 888 + * The Display Core will add a set of modes derived from the base FreeSync 889 + * video mode into the corresponding connector's mode list based on commonly 890 + * used refresh rates and VRR range of the connected display, when users enable 891 + * this feature. From the userspace perspective, they can see a seamless mode 892 + * change experience when the change between different refresh rates under the 893 + * same resolution. 
Additionally, userspace applications such as Video playback 894 + * can read this modeset list and change the refresh rate based on the video 895 + * frame rate. Finally, the userspace can also derive an appropriate mode for a 896 + * particular refresh rate based on the FreeSync Mode and add it to the 897 + * connector's mode list. 898 + * 899 + * Note: This is an experimental feature. 900 + * 901 + * The default value: 0 (off). 902 + */ 903 + MODULE_PARM_DESC( 904 + freesync_video, 905 + "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)"); 906 + module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444); 884 907 885 908 /** 886 909 * DOC: reset_method (int)
+5 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
··· 372 372 * KIQ MQD no matter SRIOV or Bare-metal 373 373 */ 374 374 r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE, 375 - AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj, 376 - &ring->mqd_gpu_addr, &ring->mqd_ptr); 375 + AMDGPU_GEM_DOMAIN_VRAM | 376 + AMDGPU_GEM_DOMAIN_GTT, 377 + &ring->mqd_obj, 378 + &ring->mqd_gpu_addr, 379 + &ring->mqd_ptr); 377 380 if (r) { 378 381 dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r); 379 382 return r;
+8 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 202 202 void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, 203 203 u64 base) 204 204 { 205 + uint64_t vis_limit = (uint64_t)amdgpu_vis_vram_limit << 20; 205 206 uint64_t limit = (uint64_t)amdgpu_vram_limit << 20; 206 207 207 208 mc->vram_start = base; 208 209 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 209 - if (limit && limit < mc->real_vram_size) 210 + if (limit < mc->real_vram_size) 210 211 mc->real_vram_size = limit; 212 + 213 + if (vis_limit && vis_limit < mc->visible_vram_size) 214 + mc->visible_vram_size = vis_limit; 215 + 216 + if (mc->real_vram_size < mc->visible_vram_size) 217 + mc->visible_vram_size = mc->real_vram_size; 211 218 212 219 if (mc->xgmi.num_physical_nodes == 0) { 213 220 mc->fb_start = mc->vram_start;
+26 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 785 785 if (adev->pm.dpm_enabled) { 786 786 dev_info->max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10; 787 787 dev_info->max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10; 788 + dev_info->min_engine_clock = amdgpu_dpm_get_sclk(adev, true) * 10; 789 + dev_info->min_memory_clock = amdgpu_dpm_get_mclk(adev, true) * 10; 788 790 } else { 789 - dev_info->max_engine_clock = adev->clock.default_sclk * 10; 790 - dev_info->max_memory_clock = adev->clock.default_mclk * 10; 791 + dev_info->max_engine_clock = 792 + dev_info->min_engine_clock = 793 + adev->clock.default_sclk * 10; 794 + dev_info->max_memory_clock = 795 + dev_info->min_memory_clock = 796 + adev->clock.default_mclk * 10; 791 797 } 792 798 dev_info->enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; 793 799 dev_info->num_rb_pipes = adev->gfx.config.max_backends_per_se * ··· 1015 1009 /* get stable pstate mclk in Mhz */ 1016 1010 if (amdgpu_dpm_read_sensor(adev, 1017 1011 AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, 1012 + (void *)&ui32, &ui32_size)) { 1013 + return -EINVAL; 1014 + } 1015 + ui32 /= 100; 1016 + break; 1017 + case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK: 1018 + /* get peak pstate sclk in Mhz */ 1019 + if (amdgpu_dpm_read_sensor(adev, 1020 + AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK, 1021 + (void *)&ui32, &ui32_size)) { 1022 + return -EINVAL; 1023 + } 1024 + ui32 /= 100; 1025 + break; 1026 + case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK: 1027 + /* get peak pstate mclk in Mhz */ 1028 + if (amdgpu_dpm_read_sensor(adev, 1029 + AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK, 1018 1030 (void *)&ui32, &ui32_size)) { 1019 1031 return -EINVAL; 1020 1032 }
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
··· 534 534 void *con_priv; 535 535 bool dac_load_detect; 536 536 bool detected_by_load; /* if the connection status was determined by load */ 537 + bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */ 537 538 uint16_t connector_object_id; 538 539 struct amdgpu_hpd hpd; 539 540 struct amdgpu_router router;
+16 -10
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 66 66 /* allocate 4k Page of Local Frame Buffer memory for ring */ 67 67 ring->ring_size = 0x1000; 68 68 ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE, 69 - AMDGPU_GEM_DOMAIN_VRAM, 69 + AMDGPU_GEM_DOMAIN_VRAM | 70 + AMDGPU_GEM_DOMAIN_GTT, 70 71 &adev->firmware.rbuf, 71 72 &ring->ring_mem_mc_addr, 72 73 (void **)&ring->ring_mem); ··· 798 797 799 798 if (!psp->tmr_bo) { 800 799 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; 801 - ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT, 802 - AMDGPU_GEM_DOMAIN_VRAM, 803 - &psp->tmr_bo, &psp->tmr_mc_addr, pptr); 800 + ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, 801 + PSP_TMR_ALIGNMENT, 802 + AMDGPU_HAS_VRAM(psp->adev) ? 803 + AMDGPU_GEM_DOMAIN_VRAM : 804 + AMDGPU_GEM_DOMAIN_GTT, 805 + &psp->tmr_bo, &psp->tmr_mc_addr, 806 + pptr); 804 807 } 805 808 806 809 return ret; ··· 1097 1092 * physical) for ta to host memory 1098 1093 */ 1099 1094 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size, 1100 - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 1095 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | 1096 + AMDGPU_GEM_DOMAIN_GTT, 1101 1097 &mem_ctx->shared_bo, 1102 1098 &mem_ctx->shared_mc_addr, 1103 1099 &mem_ctx->shared_buf); ··· 1907 1901 static int psp_securedisplay_initialize(struct psp_context *psp) 1908 1902 { 1909 1903 int ret; 1910 - struct securedisplay_cmd *securedisplay_cmd; 1904 + struct ta_securedisplay_cmd *securedisplay_cmd; 1911 1905 1912 1906 /* 1913 1907 * TODO: bypass the initialize in sriov for now ··· 3450 3444 3451 3445 /* LFB address which is aligned to 1MB boundary per PSP request */ 3452 3446 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000, 3453 - AMDGPU_GEM_DOMAIN_VRAM, 3454 - &fw_buf_bo, 3455 - &fw_pri_mc_addr, 3456 - &fw_pri_cpu_addr); 3447 + AMDGPU_GEM_DOMAIN_VRAM | 3448 + AMDGPU_GEM_DOMAIN_GTT, 3449 + &fw_buf_bo, &fw_pri_mc_addr, 3450 + &fw_pri_cpu_addr); 3457 3451 if (ret) 3458 3452 goto rel_buf; 3459 3453
+132 -55
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 706 706 return 0; 707 707 } 708 708 709 + static int amdgpu_ras_check_feature_allowed(struct amdgpu_device *adev, 710 + struct ras_common_if *head) 711 + { 712 + if (amdgpu_ras_is_feature_allowed(adev, head) || 713 + amdgpu_ras_is_poison_mode_supported(adev)) 714 + return 1; 715 + else 716 + return 0; 717 + } 718 + 709 719 /* wrapper of psp_ras_enable_features */ 710 720 int amdgpu_ras_feature_enable(struct amdgpu_device *adev, 711 721 struct ras_common_if *head, bool enable) 712 722 { 713 723 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 714 724 union ta_ras_cmd_input *info; 715 - int ret; 725 + int ret = 0; 716 726 717 727 if (!con) 718 728 return -EINVAL; ··· 746 736 } 747 737 748 738 /* Do not enable if it is not allowed. */ 749 - WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head)); 739 + if (enable && !amdgpu_ras_check_feature_allowed(adev, head)) 740 + goto out; 750 741 751 742 /* Only enable ras feature operation handle on host side */ 752 743 if (head->block == AMDGPU_RAS_BLOCK__GFX && ··· 765 754 766 755 /* setup the obj */ 767 756 __amdgpu_ras_feature_enable(adev, head, enable); 768 - ret = 0; 769 757 out: 770 758 if (head->block == AMDGPU_RAS_BLOCK__GFX) 771 759 kfree(info); ··· 1097 1087 info->head.block, 1098 1088 info->head.sub_block_index); 1099 1089 1090 + /* inject on guest isn't allowed, return success directly */ 1091 + if (amdgpu_sriov_vf(adev)) 1092 + return 0; 1093 + 1100 1094 if (!obj) 1101 1095 return -EINVAL; 1102 1096 ··· 1136 1122 } 1137 1123 1138 1124 /** 1139 - * amdgpu_ras_query_error_count -- Get error counts of all IPs 1125 + * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP 1126 + * @adev: pointer to AMD GPU device 1127 + * @ce_count: pointer to an integer to be set to the count of correctible errors. 1128 + * @ue_count: pointer to an integer to be set to the count of uncorrectible errors. 
1129 + * @query_info: pointer to ras_query_if 1130 + * 1131 + * Return 0 for query success or do nothing, otherwise return an error 1132 + * on failures 1133 + */ 1134 + static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev, 1135 + unsigned long *ce_count, 1136 + unsigned long *ue_count, 1137 + struct ras_query_if *query_info) 1138 + { 1139 + int ret; 1140 + 1141 + if (!query_info) 1142 + /* do nothing if query_info is not specified */ 1143 + return 0; 1144 + 1145 + ret = amdgpu_ras_query_error_status(adev, query_info); 1146 + if (ret) 1147 + return ret; 1148 + 1149 + *ce_count += query_info->ce_count; 1150 + *ue_count += query_info->ue_count; 1151 + 1152 + /* some hardware/IP supports read to clear 1153 + * no need to explicitly reset the err status after the query call */ 1154 + if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && 1155 + adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { 1156 + if (amdgpu_ras_reset_error_status(adev, query_info->head.block)) 1157 + dev_warn(adev->dev, 1158 + "Failed to reset error counter and error status\n"); 1159 + } 1160 + 1161 + return 0; 1162 + } 1163 + 1164 + /** 1165 + * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP 1140 1166 * @adev: pointer to AMD GPU device 1141 1167 * @ce_count: pointer to an integer to be set to the count of correctible errors. 1142 1168 * @ue_count: pointer to an integer to be set to the count of uncorrectible 1143 1169 * errors. 1170 + * @query_info: pointer to ras_query_if if the query request is only for 1171 + * specific ip block; if info is NULL, then the query request is for 1172 + * all the ip blocks that support query ras error counters/status 1144 1173 * 1145 1174 * If set, @ce_count or @ue_count, count and return the corresponding 1146 1175 * error counts in those integer pointers. 
Return 0 if the device ··· 1191 1134 */ 1192 1135 int amdgpu_ras_query_error_count(struct amdgpu_device *adev, 1193 1136 unsigned long *ce_count, 1194 - unsigned long *ue_count) 1137 + unsigned long *ue_count, 1138 + struct ras_query_if *query_info) 1195 1139 { 1196 1140 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1197 1141 struct ras_manager *obj; 1198 1142 unsigned long ce, ue; 1143 + int ret; 1199 1144 1200 1145 if (!adev->ras_enabled || !con) 1201 1146 return -EOPNOTSUPP; ··· 1209 1150 1210 1151 ce = 0; 1211 1152 ue = 0; 1212 - list_for_each_entry(obj, &con->head, node) { 1213 - struct ras_query_if info = { 1214 - .head = obj->head, 1215 - }; 1216 - int res; 1153 + if (!query_info) { 1154 + /* query all the ip blocks that support ras query interface */ 1155 + list_for_each_entry(obj, &con->head, node) { 1156 + struct ras_query_if info = { 1157 + .head = obj->head, 1158 + }; 1217 1159 1218 - res = amdgpu_ras_query_error_status(adev, &info); 1219 - if (res) 1220 - return res; 1221 - 1222 - if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && 1223 - adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { 1224 - if (amdgpu_ras_reset_error_status(adev, info.head.block)) 1225 - dev_warn(adev->dev, "Failed to reset error counter and error status"); 1160 + ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info); 1226 1161 } 1227 - 1228 - ce += info.ce_count; 1229 - ue += info.ue_count; 1162 + } else { 1163 + /* query specific ip block */ 1164 + ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info); 1230 1165 } 1166 + 1167 + if (ret) 1168 + return ret; 1231 1169 1232 1170 if (ce_count) 1233 1171 *ce_count = ce; ··· 2400 2344 2401 2345 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) { 2402 2346 dev_info(adev->dev, "SRAM ECC is active.\n"); 2403 - if (!amdgpu_sriov_vf(adev)) { 2347 + if (!amdgpu_sriov_vf(adev)) 2404 2348 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC | 2405 2349 1 << AMDGPU_RAS_BLOCK__DF); 2406 - 2407 
- if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) || 2408 - adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0)) 2409 - adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN | 2410 - 1 << AMDGPU_RAS_BLOCK__JPEG); 2411 - else 2412 - adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN | 2413 - 1 << AMDGPU_RAS_BLOCK__JPEG); 2414 - } else { 2350 + else 2415 2351 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF | 2416 2352 1 << AMDGPU_RAS_BLOCK__SDMA | 2417 2353 1 << AMDGPU_RAS_BLOCK__GFX); 2418 - } 2354 + 2355 + /* VCN/JPEG RAS can be supported on both bare metal and 2356 + * SRIOV environment 2357 + */ 2358 + if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) || 2359 + adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0)) 2360 + adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN | 2361 + 1 << AMDGPU_RAS_BLOCK__JPEG); 2362 + else 2363 + adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN | 2364 + 1 << AMDGPU_RAS_BLOCK__JPEG); 2419 2365 } else { 2420 2366 dev_info(adev->dev, "SRAM ECC is not presented.\n"); 2421 2367 } ··· 2453 2395 2454 2396 /* Cache new values. 
2455 2397 */ 2456 - if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) { 2398 + if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) { 2457 2399 atomic_set(&con->ras_ce_count, ce_count); 2458 2400 atomic_set(&con->ras_ue_count, ue_count); 2459 2401 } ··· 2463 2405 pm_runtime_put_autosuspend(dev->dev); 2464 2406 } 2465 2407 2408 + static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev) 2409 + { 2410 + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2411 + bool df_poison, umc_poison; 2412 + 2413 + /* poison setting is useless on SRIOV guest */ 2414 + if (amdgpu_sriov_vf(adev) || !con) 2415 + return; 2416 + 2417 + /* Init poison supported flag, the default value is false */ 2418 + if (adev->gmc.xgmi.connected_to_cpu) { 2419 + /* enabled by default when GPU is connected to CPU */ 2420 + con->poison_supported = true; 2421 + } else if (adev->df.funcs && 2422 + adev->df.funcs->query_ras_poison_mode && 2423 + adev->umc.ras && 2424 + adev->umc.ras->query_ras_poison_mode) { 2425 + df_poison = 2426 + adev->df.funcs->query_ras_poison_mode(adev); 2427 + umc_poison = 2428 + adev->umc.ras->query_ras_poison_mode(adev); 2429 + 2430 + /* Only poison is set in both DF and UMC, we can support it */ 2431 + if (df_poison && umc_poison) 2432 + con->poison_supported = true; 2433 + else if (df_poison != umc_poison) 2434 + dev_warn(adev->dev, 2435 + "Poison setting is inconsistent in DF/UMC(%d:%d)!\n", 2436 + df_poison, umc_poison); 2437 + } 2438 + } 2439 + 2466 2440 int amdgpu_ras_init(struct amdgpu_device *adev) 2467 2441 { 2468 2442 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2469 2443 int r; 2470 - bool df_poison, umc_poison; 2471 2444 2472 2445 if (con) 2473 2446 return 0; ··· 2573 2484 goto release_con; 2574 2485 } 2575 2486 2576 - /* Init poison supported flag, the default value is false */ 2577 - if (adev->gmc.xgmi.connected_to_cpu) { 2578 - /* enabled by default when GPU is connected to CPU */ 2579 - 
con->poison_supported = true; 2580 - } 2581 - else if (adev->df.funcs && 2582 - adev->df.funcs->query_ras_poison_mode && 2583 - adev->umc.ras && 2584 - adev->umc.ras->query_ras_poison_mode) { 2585 - df_poison = 2586 - adev->df.funcs->query_ras_poison_mode(adev); 2587 - umc_poison = 2588 - adev->umc.ras->query_ras_poison_mode(adev); 2589 - /* Only poison is set in both DF and UMC, we can support it */ 2590 - if (df_poison && umc_poison) 2591 - con->poison_supported = true; 2592 - else if (df_poison != umc_poison) 2593 - dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n", 2594 - df_poison, umc_poison); 2595 - } 2487 + amdgpu_ras_query_poison_mode(adev); 2596 2488 2597 2489 if (amdgpu_ras_fs_init(adev)) { 2598 2490 r = -EINVAL; ··· 2634 2564 { 2635 2565 struct amdgpu_ras_block_object *ras_obj = NULL; 2636 2566 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 2567 + struct ras_query_if *query_info; 2637 2568 unsigned long ue_count, ce_count; 2638 2569 int r; 2639 2570 ··· 2676 2605 2677 2606 /* Those are the cached values at init. 2678 2607 */ 2679 - if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) { 2608 + query_info = kzalloc(sizeof(struct ras_query_if), GFP_KERNEL); 2609 + if (!query_info) 2610 + return -ENOMEM; 2611 + memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if)); 2612 + 2613 + if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) { 2680 2614 atomic_set(&con->ras_ce_count, ce_count); 2681 2615 atomic_set(&con->ras_ue_count, ue_count); 2682 2616 } 2683 2617 2618 + kfree(query_info); 2684 2619 return 0; 2685 2620 2686 2621 interrupt:
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
··· 540 540 541 541 int amdgpu_ras_query_error_count(struct amdgpu_device *adev, 542 542 unsigned long *ce_count, 543 - unsigned long *ue_count); 543 + unsigned long *ue_count, 544 + struct ras_query_if *query_info); 544 545 545 546 /* error handling functions */ 546 547 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
+6 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
··· 93 93 94 94 /* allocate save restore block */ 95 95 r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, 96 - AMDGPU_GEM_DOMAIN_VRAM, 96 + AMDGPU_GEM_DOMAIN_VRAM | 97 + AMDGPU_GEM_DOMAIN_GTT, 97 98 &adev->gfx.rlc.save_restore_obj, 98 99 &adev->gfx.rlc.save_restore_gpu_addr, 99 100 (void **)&adev->gfx.rlc.sr_ptr); ··· 131 130 /* allocate clear state block */ 132 131 adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev); 133 132 r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE, 134 - AMDGPU_GEM_DOMAIN_VRAM, 133 + AMDGPU_GEM_DOMAIN_VRAM | 134 + AMDGPU_GEM_DOMAIN_GTT, 135 135 &adev->gfx.rlc.clear_state_obj, 136 136 &adev->gfx.rlc.clear_state_gpu_addr, 137 137 (void **)&adev->gfx.rlc.cs_ptr); ··· 158 156 int r; 159 157 160 158 r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, 161 - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 159 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM | 160 + AMDGPU_GEM_DOMAIN_GTT, 162 161 &adev->gfx.rlc.cp_table_obj, 163 162 &adev->gfx.rlc.cp_table_gpu_addr, 164 163 (void **)&adev->gfx.rlc.cp_table_ptr);
+4 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
··· 77 77 } 78 78 } 79 79 80 - void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd, 80 + void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct ta_securedisplay_cmd **cmd, 81 81 enum ta_securedisplay_command command_id) 82 82 { 83 - *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf; 84 - memset(*cmd, 0, sizeof(struct securedisplay_cmd)); 83 + *cmd = (struct ta_securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf; 84 + memset(*cmd, 0, sizeof(struct ta_securedisplay_cmd)); 85 85 (*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE; 86 86 (*cmd)->cmd_id = command_id; 87 87 } ··· 93 93 { 94 94 struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; 95 95 struct psp_context *psp = &adev->psp; 96 - struct securedisplay_cmd *securedisplay_cmd; 96 + struct ta_securedisplay_cmd *securedisplay_cmd; 97 97 struct drm_device *dev = adev_to_drm(adev); 98 98 uint32_t phy_id; 99 99 uint32_t op;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
··· 30 30 void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev); 31 31 void psp_securedisplay_parse_resp_status(struct psp_context *psp, 32 32 enum ta_securedisplay_status status); 33 - void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd, 33 + void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct ta_securedisplay_cmd **cmd, 34 34 enum ta_securedisplay_command command_id); 35 35 36 36 #endif
+8 -15
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 1679 1679 /* reserve vram for mem train according to TMR location */ 1680 1680 amdgpu_ttm_training_data_block_init(adev); 1681 1681 ret = amdgpu_bo_create_kernel_at(adev, 1682 - ctx->c2p_train_data_offset, 1683 - ctx->train_data_size, 1684 - &ctx->c2p_bo, 1685 - NULL); 1682 + ctx->c2p_train_data_offset, 1683 + ctx->train_data_size, 1684 + &ctx->c2p_bo, 1685 + NULL); 1686 1686 if (ret) { 1687 1687 DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); 1688 1688 amdgpu_ttm_training_reserve_vram_fini(adev); ··· 1692 1692 } 1693 1693 1694 1694 ret = amdgpu_bo_create_kernel_at(adev, 1695 - adev->gmc.real_vram_size - adev->mman.discovery_tmr_size, 1696 - adev->mman.discovery_tmr_size, 1697 - &adev->mman.discovery_memory, 1698 - NULL); 1695 + adev->gmc.real_vram_size - adev->mman.discovery_tmr_size, 1696 + adev->mman.discovery_tmr_size, 1697 + &adev->mman.discovery_memory, 1698 + NULL); 1699 1699 if (ret) { 1700 1700 DRM_ERROR("alloc tmr failed(%d)!\n", ret); 1701 1701 amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL); ··· 1718 1718 { 1719 1719 uint64_t gtt_size; 1720 1720 int r; 1721 - u64 vis_vram_limit; 1722 1721 1723 1722 mutex_init(&adev->mman.gtt_window_lock); 1724 1723 ··· 1739 1740 DRM_ERROR("Failed initializing VRAM heap.\n"); 1740 1741 return r; 1741 1742 } 1742 - 1743 - /* Reduce size of CPU-visible VRAM if requested */ 1744 - vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024; 1745 - if (amdgpu_vis_vram_limit > 0 && 1746 - vis_vram_limit <= adev->gmc.visible_vram_size) 1747 - adev->gmc.visible_vram_size = vis_vram_limit; 1748 1743 1749 1744 /* Change the size here instead of the init above so only lpfn is affected */ 1750 1745 amdgpu_ttm_set_buffer_funcs_status(adev, false);
+24 -16
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
··· 169 169 { 170 170 int ret = AMDGPU_RAS_SUCCESS; 171 171 172 - if (!adev->gmc.xgmi.connected_to_cpu) { 173 - struct ras_err_data err_data = {0, 0, 0, NULL}; 174 - struct ras_common_if head = { 175 - .block = AMDGPU_RAS_BLOCK__UMC, 176 - }; 177 - struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head); 172 + if (!amdgpu_sriov_vf(adev)) { 173 + if (!adev->gmc.xgmi.connected_to_cpu) { 174 + struct ras_err_data err_data = {0, 0, 0, NULL}; 175 + struct ras_common_if head = { 176 + .block = AMDGPU_RAS_BLOCK__UMC, 177 + }; 178 + struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head); 178 179 179 - ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset); 180 + ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset); 180 181 181 - if (ret == AMDGPU_RAS_SUCCESS && obj) { 182 - obj->err_data.ue_count += err_data.ue_count; 183 - obj->err_data.ce_count += err_data.ce_count; 182 + if (ret == AMDGPU_RAS_SUCCESS && obj) { 183 + obj->err_data.ue_count += err_data.ue_count; 184 + obj->err_data.ce_count += err_data.ce_count; 185 + } 186 + } else if (reset) { 187 + /* MCA poison handler is only responsible for GPU reset, 188 + * let MCA notifier do page retirement. 189 + */ 190 + kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); 191 + amdgpu_ras_reset_gpu(adev); 184 192 } 185 - } else if (reset) { 186 - /* MCA poison handler is only responsible for GPU reset, 187 - * let MCA notifier do page retirement. 188 - */ 189 - kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); 190 - amdgpu_ras_reset_gpu(adev); 193 + } else { 194 + if (adev->virt.ops && adev->virt.ops->ras_poison_handler) 195 + adev->virt.ops->ras_poison_handler(adev); 196 + else 197 + dev_warn(adev->dev, 198 + "No ras_poison_handler interface in SRIOV!\n"); 191 199 } 192 200 193 201 return ret;
+5 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
··· 331 331 if (adev->uvd.harvest_config & (1 << j)) 332 332 continue; 333 333 r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, 334 - AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo, 335 - &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr); 334 + AMDGPU_GEM_DOMAIN_VRAM | 335 + AMDGPU_GEM_DOMAIN_GTT, 336 + &adev->uvd.inst[j].vcpu_bo, 337 + &adev->uvd.inst[j].gpu_addr, 338 + &adev->uvd.inst[j].cpu_addr); 336 339 if (r) { 337 340 dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); 338 341 return r;
+3 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
··· 186 186 (binary_id << 8)); 187 187 188 188 r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, 189 - AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo, 189 + AMDGPU_GEM_DOMAIN_VRAM | 190 + AMDGPU_GEM_DOMAIN_GTT, 191 + &adev->vce.vcpu_bo, 190 192 &adev->vce.gpu_addr, &adev->vce.cpu_addr); 191 193 if (r) { 192 194 dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
+20 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
··· 274 274 continue; 275 275 276 276 r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, 277 - AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo, 278 - &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr); 277 + AMDGPU_GEM_DOMAIN_VRAM | 278 + AMDGPU_GEM_DOMAIN_GTT, 279 + &adev->vcn.inst[i].vcpu_bo, 280 + &adev->vcn.inst[i].gpu_addr, 281 + &adev->vcn.inst[i].cpu_addr); 279 282 if (r) { 280 283 dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r); 281 284 return r; ··· 299 296 300 297 if (adev->vcn.indirect_sram) { 301 298 r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE, 302 - AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo, 303 - &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr); 299 + AMDGPU_GEM_DOMAIN_VRAM | 300 + AMDGPU_GEM_DOMAIN_GTT, 301 + &adev->vcn.inst[i].dpg_sram_bo, 302 + &adev->vcn.inst[i].dpg_sram_gpu_addr, 303 + &adev->vcn.inst[i].dpg_sram_cpu_addr); 304 304 if (r) { 305 305 dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r); 306 306 return r; ··· 1256 1250 if (!ras_if) 1257 1251 return 0; 1258 1252 1259 - ih_data.head = *ras_if; 1260 - amdgpu_ras_interrupt_dispatch(adev, &ih_data); 1253 + if (!amdgpu_sriov_vf(adev)) { 1254 + ih_data.head = *ras_if; 1255 + amdgpu_ras_interrupt_dispatch(adev, &ih_data); 1256 + } else { 1257 + if (adev->virt.ops && adev->virt.ops->ras_poison_handler) 1258 + adev->virt.ops->ras_poison_handler(adev); 1259 + else 1260 + dev_warn(adev->dev, 1261 + "No ras_poison_handler interface in SRIOV for VCN!\n"); 1262 + } 1261 1263 1262 1264 return 0; 1263 1265 }
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
··· 232 232 return 0; 233 233 234 234 r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, 235 - AMDGPU_GEM_DOMAIN_VRAM, 235 + AMDGPU_GEM_DOMAIN_VRAM | 236 + AMDGPU_GEM_DOMAIN_GTT, 236 237 &adev->virt.mm_table.bo, 237 238 &adev->virt.mm_table.gpu_addr, 238 239 (void *)&adev->virt.mm_table.cpu_addr);
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
··· 88 88 int (*wait_reset)(struct amdgpu_device *adev); 89 89 void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req, 90 90 u32 data1, u32 data2, u32 data3); 91 + void (*ras_poison_handler)(struct amdgpu_device *adev); 91 92 }; 92 93 93 94 /*
+3 -3
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
··· 2837 2837 if (r) 2838 2838 return r; 2839 2839 2840 - INIT_WORK(&adev->hotplug_work, 2840 + INIT_DELAYED_WORK(&adev->hotplug_work, 2841 2841 amdgpu_display_hotplug_work_func); 2842 2842 2843 2843 drm_kms_helper_poll_init(adev_to_drm(adev)); ··· 2902 2902 2903 2903 dce_v10_0_pageflip_interrupt_fini(adev); 2904 2904 2905 - flush_work(&adev->hotplug_work); 2905 + flush_delayed_work(&adev->hotplug_work); 2906 2906 2907 2907 return 0; 2908 2908 } ··· 3302 3302 3303 3303 if (disp_int & mask) { 3304 3304 dce_v10_0_hpd_int_ack(adev, hpd); 3305 - schedule_work(&adev->hotplug_work); 3305 + schedule_delayed_work(&adev->hotplug_work, 0); 3306 3306 DRM_DEBUG("IH: HPD%d\n", hpd + 1); 3307 3307 } 3308 3308
+3 -3
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
··· 2956 2956 if (r) 2957 2957 return r; 2958 2958 2959 - INIT_WORK(&adev->hotplug_work, 2959 + INIT_DELAYED_WORK(&adev->hotplug_work, 2960 2960 amdgpu_display_hotplug_work_func); 2961 2961 2962 2962 drm_kms_helper_poll_init(adev_to_drm(adev)); ··· 3032 3032 3033 3033 dce_v11_0_pageflip_interrupt_fini(adev); 3034 3034 3035 - flush_work(&adev->hotplug_work); 3035 + flush_delayed_work(&adev->hotplug_work); 3036 3036 3037 3037 return 0; 3038 3038 } ··· 3426 3426 3427 3427 if (disp_int & mask) { 3428 3428 dce_v11_0_hpd_int_ack(adev, hpd); 3429 - schedule_work(&adev->hotplug_work); 3429 + schedule_delayed_work(&adev->hotplug_work, 0); 3430 3430 DRM_DEBUG("IH: HPD%d\n", hpd + 1); 3431 3431 } 3432 3432
+3 -3
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
··· 2715 2715 return r; 2716 2716 2717 2717 /* Pre-DCE11 */ 2718 - INIT_WORK(&adev->hotplug_work, 2718 + INIT_DELAYED_WORK(&adev->hotplug_work, 2719 2719 amdgpu_display_hotplug_work_func); 2720 2720 2721 2721 drm_kms_helper_poll_init(adev_to_drm(adev)); ··· 2776 2776 2777 2777 dce_v6_0_pageflip_interrupt_fini(adev); 2778 2778 2779 - flush_work(&adev->hotplug_work); 2779 + flush_delayed_work(&adev->hotplug_work); 2780 2780 2781 2781 return 0; 2782 2782 } ··· 3103 3103 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); 3104 3104 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; 3105 3105 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); 3106 - schedule_work(&adev->hotplug_work); 3106 + schedule_delayed_work(&adev->hotplug_work, 0); 3107 3107 DRM_DEBUG("IH: HPD%d\n", hpd + 1); 3108 3108 } 3109 3109
+3 -3
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
··· 2739 2739 return r; 2740 2740 2741 2741 /* Pre-DCE11 */ 2742 - INIT_WORK(&adev->hotplug_work, 2742 + INIT_DELAYED_WORK(&adev->hotplug_work, 2743 2743 amdgpu_display_hotplug_work_func); 2744 2744 2745 2745 drm_kms_helper_poll_init(adev_to_drm(adev)); ··· 2802 2802 2803 2803 dce_v8_0_pageflip_interrupt_fini(adev); 2804 2804 2805 - flush_work(&adev->hotplug_work); 2805 + flush_delayed_work(&adev->hotplug_work); 2806 2806 2807 2807 return 0; 2808 2808 } ··· 3195 3195 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); 3196 3196 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; 3197 3197 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); 3198 - schedule_work(&adev->hotplug_work); 3198 + schedule_delayed_work(&adev->hotplug_work, 0); 3199 3199 DRM_DEBUG("IH: HPD%d\n", hpd + 1); 3200 3200 } 3201 3201
+61
drivers/gpu/drm/amd/amdgpu/df_v4_3.c
··· 1 + /* 2 + * Copyright 2022 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + #include "amdgpu.h" 24 + #include "df_v4_3.h" 25 + 26 + #include "df/df_4_3_offset.h" 27 + #include "df/df_4_3_sh_mask.h" 28 + 29 + static bool df_v4_3_query_ras_poison_mode(struct amdgpu_device *adev) 30 + { 31 + uint32_t hw_assert_msklo, hw_assert_mskhi; 32 + uint32_t v0, v1, v28, v31; 33 + 34 + hw_assert_msklo = RREG32_SOC15(DF, 0, 35 + regDF_CS_UMC_AON0_HardwareAssertMaskLow); 36 + hw_assert_mskhi = RREG32_SOC15(DF, 0, 37 + regDF_NCS_PG0_HardwareAssertMaskHigh); 38 + 39 + v0 = REG_GET_FIELD(hw_assert_msklo, 40 + DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk0); 41 + v1 = REG_GET_FIELD(hw_assert_msklo, 42 + DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk1); 43 + v28 = REG_GET_FIELD(hw_assert_mskhi, 44 + DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk28); 45 + v31 = REG_GET_FIELD(hw_assert_mskhi, 46 + DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk31); 47 + 48 + if (v0 && v1 && v28 && v31) 49 + return true; 50 + else if (!v0 && !v1 && !v28 && !v31) 51 + return false; 52 + else { 53 + dev_warn(adev->dev, "DF poison setting is inconsistent(%d:%d:%d:%d)!\n", 54 + v0, v1, v28, v31); 55 + return false; 56 + } 57 + } 58 + 59 + const struct amdgpu_df_funcs df_v4_3_funcs = { 60 + .query_ras_poison_mode = df_v4_3_query_ras_poison_mode, 61 + };
+31
drivers/gpu/drm/amd/amdgpu/df_v4_3.h
··· 1 + /* 2 + * Copyright 2022 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + */ 23 + 24 + #ifndef __DF_V4_3_H__ 25 + #define __DF_V4_3_H__ 26 + 27 + #include "soc15_common.h" 28 + 29 + extern const struct amdgpu_df_funcs df_v4_3_funcs; 30 + 31 + #endif
+23 -10
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
··· 987 987 total_size = gfx_v11_0_calc_toc_total_size(adev); 988 988 989 989 r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024, 990 - AMDGPU_GEM_DOMAIN_VRAM, 991 - &adev->gfx.rlc.rlc_autoload_bo, 992 - &adev->gfx.rlc.rlc_autoload_gpu_addr, 993 - (void **)&adev->gfx.rlc.rlc_autoload_ptr); 990 + AMDGPU_GEM_DOMAIN_VRAM | 991 + AMDGPU_GEM_DOMAIN_GTT, 992 + &adev->gfx.rlc.rlc_autoload_bo, 993 + &adev->gfx.rlc.rlc_autoload_gpu_addr, 994 + (void **)&adev->gfx.rlc.rlc_autoload_ptr); 994 995 995 996 if (r) { 996 997 dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r); ··· 2650 2649 2651 2650 /* 64kb align */ 2652 2651 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 2653 - 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2652 + 64 * 1024, 2653 + AMDGPU_GEM_DOMAIN_VRAM | 2654 + AMDGPU_GEM_DOMAIN_GTT, 2654 2655 &adev->gfx.pfp.pfp_fw_obj, 2655 2656 &adev->gfx.pfp.pfp_fw_gpu_addr, 2656 2657 (void **)&adev->gfx.pfp.pfp_fw_ptr); ··· 2663 2660 } 2664 2661 2665 2662 r = amdgpu_bo_create_reserved(adev, fw_data_size, 2666 - 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2663 + 64 * 1024, 2664 + AMDGPU_GEM_DOMAIN_VRAM | 2665 + AMDGPU_GEM_DOMAIN_GTT, 2667 2666 &adev->gfx.pfp.pfp_fw_data_obj, 2668 2667 &adev->gfx.pfp.pfp_fw_data_gpu_addr, 2669 2668 (void **)&adev->gfx.pfp.pfp_fw_data_ptr); ··· 2868 2863 2869 2864 /* 64kb align*/ 2870 2865 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 2871 - 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2866 + 64 * 1024, 2867 + AMDGPU_GEM_DOMAIN_VRAM | 2868 + AMDGPU_GEM_DOMAIN_GTT, 2872 2869 &adev->gfx.me.me_fw_obj, 2873 2870 &adev->gfx.me.me_fw_gpu_addr, 2874 2871 (void **)&adev->gfx.me.me_fw_ptr); ··· 2881 2874 } 2882 2875 2883 2876 r = amdgpu_bo_create_reserved(adev, fw_data_size, 2884 - 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 2877 + 64 * 1024, 2878 + AMDGPU_GEM_DOMAIN_VRAM | 2879 + AMDGPU_GEM_DOMAIN_GTT, 2885 2880 &adev->gfx.me.me_fw_data_obj, 2886 2881 &adev->gfx.me.me_fw_data_gpu_addr, 2887 2882 (void **)&adev->gfx.me.me_fw_data_ptr); ··· 3389 3380 fw_data_size 
= le32_to_cpu(mec_hdr->data_size_bytes); 3390 3381 3391 3382 r = amdgpu_bo_create_reserved(adev, fw_ucode_size, 3392 - 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 3383 + 64 * 1024, 3384 + AMDGPU_GEM_DOMAIN_VRAM | 3385 + AMDGPU_GEM_DOMAIN_GTT, 3393 3386 &adev->gfx.mec.mec_fw_obj, 3394 3387 &adev->gfx.mec.mec_fw_gpu_addr, 3395 3388 (void **)&fw_ucode_ptr); ··· 3402 3391 } 3403 3392 3404 3393 r = amdgpu_bo_create_reserved(adev, fw_data_size, 3405 - 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 3394 + 64 * 1024, 3395 + AMDGPU_GEM_DOMAIN_VRAM | 3396 + AMDGPU_GEM_DOMAIN_GTT, 3406 3397 &adev->gfx.mec.mec_fw_data_obj, 3407 3398 &adev->gfx.mec.mec_fw_data_gpu_addr, 3408 3399 (void **)&fw_data_ptr);
+2 -1
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
··· 2375 2375 dws = adev->gfx.rlc.clear_state_size + (256 / 4); 2376 2376 2377 2377 r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, 2378 - AMDGPU_GEM_DOMAIN_VRAM, 2378 + AMDGPU_GEM_DOMAIN_VRAM | 2379 + AMDGPU_GEM_DOMAIN_GTT, 2379 2380 &adev->gfx.rlc.clear_state_obj, 2380 2381 &adev->gfx.rlc.clear_state_gpu_addr, 2381 2382 (void **)&adev->gfx.rlc.cs_ptr);
+2 -1
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
··· 2772 2772 * GFX7_MEC_HPD_SIZE * 2; 2773 2773 2774 2774 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, 2775 - AMDGPU_GEM_DOMAIN_VRAM, 2775 + AMDGPU_GEM_DOMAIN_VRAM | 2776 + AMDGPU_GEM_DOMAIN_GTT, 2776 2777 &adev->gfx.mec.hpd_eop_obj, 2777 2778 &adev->gfx.mec.hpd_eop_gpu_addr, 2778 2779 (void **)&hpd);
+2 -1
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 1340 1340 mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE; 1341 1341 if (mec_hpd_size) { 1342 1342 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, 1343 - AMDGPU_GEM_DOMAIN_VRAM, 1343 + AMDGPU_GEM_DOMAIN_VRAM | 1344 + AMDGPU_GEM_DOMAIN_GTT, 1344 1345 &adev->gfx.mec.hpd_eop_obj, 1345 1346 &adev->gfx.mec.hpd_eop_gpu_addr, 1346 1347 (void **)&hpd);
+2 -1
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 1783 1783 mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE; 1784 1784 if (mec_hpd_size) { 1785 1785 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, 1786 - AMDGPU_GEM_DOMAIN_VRAM, 1786 + AMDGPU_GEM_DOMAIN_VRAM | 1787 + AMDGPU_GEM_DOMAIN_GTT, 1787 1788 &adev->gfx.mec.hpd_eop_obj, 1788 1789 &adev->gfx.mec.hpd_eop_gpu_addr, 1789 1790 (void **)&hpd);
+1 -1
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
··· 120 120 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 121 121 122 122 /* Set default page address. */ 123 - value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); 123 + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); 124 124 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 125 125 (u32)(value >> 12)); 126 126 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+1 -1
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
··· 165 165 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 166 166 167 167 /* Set default page address. */ 168 - value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); 168 + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); 169 169 WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 170 170 (u32)(value >> 12)); 171 171 WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+1 -1
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
··· 167 167 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 168 168 169 169 /* Set default page address. */ 170 - value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); 170 + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); 171 171 WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 172 172 (u32)(value >> 12)); 173 173 WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+1 -1
drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
··· 163 163 adev->gmc.vram_end >> 18); 164 164 165 165 /* Set default page address. */ 166 - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start 166 + value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start 167 167 + adev->vm_manager.vram_base_offset; 168 168 WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 169 169 (u32)(value >> 12));
+1 -1
drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
··· 169 169 adev->gmc.vram_end >> 18); 170 170 171 171 /* Set default page address. */ 172 - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start 172 + value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start 173 173 + adev->vm_manager.vram_base_offset; 174 174 WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 175 175 (u32)(value >> 12));
+27 -12
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 78 78 /* MM HUB */ 79 79 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false); 80 80 /* GFX HUB */ 81 - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); 81 + /* This works because this interrupt is only 82 + * enabled at init/resume and disabled in 83 + * fini/suspend, so the overall state doesn't 84 + * change over the course of suspend/resume. 85 + */ 86 + if (!adev->in_s0ix) 87 + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); 82 88 break; 83 89 case AMDGPU_IRQ_STATE_ENABLE: 84 90 /* MM HUB */ 85 91 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true); 86 92 /* GFX HUB */ 87 - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); 93 + /* This works because this interrupt is only 94 + * enabled at init/resume and disabled in 95 + * fini/suspend, so the overall state doesn't 96 + * change over the course of suspend/resume. 97 + */ 98 + if (!adev->in_s0ix) 99 + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); 88 100 break; 89 101 default: 90 102 break; ··· 847 835 } 848 836 #endif 849 837 850 - /* In case the PCI BAR is larger than the actual amount of vram */ 851 838 adev->gmc.visible_vram_size = adev->gmc.aper_size; 852 - if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) 853 - adev->gmc.visible_vram_size = adev->gmc.real_vram_size; 854 839 855 840 /* set the gart size */ 856 841 if (amdgpu_gart_size == -1) { ··· 1070 1061 } 1071 1062 1072 1063 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); 1073 - r = adev->gfxhub.funcs->gart_enable(adev); 1074 - if (r) 1075 - return r; 1064 + 1065 + if (!adev->in_s0ix) { 1066 + r = adev->gfxhub.funcs->gart_enable(adev); 1067 + if (r) 1068 + return r; 1069 + } 1076 1070 1077 1071 r = adev->mmhub.funcs->gart_enable(adev); 1078 1072 if (r) ··· 1089 1077 value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 
1090 1078 false : true; 1091 1079 1092 - adev->gfxhub.funcs->set_fault_enable_default(adev, value); 1080 + if (!adev->in_s0ix) 1081 + adev->gfxhub.funcs->set_fault_enable_default(adev, value); 1093 1082 adev->mmhub.funcs->set_fault_enable_default(adev, value); 1094 1083 gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0); 1095 - gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); 1084 + if (!adev->in_s0ix) 1085 + gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); 1096 1086 1097 1087 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 1098 1088 (unsigned)(adev->gmc.gart_size >> 20), ··· 1115 1101 * harvestable groups in gc_utcl2 need to be programmed before any GFX block 1116 1102 * register setup within GMC, or else system hang when harvesting SA. 1117 1103 */ 1118 - if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest) 1104 + if (!adev->in_s0ix && adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest) 1119 1105 adev->gfxhub.funcs->utcl2_harvest(adev); 1120 1106 1121 1107 r = gmc_v10_0_gart_enable(adev); ··· 1143 1129 */ 1144 1130 static void gmc_v10_0_gart_disable(struct amdgpu_device *adev) 1145 1131 { 1146 - adev->gfxhub.funcs->gart_disable(adev); 1132 + if (!adev->in_s0ix) 1133 + adev->gfxhub.funcs->gart_disable(adev); 1147 1134 adev->mmhub.funcs->gart_disable(adev); 1148 1135 } 1149 1136
+14 -2
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
··· 64 64 /* MM HUB */ 65 65 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false); 66 66 /* GFX HUB */ 67 - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); 67 + /* This works because this interrupt is only 68 + * enabled at init/resume and disabled in 69 + * fini/suspend, so the overall state doesn't 70 + * change over the course of suspend/resume. 71 + */ 72 + if (!adev->in_s0ix) 73 + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); 68 74 break; 69 75 case AMDGPU_IRQ_STATE_ENABLE: 70 76 /* MM HUB */ 71 77 amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true); 72 78 /* GFX HUB */ 73 - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); 79 + /* This works because this interrupt is only 80 + * enabled at init/resume and disabled in 81 + * fini/suspend, so the overall state doesn't 82 + * change over the course of suspend/resume. 83 + */ 84 + if (!adev->in_s0ix) 85 + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); 74 86 break; 75 87 default: 76 88 break;
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
··· 258 258 WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 259 259 adev->gmc.vram_end >> 12); 260 260 WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 261 - adev->vram_scratch.gpu_addr >> 12); 261 + adev->mem_scratch.gpu_addr >> 12); 262 262 WREG32(mmMC_VM_AGP_BASE, 0); 263 263 WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); 264 264 WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
+1 -4
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
··· 292 292 WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 293 293 adev->gmc.vram_end >> 12); 294 294 WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 295 - adev->vram_scratch.gpu_addr >> 12); 295 + adev->mem_scratch.gpu_addr >> 12); 296 296 WREG32(mmMC_VM_AGP_BASE, 0); 297 297 WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); 298 298 WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); ··· 389 389 } 390 390 #endif 391 391 392 - /* In case the PCI BAR is larger than the actual amount of vram */ 393 392 adev->gmc.visible_vram_size = adev->gmc.aper_size; 394 - if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) 395 - adev->gmc.visible_vram_size = adev->gmc.real_vram_size; 396 393 397 394 /* set the gart size */ 398 395 if (amdgpu_gart_size == -1) {
+1 -4
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
··· 474 474 WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 475 475 adev->gmc.vram_end >> 12); 476 476 WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 477 - adev->vram_scratch.gpu_addr >> 12); 477 + adev->mem_scratch.gpu_addr >> 12); 478 478 479 479 if (amdgpu_sriov_vf(adev)) { 480 480 tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16; ··· 587 587 } 588 588 #endif 589 589 590 - /* In case the PCI BAR is larger than the actual amount of vram */ 591 590 adev->gmc.visible_vram_size = adev->gmc.aper_size; 592 - if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) 593 - adev->gmc.visible_vram_size = adev->gmc.real_vram_size; 594 591 595 592 /* set the gart size */ 596 593 if (amdgpu_gart_size == -1) {
+30 -9
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 484 484 for (i = 0; i < 16; i++) { 485 485 reg = hub->vm_context0_cntl + i; 486 486 487 + /* This works because this interrupt is only 488 + * enabled at init/resume and disabled in 489 + * fini/suspend, so the overall state doesn't 490 + * change over the course of suspend/resume. 491 + */ 492 + if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0)) 493 + continue; 494 + 487 495 if (j == AMDGPU_GFXHUB_0) 488 496 tmp = RREG32_SOC15_IP(GC, reg); 489 497 else ··· 511 503 hub = &adev->vmhub[j]; 512 504 for (i = 0; i < 16; i++) { 513 505 reg = hub->vm_context0_cntl + i; 506 + 507 + /* This works because this interrupt is only 508 + * enabled at init/resume and disabled in 509 + * fini/suspend, so the overall state doesn't 510 + * change over the course of suspend/resume. 511 + */ 512 + if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0)) 513 + continue; 514 514 515 515 if (j == AMDGPU_GFXHUB_0) 516 516 tmp = RREG32_SOC15_IP(GC, reg); ··· 1552 1536 } 1553 1537 1554 1538 #endif 1555 - /* In case the PCI BAR is larger than the actual amount of vram */ 1556 1539 adev->gmc.visible_vram_size = adev->gmc.aper_size; 1557 - if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) 1558 - adev->gmc.visible_vram_size = adev->gmc.real_vram_size; 1559 1540 1560 1541 /* set the gart size */ 1561 1542 if (amdgpu_gart_size == -1) { ··· 1875 1862 } 1876 1863 1877 1864 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); 1878 - r = adev->gfxhub.funcs->gart_enable(adev); 1879 - if (r) 1880 - return r; 1865 + 1866 + if (!adev->in_s0ix) { 1867 + r = adev->gfxhub.funcs->gart_enable(adev); 1868 + if (r) 1869 + return r; 1870 + } 1881 1871 1882 1872 r = adev->mmhub.funcs->gart_enable(adev); 1883 1873 if (r) ··· 1927 1911 value = true; 1928 1912 1929 1913 if (!amdgpu_sriov_vf(adev)) { 1930 - adev->gfxhub.funcs->set_fault_enable_default(adev, value); 1914 + if (!adev->in_s0ix) 1915 + adev->gfxhub.funcs->set_fault_enable_default(adev, value); 1931 1916 adev->mmhub.funcs->set_fault_enable_default(adev, value); 1932 
1917 } 1933 - for (i = 0; i < adev->num_vmhubs; ++i) 1918 + for (i = 0; i < adev->num_vmhubs; ++i) { 1919 + if (adev->in_s0ix && (i == AMDGPU_GFXHUB_0)) 1920 + continue; 1934 1921 gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); 1922 + } 1935 1923 1936 1924 if (adev->umc.funcs && adev->umc.funcs->init_registers) 1937 1925 adev->umc.funcs->init_registers(adev); ··· 1959 1939 */ 1960 1940 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev) 1961 1941 { 1962 - adev->gfxhub.funcs->gart_disable(adev); 1942 + if (!adev->in_s0ix) 1943 + adev->gfxhub.funcs->gart_disable(adev); 1963 1944 adev->mmhub.funcs->gart_disable(adev); 1964 1945 } 1965 1946
+6 -2
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
··· 549 549 fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); 550 550 551 551 r = amdgpu_bo_create_reserved(adev, fw_size, 552 - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 552 + PAGE_SIZE, 553 + AMDGPU_GEM_DOMAIN_VRAM | 554 + AMDGPU_GEM_DOMAIN_GTT, 553 555 &adev->mes.ucode_fw_obj[pipe], 554 556 &adev->mes.ucode_fw_gpu_addr[pipe], 555 557 (void **)&adev->mes.ucode_fw_ptr[pipe]); ··· 584 582 fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); 585 583 586 584 r = amdgpu_bo_create_reserved(adev, fw_size, 587 - 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM, 585 + 64 * 1024, 586 + AMDGPU_GEM_DOMAIN_VRAM | 587 + AMDGPU_GEM_DOMAIN_GTT, 588 588 &adev->mes.data_fw_obj[pipe], 589 589 &adev->mes.data_fw_gpu_addr[pipe], 590 590 (void **)&adev->mes.data_fw_ptr[pipe]);
+1 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
··· 114 114 return; 115 115 116 116 /* Set default page address. */ 117 - value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); 117 + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); 118 118 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 119 119 (u32)(value >> 12)); 120 120 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+1 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
··· 134 134 } 135 135 136 136 /* Set default page address. */ 137 - value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); 137 + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); 138 138 WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 139 139 (u32)(value >> 12)); 140 140 WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+1 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
··· 234 234 } 235 235 236 236 /* Set default page address. */ 237 - value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); 237 + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); 238 238 WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 239 239 (u32)(value >> 12)); 240 240 WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+1 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
··· 164 164 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 165 165 166 166 /* Set default page address. */ 167 - value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); 167 + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); 168 168 WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 169 169 (u32)(value >> 12)); 170 170 WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+1 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
··· 188 188 } 189 189 190 190 /* Set default page address. */ 191 - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + 191 + value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start + 192 192 adev->vm_manager.vram_base_offset; 193 193 WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 194 194 (u32)(value >> 12));
+1 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
··· 188 188 adev->gmc.vram_end >> 18); 189 189 190 190 /* Set default page address. */ 191 - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + 191 + value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start + 192 192 adev->vm_manager.vram_base_offset; 193 193 WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 194 194 (u32)(value >> 12));
+1 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
··· 181 181 } 182 182 183 183 /* Set default page address. */ 184 - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + 184 + value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start + 185 185 adev->vm_manager.vram_base_offset; 186 186 WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 187 187 (u32)(value >> 12));
+1 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
··· 136 136 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 137 137 138 138 /* Set default page address. */ 139 - value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); 139 + value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr); 140 140 WREG32_SOC15_OFFSET( 141 141 MMHUB, 0, 142 142 mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+6
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
··· 404 404 return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA); 405 405 } 406 406 407 + static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev) 408 + { 409 + xgpu_ai_send_access_requests(adev, IDH_RAS_POISON); 410 + } 411 + 407 412 const struct amdgpu_virt_ops xgpu_ai_virt_ops = { 408 413 .req_full_gpu = xgpu_ai_request_full_gpu_access, 409 414 .rel_full_gpu = xgpu_ai_release_full_gpu_access, ··· 416 411 .wait_reset = NULL, 417 412 .trans_msg = xgpu_ai_mailbox_trans_msg, 418 413 .req_init_data = xgpu_ai_request_init_data, 414 + .ras_poison_handler = xgpu_ai_ras_poison_handler, 419 415 };
+1
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
··· 39 39 40 40 IDH_LOG_VF_ERROR = 200, 41 41 IDH_READY_TO_RESET = 201, 42 + IDH_RAS_POISON = 202, 42 43 }; 43 44 44 45 enum idh_event {
+6
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
··· 426 426 amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); 427 427 } 428 428 429 + static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev) 430 + { 431 + xgpu_nv_send_access_requests(adev, IDH_RAS_POISON); 432 + } 433 + 429 434 const struct amdgpu_virt_ops xgpu_nv_virt_ops = { 430 435 .req_full_gpu = xgpu_nv_request_full_gpu_access, 431 436 .rel_full_gpu = xgpu_nv_release_full_gpu_access, ··· 438 433 .reset_gpu = xgpu_nv_request_reset, 439 434 .wait_reset = NULL, 440 435 .trans_msg = xgpu_nv_mailbox_trans_msg, 436 + .ras_poison_handler = xgpu_nv_ras_poison_handler, 441 437 };
+1
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
··· 39 39 40 40 IDH_LOG_VF_ERROR = 200, 41 41 IDH_READY_TO_RESET = 201, 42 + IDH_RAS_POISON = 202, 42 43 }; 43 44 44 45 enum idh_event {
-8
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
··· 809 809 msleep(1000); 810 810 } 811 811 812 - /* TODO: check whether can submit a doorbell request to raise 813 - * a doorbell fence to exit gfxoff. 814 - */ 815 - if (adev->in_s0ix) 816 - amdgpu_gfx_off_ctrl(adev, false); 817 - 818 812 sdma_v5_2_soft_reset(adev); 819 813 /* unhalt the MEs */ 820 814 sdma_v5_2_enable(adev, true); ··· 817 823 818 824 /* start the gfx rings and rlc compute queues */ 819 825 r = sdma_v5_2_gfx_resume(adev); 820 - if (adev->in_s0ix) 821 - amdgpu_gfx_off_ctrl(adev, true); 822 826 if (r) 823 827 return r; 824 828 r = sdma_v5_2_rlc_resume(adev);
+12 -12
drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
··· 55 55 TA_SECUREDISPLAY_STATUS__MAX = 0x7FFFFFFF,/* Maximum Value for status*/ 56 56 }; 57 57 58 - /** @enum ta_securedisplay_max_phy 58 + /** @enum ta_securedisplay_phy_ID 59 59 * Physical ID number to use for reading corresponding DIO Scratch register for ROI 60 60 */ 61 - enum ta_securedisplay_max_phy { 61 + enum ta_securedisplay_phy_ID { 62 62 TA_SECUREDISPLAY_PHY0 = 0, 63 63 TA_SECUREDISPLAY_PHY1 = 1, 64 64 TA_SECUREDISPLAY_PHY2 = 2, ··· 139 139 uint32_t reserved[4]; 140 140 }; 141 141 142 - /** @struct securedisplay_cmd 143 - * Secure Display Command which is shared buffer memory 144 - */ 145 - struct securedisplay_cmd { 146 - uint32_t cmd_id; /* +0 Bytes Command ID */ 147 - enum ta_securedisplay_status status; /* +4 Bytes Status of Secure Display TA */ 148 - uint32_t reserved[2]; /* +8 Bytes Reserved */ 149 - union ta_securedisplay_cmd_input securedisplay_in_message; /* +16 Bytes Input Buffer */ 150 - union ta_securedisplay_cmd_output securedisplay_out_message;/* +32 Bytes Output Buffer */ 151 - /**@note Total 48 Bytes */ 142 + /** @struct ta_securedisplay_cmd 143 + * Secure display command which is shared buffer memory 144 + */ 145 + struct ta_securedisplay_cmd { 146 + uint32_t cmd_id; /**< +0 Bytes Command ID */ 147 + enum ta_securedisplay_status status; /**< +4 Bytes Status code returned by the secure display TA */ 148 + uint32_t reserved[2]; /**< +8 Bytes Reserved */ 149 + union ta_securedisplay_cmd_input securedisplay_in_message; /**< +16 Bytes Command input buffer */ 150 + union ta_securedisplay_cmd_output securedisplay_out_message; /**< +32 Bytes Command output buffer */ 151 + /**@note Total 48 Bytes */ 152 152 }; 153 153 154 154 #endif //_TA_SECUREDISPLAY_IF_H
+4 -20
drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
··· 340 340 } 341 341 } 342 342 343 - static uint32_t umc_v8_10_query_ras_poison_mode_per_channel( 344 - struct amdgpu_device *adev, 345 - uint32_t umc_reg_offset) 346 - { 347 - uint32_t ecc_ctrl_addr, ecc_ctrl; 348 - 349 - ecc_ctrl_addr = 350 - SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccCtrl); 351 - ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr + 352 - umc_reg_offset) * 4); 353 - 354 - return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_GeccCtrl, UCFatalEn); 355 - } 356 - 357 343 static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev) 358 344 { 359 - uint32_t umc_reg_offset = 0; 360 - 361 - /* Enabling fatal error in umc node0 instance0 channel0 will be 362 - * considered as fatal error mode 345 + /* 346 + * Force return true, because UMCCH0_0_GeccCtrl 347 + * is not accessible from host side 363 348 */ 364 - umc_reg_offset = get_umc_v8_10_reg_offset(adev, 0, 0, 0); 365 - return !umc_v8_10_query_ras_poison_mode_per_channel(adev, umc_reg_offset); 349 + return true; 366 350 } 367 351 368 352 const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
-11
drivers/gpu/drm/amd/amdkfd/kfd_device.c
··· 262 262 f2g = &gfx_v8_kfd2kgd; 263 263 break; 264 264 case CHIP_FIJI: 265 - gfx_target_version = 80003; 266 - f2g = &gfx_v8_kfd2kgd; 267 - break; 268 265 case CHIP_POLARIS10: 269 266 gfx_target_version = 80003; 270 267 f2g = &gfx_v8_kfd2kgd; 271 268 break; 272 269 case CHIP_POLARIS11: 273 - gfx_target_version = 80003; 274 - if (!vf) 275 - f2g = &gfx_v8_kfd2kgd; 276 - break; 277 270 case CHIP_POLARIS12: 278 - gfx_target_version = 80003; 279 - if (!vf) 280 - f2g = &gfx_v8_kfd2kgd; 281 - break; 282 271 case CHIP_VEGAM: 283 272 gfx_target_version = 80003; 284 273 if (!vf)
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
··· 801 801 802 802 p2plink->attr.name = "properties"; 803 803 p2plink->attr.mode = KFD_SYSFS_FILE_MODE; 804 - sysfs_attr_init(&iolink->attr); 804 + sysfs_attr_init(&p2plink->attr); 805 805 ret = sysfs_create_file(p2plink->kobj, &p2plink->attr); 806 806 if (ret < 0) 807 807 return ret;
+254 -114
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 210 210 211 211 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 212 212 struct amdgpu_dm_connector *amdgpu_dm_connector, 213 - uint32_t link_index, 213 + u32 link_index, 214 214 struct amdgpu_encoder *amdgpu_encoder); 215 215 static int amdgpu_dm_encoder_init(struct drm_device *dev, 216 216 struct amdgpu_encoder *aencoder, ··· 262 262 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, 263 263 u32 *vbl, u32 *position) 264 264 { 265 - uint32_t v_blank_start, v_blank_end, h_position, v_position; 265 + u32 v_blank_start, v_blank_end, h_position, v_position; 266 266 267 267 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) 268 268 return -EINVAL; ··· 361 361 struct amdgpu_device *adev = irq_params->adev; 362 362 unsigned long flags; 363 363 struct drm_pending_vblank_event *e; 364 - uint32_t vpos, hpos, v_blank_start, v_blank_end; 364 + u32 vpos, hpos, v_blank_start, v_blank_end; 365 365 bool vrr_active; 366 366 367 367 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); ··· 648 648 struct drm_connector *connector; 649 649 struct drm_connector_list_iter iter; 650 650 struct dc_link *link; 651 - uint8_t link_index = 0; 651 + u8 link_index = 0; 652 652 struct drm_device *dev; 653 653 654 654 if (adev == NULL) ··· 749 749 struct amdgpu_device *adev = irq_params->adev; 750 750 struct amdgpu_display_manager *dm = &adev->dm; 751 751 struct dmcub_trace_buf_entry entry = { 0 }; 752 - uint32_t count = 0; 752 + u32 count = 0; 753 753 struct dmub_hpd_work *dmub_hpd_wrk; 754 754 struct dc_link *plink = NULL; 755 755 ··· 1015 1015 struct dmub_srv_hw_params hw_params; 1016 1016 enum dmub_status status; 1017 1017 const unsigned char *fw_inst_const, *fw_bss_data; 1018 - uint32_t i, fw_inst_const_size, fw_bss_data_size; 1018 + u32 i, fw_inst_const_size, fw_bss_data_size; 1019 1019 bool has_hw_support; 1020 1020 1021 1021 if (!dmub_srv) ··· 1176 1176 1177 1177 static void mmhub_read_system_context(struct 
amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config) 1178 1178 { 1179 - uint64_t pt_base; 1180 - uint32_t logical_addr_low; 1181 - uint32_t logical_addr_high; 1182 - uint32_t agp_base, agp_bot, agp_top; 1179 + u64 pt_base; 1180 + u32 logical_addr_low; 1181 + u32 logical_addr_high; 1182 + u32 agp_base, agp_bot, agp_top; 1183 1183 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base; 1184 1184 1185 1185 memset(pa_config, 0, sizeof(*pa_config)); ··· 1642 1642 } 1643 1643 #endif 1644 1644 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 1645 - adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work(); 1645 + adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev); 1646 + if (!adev->dm.secure_display_ctxs) { 1647 + DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n"); 1648 + } 1646 1649 #endif 1647 1650 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 1648 1651 init_completion(&adev->dm.dmub_aux_transfer_done); ··· 1740 1737 amdgpu_dm_destroy_drm_device(&adev->dm); 1741 1738 1742 1739 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 1743 - if (adev->dm.crc_rd_wrk) { 1744 - flush_work(&adev->dm.crc_rd_wrk->notify_ta_work); 1745 - kfree(adev->dm.crc_rd_wrk); 1746 - adev->dm.crc_rd_wrk = NULL; 1740 + if (adev->dm.secure_display_ctxs) { 1741 + for (i = 0; i < adev->dm.dc->caps.max_links; i++) { 1742 + if (adev->dm.secure_display_ctxs[i].crtc) { 1743 + flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work); 1744 + flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work); 1745 + } 1746 + } 1747 + kfree(adev->dm.secure_display_ctxs); 1748 + adev->dm.secure_display_ctxs = NULL; 1747 1749 } 1748 1750 #endif 1749 1751 #ifdef CONFIG_DRM_AMD_DC_HDCP ··· 2088 2080 * TODO: Move this into GART. 
2089 2081 */ 2090 2082 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, 2091 - AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo, 2083 + AMDGPU_GEM_DOMAIN_VRAM | 2084 + AMDGPU_GEM_DOMAIN_GTT, 2085 + &adev->dm.dmub_bo, 2092 2086 &adev->dm.dmub_bo_gpu_addr, 2093 2087 &adev->dm.dmub_bo_cpu_addr); 2094 2088 if (r) ··· 2175 2165 DRM_ERROR("DM_MST: Failed to start MST\n"); 2176 2166 aconnector->dc_link->type = 2177 2167 dc_connection_single; 2168 + ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, 2169 + aconnector->dc_link); 2178 2170 break; 2179 2171 } 2180 2172 } ··· 2498 2486 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, 2499 2487 struct drm_crtc *crtc) 2500 2488 { 2501 - uint32_t i; 2489 + u32 i; 2502 2490 struct drm_connector_state *new_con_state; 2503 2491 struct drm_connector *connector; 2504 2492 struct drm_crtc *crtc_from_state; ··· 2746 2734 drm_for_each_connector_iter(connector, &iter) { 2747 2735 aconnector = to_amdgpu_dm_connector(connector); 2748 2736 2737 + if (!aconnector->dc_link) 2738 + continue; 2739 + 2749 2740 /* 2750 2741 * this is the case when traversing through already created 2751 2742 * MST connectors, should be skipped 2752 2743 */ 2753 - if (aconnector->dc_link && 2754 - aconnector->dc_link->type == dc_connection_mst_branch) 2744 + if (aconnector->dc_link->type == dc_connection_mst_branch) 2755 2745 continue; 2756 2746 2757 2747 mutex_lock(&aconnector->hpd_lock); ··· 3131 3117 3132 3118 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector) 3133 3119 { 3134 - uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; 3135 - uint8_t dret; 3120 + u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; 3121 + u8 dret; 3136 3122 bool new_irq_handled = false; 3137 3123 int dpcd_addr; 3138 3124 int dpcd_bytes_to_read; ··· 3160 3146 3161 3147 while (dret == dpcd_bytes_to_read && 3162 3148 process_count < max_process_count) { 3163 - uint8_t retry; 3149 + u8 retry; 3164 
3150 dret = 0; 3165 3151 3166 3152 process_count++; ··· 3179 3165 dpcd_bytes_to_read - 1; 3180 3166 3181 3167 for (retry = 0; retry < 3; retry++) { 3182 - uint8_t wret; 3168 + u8 wret; 3183 3169 3184 3170 wret = drm_dp_dpcd_write( 3185 3171 &aconnector->dm_dp_aux.aux, ··· 4193 4179 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) 4194 4180 { 4195 4181 struct amdgpu_display_manager *dm = &adev->dm; 4196 - int32_t i; 4182 + s32 i; 4197 4183 struct amdgpu_dm_connector *aconnector = NULL; 4198 4184 struct amdgpu_encoder *aencoder = NULL; 4199 4185 struct amdgpu_mode_info *mode_info = &adev->mode_info; 4200 - uint32_t link_cnt; 4201 - int32_t primary_planes; 4186 + u32 link_cnt; 4187 + s32 primary_planes; 4202 4188 enum dc_connection_type new_connection_type = dc_connection_none; 4203 4189 const struct dc_plane_cap *plane; 4204 4190 bool psr_feature_enabled = false; ··· 4715 4701 static int 4716 4702 fill_dc_plane_info_and_addr(struct amdgpu_device *adev, 4717 4703 const struct drm_plane_state *plane_state, 4718 - const uint64_t tiling_flags, 4704 + const u64 tiling_flags, 4719 4705 struct dc_plane_info *plane_info, 4720 4706 struct dc_plane_address *address, 4721 4707 bool tmz_surface, ··· 4890 4876 4891 4877 static inline void fill_dc_dirty_rect(struct drm_plane *plane, 4892 4878 struct rect *dirty_rect, int32_t x, 4893 - int32_t y, int32_t width, int32_t height, 4879 + s32 y, s32 width, s32 height, 4894 4880 int *i, bool ffu) 4895 4881 { 4896 4882 if (*i > DC_MAX_DIRTY_RECTS) ··· 4946 4932 { 4947 4933 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); 4948 4934 struct rect *dirty_rects = flip_addrs->dirty_rects; 4949 - uint32_t num_clips; 4935 + u32 num_clips; 4950 4936 struct drm_mode_rect *clips; 4951 4937 bool bb_changed; 4952 4938 bool fb_changed; 4953 - uint32_t i = 0; 4939 + u32 i = 0; 4954 4940 4955 4941 /* 4956 4942 * Cursor plane has it's own dirty rect update interface. 
See ··· 5096 5082 convert_color_depth_from_display_info(const struct drm_connector *connector, 5097 5083 bool is_y420, int requested_bpc) 5098 5084 { 5099 - uint8_t bpc; 5085 + u8 bpc; 5100 5086 5101 5087 if (is_y420) { 5102 5088 bpc = 8; ··· 5640 5626 uint32_t max_dsc_target_bpp_limit_override) 5641 5627 { 5642 5628 const struct dc_link_settings *verified_link_cap = NULL; 5643 - uint32_t link_bw_in_kbps; 5644 - uint32_t edp_min_bpp_x16, edp_max_bpp_x16; 5629 + u32 link_bw_in_kbps; 5630 + u32 edp_min_bpp_x16, edp_max_bpp_x16; 5645 5631 struct dc *dc = sink->ctx->dc; 5646 5632 struct dc_dsc_bw_range bw_range = {0}; 5647 5633 struct dc_dsc_config dsc_cfg = {0}; ··· 5698 5684 struct dsc_dec_dpcd_caps *dsc_caps) 5699 5685 { 5700 5686 struct drm_connector *drm_connector = &aconnector->base; 5701 - uint32_t link_bandwidth_kbps; 5687 + u32 link_bandwidth_kbps; 5702 5688 struct dc *dc = sink->ctx->dc; 5703 - uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps; 5704 - uint32_t dsc_max_supported_bw_in_kbps; 5705 - uint32_t max_dsc_target_bpp_limit_override = 5689 + u32 max_supported_bw_in_kbps, timing_bw_in_kbps; 5690 + u32 dsc_max_supported_bw_in_kbps; 5691 + u32 max_dsc_target_bpp_limit_override = 5706 5692 drm_connector->display_info.max_dsc_bpp; 5707 5693 5708 5694 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, ··· 5849 5835 */ 5850 5836 DRM_DEBUG_DRIVER("No preferred mode found\n"); 5851 5837 } else { 5852 - recalculate_timing = is_freesync_video_mode(&mode, aconnector); 5838 + recalculate_timing = amdgpu_freesync_vid_mode && 5839 + is_freesync_video_mode(&mode, aconnector); 5853 5840 if (recalculate_timing) { 5854 5841 freesync_mode = get_highest_refresh_rate_mode(aconnector, false); 5855 5842 drm_mode_copy(&saved_mode, &mode); ··· 6924 6909 const struct drm_display_mode *m; 6925 6910 struct drm_display_mode *new_mode; 6926 6911 uint i; 6927 - uint32_t new_modes_count = 0; 6912 + u32 new_modes_count = 0; 6928 6913 6929 6914 /* Standard FPS values 
6930 6915 * ··· 6938 6923 * 60 - Commonly used 6939 6924 * 48,72,96,120 - Multiples of 24 6940 6925 */ 6941 - static const uint32_t common_rates[] = { 6926 + static const u32 common_rates[] = { 6942 6927 23976, 24000, 25000, 29970, 30000, 6943 6928 48000, 50000, 60000, 72000, 96000, 120000 6944 6929 }; ··· 6954 6939 return 0; 6955 6940 6956 6941 for (i = 0; i < ARRAY_SIZE(common_rates); i++) { 6957 - uint64_t target_vtotal, target_vtotal_diff; 6958 - uint64_t num, den; 6942 + u64 target_vtotal, target_vtotal_diff; 6943 + u64 num, den; 6959 6944 6960 6945 if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) 6961 6946 continue; ··· 7001 6986 struct amdgpu_dm_connector *amdgpu_dm_connector = 7002 6987 to_amdgpu_dm_connector(connector); 7003 6988 7004 - if (!edid) 6989 + if (!(amdgpu_freesync_vid_mode && edid)) 7005 6990 return; 7006 6991 7007 6992 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) ··· 7197 7182 */ 7198 7183 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 7199 7184 struct amdgpu_dm_connector *aconnector, 7200 - uint32_t link_index, 7185 + u32 link_index, 7201 7186 struct amdgpu_encoder *aencoder) 7202 7187 { 7203 7188 int res = 0; ··· 7382 7367 } 7383 7368 7384 7369 #ifdef CONFIG_DRM_AMD_DC_HDCP 7385 - static bool is_content_protection_different(struct drm_connector_state *state, 7386 - const struct drm_connector_state *old_state, 7387 - const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) 7370 + static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state, 7371 + struct drm_crtc_state *old_crtc_state, 7372 + struct drm_connector_state *new_conn_state, 7373 + struct drm_connector_state *old_conn_state, 7374 + const struct drm_connector *connector, 7375 + struct hdcp_workqueue *hdcp_w) 7388 7376 { 7389 7377 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7390 7378 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 
7391 7379 7392 - /* Handle: Type0/1 change */ 7393 - if (old_state->hdcp_content_type != state->hdcp_content_type && 7394 - state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 7395 - state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 7380 + pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", 7381 + connector->index, connector->status, connector->dpms); 7382 + pr_debug("[HDCP_DM] state protection old: %x new: %x\n", 7383 + old_conn_state->content_protection, new_conn_state->content_protection); 7384 + 7385 + if (old_crtc_state) 7386 + pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 7387 + old_crtc_state->enable, 7388 + old_crtc_state->active, 7389 + old_crtc_state->mode_changed, 7390 + old_crtc_state->active_changed, 7391 + old_crtc_state->connectors_changed); 7392 + 7393 + if (new_crtc_state) 7394 + pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 7395 + new_crtc_state->enable, 7396 + new_crtc_state->active, 7397 + new_crtc_state->mode_changed, 7398 + new_crtc_state->active_changed, 7399 + new_crtc_state->connectors_changed); 7400 + 7401 + /* hdcp content type change */ 7402 + if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type && 7403 + new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 7404 + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 7405 + pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__); 7396 7406 return true; 7397 7407 } 7398 7408 7399 - /* CP is being re enabled, ignore this 7400 - * 7401 - * Handles: ENABLED -> DESIRED 7402 - */ 7403 - if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && 7404 - state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 7405 - state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; 7409 + /* CP is being re enabled, ignore this */ 7410 + if (old_conn_state->content_protection == 
DRM_MODE_CONTENT_PROTECTION_ENABLED && 7411 + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 7412 + if (new_crtc_state && new_crtc_state->mode_changed) { 7413 + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 7414 + pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__); 7415 + return true; 7416 + }; 7417 + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; 7418 + pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__); 7406 7419 return false; 7407 7420 } 7408 7421 ··· 7438 7395 * 7439 7396 * Handles: UNDESIRED -> ENABLED 7440 7397 */ 7441 - if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && 7442 - state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 7443 - state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 7398 + if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && 7399 + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 7400 + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 7444 7401 7445 7402 /* Stream removed and re-enabled 7446 7403 * ··· 7450 7407 * 7451 7408 * Handles: DESIRED -> DESIRED (Special case) 7452 7409 */ 7453 - if (!(old_state->crtc && old_state->crtc->enabled) && 7454 - state->crtc && state->crtc->enabled && 7410 + if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && 7411 + new_conn_state->crtc && new_conn_state->crtc->enabled && 7455 7412 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 7456 7413 dm_con_state->update_hdcp = false; 7414 + pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", 7415 + __func__); 7457 7416 return true; 7458 7417 } 7459 7418 ··· 7467 7422 * 7468 7423 * Handles: DESIRED -> DESIRED (Special case) 7469 7424 */ 7470 - if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && 
7471 - connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { 7425 + if (dm_con_state->update_hdcp && 7426 + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && 7427 + connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { 7472 7428 dm_con_state->update_hdcp = false; 7429 + pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", 7430 + __func__); 7473 7431 return true; 7474 7432 } 7475 7433 7476 - /* 7477 - * Handles: UNDESIRED -> UNDESIRED 7478 - * DESIRED -> DESIRED 7479 - * ENABLED -> ENABLED 7480 - */ 7481 - if (old_state->content_protection == state->content_protection) 7434 + if (old_conn_state->content_protection == new_conn_state->content_protection) { 7435 + if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { 7436 + if (new_crtc_state && new_crtc_state->mode_changed) { 7437 + pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", 7438 + __func__); 7439 + return true; 7440 + }; 7441 + pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", 7442 + __func__); 7443 + return false; 7444 + }; 7445 + 7446 + pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__); 7482 7447 return false; 7448 + } 7483 7449 7484 - /* 7485 - * Handles: UNDESIRED -> DESIRED 7486 - * DESIRED -> UNDESIRED 7487 - * ENABLED -> UNDESIRED 7488 - */ 7489 - if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) 7450 + if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { 7451 + pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", 7452 + __func__); 7490 7453 return true; 7454 + } 7491 7455 7492 - /* 7493 - * Handles: DESIRED -> ENABLED 7494 - */ 7456 + pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__); 7495 7457 return false; 7496 7458 } 7497 - 7498 7459 #endif 7460 + 7499 7461 static void remove_stream(struct amdgpu_device *adev, 7500 7462 struct 
amdgpu_crtc *acrtc, 7501 7463 struct dc_stream_state *stream) ··· 7718 7666 struct drm_crtc *pcrtc, 7719 7667 bool wait_for_vblank) 7720 7668 { 7721 - uint32_t i; 7722 - uint64_t timestamp_ns; 7669 + u32 i; 7670 + u64 timestamp_ns; 7723 7671 struct drm_plane *plane; 7724 7672 struct drm_plane_state *old_plane_state, *new_plane_state; 7725 7673 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); ··· 7730 7678 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); 7731 7679 int planes_count = 0, vpos, hpos; 7732 7680 unsigned long flags; 7733 - uint32_t target_vblank, last_flip_vblank; 7681 + u32 target_vblank, last_flip_vblank; 7734 7682 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); 7735 7683 bool cursor_update = false; 7736 7684 bool pflip_present = false; ··· 8168 8116 struct amdgpu_display_manager *dm = &adev->dm; 8169 8117 struct dm_atomic_state *dm_state; 8170 8118 struct dc_state *dc_state = NULL, *dc_state_temp = NULL; 8171 - uint32_t i, j; 8119 + u32 i, j; 8172 8120 struct drm_crtc *crtc; 8173 8121 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 8174 8122 unsigned long flags; ··· 8342 8290 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 8343 8291 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 8344 8292 8345 - new_crtc_state = NULL; 8293 + pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); 8346 8294 8347 - if (acrtc) 8295 + if (!connector) 8296 + continue; 8297 + 8298 + pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", 8299 + connector->index, connector->status, connector->dpms); 8300 + pr_debug("[HDCP_DM] state protection old: %x new: %x\n", 8301 + old_con_state->content_protection, new_con_state->content_protection); 8302 + 8303 + if (aconnector->dc_sink) { 8304 + if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && 8305 + aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { 8306 + pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n", 8307 
+ aconnector->dc_sink->edid_caps.display_name); 8308 + } 8309 + } 8310 + 8311 + new_crtc_state = NULL; 8312 + old_crtc_state = NULL; 8313 + 8314 + if (acrtc) { 8348 8315 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 8316 + old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 8317 + } 8318 + 8319 + if (old_crtc_state) 8320 + pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 8321 + old_crtc_state->enable, 8322 + old_crtc_state->active, 8323 + old_crtc_state->mode_changed, 8324 + old_crtc_state->active_changed, 8325 + old_crtc_state->connectors_changed); 8326 + 8327 + if (new_crtc_state) 8328 + pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 8329 + new_crtc_state->enable, 8330 + new_crtc_state->active, 8331 + new_crtc_state->mode_changed, 8332 + new_crtc_state->active_changed, 8333 + new_crtc_state->connectors_changed); 8334 + } 8335 + 8336 + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 8337 + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 8338 + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 8339 + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 8340 + 8341 + new_crtc_state = NULL; 8342 + old_crtc_state = NULL; 8343 + 8344 + if (acrtc) { 8345 + new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 8346 + old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 8347 + } 8349 8348 8350 8349 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 8351 8350 ··· 8408 8305 continue; 8409 8306 } 8410 8307 8411 - if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) 8308 + if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state, 8309 + old_con_state, connector, adev->dm.hdcp_workqueue)) { 8310 + /* when display is unplugged from mst hub, connctor will 8311 + * be destroyed within 
dm_dp_mst_connector_destroy. connector 8312 + * hdcp perperties, like type, undesired, desired, enabled, 8313 + * will be lost. So, save hdcp properties into hdcp_work within 8314 + * amdgpu_dm_atomic_commit_tail. if the same display is 8315 + * plugged back with same display index, its hdcp properties 8316 + * will be retrieved from hdcp_work within dm_dp_mst_get_modes 8317 + */ 8318 + 8319 + bool enable_encryption = false; 8320 + 8321 + if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) 8322 + enable_encryption = true; 8323 + 8324 + if (aconnector->dc_link && aconnector->dc_sink && 8325 + aconnector->dc_link->type == dc_connection_mst_branch) { 8326 + struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; 8327 + struct hdcp_workqueue *hdcp_w = 8328 + &hdcp_work[aconnector->dc_link->link_index]; 8329 + 8330 + hdcp_w->hdcp_content_type[connector->index] = 8331 + new_con_state->hdcp_content_type; 8332 + hdcp_w->content_protection[connector->index] = 8333 + new_con_state->content_protection; 8334 + } 8335 + 8336 + if (new_crtc_state && new_crtc_state->mode_changed && 8337 + new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) 8338 + enable_encryption = true; 8339 + 8340 + DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); 8341 + 8412 8342 hdcp_update_display( 8413 8343 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, 8414 - new_con_state->hdcp_content_type, 8415 - new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED); 8344 + new_con_state->hdcp_content_type, enable_encryption); 8345 + } 8416 8346 } 8417 8347 #endif 8418 8348 ··· 8543 8407 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 8544 8408 #ifdef CONFIG_DEBUG_FS 8545 8409 enum amdgpu_dm_pipe_crc_source cur_crc_src; 8546 - #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 8547 - struct crc_rd_work *crc_rd_wrk; 8548 - #endif 8549 8410 #endif 8550 8411 /* Count number of newly disabled CRTCs 
for dropping PM refs later. */ 8551 8412 if (old_crtc_state->active && !new_crtc_state->active) ··· 8555 8422 update_stream_irq_parameters(dm, dm_new_crtc_state); 8556 8423 8557 8424 #ifdef CONFIG_DEBUG_FS 8558 - #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 8559 - crc_rd_wrk = dm->crc_rd_wrk; 8560 - #endif 8561 8425 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8562 8426 cur_crc_src = acrtc->dm_irq_params.crc_src; 8563 8427 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); ··· 8583 8453 if (amdgpu_dm_crc_window_is_activated(crtc)) { 8584 8454 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8585 8455 acrtc->dm_irq_params.window_param.update_win = true; 8456 + 8457 + /** 8458 + * It takes 2 frames for HW to stably generate CRC when 8459 + * resuming from suspend, so we set skip_frame_cnt 2. 8460 + */ 8586 8461 acrtc->dm_irq_params.window_param.skip_frame_cnt = 2; 8587 - spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); 8588 - crc_rd_wrk->crtc = crtc; 8589 - spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); 8590 8462 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 8591 8463 } 8592 8464 #endif ··· 8811 8679 struct drm_display_mode *mode = &new_crtc_state->base.mode; 8812 8680 int vrefresh = drm_mode_vrefresh(mode); 8813 8681 bool fs_vid_mode = false; 8682 + bool drr_active = false; 8814 8683 8815 8684 new_crtc_state->vrr_supported = new_con_state->freesync_capable && 8816 8685 vrefresh >= aconnector->min_vfreq && 8817 8686 vrefresh <= aconnector->max_vfreq; 8818 8687 8819 - if (new_crtc_state->vrr_supported) { 8820 - new_crtc_state->stream->ignore_msa_timing_param = true; 8821 - fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; 8688 + drr_active = new_crtc_state->vrr_supported && 8689 + new_crtc_state->freesync_config.state != VRR_STATE_DISABLED && 8690 + new_crtc_state->freesync_config.state != VRR_STATE_INACTIVE && 8691 + new_crtc_state->freesync_config.state != VRR_STATE_UNSUPPORTED; 8822 8692 
8693 + if (drr_active) 8694 + new_crtc_state->stream->ignore_msa_timing_param = true; 8695 + 8696 + if (new_crtc_state->vrr_supported) { 8697 + fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; 8823 8698 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000; 8824 8699 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000; 8825 8700 config.vsif_supported = true; ··· 8886 8747 } 8887 8748 8888 8749 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) { 8889 - uint64_t num, den, res; 8750 + u64 num, den, res; 8890 8751 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; 8891 8752 8892 8753 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; ··· 8989 8850 * TODO: Refactor this function to allow this check to work 8990 8851 * in all conditions. 8991 8852 */ 8992 - if (dm_new_crtc_state->stream && 8853 + if (amdgpu_freesync_vid_mode && 8854 + dm_new_crtc_state->stream && 8993 8855 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) 8994 8856 goto skip_modeset; 8995 8857 ··· 9025 8885 if (!dm_old_crtc_state->stream) 9026 8886 goto skip_modeset; 9027 8887 9028 - if (dm_new_crtc_state->stream && 8888 + if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && 9029 8889 is_timing_unchanged_for_freesync(new_crtc_state, 9030 8890 old_crtc_state)) { 9031 8891 new_crtc_state->mode_changed = false; ··· 9037 8897 set_freesync_fixed_config(dm_new_crtc_state); 9038 8898 9039 8899 goto skip_modeset; 9040 - } else if (aconnector && 8900 + } else if (amdgpu_freesync_vid_mode && aconnector && 9041 8901 is_freesync_video_mode(&new_crtc_state->mode, 9042 8902 aconnector)) { 9043 8903 struct drm_display_mode *high_mode; ··· 10023 9883 static bool is_dp_capable_without_timing_msa(struct dc *dc, 10024 9884 struct amdgpu_dm_connector *amdgpu_dm_connector) 10025 9885 { 10026 - uint8_t dpcd_data; 9886 + u8 dpcd_data; 10027 9887 bool capable = false; 10028 9888 10029 9889 if 
(amdgpu_dm_connector->dc_link && ··· 10042 9902 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, 10043 9903 unsigned int offset, 10044 9904 unsigned int total_length, 10045 - uint8_t *data, 9905 + u8 *data, 10046 9906 unsigned int length, 10047 9907 struct amdgpu_hdmi_vsdb_info *vsdb) 10048 9908 { ··· 10097 9957 } 10098 9958 10099 9959 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm, 10100 - uint8_t *edid_ext, int len, 9960 + u8 *edid_ext, int len, 10101 9961 struct amdgpu_hdmi_vsdb_info *vsdb_info) 10102 9962 { 10103 9963 int i; ··· 10138 9998 } 10139 9999 10140 10000 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm, 10141 - uint8_t *edid_ext, int len, 10001 + u8 *edid_ext, int len, 10142 10002 struct amdgpu_hdmi_vsdb_info *vsdb_info) 10143 10003 { 10144 10004 int i; ··· 10154 10014 } 10155 10015 10156 10016 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, 10157 - uint8_t *edid_ext, int len, 10017 + u8 *edid_ext, int len, 10158 10018 struct amdgpu_hdmi_vsdb_info *vsdb_info) 10159 10019 { 10160 10020 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); ··· 10168 10028 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, 10169 10029 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) 10170 10030 { 10171 - uint8_t *edid_ext = NULL; 10031 + u8 *edid_ext = NULL; 10172 10032 int i; 10173 10033 bool valid_vsdb_found = false; 10174 10034 ··· 10344 10204 } 10345 10205 10346 10206 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, 10347 - uint32_t value, const char *func_name) 10207 + u32 value, const char *func_name) 10348 10208 { 10349 10209 #ifdef DM_CHECK_ADDR_0 10350 10210 if (address == 0) { ··· 10359 10219 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, 10360 10220 const char *func_name) 10361 10221 { 10362 - uint32_t value; 10222 + u32 value; 10363 10223 #ifdef DM_CHECK_ADDR_0 10364 10224 if (address == 0) { 
10365 10225 DC_ERR("invalid register read; address = 0\n");
+4 -3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 494 494 495 495 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 496 496 /** 497 - * @crc_rd_wrk: 497 + * @secure_display_ctxs: 498 498 * 499 - * Work to be executed in a separate thread to communicate with PSP. 499 + * Store the ROI information and the work_struct to command dmub and psp for 500 + * all crtcs. 500 501 */ 501 - struct crc_rd_work *crc_rd_wrk; 502 + struct secure_display_context *secure_display_ctxs; 502 503 #endif 503 504 /** 504 505 * @hpd_rx_offload_wq:
+88 -75
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
··· 101 101 102 102 static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work) 103 103 { 104 - struct crc_rd_work *crc_rd_wrk; 105 - struct amdgpu_device *adev; 104 + struct secure_display_context *secure_display_ctx; 106 105 struct psp_context *psp; 107 - struct securedisplay_cmd *securedisplay_cmd; 106 + struct ta_securedisplay_cmd *securedisplay_cmd; 108 107 struct drm_crtc *crtc; 109 - uint8_t phy_id; 108 + struct dc_stream_state *stream; 109 + uint8_t phy_inst; 110 110 int ret; 111 111 112 - crc_rd_wrk = container_of(work, struct crc_rd_work, notify_ta_work); 113 - spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); 114 - crtc = crc_rd_wrk->crtc; 112 + secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work); 113 + crtc = secure_display_ctx->crtc; 115 114 116 115 if (!crtc) { 117 - spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); 118 116 return; 119 117 } 120 118 121 - adev = drm_to_adev(crtc->dev); 122 - psp = &adev->psp; 123 - phy_id = crc_rd_wrk->phy_inst; 124 - spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); 119 + psp = &drm_to_adev(crtc->dev)->psp; 125 120 121 + if (!psp->securedisplay_context.context.initialized) { 122 + DRM_DEBUG_DRIVER("Secure Display fails to notify PSP TA\n"); 123 + return; 124 + } 125 + 126 + stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream; 127 + phy_inst = stream->link->link_enc_hw_inst; 128 + 129 + /* need lock for multiple crtcs to use the command buffer */ 126 130 mutex_lock(&psp->securedisplay_context.mutex); 127 131 128 132 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, 129 133 TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); 130 - securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = 131 - phy_id; 134 + 135 + securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst; 136 + 137 + /* PSP TA is expected to finish data transmission over I2C within current frame, 138 + * even there are up to 4 crtcs request to send in this frame. 
139 + */ 132 140 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); 141 + 133 142 if (!ret) { 134 143 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { 135 144 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); ··· 151 142 static void 152 143 amdgpu_dm_forward_crc_window(struct work_struct *work) 153 144 { 154 - struct crc_fw_work *crc_fw_wrk; 145 + struct secure_display_context *secure_display_ctx; 155 146 struct amdgpu_display_manager *dm; 147 + struct drm_crtc *crtc; 148 + struct dc_stream_state *stream; 156 149 157 - crc_fw_wrk = container_of(work, struct crc_fw_work, forward_roi_work); 158 - dm = crc_fw_wrk->dm; 150 + secure_display_ctx = container_of(work, struct secure_display_context, forward_roi_work); 151 + crtc = secure_display_ctx->crtc; 152 + 153 + if (!crtc) 154 + return; 155 + 156 + dm = &drm_to_adev(crtc->dev)->dm; 157 + stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream; 159 158 160 159 mutex_lock(&dm->dc_lock); 161 - dc_stream_forward_crc_window(dm->dc, &crc_fw_wrk->rect, crc_fw_wrk->stream, crc_fw_wrk->is_stop_cmd); 160 + dc_stream_forward_crc_window(stream, &secure_display_ctx->rect, false); 162 161 mutex_unlock(&dm->dc_lock); 163 - 164 - kfree(crc_fw_wrk); 165 162 } 166 163 167 164 bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc) ··· 204 189 struct dm_crtc_state *dm_crtc_state, 205 190 enum amdgpu_dm_pipe_crc_source source) 206 191 { 192 + #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 193 + int i; 194 + #endif 207 195 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 208 196 struct dc_stream_state *stream_state = dm_crtc_state->stream; 209 197 bool enable = amdgpu_dm_is_valid_crc_source(source); ··· 218 200 219 201 mutex_lock(&adev->dm.dc_lock); 220 202 221 - /* Enable CRTC CRC generation if necessary. 
*/ 203 + /* Enable or disable CRTC CRC generation */ 222 204 if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) { 223 205 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 206 + /* Disable secure_display if it was enabled */ 224 207 if (!enable) { 225 - if (adev->dm.crc_rd_wrk) { 226 - flush_work(&adev->dm.crc_rd_wrk->notify_ta_work); 227 - spin_lock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock); 228 - 229 - if (adev->dm.crc_rd_wrk->crtc == crtc) { 208 + for (i = 0; i < adev->dm.dc->caps.max_links; i++) { 209 + if (adev->dm.secure_display_ctxs[i].crtc == crtc) { 230 210 /* stop ROI update on this crtc */ 231 - dc_stream_forward_crc_window(stream_state->ctx->dc, 232 - NULL, stream_state, true); 233 - adev->dm.crc_rd_wrk->crtc = NULL; 211 + flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work); 212 + flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work); 213 + dc_stream_forward_crc_window(stream_state, NULL, true); 234 214 } 235 - spin_unlock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock); 236 215 } 237 216 } 238 217 #endif ··· 362 347 } 363 348 364 349 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 350 + /* Reset secure_display when we change crc source from debugfs */ 365 351 amdgpu_dm_set_crc_window_default(crtc); 366 352 #endif 367 353 ··· 472 456 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 473 457 void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc) 474 458 { 475 - struct dc_stream_state *stream_state; 476 459 struct drm_device *drm_dev = NULL; 477 460 enum amdgpu_dm_pipe_crc_source cur_crc_src; 478 461 struct amdgpu_crtc *acrtc = NULL; 479 462 struct amdgpu_device *adev = NULL; 480 - struct crc_rd_work *crc_rd_wrk; 481 - struct crc_fw_work *crc_fw_wrk; 482 - unsigned long flags1, flags2; 463 + struct secure_display_context *secure_display_ctx = NULL; 464 + unsigned long flags1; 483 465 484 466 if (crtc == NULL) 485 467 return; ··· 487 473 drm_dev = crtc->dev; 488 474 489 475 spin_lock_irqsave(&drm_dev->event_lock, flags1); 
490 - stream_state = acrtc->dm_irq_params.stream; 491 476 cur_crc_src = acrtc->dm_irq_params.crc_src; 492 477 493 478 /* Early return if CRC capture is not enabled. */ 494 - if (!amdgpu_dm_is_valid_crc_source(cur_crc_src)) 495 - goto cleanup; 496 - 497 - if (!dm_is_crc_source_crtc(cur_crc_src)) 479 + if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) || 480 + !dm_is_crc_source_crtc(cur_crc_src)) 498 481 goto cleanup; 499 482 500 483 if (!acrtc->dm_irq_params.window_param.activated) 501 484 goto cleanup; 502 485 486 + if (acrtc->dm_irq_params.window_param.skip_frame_cnt) { 487 + acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1; 488 + goto cleanup; 489 + } 490 + 491 + secure_display_ctx = &adev->dm.secure_display_ctxs[acrtc->crtc_id]; 492 + if (WARN_ON(secure_display_ctx->crtc != crtc)) { 493 + /* We have set the crtc when creating secure_display_context, 494 + * don't expect it to be changed here. 495 + */ 496 + secure_display_ctx->crtc = crtc; 497 + } 498 + 503 499 if (acrtc->dm_irq_params.window_param.update_win) { 504 - if (acrtc->dm_irq_params.window_param.skip_frame_cnt) { 505 - acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1; 506 - goto cleanup; 507 - } 508 - 509 500 /* prepare work for dmub to update ROI */ 510 - crc_fw_wrk = kzalloc(sizeof(*crc_fw_wrk), GFP_ATOMIC); 511 - if (!crc_fw_wrk) 512 - goto cleanup; 513 - 514 - INIT_WORK(&crc_fw_wrk->forward_roi_work, amdgpu_dm_forward_crc_window); 515 - crc_fw_wrk->dm = &adev->dm; 516 - crc_fw_wrk->stream = stream_state; 517 - crc_fw_wrk->rect.x = acrtc->dm_irq_params.window_param.x_start; 518 - crc_fw_wrk->rect.y = acrtc->dm_irq_params.window_param.y_start; 519 - crc_fw_wrk->rect.width = acrtc->dm_irq_params.window_param.x_end - 501 + secure_display_ctx->rect.x = acrtc->dm_irq_params.window_param.x_start; 502 + secure_display_ctx->rect.y = acrtc->dm_irq_params.window_param.y_start; 503 + secure_display_ctx->rect.width = acrtc->dm_irq_params.window_param.x_end - 520 504 
acrtc->dm_irq_params.window_param.x_start; 521 - crc_fw_wrk->rect.height = acrtc->dm_irq_params.window_param.y_end - 505 + secure_display_ctx->rect.height = acrtc->dm_irq_params.window_param.y_end - 522 506 acrtc->dm_irq_params.window_param.y_start; 523 - schedule_work(&crc_fw_wrk->forward_roi_work); 507 + schedule_work(&secure_display_ctx->forward_roi_work); 524 508 525 509 acrtc->dm_irq_params.window_param.update_win = false; 510 + 511 + /* Statically skip 1 frame, because we may need to wait below things 512 + * before sending ROI to dmub: 513 + * 1. We defer the work by using system workqueue. 514 + * 2. We may need to wait for dc_lock before accessing dmub. 515 + */ 526 516 acrtc->dm_irq_params.window_param.skip_frame_cnt = 1; 527 517 528 518 } else { 529 - if (acrtc->dm_irq_params.window_param.skip_frame_cnt) { 530 - acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1; 531 - goto cleanup; 532 - } 533 - 534 - if (adev->dm.crc_rd_wrk) { 535 - crc_rd_wrk = adev->dm.crc_rd_wrk; 536 - spin_lock_irqsave(&crc_rd_wrk->crc_rd_work_lock, flags2); 537 - crc_rd_wrk->phy_inst = stream_state->link->link_enc_hw_inst; 538 - spin_unlock_irqrestore(&crc_rd_wrk->crc_rd_work_lock, flags2); 539 - schedule_work(&crc_rd_wrk->notify_ta_work); 540 - } 519 + /* prepare work for psp to read ROI/CRC and send to I2C */ 520 + schedule_work(&secure_display_ctx->notify_ta_work); 541 521 } 542 522 543 523 cleanup: 544 524 spin_unlock_irqrestore(&drm_dev->event_lock, flags1); 545 525 } 546 526 547 - struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void) 527 + struct secure_display_context * 528 + amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev) 548 529 { 549 - struct crc_rd_work *crc_rd_wrk = NULL; 530 + struct secure_display_context *secure_display_ctxs = NULL; 531 + int i; 550 532 551 - crc_rd_wrk = kzalloc(sizeof(*crc_rd_wrk), GFP_KERNEL); 533 + secure_display_ctxs = kcalloc(AMDGPU_MAX_CRTCS, sizeof(struct secure_display_context), GFP_KERNEL); 552 
534 553 - if (!crc_rd_wrk) 535 + if (!secure_display_ctxs) 554 536 return NULL; 555 537 556 - spin_lock_init(&crc_rd_wrk->crc_rd_work_lock); 557 - INIT_WORK(&crc_rd_wrk->notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read); 538 + for (i = 0; i < adev->dm.dc->caps.max_links; i++) { 539 + INIT_WORK(&secure_display_ctxs[i].forward_roi_work, amdgpu_dm_forward_crc_window); 540 + INIT_WORK(&secure_display_ctxs[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read); 541 + secure_display_ctxs[i].crtc = &adev->mode_info.crtcs[i]->base; 542 + } 558 543 559 - return crc_rd_wrk; 544 + return secure_display_ctxs; 560 545 } 561 546 #endif
+11 -15
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
··· 45 45 uint16_t y_start; 46 46 uint16_t x_end; 47 47 uint16_t y_end; 48 - /* CRC windwo is activated or not*/ 48 + /* CRC window is activated or not*/ 49 49 bool activated; 50 50 /* Update crc window during vertical blank or not */ 51 51 bool update_win; ··· 53 53 int skip_frame_cnt; 54 54 }; 55 55 56 - /* read_work for driver to call PSP to read */ 57 - struct crc_rd_work { 56 + struct secure_display_context { 57 + /* work to notify PSP TA*/ 58 58 struct work_struct notify_ta_work; 59 - /* To protect crc_rd_work carried fields*/ 60 - spinlock_t crc_rd_work_lock; 61 - struct drm_crtc *crtc; 62 - uint8_t phy_inst; 63 - }; 64 59 65 - /* forward_work for driver to forward ROI to dmu */ 66 - struct crc_fw_work { 60 + /* work to forward ROI to dmcu/dmub */ 67 61 struct work_struct forward_roi_work; 68 - struct amdgpu_display_manager *dm; 69 - struct dc_stream_state *stream; 62 + 63 + struct drm_crtc *crtc; 64 + 65 + /* Region of Interest (ROI) */ 70 66 struct rect rect; 71 - bool is_stop_cmd; 72 67 }; 73 68 #endif 74 69 ··· 95 100 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 96 101 bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc); 97 102 void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc); 98 - struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void); 103 + struct secure_display_context *amdgpu_dm_crtc_secure_display_create_contexts( 104 + struct amdgpu_device *adev); 99 105 #else 100 106 #define amdgpu_dm_crc_window_is_activated(x) 101 107 #define amdgpu_dm_crtc_handle_crc_window_irq(x) 102 - #define amdgpu_dm_crtc_secure_display_create_work() 108 + #define amdgpu_dm_crtc_secure_display_create_contexts() 103 109 #endif 104 110 105 111 #endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */
+24 -86
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
··· 1375 1375 1376 1376 for (i = 0; i < MAX_PIPES; i++) { 1377 1377 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 1378 - if (pipe_ctx && pipe_ctx->stream && 1378 + if (pipe_ctx->stream && 1379 1379 pipe_ctx->stream->link == aconnector->dc_link) 1380 1380 break; 1381 - } 1382 - 1383 - if (!pipe_ctx) { 1384 - kfree(rd_buf); 1385 - return -ENXIO; 1386 1381 } 1387 1382 1388 1383 dsc = pipe_ctx->stream_res.dsc; ··· 1476 1481 1477 1482 for (i = 0; i < MAX_PIPES; i++) { 1478 1483 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 1479 - if (pipe_ctx && pipe_ctx->stream && 1484 + if (pipe_ctx->stream && 1480 1485 pipe_ctx->stream->link == aconnector->dc_link) 1481 1486 break; 1482 1487 } 1483 1488 1484 - if (!pipe_ctx || !pipe_ctx->stream) 1489 + if (!pipe_ctx->stream) 1485 1490 goto done; 1486 1491 1487 1492 // Get CRTC state ··· 1561 1566 1562 1567 for (i = 0; i < MAX_PIPES; i++) { 1563 1568 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 1564 - if (pipe_ctx && pipe_ctx->stream && 1569 + if (pipe_ctx->stream && 1565 1570 pipe_ctx->stream->link == aconnector->dc_link) 1566 1571 break; 1567 - } 1568 - 1569 - if (!pipe_ctx) { 1570 - kfree(rd_buf); 1571 - return -ENXIO; 1572 1572 } 1573 1573 1574 1574 dsc = pipe_ctx->stream_res.dsc; ··· 1660 1670 1661 1671 for (i = 0; i < MAX_PIPES; i++) { 1662 1672 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 1663 - if (pipe_ctx && pipe_ctx->stream && 1673 + if (pipe_ctx->stream && 1664 1674 pipe_ctx->stream->link == aconnector->dc_link) 1665 1675 break; 1666 1676 } 1667 1677 1668 - if (!pipe_ctx || !pipe_ctx->stream) 1678 + if (!pipe_ctx->stream) 1669 1679 goto done; 1670 1680 1671 1681 // Safely get CRTC state ··· 1745 1755 1746 1756 for (i = 0; i < MAX_PIPES; i++) { 1747 1757 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 1748 - if (pipe_ctx && pipe_ctx->stream && 1758 + if (pipe_ctx->stream && 1749 1759 
pipe_ctx->stream->link == aconnector->dc_link) 1750 1760 break; 1751 - } 1752 - 1753 - if (!pipe_ctx) { 1754 - kfree(rd_buf); 1755 - return -ENXIO; 1756 1761 } 1757 1762 1758 1763 dsc = pipe_ctx->stream_res.dsc; ··· 1844 1859 1845 1860 for (i = 0; i < MAX_PIPES; i++) { 1846 1861 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 1847 - if (pipe_ctx && pipe_ctx->stream && 1862 + if (pipe_ctx->stream && 1848 1863 pipe_ctx->stream->link == aconnector->dc_link) 1849 1864 break; 1850 1865 } 1851 1866 1852 - if (!pipe_ctx || !pipe_ctx->stream) 1867 + if (!pipe_ctx->stream) 1853 1868 goto done; 1854 1869 1855 1870 // Get CRTC state ··· 1925 1940 1926 1941 for (i = 0; i < MAX_PIPES; i++) { 1927 1942 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 1928 - if (pipe_ctx && pipe_ctx->stream && 1943 + if (pipe_ctx->stream && 1929 1944 pipe_ctx->stream->link == aconnector->dc_link) 1930 1945 break; 1931 - } 1932 - 1933 - if (!pipe_ctx) { 1934 - kfree(rd_buf); 1935 - return -ENXIO; 1936 1946 } 1937 1947 1938 1948 dsc = pipe_ctx->stream_res.dsc; ··· 2021 2041 2022 2042 for (i = 0; i < MAX_PIPES; i++) { 2023 2043 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 2024 - if (pipe_ctx && pipe_ctx->stream && 2044 + if (pipe_ctx->stream && 2025 2045 pipe_ctx->stream->link == aconnector->dc_link) 2026 2046 break; 2027 2047 } 2028 2048 2029 - if (!pipe_ctx || !pipe_ctx->stream) 2049 + if (!pipe_ctx->stream) 2030 2050 goto done; 2031 2051 2032 2052 // Get CRTC state ··· 2100 2120 2101 2121 for (i = 0; i < MAX_PIPES; i++) { 2102 2122 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 2103 - if (pipe_ctx && pipe_ctx->stream && 2123 + if (pipe_ctx->stream && 2104 2124 pipe_ctx->stream->link == aconnector->dc_link) 2105 2125 break; 2106 - } 2107 - 2108 - if (!pipe_ctx) { 2109 - kfree(rd_buf); 2110 - return -ENXIO; 2111 2126 } 2112 2127 2113 2128 dsc = pipe_ctx->stream_res.dsc; ··· 2156 2181 2157 2182 for 
(i = 0; i < MAX_PIPES; i++) { 2158 2183 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 2159 - if (pipe_ctx && pipe_ctx->stream && 2184 + if (pipe_ctx->stream && 2160 2185 pipe_ctx->stream->link == aconnector->dc_link) 2161 2186 break; 2162 - } 2163 - 2164 - if (!pipe_ctx) { 2165 - kfree(rd_buf); 2166 - return -ENXIO; 2167 2187 } 2168 2188 2169 2189 dsc = pipe_ctx->stream_res.dsc; ··· 2227 2257 2228 2258 for (i = 0; i < MAX_PIPES; i++) { 2229 2259 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 2230 - if (pipe_ctx && pipe_ctx->stream && 2260 + if (pipe_ctx->stream && 2231 2261 pipe_ctx->stream->link == aconnector->dc_link) 2232 2262 break; 2233 - } 2234 - 2235 - if (!pipe_ctx) { 2236 - kfree(rd_buf); 2237 - return -ENXIO; 2238 2263 } 2239 2264 2240 2265 dsc = pipe_ctx->stream_res.dsc; ··· 2298 2333 2299 2334 for (i = 0; i < MAX_PIPES; i++) { 2300 2335 pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; 2301 - if (pipe_ctx && pipe_ctx->stream && 2336 + if (pipe_ctx->stream && 2302 2337 pipe_ctx->stream->link == aconnector->dc_link) 2303 2338 break; 2304 - } 2305 - 2306 - if (!pipe_ctx) { 2307 - kfree(rd_buf); 2308 - return -ENXIO; 2309 2339 } 2310 2340 2311 2341 dsc = pipe_ctx->stream_res.dsc; ··· 3205 3245 */ 3206 3246 static int crc_win_update_set(void *data, u64 val) 3207 3247 { 3208 - struct drm_crtc *new_crtc = data; 3209 - struct drm_crtc *old_crtc = NULL; 3210 - struct amdgpu_crtc *new_acrtc, *old_acrtc; 3211 - struct amdgpu_device *adev = drm_to_adev(new_crtc->dev); 3212 - struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk; 3213 - 3214 - if (!crc_rd_wrk) 3215 - return 0; 3248 + struct drm_crtc *crtc = data; 3249 + struct amdgpu_crtc *acrtc; 3250 + struct amdgpu_device *adev = drm_to_adev(crtc->dev); 3216 3251 3217 3252 if (val) { 3218 - new_acrtc = to_amdgpu_crtc(new_crtc); 3253 + acrtc = to_amdgpu_crtc(crtc); 3219 3254 mutex_lock(&adev->dm.dc_lock); 3220 3255 /* PSR may write to OTG 
CRC window control register, 3221 3256 * so close it before starting secure_display. 3222 3257 */ 3223 - amdgpu_dm_psr_disable(new_acrtc->dm_irq_params.stream); 3258 + amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream); 3224 3259 3225 3260 spin_lock_irq(&adev_to_drm(adev)->event_lock); 3226 - spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); 3227 - if (crc_rd_wrk->crtc) { 3228 - old_crtc = crc_rd_wrk->crtc; 3229 - old_acrtc = to_amdgpu_crtc(old_crtc); 3230 - } 3231 3261 3232 - if (old_crtc && old_crtc != new_crtc) { 3233 - old_acrtc->dm_irq_params.window_param.activated = false; 3234 - old_acrtc->dm_irq_params.window_param.update_win = false; 3235 - old_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0; 3262 + acrtc->dm_irq_params.window_param.activated = true; 3263 + acrtc->dm_irq_params.window_param.update_win = true; 3264 + acrtc->dm_irq_params.window_param.skip_frame_cnt = 0; 3236 3265 3237 - new_acrtc->dm_irq_params.window_param.activated = true; 3238 - new_acrtc->dm_irq_params.window_param.update_win = true; 3239 - new_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0; 3240 - crc_rd_wrk->crtc = new_crtc; 3241 - } else { 3242 - new_acrtc->dm_irq_params.window_param.activated = true; 3243 - new_acrtc->dm_irq_params.window_param.update_win = true; 3244 - new_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0; 3245 - crc_rd_wrk->crtc = new_crtc; 3246 - } 3247 - spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); 3248 3266 spin_unlock_irq(&adev_to_drm(adev)->event_lock); 3249 3267 mutex_unlock(&adev->dm.dc_lock); 3250 3268 }
+14
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
··· 52 52 struct mod_hdcp_link link; 53 53 54 54 enum mod_hdcp_encryption_status encryption_status; 55 + 56 + /* when display is unplugged from mst hub, connctor will be 57 + * destroyed within dm_dp_mst_connector_destroy. connector 58 + * hdcp perperties, like type, undesired, desired, enabled, 59 + * will be lost. So, save hdcp properties into hdcp_work within 60 + * amdgpu_dm_atomic_commit_tail. if the same display is 61 + * plugged back with same display index, its hdcp properties 62 + * will be retrieved from hdcp_work within dm_dp_mst_get_modes 63 + */ 64 + /* un-desired, desired, enabled */ 65 + unsigned int content_protection[AMDGPU_DM_MAX_DISPLAY_INDEX]; 66 + /* hdcp1.x, hdcp2.x */ 67 + unsigned int hdcp_content_type[AMDGPU_DM_MAX_DISPLAY_INDEX]; 68 + 55 69 uint8_t max_link; 56 70 57 71 uint8_t *srm;
+26
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 32 32 #include "amdgpu_dm.h" 33 33 #include "amdgpu_dm_mst_types.h" 34 34 35 + #ifdef CONFIG_DRM_AMD_DC_HDCP 36 + #include "amdgpu_dm_hdcp.h" 37 + #endif 38 + 35 39 #include "dc.h" 36 40 #include "dm_helpers.h" 37 41 ··· 347 343 dc_sink->priv = aconnector; 348 344 /* dc_link_add_remote_sink returns a new reference */ 349 345 aconnector->dc_sink = dc_sink; 346 + 347 + /* when display is unplugged from mst hub, connctor will be 348 + * destroyed within dm_dp_mst_connector_destroy. connector 349 + * hdcp perperties, like type, undesired, desired, enabled, 350 + * will be lost. So, save hdcp properties into hdcp_work within 351 + * amdgpu_dm_atomic_commit_tail. if the same display is 352 + * plugged back with same display index, its hdcp properties 353 + * will be retrieved from hdcp_work within dm_dp_mst_get_modes 354 + */ 355 + #ifdef CONFIG_DRM_AMD_DC_HDCP 356 + if (aconnector->dc_sink && connector->state) { 357 + struct drm_device *dev = connector->dev; 358 + struct amdgpu_device *adev = drm_to_adev(dev); 359 + struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; 360 + struct hdcp_workqueue *hdcp_w = &hdcp_work[aconnector->dc_link->link_index]; 361 + 362 + connector->state->hdcp_content_type = 363 + hdcp_w->hdcp_content_type[connector->index]; 364 + connector->state->content_protection = 365 + hdcp_w->content_protection[connector->index]; 366 + } 367 + #endif 350 368 351 369 if (aconnector->dc_sink) { 352 370 amdgpu_dm_update_freesync_caps(
+54 -52
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 382 382 } 383 383 384 384 /** 385 - * dc_stream_adjust_vmin_vmax: 385 + * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR 386 + * @dc: dc reference 387 + * @stream: Initial dc stream state 388 + * @adjust: Updated parameters for vertical_total_min and vertical_total_max 386 389 * 387 390 * Looks up the pipe context of dc_stream_state and updates the 388 391 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh 389 392 * Rate, which is a power-saving feature that targets reducing panel 390 393 * refresh rate while the screen is static 391 394 * 392 - * @dc: dc reference 393 - * @stream: Initial dc stream state 394 - * @adjust: Updated parameters for vertical_total_min and vertical_total_max 395 + * Return: %true if the pipe context is found and adjusted; 396 + * %false if the pipe context is not found. 395 397 */ 396 398 bool dc_stream_adjust_vmin_vmax(struct dc *dc, 397 399 struct dc_stream_state *stream, ··· 421 419 } 422 420 423 421 /** 424 - * dc_stream_get_last_used_drr_vtotal - dc_stream_get_last_vrr_vtotal 422 + * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of 423 + * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate) 425 424 * 426 425 * @dc: [in] dc reference 427 426 * @stream: [in] Initial dc stream state 428 - * @adjust: [in] Updated parameters for vertical_total_min and 427 + * @refresh_rate: [in] new refresh_rate 429 428 * 430 - * Looks up the pipe context of dc_stream_state and gets the last VTOTAL used 431 - * by DRR (Dynamic Refresh Rate) 429 + * Return: %true if the pipe context is found and there is an associated 430 + * timing_generator for the DC; 431 + * %false if the pipe context is not found or there is no 432 + * timing_generator for the DC. 
432 433 */ 433 434 bool dc_stream_get_last_used_drr_vtotal(struct dc *dc, 434 435 struct dc_stream_state *stream, ··· 523 518 } 524 519 525 520 bool 526 - dc_stream_forward_crc_window(struct dc *dc, 527 - struct rect *rect, struct dc_stream_state *stream, bool is_stop) 521 + dc_stream_forward_crc_window(struct dc_stream_state *stream, 522 + struct rect *rect, bool is_stop) 528 523 { 529 524 struct dmcu *dmcu; 530 525 struct dc_dmub_srv *dmub_srv; 531 526 struct otg_phy_mux mux_mapping; 532 527 struct pipe_ctx *pipe; 533 528 int i; 529 + struct dc *dc = stream->ctx->dc; 534 530 535 531 for (i = 0; i < MAX_PIPES; i++) { 536 532 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; ··· 572 566 * once. 573 567 * 574 568 * By default, only CRC0 is configured, and the entire frame is used to 575 - * calculate the crc. 569 + * calculate the CRC. 570 + * 571 + * Return: %false if the stream is not found or CRC capture is not supported; 572 + * %true if the stream has been configured. 576 573 */ 577 574 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, 578 575 struct crc_params *crc_window, bool enable, bool continuous) ··· 644 635 * dc_stream_configure_crc needs to be called beforehand to enable CRCs. 645 636 * 646 637 * Return: 647 - * false if stream is not found, or if CRCs are not enabled. 638 + * %false if stream is not found, or if CRCs are not enabled. 648 639 */ 649 640 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, 650 641 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) ··· 1749 1740 * 1750 1741 * Applies given context to the hardware and copy it into current context. 1751 1742 * It's up to the user to release the src context afterwards. 
1743 + * 1744 + * Return: an enum dc_status result code for the operation 1752 1745 */ 1753 1746 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context) 1754 1747 { ··· 2018 2007 return result == DC_OK; 2019 2008 } 2020 2009 2021 - if (!streams_changed(dc, context->streams, context->stream_count)) 2010 + if (!streams_changed(dc, context->streams, context->stream_count)) { 2022 2011 return DC_OK; 2012 + } 2023 2013 2024 2014 DC_LOG_DC("%s: %d streams\n", 2025 2015 __func__, context->stream_count); ··· 3337 3325 struct pipe_ctx *top_pipe_to_program = NULL; 3338 3326 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); 3339 3327 bool subvp_prev_use = false; 3328 + bool subvp_curr_use = false; 3340 3329 3341 3330 // Once we apply the new subvp context to hardware it won't be in the 3342 3331 // dc->current_state anymore, so we have to cache it before we apply ··· 3392 3379 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); 3393 3380 if (subvp_prev_use) 3394 3381 break; 3382 + } 3383 + 3384 + for (i = 0; i < dc->res_pool->pipe_count; i++) { 3385 + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3386 + 3387 + if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 3388 + subvp_curr_use = true; 3389 + break; 3390 + } 3395 3391 } 3396 3392 3397 3393 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { ··· 3674 3652 top_pipe_to_program->stream_res.tg); 3675 3653 } 3676 3654 3677 - /* For phantom pipe OTG enable, it has to be done after any previous pipe 3678 - * that was in use has already been programmed at gotten its double buffer 3679 - * update for "disable". 
3680 - */ 3681 - if (update_type != UPDATE_TYPE_FAST) { 3682 - for (i = 0; i < dc->res_pool->pipe_count; i++) { 3683 - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3684 - struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3685 - 3686 - /* If an active, non-phantom pipe is being transitioned into a phantom 3687 - * pipe, wait for the double buffer update to complete first before we do 3688 - * ANY phantom pipe programming. 3689 - */ 3690 - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM && 3691 - old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { 3692 - old_pipe->stream_res.tg->funcs->wait_for_state( 3693 - old_pipe->stream_res.tg, 3694 - CRTC_STATE_VBLANK); 3695 - old_pipe->stream_res.tg->funcs->wait_for_state( 3696 - old_pipe->stream_res.tg, 3697 - CRTC_STATE_VACTIVE); 3698 - } 3655 + if (subvp_curr_use) { 3656 + /* If enabling subvp or transitioning from subvp->subvp, enable the 3657 + * phantom streams before we program front end for the phantom pipes. 3658 + */ 3659 + if (update_type != UPDATE_TYPE_FAST) { 3660 + if (dc->hwss.enable_phantom_streams) 3661 + dc->hwss.enable_phantom_streams(dc, context); 3699 3662 } 3700 - for (i = 0; i < dc->res_pool->pipe_count; i++) { 3701 - struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; 3663 + } 3702 3664 3703 - if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) || 3704 - subvp_prev_use) { 3705 - // If old context or new context has phantom pipes, apply 3706 - // the phantom timings now. We can't change the phantom 3707 - // pipe configuration safely without driver acquiring 3708 - // the DMCUB lock first. 
3709 - dc->hwss.apply_ctx_to_hw(dc, context); 3710 - break; 3711 - } 3712 - } 3665 + if (subvp_prev_use && !subvp_curr_use) { 3666 + /* If disabling subvp, disable phantom streams after front end 3667 + * programming has completed (we turn on phantom OTG in order 3668 + * to complete the plane disable for phantom pipes). 3669 + */ 3670 + dc->hwss.apply_ctx_to_hw(dc, context); 3713 3671 } 3714 3672 3715 3673 if (update_type != UPDATE_TYPE_FAST) ··· 4706 4704 /** 4707 4705 * dc_enable_dmub_outbox - Enables DMUB unsolicited notification 4708 4706 * 4709 - * dc: [in] dc structure 4707 + * @dc: [in] dc structure 4710 4708 * 4711 4709 * Enables DMUB unsolicited notifications to x86 via outbox. 4712 4710 */ ··· 4907 4905 /** 4908 4906 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA DPD interruption 4909 4907 * 4910 - * @dc [in]: dc structure 4911 - * @hpd_int_enable [in]: 1 for hpd int enable, 0 to disable 4908 + * @dc: [in] dc structure 4909 + * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable 4912 4910 * 4913 4911 * Submits dpia hpd int enable command to dmub via inbox message 4914 4912 */ ··· 4989 4987 } 4990 4988 4991 4989 /** 4992 - * dc_extended_blank_supported 0 Decide whether extended blank is supported 4990 + * dc_extended_blank_supported - Decide whether extended blank is supported 4993 4991 * 4994 4992 * @dc: [in] Current DC state 4995 4993 * ··· 4998 4996 * ability to enter z9/z10. 4999 4997 * 5000 4998 * Return: 5001 - * Indicate whether extended blank is supported (true or false) 4999 + * Indicate whether extended blank is supported (%true or %false) 5002 5000 */ 5003 5001 bool dc_extended_blank_supported(struct dc *dc) 5004 5002 {
-9
drivers/gpu/drm/amd/display/dc/core/dc_link.c
··· 1916 1916 if (false == dc_link_construct(link, init_params)) 1917 1917 goto construct_fail; 1918 1918 1919 - /* 1920 - * Must use preferred_link_setting, not reported_link_cap or verified_link_cap, 1921 - * since struct preferred_link_setting won't be reset after S3. 1922 - */ 1923 - link->preferred_link_setting.dpcd_source_device_specific_field_support = true; 1924 - 1925 1919 return link; 1926 1920 1927 1921 construct_fail: ··· 4649 4655 4650 4656 if (link_setting != NULL) { 4651 4657 link->preferred_link_setting = *link_setting; 4652 - if (dp_get_link_encoding_format(link_setting) == DP_128b_132b_ENCODING) 4653 - /* TODO: add dc update for acquiring link res */ 4654 - skip_immediate_retrain = true; 4655 4658 } else { 4656 4659 link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN; 4657 4660 link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN;
+51 -3
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 3810 3810 int i; 3811 3811 struct pipe_ctx *pipe_ctx, *pipe_ctx_check; 3812 3812 3813 + DC_LOGGER_INIT(dc->ctx->logger); 3814 + 3813 3815 pipe_ctx = &context->res_ctx.pipe_ctx[disabled_master_pipe_idx]; 3814 3816 if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx) != disabled_master_pipe_idx) || 3815 3817 !IS_PIPE_SYNCD_VALID(pipe_ctx)) ··· 3822 3820 pipe_ctx_check = &context->res_ctx.pipe_ctx[i]; 3823 3821 3824 3822 if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == disabled_master_pipe_idx) && 3825 - IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) 3826 - DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n", 3827 - i, disabled_master_pipe_idx); 3823 + IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) { 3824 + /* On dcn32, this error isn't fatal since hw supports odm transition in fast update*/ 3825 + if (dc->ctx->dce_version == DCN_VERSION_3_2 || 3826 + dc->ctx->dce_version == DCN_VERSION_3_21) 3827 + DC_LOG_DEBUG("DC: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n", 3828 + i, disabled_master_pipe_idx); 3829 + else 3830 + DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n", 3831 + i, disabled_master_pipe_idx); 3832 + } 3828 3833 } 3829 3834 } 3830 3835 ··· 3990 3981 3991 3982 return true; 3992 3983 } 3984 + 3985 + enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, 3986 + struct dc_state *context, 3987 + struct pipe_ctx *pipe_ctx) 3988 + { 3989 + if (dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) { 3990 + if (pipe_ctx->stream_res.hpo_dp_stream_enc == NULL) { 3991 + pipe_ctx->stream_res.hpo_dp_stream_enc = 3992 + find_first_free_match_hpo_dp_stream_enc_for_link( 3993 + &context->res_ctx, dc->res_pool, pipe_ctx->stream); 3994 + 3995 + if (!pipe_ctx->stream_res.hpo_dp_stream_enc) 3996 + return DC_NO_STREAM_ENC_RESOURCE; 3997 + 3998 + update_hpo_dp_stream_engine_usage( 3999 + &context->res_ctx, 
dc->res_pool, 4000 + pipe_ctx->stream_res.hpo_dp_stream_enc, 4001 + true); 4002 + } 4003 + 4004 + if (pipe_ctx->link_res.hpo_dp_link_enc == NULL) { 4005 + if (!add_hpo_dp_link_enc_to_ctx(&context->res_ctx, dc->res_pool, pipe_ctx, pipe_ctx->stream)) 4006 + return DC_NO_LINK_ENC_RESOURCE; 4007 + } 4008 + } else { 4009 + if (pipe_ctx->stream_res.hpo_dp_stream_enc) { 4010 + update_hpo_dp_stream_engine_usage( 4011 + &context->res_ctx, dc->res_pool, 4012 + pipe_ctx->stream_res.hpo_dp_stream_enc, 4013 + false); 4014 + pipe_ctx->stream_res.hpo_dp_stream_enc = NULL; 4015 + } 4016 + if (pipe_ctx->link_res.hpo_dp_link_enc) 4017 + remove_hpo_dp_link_enc_from_ctx(&context->res_ctx, pipe_ctx, pipe_ctx->stream); 4018 + } 4019 + 4020 + return DC_OK; 4021 + } 4022 +
+1
drivers/gpu/drm/amd/display/dc/core/dc_stat.c
··· 65 65 /* For HPD/HPD RX, convert dpia port index into link index */ 66 66 if (notify->type == DMUB_NOTIFICATION_HPD || 67 67 notify->type == DMUB_NOTIFICATION_HPD_IRQ || 68 + notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION || 68 69 notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) { 69 70 notify->link_index = 70 71 get_link_index_from_dpia_port_index(dc, notify->link_index);
+8 -1
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 481 481 } 482 482 483 483 if (!isDrc) { 484 + ASSERT(stream->num_wb_info + 1 <= MAX_DWB_PIPES); 484 485 stream->writeback_info[stream->num_wb_info++] = *wb_info; 485 486 } 486 487 ··· 527 526 return false; 528 527 } 529 528 529 + if (stream->num_wb_info > MAX_DWB_PIPES) { 530 + dm_error("DC: num_wb_info is invalid!\n"); 531 + return false; 532 + } 533 + 530 534 // stream->writeback_info[dwb_pipe_inst].wb_enabled = false; 531 535 for (i = 0; i < stream->num_wb_info; i++) { 532 536 /*dynamic update*/ ··· 546 540 if (stream->writeback_info[i].wb_enabled) { 547 541 if (j < i) 548 542 /* trim the array */ 549 - stream->writeback_info[j] = stream->writeback_info[i]; 543 + memcpy(&stream->writeback_info[j], &stream->writeback_info[i], 544 + sizeof(struct dc_writeback_info)); 550 545 j++; 551 546 } 552 547 }
+2 -1
drivers/gpu/drm/amd/display/dc/dc.h
··· 47 47 struct set_config_cmd_payload; 48 48 struct dmub_notification; 49 49 50 - #define DC_VER "3.2.215" 50 + #define DC_VER "3.2.217" 51 51 52 52 #define MAX_SURFACES 3 53 53 #define MAX_PLANES 6 ··· 872 872 enum lttpr_mode lttpr_mode_override; 873 873 unsigned int dsc_delay_factor_wa_x1000; 874 874 unsigned int min_prefetch_in_strobe_ns; 875 + bool disable_unbounded_requesting; 875 876 }; 876 877 877 878 struct gpu_info_soc_bounding_box_v1_0;
+3 -1
drivers/gpu/drm/amd/display/dc/dc_dp_types.h
··· 149 149 enum dc_link_spread link_spread; 150 150 bool use_link_rate_set; 151 151 uint8_t link_rate_set; 152 - bool dpcd_source_device_specific_field_support; 153 152 }; 154 153 155 154 union dc_dp_ffe_preset { ··· 924 925 #endif 925 926 #ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL 926 927 #define DP_128b_132b_TRAINING_AUX_RD_INTERVAL 0x2216 928 + #endif 929 + #ifndef DP_LINK_SQUARE_PATTERN 930 + #define DP_LINK_SQUARE_PATTERN 0x10F 927 931 #endif 928 932 #ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX 929 933 #define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX 0x2217
+9 -6
drivers/gpu/drm/amd/display/dc/dc_link.h
··· 335 335 unsigned int *inst_out) 336 336 { 337 337 struct dc_link *edp_links[MAX_NUM_EDP]; 338 - int edp_num; 338 + int edp_num, i; 339 339 340 - if (link->connector_signal != SIGNAL_TYPE_EDP) 340 + *inst_out = 0; 341 + if (link->connector_signal != SIGNAL_TYPE_EDP || !link->local_sink) 341 342 return false; 342 343 get_edp_links(dc, edp_links, &edp_num); 343 - if ((edp_num > 1) && (link->link_index > edp_links[0]->link_index)) 344 - *inst_out = 1; 345 - else 346 - *inst_out = 0; 344 + for (i = 0; i < edp_num; i++) { 345 + if (link == edp_links[i]) 346 + break; 347 + if (edp_links[i]->local_sink) 348 + (*inst_out)++; 349 + } 347 350 return true; 348 351 } 349 352
+1 -2
drivers/gpu/drm/amd/display/dc/dc_stream.h
··· 543 543 unsigned int *nom_v_pos); 544 544 545 545 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 546 - bool dc_stream_forward_crc_window(struct dc *dc, 546 + bool dc_stream_forward_crc_window(struct dc_stream_state *stream, 547 547 struct rect *rect, 548 - struct dc_stream_state *stream, 549 548 bool is_stop); 550 549 #endif 551 550
+13
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
··· 1142 1142 struct dc_link *link = stream->link; 1143 1143 struct dc *dc = pipe_ctx->stream->ctx->dc; 1144 1144 const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); 1145 + struct dccg *dccg = dc->res_pool->dccg; 1146 + struct timing_generator *tg = pipe_ctx->stream_res.tg; 1147 + struct dtbclk_dto_params dto_params = {0}; 1148 + int dp_hpo_inst; 1145 1149 1146 1150 if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) { 1147 1151 pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets( ··· 1164 1160 dc->hwss.disable_audio_stream(pipe_ctx); 1165 1161 1166 1162 link_hwss->reset_stream_encoder(pipe_ctx); 1163 + 1164 + if (is_dp_128b_132b_signal(pipe_ctx)) { 1165 + dto_params.otg_inst = tg->inst; 1166 + dto_params.timing = &pipe_ctx->stream->timing; 1167 + dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; 1168 + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); 1169 + dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); 1170 + dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst); 1171 + } 1167 1172 1168 1173 if (is_dp_128b_132b_signal(pipe_ctx)) { 1169 1174 /* TODO: This looks like a bug to me as we are disabling HPO IO when
-1
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
··· 200 200 bool is_config_ok; 201 201 struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); 202 202 203 - DC_LOG_DSC(" "); 204 203 DC_LOG_DSC("Setting DSC Config at DSC inst %d", dsc->inst); 205 204 dsc_config_log(dsc, dsc_cfg); 206 205 is_config_ok = dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, dsc_optc_cfg);
+86
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
··· 582 582 if (pipe_ctx->stream_res.gsl_group != 0) 583 583 dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false); 584 584 585 + if (hubp->funcs->hubp_update_mall_sel) 586 + hubp->funcs->hubp_update_mall_sel(hubp, 0, false); 587 + 585 588 dc->hwss.set_flip_control_gsl(pipe_ctx, false); 586 589 587 590 hubp->funcs->hubp_clk_cntl(hubp, false); ··· 608 605 609 606 void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) 610 607 { 608 + bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom; 609 + struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL; 610 + 611 611 DC_LOGGER_INIT(dc->ctx->logger); 612 612 613 613 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated) 614 614 return; 615 615 616 616 dcn20_plane_atomic_disable(dc, pipe_ctx); 617 + 618 + /* Turn back off the phantom OTG after the phantom plane is fully disabled 619 + */ 620 + if (is_phantom) 621 + if (tg && tg->funcs->disable_phantom_crtc) 622 + tg->funcs->disable_phantom_crtc(tg); 617 623 618 624 DC_LOG_DC("Power down front end %d\n", 619 625 pipe_ctx->pipe_idx); ··· 1815 1803 dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i], 1816 1804 &context->res_ctx.pipe_ctx[i]); 1817 1805 1806 + /* When disabling phantom pipes, turn on phantom OTG first (so we can get double 1807 + * buffer updates properly) 1808 + */ 1809 + for (i = 0; i < dc->res_pool->pipe_count; i++) 1810 + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable 1811 + && dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { 1812 + struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg; 1813 + 1814 + if (tg->funcs->enable_crtc) 1815 + tg->funcs->enable_crtc(tg); 1816 + } 1817 + 1818 1818 /* OTG blank before disabling all front ends */ 1819 1819 for (i = 0; i < dc->res_pool->pipe_count; i++) 1820 1820 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable ··· 2639 2615 hubp->mpcc_id = mpcc_id; 
2640 2616 } 2641 2617 2618 + static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) 2619 + { 2620 + switch (link->link_enc->transmitter) { 2621 + case TRANSMITTER_UNIPHY_A: 2622 + return PHYD32CLKA; 2623 + case TRANSMITTER_UNIPHY_B: 2624 + return PHYD32CLKB; 2625 + case TRANSMITTER_UNIPHY_C: 2626 + return PHYD32CLKC; 2627 + case TRANSMITTER_UNIPHY_D: 2628 + return PHYD32CLKD; 2629 + case TRANSMITTER_UNIPHY_E: 2630 + return PHYD32CLKE; 2631 + default: 2632 + return PHYD32CLKA; 2633 + } 2634 + } 2635 + 2636 + static int get_odm_segment_count(struct pipe_ctx *pipe_ctx) 2637 + { 2638 + struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 2639 + int count = 1; 2640 + 2641 + while (odm_pipe != NULL) { 2642 + count++; 2643 + odm_pipe = odm_pipe->next_odm_pipe; 2644 + } 2645 + 2646 + return count; 2647 + } 2648 + 2642 2649 void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) 2643 2650 { 2644 2651 enum dc_lane_count lane_count = ··· 2683 2628 struct timing_generator *tg = pipe_ctx->stream_res.tg; 2684 2629 const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); 2685 2630 struct dc *dc = pipe_ctx->stream->ctx->dc; 2631 + struct dtbclk_dto_params dto_params = {0}; 2632 + struct dccg *dccg = dc->res_pool->dccg; 2633 + enum phyd32clk_clock_source phyd32clk; 2634 + int dp_hpo_inst; 2635 + struct dce_hwseq *hws = dc->hwseq; 2636 + unsigned int k1_div = PIXEL_RATE_DIV_NA; 2637 + unsigned int k2_div = PIXEL_RATE_DIV_NA; 2686 2638 2687 2639 if (is_dp_128b_132b_signal(pipe_ctx)) { 2688 2640 if (dc->hwseq->funcs.setup_hpo_hw_control) 2689 2641 dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true); 2642 + } 2643 + 2644 + if (is_dp_128b_132b_signal(pipe_ctx)) { 2645 + dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst; 2646 + dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst); 2647 + 2648 + phyd32clk = get_phyd32clk_src(link); 2649 + dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); 2650 + 2651 + 
dto_params.otg_inst = tg->inst; 2652 + dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; 2653 + dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); 2654 + dto_params.timing = &pipe_ctx->stream->timing; 2655 + dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); 2656 + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); 2657 + } 2658 + 2659 + if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) { 2660 + hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div); 2661 + 2662 + dc->res_pool->dccg->funcs->set_pixel_rate_div( 2663 + dc->res_pool->dccg, 2664 + pipe_ctx->stream_res.tg->inst, 2665 + k1_div, k2_div); 2690 2666 } 2691 2667 2692 2668 link_hwss->setup_stream_encoder(pipe_ctx);
+3
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
··· 1389 1389 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1390 1390 struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i]; 1391 1391 1392 + if (pipe_ctx->top_pipe) 1393 + continue; 1394 + 1392 1395 if (pipe_ctx->stream != dc_stream) 1393 1396 continue; 1394 1397
+2 -1
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
··· 1414 1414 .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, 1415 1415 .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, 1416 1416 .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, 1417 - .update_bw_bounding_box = dcn301_update_bw_bounding_box 1417 + .update_bw_bounding_box = dcn301_update_bw_bounding_box, 1418 + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state 1418 1419 }; 1419 1420 1420 1421 static bool dcn301_resource_construct(
-40
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
··· 623 623 if (hws->ctx->dc->debug.hpo_optimization) 624 624 REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable); 625 625 } 626 - void dcn31_set_drr(struct pipe_ctx **pipe_ctx, 627 - int num_pipes, struct dc_crtc_timing_adjust adjust) 628 - { 629 - int i = 0; 630 - struct drr_params params = {0}; 631 - unsigned int event_triggers = 0x2;/*Bit[1]: OTG_TRIG_A*/ 632 - unsigned int num_frames = 2; 633 - params.vertical_total_max = adjust.v_total_max; 634 - params.vertical_total_min = adjust.v_total_min; 635 - params.vertical_total_mid = adjust.v_total_mid; 636 - params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num; 637 - for (i = 0; i < num_pipes; i++) { 638 - if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) { 639 - if (pipe_ctx[i]->stream_res.tg->funcs->set_drr) 640 - pipe_ctx[i]->stream_res.tg->funcs->set_drr( 641 - pipe_ctx[i]->stream_res.tg, &params); 642 - if (adjust.v_total_max != 0 && adjust.v_total_min != 0) 643 - if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control) 644 - pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control( 645 - pipe_ctx[i]->stream_res.tg, 646 - event_triggers, num_frames); 647 - } 648 - } 649 - } 650 - void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx, 651 - int num_pipes, const struct dc_static_screen_params *params) 652 - { 653 - unsigned int i; 654 - unsigned int triggers = 0; 655 - if (params->triggers.surface_update) 656 - triggers |= 0x600;/*bit 9 and bit10 : 110 0000 0000*/ 657 - if (params->triggers.cursor_update) 658 - triggers |= 0x10;/*bit4*/ 659 - if (params->triggers.force_trigger) 660 - triggers |= 0x1; 661 - for (i = 0; i < num_pipes; i++) 662 - pipe_ctx[i]->stream_res.tg->funcs-> 663 - set_static_screen_control(pipe_ctx[i]->stream_res.tg, 664 - triggers, params->num_frames); 665 - }
-4
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
··· 56 56 void dcn31_init_pipes(struct dc *dc, struct dc_state *context); 57 57 void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable); 58 58 59 - void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx, 60 - int num_pipes, const struct dc_static_screen_params *params); 61 - void dcn31_set_drr(struct pipe_ctx **pipe_ctx, 62 - int num_pipes, struct dc_crtc_timing_adjust adjust); 63 59 #endif /* __DC_HWSS_DCN31_H__ */
+2 -2
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
··· 64 64 .prepare_bandwidth = dcn20_prepare_bandwidth, 65 65 .optimize_bandwidth = dcn20_optimize_bandwidth, 66 66 .update_bandwidth = dcn20_update_bandwidth, 67 - .set_drr = dcn31_set_drr, 67 + .set_drr = dcn10_set_drr, 68 68 .get_position = dcn10_get_position, 69 - .set_static_screen_control = dcn31_set_static_screen_control, 69 + .set_static_screen_control = dcn10_set_static_screen_control, 70 70 .setup_stereo = dcn10_setup_stereo, 71 71 .set_avmute = dcn30_set_avmute, 72 72 .log_hw_state = dcn10_log_hw_state,
+1 -28
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
··· 40 40 #define FN(reg_name, field_name) \ 41 41 optc1->tg_shift->field_name, optc1->tg_mask->field_name 42 42 43 - #define STATIC_SCREEN_EVENT_MASK_DRR_DOUBLE_BUFFER_UPDATE_EN 0x2000 /*bit 13*/ 44 43 static void optc31_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, 45 44 struct dc_crtc_timing *timing) 46 45 { ··· 231 232 OPTC_MEM_SEL, 0); 232 233 optc1->opp_count = 1; 233 234 } 234 - void optc31_set_static_screen_control( 235 - struct timing_generator *optc, 236 - uint32_t event_triggers, 237 - uint32_t num_frames) 238 - { 239 - struct optc *optc1 = DCN10TG_FROM_TG(optc); 240 - uint32_t framecount; 241 - uint32_t events; 242 - 243 - if (num_frames > 0xFF) 244 - num_frames = 0xFF; 245 - REG_GET_2(OTG_STATIC_SCREEN_CONTROL, 246 - OTG_STATIC_SCREEN_EVENT_MASK, &events, 247 - OTG_STATIC_SCREEN_FRAME_COUNT, &framecount); 248 - 249 - if (events == event_triggers && num_frames == framecount) 250 - return; 251 - if ((event_triggers & STATIC_SCREEN_EVENT_MASK_DRR_DOUBLE_BUFFER_UPDATE_EN) 252 - != 0) 253 - event_triggers = event_triggers & 254 - ~STATIC_SCREEN_EVENT_MASK_DRR_DOUBLE_BUFFER_UPDATE_EN; 255 - 256 - REG_UPDATE_2(OTG_STATIC_SCREEN_CONTROL, 257 - OTG_STATIC_SCREEN_EVENT_MASK, event_triggers, 258 - OTG_STATIC_SCREEN_FRAME_COUNT, num_frames); 259 - } 260 235 261 236 static struct timing_generator_funcs dcn31_tg_funcs = { 262 237 .validate_timing = optc1_validate_timing, ··· 266 293 .set_drr = optc31_set_drr, 267 294 .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, 268 295 .set_vtotal_min_max = optc1_set_vtotal_min_max, 269 - .set_static_screen_control = optc31_set_static_screen_control, 296 + .set_static_screen_control = optc1_set_static_screen_control, 270 297 .program_stereo = optc1_program_stereo, 271 298 .is_stereo_left_eye = optc1_is_stereo_left_eye, 272 299 .tg_init = optc3_tg_init,
+1 -4
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
··· 263 263 void optc31_set_drr(struct timing_generator *optc, const struct drr_params *params); 264 264 265 265 void optc3_init_odm(struct timing_generator *optc); 266 - void optc31_set_static_screen_control( 267 - struct timing_generator *optc, 268 - uint32_t event_triggers, 269 - uint32_t num_frames); 266 + 270 267 #endif /* __DC_OPTC_DCN31_H__ */
+3 -3
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
··· 278 278 struct dc_link *link, 279 279 struct stream_encoder *enc) 280 280 { 281 - /* New to DCN314 - disable the FIFO before VID stream disable. */ 282 - enc314_disable_fifo(enc); 283 - 284 281 enc1_stream_encoder_dp_blank(link, enc); 282 + 283 + /* Disable FIFO after the DP vid stream is disabled to avoid corruption. */ 284 + enc314_disable_fifo(enc); 285 285 } 286 286 287 287 static void enc314_stream_encoder_dp_unblank(
+2 -2
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
··· 66 66 .prepare_bandwidth = dcn20_prepare_bandwidth, 67 67 .optimize_bandwidth = dcn20_optimize_bandwidth, 68 68 .update_bandwidth = dcn20_update_bandwidth, 69 - .set_drr = dcn31_set_drr, 69 + .set_drr = dcn10_set_drr, 70 70 .get_position = dcn10_get_position, 71 - .set_static_screen_control = dcn31_set_static_screen_control, 71 + .set_static_screen_control = dcn10_set_static_screen_control, 72 72 .setup_stereo = dcn10_setup_stereo, 73 73 .set_avmute = dcn30_set_avmute, 74 74 .log_hw_state = dcn10_log_hw_state,
+1 -2
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
··· 228 228 .set_drr = optc31_set_drr, 229 229 .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, 230 230 .set_vtotal_min_max = optc1_set_vtotal_min_max, 231 - .set_static_screen_control = optc31_set_static_screen_control, 231 + .set_static_screen_control = optc1_set_static_screen_control, 232 232 .program_stereo = optc1_program_stereo, 233 233 .is_stereo_left_eye = optc1_is_stereo_left_eye, 234 234 .tg_init = optc3_tg_init, ··· 241 241 .set_dsc_config = optc3_set_dsc_config, 242 242 .get_dsc_status = optc2_get_dsc_status, 243 243 .set_dwb_source = NULL, 244 - .set_odm_combine = optc314_set_odm_combine, 245 244 .get_optc_source = optc2_get_optc_source, 246 245 .set_out_mux = optc3_set_out_mux, 247 246 .set_drr_trigger_window = optc3_set_drr_trigger_window,
+38 -1
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
··· 188 188 189 189 /* First, check no-memory-request case */ 190 190 for (i = 0; i < dc->current_state->stream_count; i++) { 191 - if (dc->current_state->stream_status[i].plane_count) 191 + if ((dc->current_state->stream_status[i].plane_count) && 192 + (dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)) 192 193 /* Fail eligibility on a visible stream */ 193 194 break; 194 195 } ··· 1448 1447 if (is_dsc_ungated) { 1449 1448 hws->funcs.dsc_pg_control(hws, dsc->inst, false); 1450 1449 } 1450 + } 1451 + } 1452 + } 1453 + 1454 + void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context) 1455 + { 1456 + unsigned int i; 1457 + 1458 + for (i = 0; i < dc->res_pool->pipe_count; i++) { 1459 + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1460 + struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1461 + 1462 + /* If an active, non-phantom pipe is being transitioned into a phantom 1463 + * pipe, wait for the double buffer update to complete first before we do 1464 + * ANY phantom pipe programming. 1465 + */ 1466 + if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM && 1467 + old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { 1468 + old_pipe->stream_res.tg->funcs->wait_for_state( 1469 + old_pipe->stream_res.tg, 1470 + CRTC_STATE_VBLANK); 1471 + old_pipe->stream_res.tg->funcs->wait_for_state( 1472 + old_pipe->stream_res.tg, 1473 + CRTC_STATE_VACTIVE); 1474 + } 1475 + } 1476 + for (i = 0; i < dc->res_pool->pipe_count; i++) { 1477 + struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; 1478 + 1479 + if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 1480 + // If old context or new context has phantom pipes, apply 1481 + // the phantom timings now. We can't change the phantom 1482 + // pipe configuration safely without driver acquiring 1483 + // the DMCUB lock first. 
1484 + dc->hwss.apply_ctx_to_hw(dc, context); 1485 + break; 1451 1486 } 1452 1487 } 1453 1488 }
+2
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
··· 102 102 struct dc_state *context, 103 103 bool safe_to_disable); 104 104 105 + void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context); 106 + 105 107 #endif /* __DC_HWSS_DCN32_H__ */
+1
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
··· 106 106 .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, 107 107 .get_dcc_en_bits = dcn10_get_dcc_en_bits, 108 108 .commit_subvp_config = dcn32_commit_subvp_config, 109 + .enable_phantom_streams = dcn32_enable_phantom_streams, 109 110 .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock, 110 111 .update_visual_confirm_color = dcn20_update_visual_confirm_color, 111 112 .update_phantom_vp_position = dcn32_update_phantom_vp_position,
+1
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
··· 726 726 .allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback" 727 727 .alloc_extra_way_for_cursor = true, 728 728 .min_prefetch_in_strobe_ns = 60000, // 60us 729 + .disable_unbounded_requesting = false, 729 730 }; 730 731 731 732 static const struct dc_debug_options debug_defaults_diags = {
+1
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
··· 112 112 bool dcn32_mpo_in_use(struct dc_state *context); 113 113 114 114 bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context); 115 + bool dcn32_is_center_timing(struct pipe_ctx *pipe); 115 116 116 117 struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer( 117 118 struct dc_state *state,
+15 -1
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
··· 255 255 return false; 256 256 } 257 257 258 + bool dcn32_is_center_timing(struct pipe_ctx *pipe) 259 + { 260 + bool is_center_timing = false; 261 + 262 + if (pipe->stream) { 263 + if (pipe->stream->timing.v_addressable != pipe->stream->dst.height || 264 + pipe->stream->timing.v_addressable != pipe->stream->src.height) { 265 + is_center_timing = true; 266 + } 267 + } 268 + return is_center_timing; 269 + } 270 + 258 271 /** 259 272 * ******************************************************************************************* 260 273 * dcn32_determine_det_override: Determine DET allocation for each pipe ··· 370 357 int i, pipe_cnt; 371 358 struct resource_context *res_ctx = &context->res_ctx; 372 359 struct pipe_ctx *pipe; 360 + bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || dc->debug.disable_unbounded_requesting; 373 361 374 362 for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { 375 363 ··· 387 373 */ 388 374 if (pipe_cnt == 1) { 389 375 pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE; 390 - if (pipe->plane_state && !dc->debug.disable_z9_mpc && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) { 376 + if (pipe->plane_state && !disable_unbounded_requesting && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) { 391 377 if (!is_dual_plane(pipe->plane_state->format)) { 392 378 pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE; 393 379 pipes[0].pipe.src.unbounded_req_mode = true;
+1
drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
··· 724 724 .allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback" 725 725 .alloc_extra_way_for_cursor = true, 726 726 .min_prefetch_in_strobe_ns = 60000, // 60us 727 + .disable_unbounded_requesting = false, 727 728 }; 728 729 729 730 static const struct dc_debug_options debug_defaults_diags = {
+21 -4
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
··· 691 691 * to combine this with SubVP can cause issues with the scheduling). 692 692 * - Not TMZ surface 693 693 */ 694 - if (pipe->plane_state && !pipe->top_pipe && 694 + if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && 695 695 pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface && 696 696 vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) { 697 697 while (pipe) { ··· 979 979 } 980 980 // Use ignore_msa_timing_param flag to identify as DRR 981 981 if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param) { 982 - // SUBVP + DRR case 983 - schedulable = subvp_drr_schedulable(dc, context, &context->res_ctx.pipe_ctx[vblank_index]); 982 + // SUBVP + DRR case -- don't enable SubVP + DRR for HDMI VRR cases 983 + if (context->res_ctx.pipe_ctx[vblank_index].stream->allow_freesync) 984 + schedulable = subvp_drr_schedulable(dc, context, &context->res_ctx.pipe_ctx[vblank_index]); 985 + else 986 + schedulable = false; 984 987 } else if (found) { 985 988 main_timing = &subvp_pipe->stream->timing; 986 989 phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; ··· 1172 1169 pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0); 1173 1170 *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt); 1174 1171 1172 + /* Check that vlevel requested supports pstate or not 1173 + * if not, select the lowest vlevel that supports it 1174 + */ 1175 + for (i = *vlevel; i < context->bw_ctx.dml.soc.num_states; i++) { 1176 + if (vba->DRAMClockChangeSupport[i][vba->maxMpcComb] != dm_dram_clock_change_unsupported) { 1177 + *vlevel = i; 1178 + break; 1179 + } 1180 + } 1181 + 1175 1182 if (*vlevel < context->bw_ctx.dml.soc.num_states && 1176 1183 vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != 
dm_dram_clock_change_unsupported 1177 1184 && subvp_validate_static_schedulability(dc, context, *vlevel)) { ··· 1198 1185 pipe->stream->mall_stream_config.type == SUBVP_NONE) { 1199 1186 non_subvp_pipes++; 1200 1187 // Use ignore_msa_timing_param flag to identify as DRR 1201 - if (pipe->stream->ignore_msa_timing_param) { 1188 + if (pipe->stream->ignore_msa_timing_param && pipe->stream->allow_freesync) { 1202 1189 drr_pipe_found = true; 1203 1190 drr_pipe_index = i; 1204 1191 } ··· 1564 1551 context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = 1565 1552 dm_prefetch_support_fclk_and_stutter; 1566 1553 1554 + context->bw_ctx.dml.validate_max_state = fast_validate; 1567 1555 vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); 1568 1556 1569 1557 /* Last attempt with Prefetch mode 2 (dm_prefetch_support_stutter == 3) */ ··· 1573 1559 dm_prefetch_support_stutter; 1574 1560 vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); 1575 1561 } 1562 + context->bw_ctx.dml.validate_max_state = false; 1576 1563 1577 1564 if (vlevel < context->bw_ctx.dml.soc.num_states) { 1578 1565 memset(split, 0, sizeof(split)); ··· 1660 1645 dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc); 1661 1646 memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); 1662 1647 memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); 1648 + memset(&pipe->link_res, 0, sizeof(pipe->link_res)); 1663 1649 repopulate_pipes = true; 1664 1650 } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { 1665 1651 struct pipe_ctx *top_pipe = pipe->top_pipe; ··· 1676 1660 pipe->stream = NULL; 1677 1661 memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); 1678 1662 memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); 1663 + memset(&pipe->link_res, 0, sizeof(pipe->link_res)); 1679 1664 repopulate_pipes = true; 1680 1665 } else 1681 1666 ASSERT(0); /* Should never try to merge master pipe */
+20 -17
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
··· 1707 1707 void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib) 1708 1708 { 1709 1709 struct vba_vars_st *v = &mode_lib->vba; 1710 - int i, j; 1710 + int i, j, start_state; 1711 1711 unsigned int k, m; 1712 1712 unsigned int MaximumMPCCombine; 1713 1713 unsigned int NumberOfNonCombinedSurfaceOfMaximumBandwidth; ··· 1720 1720 #endif 1721 1721 1722 1722 /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/ 1723 - 1723 + if (mode_lib->validate_max_state) 1724 + start_state = v->soc.num_states - 1; 1725 + else 1726 + start_state = 0; 1724 1727 /*Scale Ratio, taps Support Check*/ 1725 1728 1726 1729 mode_lib->vba.ScaleRatioAndTapsSupport = true; ··· 2012 2009 mode_lib->vba.MPCCombineMethodIncompatible = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsNeededForPStateChangeAndVoltage 2013 2010 && v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsPossible; 2014 2011 2015 - for (i = 0; i < v->soc.num_states; i++) { 2012 + for (i = start_state; i < v->soc.num_states; i++) { 2016 2013 for (j = 0; j < 2; j++) { 2017 2014 mode_lib->vba.TotalNumberOfActiveDPP[i][j] = 0; 2018 2015 mode_lib->vba.TotalAvailablePipesSupport[i][j] = true; ··· 2289 2286 } 2290 2287 } 2291 2288 2292 - for (i = 0; i < v->soc.num_states; ++i) { 2289 + for (i = start_state; i < v->soc.num_states; ++i) { 2293 2290 mode_lib->vba.ExceededMultistreamSlots[i] = false; 2294 2291 for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { 2295 2292 if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == k) { ··· 2389 2386 } 2390 2387 } 2391 2388 2392 - for (i = 0; i < v->soc.num_states; ++i) { 2389 + for (i = start_state; i < v->soc.num_states; ++i) { 2393 2390 mode_lib->vba.DTBCLKRequiredMoreThanSupported[i] = false; 2394 2391 for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { 2395 2392 if (mode_lib->vba.BlendingAndTiming[k] == k ··· 2406 2403 } 2407 2404 } 2408 2405 2409 - for 
(i = 0; i < v->soc.num_states; ++i) { 2406 + for (i = start_state; i < v->soc.num_states; ++i) { 2410 2407 mode_lib->vba.ODMCombine2To1SupportCheckOK[i] = true; 2411 2408 mode_lib->vba.ODMCombine4To1SupportCheckOK[i] = true; 2412 2409 for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { ··· 2424 2421 } 2425 2422 } 2426 2423 2427 - for (i = 0; i < v->soc.num_states; i++) { 2424 + for (i = start_state; i < v->soc.num_states; i++) { 2428 2425 mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = false; 2429 2426 for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) { 2430 2427 if (mode_lib->vba.BlendingAndTiming[k] == k) { ··· 2461 2458 /* Check DSC Unit and Slices Support */ 2462 2459 v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0; 2463 2460 2464 - for (i = 0; i < v->soc.num_states; ++i) { 2461 + for (i = start_state; i < v->soc.num_states; ++i) { 2465 2462 mode_lib->vba.NotEnoughDSCUnits[i] = false; 2466 2463 mode_lib->vba.NotEnoughDSCSlices[i] = false; 2467 2464 v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0; ··· 2496 2493 } 2497 2494 2498 2495 /*DSC Delay per state*/ 2499 - for (i = 0; i < v->soc.num_states; ++i) { 2496 + for (i = start_state; i < v->soc.num_states; ++i) { 2500 2497 for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { 2501 2498 mode_lib->vba.DSCDelayPerState[i][k] = dml32_DSCDelayRequirement( 2502 2499 mode_lib->vba.RequiresDSC[i][k], mode_lib->vba.ODMCombineEnablePerState[i][k], ··· 2523 2520 2524 2521 //Calculate Swath, DET Configuration, DCFCLKDeepSleep 2525 2522 // 2526 - for (i = 0; i < (int) v->soc.num_states; ++i) { 2523 + for (i = start_state; i < (int) v->soc.num_states; ++i) { 2527 2524 for (j = 0; j <= 1; ++j) { 2528 2525 for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { 2529 2526 mode_lib->vba.RequiredDPPCLKThisState[k] = mode_lib->vba.RequiredDPPCLK[i][j][k]; ··· 2658 2655 mode_lib->vba.SurfaceSizeInMALL, 2659 2656 
&mode_lib->vba.ExceededMALLSize); 2660 2657 2661 - for (i = 0; i < v->soc.num_states; i++) { 2658 + for (i = start_state; i < v->soc.num_states; i++) { 2662 2659 for (j = 0; j < 2; j++) { 2663 2660 for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) { 2664 2661 mode_lib->vba.swath_width_luma_ub_this_state[k] = ··· 2885 2882 } 2886 2883 2887 2884 //Calculate Return BW 2888 - for (i = 0; i < (int) v->soc.num_states; ++i) { 2885 + for (i = start_state; i < (int) v->soc.num_states; ++i) { 2889 2886 for (j = 0; j <= 1; ++j) { 2890 2887 for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) { 2891 2888 if (mode_lib->vba.BlendingAndTiming[k] == k) { ··· 2964 2961 &mode_lib->vba.MinPrefetchMode, 2965 2962 &mode_lib->vba.MaxPrefetchMode); 2966 2963 2967 - for (i = 0; i < (int) v->soc.num_states; ++i) { 2964 + for (i = start_state; i < (int) v->soc.num_states; ++i) { 2968 2965 for (j = 0; j <= 1; ++j) 2969 2966 mode_lib->vba.DCFCLKState[i][j] = mode_lib->vba.DCFCLKPerState[i]; 2970 2967 } ··· 3086 3083 mode_lib->vba.DCFCLKState); 3087 3084 } // UseMinimumRequiredDCFCLK == true 3088 3085 3089 - for (i = 0; i < (int) v->soc.num_states; ++i) { 3086 + for (i = start_state; i < (int) v->soc.num_states; ++i) { 3090 3087 for (j = 0; j <= 1; ++j) { 3091 3088 mode_lib->vba.ReturnBWPerState[i][j] = dml32_get_return_bw_mbps(&mode_lib->vba.soc, i, 3092 3089 mode_lib->vba.HostVMEnable, mode_lib->vba.DCFCLKState[i][j], ··· 3095 3092 } 3096 3093 3097 3094 //Re-ordering Buffer Support Check 3098 - for (i = 0; i < (int) v->soc.num_states; ++i) { 3095 + for (i = start_state; i < (int) v->soc.num_states; ++i) { 3099 3096 for (j = 0; j <= 1; ++j) { 3100 3097 if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024 3101 3098 / mode_lib->vba.ReturnBWPerState[i][j] ··· 3117 3114 + mode_lib->vba.ReadBandwidthChroma[k]; 3118 3115 } 3119 3116 3120 - for (i = 0; i < (int) v->soc.num_states; ++i) { 3117 + for (i = start_state; i < (int) 
v->soc.num_states; ++i) { 3121 3118 for (j = 0; j <= 1; ++j) { 3122 3119 mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][j] = 3123 3120 dml_min3(mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKState[i][j] ··· 3141 3138 3142 3139 /* Prefetch Check */ 3143 3140 3144 - for (i = 0; i < (int) v->soc.num_states; ++i) { 3141 + for (i = start_state; i < (int) v->soc.num_states; ++i) { 3145 3142 for (j = 0; j <= 1; ++j) { 3146 3143 3147 3144 mode_lib->vba.TimeCalc = 24 / mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j];
+3 -3
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
··· 6257 6257 double SwathSizePerSurfaceC[DC__NUM_DPP__MAX]; 6258 6258 bool NotEnoughDETSwathFillLatencyHiding = false; 6259 6259 6260 - /* calculate sum of single swath size for all pipes in bytes*/ 6260 + /* calculate sum of single swath size for all pipes in bytes */ 6261 6261 for (k = 0; k < NumberOfActiveSurfaces; k++) { 6262 - SwathSizePerSurfaceY[k] += SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k]; 6262 + SwathSizePerSurfaceY[k] = SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k]; 6263 6263 6264 6264 if (SwathHeightC[k] != 0) 6265 - SwathSizePerSurfaceC[k] += SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k]; 6265 + SwathSizePerSurfaceC[k] = SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k]; 6266 6266 else 6267 6267 SwathSizePerSurfaceC[k] = 0; 6268 6268
+1 -1
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
··· 136 136 .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, 137 137 .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, 138 138 .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, 139 - .pct_ideal_sdp_bw_after_urgent = 100.0, 139 + .pct_ideal_sdp_bw_after_urgent = 90.0, 140 140 .pct_ideal_fabric_bw_after_urgent = 67.0, 141 141 .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0, 142 142 .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
+1
drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
··· 91 91 struct dal_logger *logger; 92 92 struct dml_funcs funcs; 93 93 struct _vcs_dpi_display_e2e_pipe_params_st dml_pipe_state[6]; 94 + bool validate_max_state; 94 95 }; 95 96 96 97 void dml_init_instance(struct display_mode_lib *lib,
+9 -9
drivers/gpu/drm/amd/display/dc/inc/core_types.h
··· 547 547 struct resource_context res_ctx; 548 548 549 549 /** 550 - * @bw_ctx: The output from bandwidth and watermark calculations and the DML 551 - * 552 - * Each context must have its own instance of VBA, and in order to 553 - * initialize and obtain IP and SOC, the base DML instance from DC is 554 - * initially copied into every context. 555 - */ 556 - struct bw_context bw_ctx; 557 - 558 - /** 559 550 * @pp_display_cfg: PowerPlay clocks and settings 560 551 * Note: this is a big struct, do *not* put on stack! 561 552 */ ··· 559 568 struct dcn_bw_internal_vars dcn_bw_vars; 560 569 561 570 struct clk_mgr *clk_mgr; 571 + 572 + /** 573 + * @bw_ctx: The output from bandwidth and watermark calculations and the DML 574 + * 575 + * Each context must have its own instance of VBA, and in order to 576 + * initialize and obtain IP and SOC, the base DML instance from DC is 577 + * initially copied into every context. 578 + */ 579 + struct bw_context bw_ctx; 562 580 563 581 /** 564 582 * @refcount: refcount reference
+1
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
··· 266 266 void (*apply_update_flags_for_phantom)(struct pipe_ctx *phantom_pipe); 267 267 268 268 void (*commit_subvp_config)(struct dc *dc, struct dc_state *context); 269 + void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context); 269 270 void (*subvp_pipe_control_lock)(struct dc *dc, 270 271 struct dc_state *context, 271 272 bool lock,
+9
drivers/gpu/drm/amd/display/dc/inc/resource.h
··· 236 236 struct pipe_ctx *pri_pipe, 237 237 struct pipe_ctx *sec_pipe, 238 238 bool odm); 239 + 240 + /* A test harness interface that modifies dp encoder resources in the given dc 241 + * state and bypasses the need to revalidate. The interface assumes that the 242 + * test harness interface is called with pre-validated link config stored in the 243 + * pipe_ctx and updates dp encoder resources according to the link config. 244 + */ 245 + enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, 246 + struct dc_state *context, 247 + struct pipe_ctx *pipe_ctx); 239 248 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
-5
drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
··· 136 136 .ack = NULL 137 137 }; 138 138 139 - static const struct irq_source_info_funcs dmub_outbox_irq_info_funcs = { 140 - .set = NULL, 141 - .ack = NULL 142 - }; 143 - 144 139 #undef BASE_INNER 145 140 #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg 146 141
+3
drivers/gpu/drm/amd/display/dmub/dmub_srv.h
··· 126 126 DMUB_NOTIFICATION_HPD, 127 127 DMUB_NOTIFICATION_HPD_IRQ, 128 128 DMUB_NOTIFICATION_SET_CONFIG_REPLY, 129 + DMUB_NOTIFICATION_DPIA_NOTIFICATION, 129 130 DMUB_NOTIFICATION_MAX 130 131 }; 131 132 ··· 454 453 * @pending_notification: Indicates there are other pending notifications 455 454 * @aux_reply: aux reply 456 455 * @hpd_status: hpd status 456 + * @bw_alloc_reply: BW Allocation reply from CM/DPIA 457 457 */ 458 458 struct dmub_notification { 459 459 enum dmub_notification_type type; ··· 465 463 struct aux_reply_data aux_reply; 466 464 enum dp_hpd_status hpd_status; 467 465 enum set_config_status sc_status; 466 + struct dpia_notification_reply_data bw_alloc_reply; 468 467 }; 469 468 }; 470 469
+83
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
··· 770 770 * Command type used for SET_CONFIG Reply notification 771 771 */ 772 772 DMUB_OUT_CMD__SET_CONFIG_REPLY = 3, 773 + DMUB_OUT_CMD__DPIA_NOTIFICATION = 5 773 774 }; 774 775 775 776 /* DMUB_CMD__DPIA command sub-types. */ ··· 1515 1514 * Alignment only 1516 1515 */ 1517 1516 uint8_t pad; 1517 + }; 1518 + 1519 + /** 1520 + * DPIA NOTIFICATION Response Type 1521 + */ 1522 + enum dpia_notify_bw_alloc_status { 1523 + 1524 + DPIA_BW_REQ_FAILED = 0, 1525 + DPIA_BW_REQ_SUCCESS, 1526 + DPIA_EST_BW_CHANGED, 1527 + DPIA_BW_ALLOC_CAPS_CHANGED 1528 + }; 1529 + 1530 + /* DMUB_OUT_CMD__DPIA_NOTIFY Reply command - OutBox Cmd */ 1531 + /** 1532 + * Data passed to driver from FW in a DMUB_OUT_CMD__DPIA_NOTIFY command. 1533 + */ 1534 + struct dpia_notification_reply_data { 1535 + uint8_t allocated_bw; 1536 + uint8_t estimated_bw; 1537 + }; 1538 + 1539 + struct dpia_notification_common { 1540 + bool shared; 1541 + }; 1542 + 1543 + struct dpia_bw_allocation_notify_data { 1544 + union { 1545 + struct { 1546 + uint16_t cm_bw_alloc_support: 1; /**< USB4 CM BW Allocation mode support */ 1547 + uint16_t bw_request_failed: 1; /**< BW_Request_Failed */ 1548 + uint16_t bw_request_succeeded: 1; /**< BW_Request_Succeeded */ 1549 + uint16_t est_bw_changed: 1; /**< Estimated_BW changed */ 1550 + uint16_t bw_alloc_cap_changed: 1; /**< BW_Allocation_Capabiity_Changed */ 1551 + uint16_t reserved: 11; 1552 + } bits; 1553 + uint16_t flags; 1554 + }; 1555 + uint8_t cm_id; /**< CM ID */ 1556 + uint8_t group_id; /**< Group ID */ 1557 + uint8_t granularity; /**< BW Allocation Granularity */ 1558 + uint8_t estimated_bw; /**< Estimated_BW */ 1559 + uint8_t allocated_bw; /**< Allocated_BW */ 1560 + uint8_t reserved; 1561 + }; 1562 + 1563 + union dpia_notification_data { 1564 + struct dpia_notification_common common_data; 1565 + struct dpia_bw_allocation_notify_data dpia_bw_alloc; /**< Used for DPIA BW Allocation mode notification */ 1566 + }; 1567 + 1568 + enum dmub_cmd_dpia_notification_type { 1569 
+ DPIA_NOTIFY__BW_ALLOCATION = 0, 1570 + }; 1571 + 1572 + struct dpia_notification_header { 1573 + uint8_t instance; /**< DPIA Instance */ 1574 + uint8_t reserved[3]; 1575 + enum dmub_cmd_dpia_notification_type type; /**< DPIA notification type */ 1576 + }; 1577 + 1578 + struct dpia_notification_payload { 1579 + struct dpia_notification_header header; 1580 + union dpia_notification_data data; /**< DPIA notification data */ 1581 + }; 1582 + 1583 + /** 1584 + * Definition of a DMUB_OUT_CMD__DPIA_NOTIFY command. 1585 + */ 1586 + struct dmub_rb_cmd_dpia_notification { 1587 + /** 1588 + * Command header. 1589 + */ 1590 + struct dmub_cmd_header header; /**< DPIA notification header */ 1591 + /** 1592 + * Data passed to driver from FW in a DMUB_OUT_CMD__DPIA_NOTIFY command. 1593 + */ 1594 + struct dpia_notification_payload payload; /**< DPIA notification payload */ 1518 1595 }; 1519 1596 1520 1597 /** ··· 3501 3422 * SET_CONFIG reply command. 3502 3423 */ 3503 3424 struct dmub_rb_cmd_dp_set_config_reply set_config_reply; 3425 + /** 3426 + * BW ALLOCATION notification command. 3427 + */ 3428 + struct dmub_rb_cmd_dpia_notification dpia_notify; 3504 3429 }; 3505 3430 #pragma pack(pop) 3506 3431
+21
drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
··· 92 92 notify->link_index = cmd.set_config_reply.set_config_reply_control.instance; 93 93 notify->sc_status = cmd.set_config_reply.set_config_reply_control.status; 94 94 break; 95 + case DMUB_OUT_CMD__DPIA_NOTIFICATION: 96 + notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION; 97 + notify->link_index = cmd.dpia_notify.payload.header.instance; 98 + 99 + if (cmd.dpia_notify.payload.header.type == DPIA_NOTIFY__BW_ALLOCATION) { 100 + 101 + if (cmd.dpia_notify.payload.data.dpia_bw_alloc.bits.bw_request_failed) { 102 + notify->result = DPIA_BW_REQ_FAILED; 103 + } else if (cmd.dpia_notify.payload.data.dpia_bw_alloc.bits.bw_request_succeeded) { 104 + notify->result = DPIA_BW_REQ_SUCCESS; 105 + notify->bw_alloc_reply.allocated_bw = 106 + cmd.dpia_notify.payload.data.dpia_bw_alloc.allocated_bw; 107 + } else if (cmd.dpia_notify.payload.data.dpia_bw_alloc.bits.est_bw_changed) { 108 + notify->result = DPIA_EST_BW_CHANGED; 109 + notify->bw_alloc_reply.estimated_bw = 110 + cmd.dpia_notify.payload.data.dpia_bw_alloc.estimated_bw; 111 + } else if (cmd.dpia_notify.payload.data.dpia_bw_alloc.bits.bw_alloc_cap_changed) { 112 + notify->result = DPIA_BW_ALLOC_CAPS_CHANGED; 113 + } 114 + } 115 + break; 95 116 default: 96 117 notify->type = DMUB_NOTIFICATION_NO_DATA; 97 118 break;
+4 -1
drivers/gpu/drm/amd/display/include/dpcd_defs.h
··· 88 88 PHY_TEST_PATTERN_PRBS23 = 0x30, 89 89 PHY_TEST_PATTERN_PRBS31 = 0x38, 90 90 PHY_TEST_PATTERN_264BIT_CUSTOM = 0x40, 91 - PHY_TEST_PATTERN_SQUARE_PULSE = 0x48, 91 + PHY_TEST_PATTERN_SQUARE = 0x48, 92 + PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED = 0x49, 93 + PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED = 0x4A, 94 + PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED = 0x4B, 92 95 }; 93 96 94 97 enum dpcd_test_dyn_range {
+30
drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h
··· 1 + /* 2 + * Copyright (C) 2022 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included 12 + * in all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 15 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN 18 + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 19 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 20 + */ 21 + 22 + #ifndef _df_4_3_OFFSET_HEADER 23 + #define _df_4_3_OFFSET_HEADER 24 + 25 + #define regDF_CS_UMC_AON0_HardwareAssertMaskLow 0x0e3e 26 + #define regDF_CS_UMC_AON0_HardwareAssertMaskLow_BASE_IDX 4 27 + #define regDF_NCS_PG0_HardwareAssertMaskHigh 0x0e3f 28 + #define regDF_NCS_PG0_HardwareAssertMaskHigh_BASE_IDX 4 29 + 30 + #endif
+157
drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h
··· 1 + /* 2 + * Copyright (C) 2022 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included 12 + * in all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 15 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN 18 + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 19 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
20 + */ 21 + 22 + #ifndef _df_4_3_SH_MASK_HEADER 23 + #define _df_4_3_SH_MASK_HEADER 24 + 25 + //DF_CS_UMC_AON0_HardwareAssertMaskLow 26 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0__SHIFT 0x0 27 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk1__SHIFT 0x1 28 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk2__SHIFT 0x2 29 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk3__SHIFT 0x3 30 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk4__SHIFT 0x4 31 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk5__SHIFT 0x5 32 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk6__SHIFT 0x6 33 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk7__SHIFT 0x7 34 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk8__SHIFT 0x8 35 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk9__SHIFT 0x9 36 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk10__SHIFT 0xa 37 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk11__SHIFT 0xb 38 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk12__SHIFT 0xc 39 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk13__SHIFT 0xd 40 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk14__SHIFT 0xe 41 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk15__SHIFT 0xf 42 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk16__SHIFT 0x10 43 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk17__SHIFT 0x11 44 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk18__SHIFT 0x12 45 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk19__SHIFT 0x13 46 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk20__SHIFT 0x14 47 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk21__SHIFT 0x15 48 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk22__SHIFT 0x16 49 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk23__SHIFT 0x17 50 + #define 
DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk24__SHIFT 0x18 51 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk25__SHIFT 0x19 52 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk26__SHIFT 0x1a 53 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk27__SHIFT 0x1b 54 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk28__SHIFT 0x1c 55 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk29__SHIFT 0x1d 56 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk30__SHIFT 0x1e 57 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk31__SHIFT 0x1f 58 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0_MASK 0x00000001L 59 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk1_MASK 0x00000002L 60 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk2_MASK 0x00000004L 61 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk3_MASK 0x00000008L 62 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk4_MASK 0x00000010L 63 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk5_MASK 0x00000020L 64 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk6_MASK 0x00000040L 65 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk7_MASK 0x00000080L 66 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk8_MASK 0x00000100L 67 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk9_MASK 0x00000200L 68 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk10_MASK 0x00000400L 69 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk11_MASK 0x00000800L 70 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk12_MASK 0x00001000L 71 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk13_MASK 0x00002000L 72 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk14_MASK 0x00004000L 73 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk15_MASK 0x00008000L 74 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk16_MASK 0x00010000L 75 + #define 
DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk17_MASK 0x00020000L 76 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk18_MASK 0x00040000L 77 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk19_MASK 0x00080000L 78 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk20_MASK 0x00100000L 79 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk21_MASK 0x00200000L 80 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk22_MASK 0x00400000L 81 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk23_MASK 0x00800000L 82 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk24_MASK 0x01000000L 83 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk25_MASK 0x02000000L 84 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk26_MASK 0x04000000L 85 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk27_MASK 0x08000000L 86 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk28_MASK 0x10000000L 87 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk29_MASK 0x20000000L 88 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk30_MASK 0x40000000L 89 + #define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk31_MASK 0x80000000L 90 + 91 + //DF_NCS_PG0_HardwareAssertMaskHigh 92 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk0__SHIFT 0x0 93 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk1__SHIFT 0x1 94 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk2__SHIFT 0x2 95 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk3__SHIFT 0x3 96 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk4__SHIFT 0x4 97 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk5__SHIFT 0x5 98 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk6__SHIFT 0x6 99 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk7__SHIFT 0x7 100 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk8__SHIFT 0x8 101 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk9__SHIFT 0x9 102 + #define 
DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk10__SHIFT 0xa 103 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk11__SHIFT 0xb 104 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk12__SHIFT 0xc 105 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk13__SHIFT 0xd 106 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk14__SHIFT 0xe 107 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk15__SHIFT 0xf 108 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk16__SHIFT 0x10 109 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk17__SHIFT 0x11 110 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk18__SHIFT 0x12 111 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk19__SHIFT 0x13 112 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk20__SHIFT 0x14 113 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk21__SHIFT 0x15 114 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk22__SHIFT 0x16 115 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk23__SHIFT 0x17 116 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk24__SHIFT 0x18 117 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk25__SHIFT 0x19 118 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk26__SHIFT 0x1a 119 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk27__SHIFT 0x1b 120 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk28__SHIFT 0x1c 121 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk29__SHIFT 0x1d 122 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk30__SHIFT 0x1e 123 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk31__SHIFT 0x1f 124 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk0_MASK 0x00000001L 125 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk1_MASK 0x00000002L 126 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk2_MASK 0x00000004L 127 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk3_MASK 0x00000008L 128 + #define 
DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk4_MASK 0x00000010L 129 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk5_MASK 0x00000020L 130 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk6_MASK 0x00000040L 131 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk7_MASK 0x00000080L 132 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk8_MASK 0x00000100L 133 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk9_MASK 0x00000200L 134 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk10_MASK 0x00000400L 135 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk11_MASK 0x00000800L 136 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk12_MASK 0x00001000L 137 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk13_MASK 0x00002000L 138 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk14_MASK 0x00004000L 139 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk15_MASK 0x00008000L 140 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk16_MASK 0x00010000L 141 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk17_MASK 0x00020000L 142 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk18_MASK 0x00040000L 143 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk19_MASK 0x00080000L 144 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk20_MASK 0x00100000L 145 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk21_MASK 0x00200000L 146 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk22_MASK 0x00400000L 147 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk23_MASK 0x00800000L 148 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk24_MASK 0x01000000L 149 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk25_MASK 0x02000000L 150 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk26_MASK 0x04000000L 151 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk27_MASK 0x08000000L 152 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk28_MASK 0x10000000L 153 + #define 
DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk29_MASK 0x20000000L 154 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk30_MASK 0x40000000L 155 + #define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk31_MASK 0x80000000L 156 + 157 + #endif
+2
drivers/gpu/drm/amd/include/kgd_pp_interface.h
··· 139 139 AMDGPU_PP_SENSOR_MIN_FAN_RPM, 140 140 AMDGPU_PP_SENSOR_MAX_FAN_RPM, 141 141 AMDGPU_PP_SENSOR_VCN_POWER_STATE, 142 + AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK, 143 + AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK, 142 144 }; 143 145 144 146 enum amd_pp_task {
+8 -2
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
··· 769 769 770 770 switch (idx) { 771 771 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK: 772 - *((uint32_t *)value) = hwmgr->pstate_sclk; 772 + *((uint32_t *)value) = hwmgr->pstate_sclk * 100; 773 773 return 0; 774 774 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK: 775 - *((uint32_t *)value) = hwmgr->pstate_mclk; 775 + *((uint32_t *)value) = hwmgr->pstate_mclk * 100; 776 + return 0; 777 + case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK: 778 + *((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100; 779 + return 0; 780 + case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK: 781 + *((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100; 776 782 return 0; 777 783 case AMDGPU_PP_SENSOR_MIN_FAN_RPM: 778 784 *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
+13 -3
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
··· 375 375 return 0; 376 376 } 377 377 378 + static void smu10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr) 379 + { 380 + hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK; 381 + hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK; 382 + 383 + smum_send_msg_to_smc(hwmgr, 384 + PPSMC_MSG_GetMaxGfxclkFrequency, 385 + &hwmgr->pstate_sclk_peak); 386 + hwmgr->pstate_mclk_peak = SMU10_UMD_PSTATE_PEAK_FCLK; 387 + } 388 + 378 389 static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 379 390 { 380 391 struct amdgpu_device *adev = hwmgr->adev; ··· 408 397 if (ret) 409 398 return ret; 410 399 } 400 + 401 + smu10_populate_umdpstate_clocks(hwmgr); 411 402 412 403 return 0; 413 404 } ··· 586 573 hwmgr->platform_descriptor.clockStep.memoryClock = 500; 587 574 588 575 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; 589 - 590 - hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100; 591 - hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100; 592 576 593 577 /* enable the pp_od_clk_voltage sysfs file */ 594 578 hwmgr->od_enabled = 1;
+63 -13
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
··· 1501 1501 return ret; 1502 1502 } 1503 1503 1504 + static void smu7_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr) 1505 + { 1506 + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1507 + struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table; 1508 + struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk = 1509 + hwmgr->dyn_state.vddc_dependency_on_sclk; 1510 + struct phm_ppt_v1_information *table_info = 1511 + (struct phm_ppt_v1_information *)(hwmgr->pptable); 1512 + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk = 1513 + table_info->vdd_dep_on_sclk; 1514 + int32_t tmp_sclk, count, percentage; 1515 + 1516 + if (golden_dpm_table->mclk_table.count == 1) { 1517 + percentage = 70; 1518 + hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[0].value; 1519 + } else { 1520 + percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value / 1521 + golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; 1522 + hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value; 1523 + } 1524 + 1525 + tmp_sclk = hwmgr->pstate_mclk * percentage / 100; 1526 + 1527 + if (hwmgr->pp_table_version == PP_TABLE_V0) { 1528 + for (count = vddc_dependency_on_sclk->count - 1; count >= 0; count--) { 1529 + if (tmp_sclk >= vddc_dependency_on_sclk->entries[count].clk) { 1530 + hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[count].clk; 1531 + break; 1532 + } 1533 + } 1534 + if (count < 0) 1535 + hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[0].clk; 1536 + 1537 + hwmgr->pstate_sclk_peak = 1538 + vddc_dependency_on_sclk->entries[vddc_dependency_on_sclk->count - 1].clk; 1539 + } else if (hwmgr->pp_table_version == PP_TABLE_V1) { 1540 + for (count = vdd_dep_on_sclk->count - 1; count >= 0; count--) { 1541 + if (tmp_sclk >= vdd_dep_on_sclk->entries[count].clk) { 1542 + hwmgr->pstate_sclk = 
vdd_dep_on_sclk->entries[count].clk; 1543 + break; 1544 + } 1545 + } 1546 + if (count < 0) 1547 + hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[0].clk; 1548 + 1549 + hwmgr->pstate_sclk_peak = 1550 + vdd_dep_on_sclk->entries[vdd_dep_on_sclk->count - 1].clk; 1551 + } 1552 + 1553 + hwmgr->pstate_mclk_peak = 1554 + golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; 1555 + 1556 + /* make sure the output is in Mhz */ 1557 + hwmgr->pstate_sclk /= 100; 1558 + hwmgr->pstate_mclk /= 100; 1559 + hwmgr->pstate_sclk_peak /= 100; 1560 + hwmgr->pstate_mclk_peak /= 100; 1561 + } 1562 + 1504 1563 static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 1505 1564 { 1506 1565 int tmp_result = 0; ··· 1683 1624 tmp_result = smu7_pcie_performance_request(hwmgr); 1684 1625 PP_ASSERT_WITH_CODE((0 == tmp_result), 1685 1626 "pcie performance request failed!", result = tmp_result); 1627 + 1628 + smu7_populate_umdpstate_clocks(hwmgr); 1686 1629 1687 1630 return 0; 1688 1631 } ··· 3204 3143 for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; 3205 3144 count >= 0; count--) { 3206 3145 if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) { 3207 - tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk; 3208 3146 *sclk_mask = count; 3209 3147 break; 3210 3148 } 3211 3149 } 3212 - if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 3150 + if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) 3213 3151 *sclk_mask = 0; 3214 - tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk; 3215 - } 3216 3152 3217 3153 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 3218 3154 *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; ··· 3219 3161 3220 3162 for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) { 3221 3163 if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) { 3222 - tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk; 3223 
3164 *sclk_mask = count; 3224 3165 break; 3225 3166 } 3226 3167 } 3227 - if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { 3168 + if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) 3228 3169 *sclk_mask = 0; 3229 - tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; 3230 - } 3231 3170 3232 3171 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) 3233 3172 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; ··· 3236 3181 *mclk_mask = golden_dpm_table->mclk_table.count - 1; 3237 3182 3238 3183 *pcie_mask = data->dpm_table.pcie_speed_table.count - 1; 3239 - hwmgr->pstate_sclk = tmp_sclk; 3240 - hwmgr->pstate_mclk = tmp_mclk; 3241 3184 3242 3185 return 0; 3243 3186 } ··· 3247 3194 uint32_t sclk_mask = 0; 3248 3195 uint32_t mclk_mask = 0; 3249 3196 uint32_t pcie_mask = 0; 3250 - 3251 - if (hwmgr->pstate_sclk == 0) 3252 - smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); 3253 3197 3254 3198 switch (level) { 3255 3199 case AMD_DPM_FORCED_LEVEL_HIGH:
+14 -2
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
··· 1016 1016 data->acp_boot_level = 0xff; 1017 1017 } 1018 1018 1019 + static void smu8_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr) 1020 + { 1021 + struct phm_clock_voltage_dependency_table *table = 1022 + hwmgr->dyn_state.vddc_dependency_on_sclk; 1023 + 1024 + hwmgr->pstate_sclk = table->entries[0].clk / 100; 1025 + hwmgr->pstate_mclk = 0; 1026 + 1027 + hwmgr->pstate_sclk_peak = table->entries[table->count - 1].clk / 100; 1028 + hwmgr->pstate_mclk_peak = 0; 1029 + } 1030 + 1019 1031 static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 1020 1032 { 1021 1033 smu8_program_voting_clients(hwmgr); ··· 1035 1023 return -EINVAL; 1036 1024 smu8_program_bootup_state(hwmgr); 1037 1025 smu8_reset_acp_boot_level(hwmgr); 1026 + 1027 + smu8_populate_umdpstate_clocks(hwmgr); 1038 1028 1039 1029 return 0; 1040 1030 } ··· 1181 1167 1182 1168 data->sclk_dpm.soft_min_clk = table->entries[0].clk; 1183 1169 data->sclk_dpm.hard_min_clk = table->entries[0].clk; 1184 - hwmgr->pstate_sclk = table->entries[0].clk; 1185 - hwmgr->pstate_mclk = 0; 1186 1170 1187 1171 level = smu8_get_max_sclk_level(hwmgr) - 1; 1188 1172
+26 -5
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
··· 3008 3008 return 0; 3009 3009 } 3010 3010 3011 + static void vega10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr) 3012 + { 3013 + struct phm_ppt_v2_information *table_info = 3014 + (struct phm_ppt_v2_information *)(hwmgr->pptable); 3015 + 3016 + if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL && 3017 + table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) { 3018 + hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk; 3019 + hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk; 3020 + } else { 3021 + hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; 3022 + hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[0].clk; 3023 + } 3024 + 3025 + hwmgr->pstate_sclk_peak = table_info->vdd_dep_on_sclk->entries[table_info->vdd_dep_on_sclk->count - 1].clk; 3026 + hwmgr->pstate_mclk_peak = table_info->vdd_dep_on_mclk->entries[table_info->vdd_dep_on_mclk->count - 1].clk; 3027 + 3028 + /* make sure the output is in Mhz */ 3029 + hwmgr->pstate_sclk /= 100; 3030 + hwmgr->pstate_mclk /= 100; 3031 + hwmgr->pstate_sclk_peak /= 100; 3032 + hwmgr->pstate_mclk_peak /= 100; 3033 + } 3034 + 3011 3035 static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 3012 3036 { 3013 3037 struct vega10_hwmgr *data = hwmgr->backend; ··· 3105 3081 "Failed to enable ULV!", 3106 3082 result = tmp_result); 3107 3083 } 3084 + 3085 + vega10_populate_umdpstate_clocks(hwmgr); 3108 3086 3109 3087 return result; 3110 3088 } ··· 4195 4169 *sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL; 4196 4170 *soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL; 4197 4171 *mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL; 4198 - hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk; 4199 - hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk; 4200 4172 } 4201 4173 4202 4174 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) 
{ ··· 4304 4280 uint32_t sclk_mask = 0; 4305 4281 uint32_t mclk_mask = 0; 4306 4282 uint32_t soc_mask = 0; 4307 - 4308 - if (hwmgr->pstate_sclk == 0) 4309 - vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask); 4310 4283 4311 4284 switch (level) { 4312 4285 case AMD_DPM_FORCED_LEVEL_HIGH:
+22
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
··· 1026 1026 return 0; 1027 1027 } 1028 1028 1029 + static void vega12_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr) 1030 + { 1031 + struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); 1032 + struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table); 1033 + struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table); 1034 + 1035 + if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL && 1036 + mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) { 1037 + hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value; 1038 + hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value; 1039 + } else { 1040 + hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[0].value; 1041 + hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[0].value; 1042 + } 1043 + 1044 + hwmgr->pstate_sclk_peak = gfx_dpm_table->dpm_levels[gfx_dpm_table->count].value; 1045 + hwmgr->pstate_mclk_peak = mem_dpm_table->dpm_levels[mem_dpm_table->count].value; 1046 + } 1047 + 1029 1048 static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 1030 1049 { 1031 1050 int tmp_result, result = 0; ··· 1096 1077 PP_ASSERT_WITH_CODE(!result, 1097 1078 "Failed to setup default DPM tables!", 1098 1079 return result); 1080 + 1081 + vega12_populate_umdpstate_clocks(hwmgr); 1082 + 1099 1083 return result; 1100 1084 } 1101 1085
+7 -13
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
··· 1555 1555 return 0; 1556 1556 } 1557 1557 1558 - static int vega20_populate_umdpstate_clocks( 1559 - struct pp_hwmgr *hwmgr) 1558 + static void vega20_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr) 1560 1559 { 1561 1560 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 1562 1561 struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table); 1563 1562 struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table); 1564 1563 1565 - hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value; 1566 - hwmgr->pstate_mclk = mem_table->dpm_levels[0].value; 1567 - 1568 1564 if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL && 1569 1565 mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) { 1570 1566 hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value; 1571 1567 hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value; 1568 + } else { 1569 + hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value; 1570 + hwmgr->pstate_mclk = mem_table->dpm_levels[0].value; 1572 1571 } 1573 1572 1574 - hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100; 1575 - hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100; 1576 - 1577 - return 0; 1573 + hwmgr->pstate_sclk_peak = gfx_table->dpm_levels[gfx_table->count - 1].value; 1574 + hwmgr->pstate_mclk_peak = mem_table->dpm_levels[mem_table->count - 1].value; 1578 1575 } 1579 1576 1580 1577 static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr, ··· 1750 1753 "[EnableDPMTasks] Failed to initialize odn settings!", 1751 1754 return result); 1752 1755 1753 - result = vega20_populate_umdpstate_clocks(hwmgr); 1754 - PP_ASSERT_WITH_CODE(!result, 1755 - "[EnableDPMTasks] Failed to populate umdpstate clocks!", 1756 - return result); 1756 + vega20_populate_umdpstate_clocks(hwmgr); 1757 1757 1758 1758 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit, 1759 1759 POWER_SOURCE_AC << 16, &hwmgr->default_power_limit);
+2
drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
··· 809 809 uint32_t workload_prority[Workload_Policy_Max]; 810 810 uint32_t workload_setting[Workload_Policy_Max]; 811 811 bool gfxoff_state_changed_by_workload; 812 + uint32_t pstate_sclk_peak; 813 + uint32_t pstate_mclk_peak; 812 814 }; 813 815 814 816 int hwmgr_early_init(struct pp_hwmgr *hwmgr);
+4 -6
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
··· 250 250 251 251 /* allocate space for watermarks table */ 252 252 r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, 253 - sizeof(Watermarks_t), 254 - PAGE_SIZE, 255 - AMDGPU_GEM_DOMAIN_VRAM, 253 + sizeof(Watermarks_t), PAGE_SIZE, 254 + AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT, 256 255 &priv->smu_tables.entry[SMU10_WMTABLE].handle, 257 256 &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr, 258 257 &priv->smu_tables.entry[SMU10_WMTABLE].table); ··· 265 266 266 267 /* allocate space for watermarks table */ 267 268 r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev, 268 - sizeof(DpmClocks_t), 269 - PAGE_SIZE, 270 - AMDGPU_GEM_DOMAIN_VRAM, 269 + sizeof(DpmClocks_t), PAGE_SIZE, 270 + AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT, 271 271 &priv->smu_tables.entry[SMU10_CLOCKTABLE].handle, 272 272 &priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr, 273 273 &priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
+8
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 2473 2473 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100; 2474 2474 *size = 4; 2475 2475 break; 2476 + case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK: 2477 + *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100; 2478 + *size = 4; 2479 + break; 2480 + case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK: 2481 + *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100; 2482 + *size = 4; 2483 + break; 2476 2484 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: 2477 2485 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data); 2478 2486 *size = 8;
-5
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
··· 244 244 enum smu_clk_type clk_type, 245 245 struct smu_13_0_dpm_table *single_dpm_table); 246 246 247 - int smu_v13_0_get_dpm_level_range(struct smu_context *smu, 248 - enum smu_clk_type clk_type, 249 - uint32_t *min_value, 250 - uint32_t *max_value); 251 - 252 247 int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu); 253 248 254 249 int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu);
+2 -41
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
··· 1261 1261 uint32_t speed) 1262 1262 { 1263 1263 struct amdgpu_device *adev = smu->adev; 1264 - uint32_t tach_period, crystal_clock_freq; 1264 + uint32_t crystal_clock_freq = 2500; 1265 + uint32_t tach_period; 1265 1266 int ret; 1266 1267 1267 1268 if (!speed) ··· 1272 1271 if (ret) 1273 1272 return ret; 1274 1273 1275 - crystal_clock_freq = amdgpu_asic_get_xclk(adev); 1276 1274 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); 1277 1275 WREG32_SOC15(THM, 0, regCG_TACH_CTRL, 1278 1276 REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL), ··· 2062 2062 } 2063 2063 2064 2064 return 0; 2065 - } 2066 - 2067 - int smu_v13_0_get_dpm_level_range(struct smu_context *smu, 2068 - enum smu_clk_type clk_type, 2069 - uint32_t *min_value, 2070 - uint32_t *max_value) 2071 - { 2072 - uint32_t level_count = 0; 2073 - int ret = 0; 2074 - 2075 - if (!min_value && !max_value) 2076 - return -EINVAL; 2077 - 2078 - if (min_value) { 2079 - /* by default, level 0 clock value as min value */ 2080 - ret = smu_v13_0_get_dpm_freq_by_index(smu, 2081 - clk_type, 2082 - 0, 2083 - min_value); 2084 - if (ret) 2085 - return ret; 2086 - } 2087 - 2088 - if (max_value) { 2089 - ret = smu_v13_0_get_dpm_level_count(smu, 2090 - clk_type, 2091 - &level_count); 2092 - if (ret) 2093 - return ret; 2094 - 2095 - ret = smu_v13_0_get_dpm_freq_by_index(smu, 2096 - clk_type, 2097 - level_count - 1, 2098 - max_value); 2099 - if (ret) 2100 - return ret; 2101 - } 2102 - 2103 - return ret; 2104 2065 } 2105 2066 2106 2067 int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu)
+4 -2
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
··· 213 213 FEA_MAP(SOC_PCC), 214 214 [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, 215 215 [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, 216 + [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT}, 216 217 }; 217 218 218 219 static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = { ··· 241 240 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), 242 241 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), 243 242 WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), 243 + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT), 244 244 }; 245 245 246 246 static const uint8_t smu_v13_0_0_throttler_map[] = { ··· 1557 1555 title[0], title[1], title[2], title[3], title[4], title[5], 1558 1556 title[6], title[7], title[8], title[9]); 1559 1557 1560 - for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { 1558 + for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { 1561 1559 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1562 1560 workload_type = smu_cmn_to_asic_specific_index(smu, 1563 1561 CMN2ASIC_MAPPING_WORKLOAD, ··· 1619 1617 1620 1618 smu->power_profile_mode = input[size]; 1621 1619 1622 - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { 1620 + if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { 1623 1621 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); 1624 1622 return -EINVAL; 1625 1623 }
+1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
··· 192 192 FEA_MAP(SOC_PCC), 193 193 [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, 194 194 [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, 195 + [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT}, 195 196 }; 196 197 197 198 static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
+5 -5
drivers/gpu/drm/radeon/atombios.h
··· 4020 4020 USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH 4021 4021 USHORT usConnObjectId; //Connector Object ID 4022 4022 USHORT usGPUObjectId; //GPU ID 4023 - USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. 4023 + USHORT usGraphicObjIds[]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. 4024 4024 }ATOM_DISPLAY_OBJECT_PATH; 4025 4025 4026 4026 typedef struct _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH ··· 4037 4037 UCHAR ucNumOfDispPath; 4038 4038 UCHAR ucVersion; 4039 4039 UCHAR ucPadding[2]; 4040 - ATOM_DISPLAY_OBJECT_PATH asDispPath[1]; 4040 + ATOM_DISPLAY_OBJECT_PATH asDispPath[]; 4041 4041 }ATOM_DISPLAY_OBJECT_PATH_TABLE; 4042 4042 4043 4043 ··· 4053 4053 { 4054 4054 UCHAR ucNumberOfObjects; 4055 4055 UCHAR ucPadding[3]; 4056 - ATOM_OBJECT asObjects[1]; 4056 + ATOM_OBJECT asObjects[]; 4057 4057 }ATOM_OBJECT_TABLE; 4058 4058 4059 4059 typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset pointing to this structure ··· 4615 4615 UCHAR ucPhaseDelay; // phase delay in unit of micro second 4616 4616 UCHAR ucReserved; 4617 4617 ULONG ulGpioMaskVal; // GPIO Mask value 4618 - VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1]; 4618 + VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[]; 4619 4619 }ATOM_GPIO_VOLTAGE_OBJECT_V3; 4620 4620 4621 4621 typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 ··· 7964 7964 7965 7965 typedef struct { 7966 7966 VFCT_IMAGE_HEADER VbiosHeader; 7967 - UCHAR VbiosContent[1]; 7967 + UCHAR VbiosContent[]; 7968 7968 }GOP_VBIOS_CONTENT; 7969 7969 7970 7970 typedef struct {
+6
include/drm/display/drm_dp.h
··· 603 603 604 604 #define DP_DOWNSPREAD_CTRL 0x107 605 605 # define DP_SPREAD_AMP_0_5 (1 << 4) 606 + # define DP_FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE (1 << 6) 606 607 # define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */ 607 608 608 609 #define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 ··· 1105 1104 # define DP_VSC_EXT_VESA_SDP_CHAINING_SUPPORTED (1 << 5) /* DP 1.4 */ 1106 1105 # define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */ 1107 1106 # define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */ 1107 + 1108 + #define DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1 0x2214 /* 2.0 E11 */ 1109 + # define DP_ADAPTIVE_SYNC_SDP_SUPPORTED (1 << 0) 1110 + # define DP_AS_SDP_FIRST_HALF_LINE_OR_3840_PIXEL_CYCLE_WINDOW_NOT_SUPPORTED (1 << 1) 1111 + # define DP_VSC_EXT_SDP_FRAMEWORK_VERSION_1_SUPPORTED (1 << 4) 1108 1112 1109 1113 #define DP_128B132B_SUPPORTED_LINK_RATES 0x2215 /* 2.0 */ 1110 1114 # define DP_UHBR10 (1 << 0)
+6
include/uapi/drm/amdgpu_drm.h
··· 832 832 #define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK 0x8 833 833 /* Subquery id: Query GPU stable pstate memory clock */ 834 834 #define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK 0x9 835 + /* Subquery id: Query GPU peak pstate shader clock */ 836 + #define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK 0xa 837 + /* Subquery id: Query GPU peak pstate memory clock */ 838 + #define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK 0xb 835 839 /* Number of VRAM page faults on CPU access. */ 836 840 #define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E 837 841 #define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F ··· 1111 1107 __u32 pa_sc_tile_steering_override; 1112 1108 /* disabled TCCs */ 1113 1109 __u64 tcc_disabled_mask; 1110 + __u64 min_engine_clock; 1111 + __u64 min_memory_clock; 1114 1112 }; 1115 1113 1116 1114 struct drm_amdgpu_info_hw_ip {